Dataset schema (one row per source file):

| column | type | details |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
Row 1 | hexsha: b549ebf1f575ae1b2ca0a4f54c933a69dae4422d | size: 9,154 | ext: py | lang: Python
max_stars / max_issues / max_forks repo fields: path tests/test_models.py | repo jscook2345/squad-client | head 3b40e3f2a2846c6567a03676a18e3d99eed3d75d | licenses ["MIT"]
max_stars_count: 3 (2020-03-30T18:34:57.000Z to 2020-04-12T07:51:25.000Z) | max_issues_count: 127 (2020-03-05T23:20:37.000Z to 2022-03-23T18:44:19.000Z) | max_forks_count: 17 (2020-02-14T14:37:38.000Z to 2022-03-17T15:59:56.000Z)

content:

import unittest
from . import settings
from squad_client.core.api import SquadApi
from squad_client.core.models import Squad, ALL, Project
from squad_client.utils import first
SquadApi.configure(url='http://localhost:%s' % settings.DEFAULT_SQUAD_PORT)
class SquadTest(unittest.TestCase):
def setUp(self):
self.squad = Squad()
def test_groups(self):
groups = self.squad.groups()
self.assertTrue(True, len(groups))
def test_not_found_groups(self):
groups = self.squad.groups(name__startswith='no group with this name')
self.assertEqual(0, len(groups))
def test_groups_with_count(self):
all_groups = self.squad.groups(count=ALL)
self.assertEqual(2, len(all_groups))
one_groups = self.squad.groups(count=1)
self.assertEqual(1, len(one_groups))
def test_not_found_group(self):
not_found_group = self.squad.group('this-group-does-not-really-exist')
self.assertEqual(None, not_found_group)
def test_group(self):
group = self.squad.group('my_group')
self.assertTrue(group is not None)
def test_projects(self):
projects = self.squad.projects()
self.assertTrue(True, len(projects))
def test_builds(self):
builds = self.squad.builds()
self.assertTrue(True, len(builds))
def test_testjobs(self):
testjobs = self.squad.testjobs()
self.assertTrue(True, len(testjobs))
def test_testruns(self):
testruns = self.squad.testruns()
self.assertTrue(True, len(testruns))
def test_tests(self):
tests = self.squad.tests()
self.assertTrue(True, len(tests))
def test_suites(self):
suites = self.squad.suites()
self.assertTrue(True, len(suites))
def test_environments(self):
environments = self.squad.environments()
self.assertTrue(True, len(environments))
def test_backends(self):
backends = self.squad.backends()
self.assertTrue(True, len(backends))
def test_emailtemplates(self):
emailtemplates = self.squad.emailtemplates()
self.assertTrue(True, len(emailtemplates))
def test_knownissues(self):
knownissues = self.squad.knownissues()
self.assertTrue(True, len(knownissues))
def test_suitemetadata(self):
suitemetadata = self.squad.suitemetadata()
self.assertTrue(True, len(suitemetadata))
def test_annotations(self):
annotations = self.squad.annotations()
self.assertTrue(True, len(annotations))
def test_metricthresholds(self):
metricthresholds = self.squad.metricthresholds()
self.assertTrue(True, len(metricthresholds))
def test_reports(self):
reports = self.squad.reports()
self.assertTrue(True, len(reports))
class BuildTest(unittest.TestCase):
def setUp(self):
self.build = first(Squad().builds(version='my_build'))
def test_basic(self):
self.assertTrue(self.build is not None)
def test_build_metadata(self):
metadata = self.build.metadata
self.assertTrue(metadata.__id__ != '')
def test_build_tests(self):
tests = self.build.tests().values()
self.assertEqual(4, len(tests))
def test_build_tests_per_environment(self):
tests = self.build.tests(environment__slug='my_env').values()
self.assertEqual(4, len(tests))
def test_build_tests_per_environment_not_found(self):
tests = self.build.tests(environment__slug='mynonexistentenv').values()
self.assertEqual(0, len(tests))
def test_build_tests_change_cache_on_different_filters(self):
tests = self.build.tests(environment__slug='my_env').values()
self.assertEqual(4, len(tests))
tests = self.build.tests(environment__slug='mynonexistentenv').values()
self.assertEqual(0, len(tests))
def test_build_metrics(self):
tests = self.build.metrics().values()
self.assertEqual(1, len(tests))
def test_build_metrics_per_environment(self):
tests = self.build.metrics(environment__slug='my_env').values()
self.assertEqual(1, len(tests))
def test_build_metrics_per_environment_not_found(self):
tests = self.build.metrics(environment__slug='mynonexistentenv').values()
self.assertEqual(0, len(tests))
def test_build_metrics_change_cache_on_different_filters(self):
tests = self.build.metrics(environment__slug='my_env').values()
self.assertEqual(1, len(tests))
tests = self.build.metrics(environment__slug='mynonexistentenv').values()
self.assertEqual(0, len(tests))
class TestRunTest(unittest.TestCase):
def setUp(self):
self.testruns = Squad().testruns(count=2)
self.testrun = self.testruns[1]
self.testrun_no_metadata = self.testruns[2]
def test_basic(self):
self.assertTrue(self.testrun is not None)
def test_testrun_metadata(self):
self.assertTrue(self.testrun.metadata_file is not None)
self.assertTrue(self.testrun.metadata is not None)
self.assertEqual(self.testrun.metadata.foo, "bar")
self.assertTrue(self.testrun_no_metadata.metadata_file is not None)
self.assertTrue(self.testrun_no_metadata.metadata is None)
def test_testrun_status(self):
status = self.testrun.summary()
self.assertEqual(1, status.tests_fail)
class ProjectTest(unittest.TestCase):
def setUp(self):
SquadApi.configure(url='http://localhost:%s' % settings.DEFAULT_SQUAD_PORT, token='193cd8bb41ab9217714515954e8724f651ef8601')
self.project = first(Squad().projects(slug='my_project'))
self.build = first(Squad().builds(version='my_build'))
self.build2 = first(Squad().builds(version='my_build2'))
def test_basic(self):
self.assertTrue(self.project is not None)
def test_project_environments(self):
environments = self.project.environments()
self.assertEqual(2, len(environments))
environment = self.project.environment('my_env')
self.assertEqual(environment.slug, 'my_env')
def test_project_suites(self):
suites = self.project.suites()
self.assertEqual(2, len(suites))
suite = self.project.suite('my_suite')
self.assertEqual(suite.slug, 'my_suite')
def test_project_thresholds(self):
thresholds = self.project.thresholds()
self.assertEqual(1, len(thresholds))
threshold = first(thresholds)
self.assertEqual(threshold.name, 'my-threshold')
def test_compare_builds_from_same_project(self):
# tests
comparison = self.project.compare_builds(self.build2.id, self.build.id)
self.assertEqual('Cannot report regressions/fixes on non-finished builds', comparison[0])
# metrics
comparison = self.project.compare_builds(self.build2.id, self.build.id, by="metrics")
self.assertEqual('Cannot report regressions/fixes on non-finished builds', comparison[0])
def test_compare_builds_from_same_project_force(self):
comparison = self.project.compare_builds(self.build2.id, self.build.id, force=True)
self.assertEqual({}, comparison['regressions'])
self.assertEqual({}, comparison['fixes'])
comparison = self.project.compare_builds(self.build2.id, self.build.id, by="metrics", force=True)
self.assertEqual({}, comparison['regressions'])
self.assertEqual({}, comparison['fixes'])
def test_create_project(self):
group = Squad().group('my_group')
slug = 'test-create-project'
new_project = Project()
new_project.slug = slug
new_project.group = group
new_project.enabled_plugins_list = ['linux-log-parser']
new_project.save()
check_project = first(Squad().projects(slug=slug, group__slug=group.slug))
self.assertEqual(new_project.id, check_project.id)
new_project.delete()
def test_save_project_settings(self):
settings = 'SETTING: value'
self.project.project_settings = settings
self.project.save()
project = first(Squad().projects(slug=self.project.slug))
self.assertTrue(project is not None)
class GroupTest(unittest.TestCase):
def setUp(self):
SquadApi.configure(url='http://localhost:%s' % settings.DEFAULT_SQUAD_PORT, token='193cd8bb41ab9217714515954e8724f651ef8601')
self.group = first(Squad().groups(slug='my_group'))
def test_create_project(self):
project_slug = 'test-create-project2'
self.group.create_project(slug=project_slug)
check_project = Squad().projects(slug=project_slug, group__slug=self.group.slug)
self.assertEqual(1, len(check_project))
p = first(check_project)
p.delete()
class SuiteTest(unittest.TestCase):
def setUp(self):
self.suite = first(Squad().suites(slug='my_suite'))
def test_basic(self):
self.assertTrue(self.suite is not None)
def test_suite_tests(self):
tests = self.suite.tests()
self.assertEqual(4, len(tests))
avg_line_length: 33.654412 | max_line_length: 133 | alphanum_fraction: 0.680686
Row 2 | hexsha: 6c703fa3f75fa753ff170907be8ba14750272fda | size: 62,752 | ext: py | lang: Python
max_stars repo fields: path Lib/importlib/_bootstrap_external.py | repo gaurav1086/cpython | head 10355ed7f132ed10f1e0d8bd64ccb744b86b1cce | licenses ["CNRI-Python-GPL-Compatible"] | max_stars_count: 1 (2021-05-06T19:55:27.000Z to 2021-05-06T19:55:27.000Z)
max_issues / max_forks repo fields: path Lib/importlib/_bootstrap_external.py | repo doc22940/cpython | head 65ecc390c1fa5acdd6348ae3f9843bbdcd8870d1 | licenses ["CNRI-Python-GPL-Compatible"] | max_issues_count, max_forks_count and their event datetimes: null

content:

"""Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
# `make regen-importlib` followed by `make` in order to get the frozen version
# of the module updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module in the early
# stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY
+ _CASE_INSENSITIVE_PLATFORMS_STR_KEY)
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY):
key = 'PYTHONCASEOK'
else:
key = b'PYTHONCASEOK'
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return key in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _pack_uint32(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _unpack_uint32(data):
"""Convert 4 bytes in little-endian to an integer."""
assert len(data) == 4
return int.from_bytes(data, 'little')
def _unpack_uint16(data):
"""Convert 2 bytes in little-endian to an integer."""
assert len(data) == 2
return int.from_bytes(data, 'little')
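# Illustrative note (editor's sketch, not part of the original module): these
# helpers fix the little-endian layout used by the pyc header fields, e.g.
#   _pack_uint32(1)                     == b'\x01\x00\x00\x00'
#   _unpack_uint32(b'\x01\x00\x00\x00') == 1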
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _path_isabs(path):
"""Replacement for os.path.isabs.
Considers a Windows drive-relative path (no drive, but starts with slash) to
still be "absolute".
"""
return path.startswith(path_separators) or path[1:3] in _pathseps_with_colon
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
_code_type = type(_write_atomic.__code__)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT
#3021)
# Python 3.1a1: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD #2183)
# Python 3.1a1: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
#4715)
# Python 3.2a1: 3160 (add SETUP_WITH #6101)
# tag: cpython-32
# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225)
# tag: cpython-32
# Python 3.2a3 3180 (add DELETE_DEREF #4617)
# Python 3.3a1 3190 (__class__ super closure changed)
# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448)
# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645)
# Python 3.3a2 3220 (changed PEP 380 implementation #14230)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults #16967)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars #17853)
# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation #19301)
# Python 3.4a4 3300 (more changes to __qualname__ computation #19301)
# Python 3.4rc2 3310 (alter __qualname__ computation #20625)
# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176)
# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292)
# Python 3.5b2 3340 (fix dictionary display evaluation order #11205)
# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400)
# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286)
# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483)
# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107)
# Python 3.6a2 3370 (16 bit wordcode #26647)
# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140)
# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE
# #27095)
# Python 3.6b1 3373 (add BUILD_STRING opcode #27078)
# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes
# #27985)
# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL
#27213)
# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722)
# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257)
# Python 3.6rc1 3379 (more thorough __class__ validation #23722)
# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110)
# Python 3.7a2 3391 (update GET_AITER #31709)
# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650)
# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550)
# Python 3.7b5 3394 (restored docstring as the first stmt in the body;
# this might affected the first line number #32911)
# Python 3.8a1 3400 (move frame block handling to compiler #17611)
# Python 3.8a1 3401 (add END_ASYNC_FOR #33041)
# Python 3.8a1 3410 (PEP570 Python Positional-Only Parameters #36540)
# Python 3.8b2 3411 (Reverse evaluation order of key: value in dict
# comprehensions #35224)
# Python 3.8b2 3412 (Swap the position of positional args and positional
# only args in ast.arguments #37593)
# Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830)
# Python 3.9a0 3420 (add LOAD_ASSERTION_ERROR #34880)
# Python 3.9a0 3421 (simplified bytecode for with blocks #32949)
# Python 3.9a0 3422 (remove BEGIN_FINALLY, END_FINALLY, CALL_FINALLY, POP_FINALLY bytecodes #33387)
# Python 3.9a2 3423 (add IS_OP, CONTAINS_OP and JUMP_IF_NOT_EXC_MATCH bytecodes #39156)
# Python 3.9a2 3424 (simplify bytecodes for *value unpacking)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
#
# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
# in PC/launcher.c must also be updated.
MAGIC_NUMBER = (3424).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
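# Illustrative note (editor's sketch): with the value above,
#   MAGIC_NUMBER == b'\x60\x0d\x0d\x0a'
# so every pyc written by this interpreter starts with those four bytes, and
# _classify_pyc() below rejects any file whose first four bytes differ.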
_PYCACHE = '__pycache__'
_OPT = 'opt-'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
BYTECODE_SUFFIXES = ['.pyc']
# Deprecated.
DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES
def cache_from_source(path, debug_override=None, *, optimization=None):
"""Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
The 'optimization' parameter controls the presumed optimization level of
the bytecode file. If 'optimization' is not None, the string representation
of the argument is taken and verified to be alphanumeric (else ValueError
is raised).
The debug_override parameter is deprecated. If debug_override is not None,
a True value is the same as setting 'optimization' to the empty string
while a False value is equivalent to setting 'optimization' to '1'.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if debug_override is not None:
_warnings.warn('the debug_override parameter is deprecated; use '
"'optimization' instead", DeprecationWarning)
if optimization is not None:
message = 'debug_override or optimization must be set to None'
raise TypeError(message)
optimization = '' if debug_override else 1
path = _os.fspath(path)
head, tail = _path_split(path)
base, sep, rest = tail.rpartition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
almost_filename = ''.join([(base if base else rest), sep, tag])
if optimization is None:
if sys.flags.optimize == 0:
optimization = ''
else:
optimization = sys.flags.optimize
optimization = str(optimization)
if optimization != '':
if not optimization.isalnum():
raise ValueError('{!r} is not alphanumeric'.format(optimization))
almost_filename = '{}.{}{}'.format(almost_filename, _OPT, optimization)
filename = almost_filename + BYTECODE_SUFFIXES[0]
if sys.pycache_prefix is not None:
# We need an absolute path to the py file to avoid the possibility of
# collisions within sys.pycache_prefix, if someone has two different
# `foo/bar.py` on their system and they import both of them using the
# same sys.pycache_prefix. Let's say sys.pycache_prefix is
# `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first
# make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative
# (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an
# unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`.
if not _path_isabs(head):
head = _path_join(_os.getcwd(), head)
# Strip initial drive from a Windows path. We know we have an absolute
# path here, so the second part of the check rules out a POSIX path that
# happens to contain a colon at the second character.
if head[1] == ':' and head[0] not in path_separators:
head = head[2:]
# Strip initial path separator from `head` to complete the conversion
# back to a root-relative path before joining.
return _path_join(
sys.pycache_prefix,
head.lstrip(path_separators),
filename,
)
return _path_join(head, _PYCACHE, filename)
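# Illustrative example (editor's sketch; the paths are hypothetical, the
# 'cpython-39' tag stands in for sys.implementation.cache_tag, and a
# non-optimized interpreter with sys.pycache_prefix == None is assumed):
#   cache_from_source('/project/app/utils.py')
#     -> '/project/app/__pycache__/utils.cpython-39.pyc'
#   cache_from_source('/project/app/utils.py', optimization=1)
#     -> '/project/app/__pycache__/utils.cpython-39.opt-1.pyc'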
def source_from_cache(path):
"""Given the path to a .pyc. file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147/488 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
path = _os.fspath(path)
head, pycache_filename = _path_split(path)
found_in_pycache_prefix = False
if sys.pycache_prefix is not None:
stripped_path = sys.pycache_prefix.rstrip(path_separators)
if head.startswith(stripped_path + path_sep):
head = head[len(stripped_path):]
found_in_pycache_prefix = True
if not found_in_pycache_prefix:
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError(f'{_PYCACHE} not bottom-level directory in '
f'{path!r}')
dot_count = pycache_filename.count('.')
if dot_count not in {2, 3}:
raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}')
elif dot_count == 3:
optimization = pycache_filename.rsplit('.', 2)[-2]
if not optimization.startswith(_OPT):
raise ValueError("optimization portion of filename does not start "
f"with {_OPT!r}")
opt_level = optimization[len(_OPT):]
if not opt_level.isalnum():
raise ValueError(f"optimization level {optimization!r} is not an "
"alphanumeric value")
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
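# Illustrative example (editor's sketch, the inverse of the mapping above;
# paths and cache tag are hypothetical):
#   source_from_cache('/project/app/__pycache__/utils.cpython-39.pyc')
#     -> '/project/app/utils.py'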
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _get_cached(filename):
if filename.endswith(tuple(SOURCE_SUFFIXES)):
try:
return cache_from_source(filename)
except NotImplementedError:
pass
elif filename.endswith(tuple(BYTECODE_SUFFIXES)):
return filename
else:
return None
def _calc_mode(path):
"""Calculate the mode permissions for a bytecode file."""
try:
mode = _path_stat(path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return mode
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
The first argument (self) must define _name which the second argument is
compared against. If the comparison fails then ImportError is raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader for %s cannot handle %s' %
(self.name, name), name=name)
return method(self, name, *args, **kwargs)
try:
_wrap = _bootstrap._wrap
except NameError:
# XXX yuck
def _wrap(new, old):
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
def _classify_pyc(data, name, exc_details):
"""Perform basic validity checking of a pyc header and return the flags field,
which determines how the pyc should be further validated against the source.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required, though.)
*name* is the name of the module being imported. It is used for logging.
*exc_details* is a dictionary passed to ImportError if it is raised, for
improved debugging.
ImportError is raised when the magic number is incorrect or when the flags
field is invalid. EOFError is raised when the data is found to be truncated.
"""
magic = data[:4]
if magic != MAGIC_NUMBER:
message = f'bad magic number in {name!r}: {magic!r}'
_bootstrap._verbose_message('{}', message)
raise ImportError(message, **exc_details)
if len(data) < 16:
message = f'reached EOF while reading pyc header of {name!r}'
_bootstrap._verbose_message('{}', message)
raise EOFError(message)
flags = _unpack_uint32(data[4:8])
# Only the first two flags are defined.
if flags & ~0b11:
message = f'invalid flags {flags!r} in {name!r}'
raise ImportError(message, **exc_details)
return flags
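# Illustrative note (editor's sketch summarizing the 16-byte header checked
# above, per PEP 552): bytes 0-3 hold MAGIC_NUMBER, bytes 4-7 the flags word,
# and bytes 8-15 either (source mtime, source size) for timestamp-based pycs
# (flags == 0) or the 8-byte source hash for hash-based pycs (flag bit 0 set).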
def _validate_timestamp_pyc(data, source_mtime, source_size, name,
exc_details):
"""Validate a pyc against the source last-modified time.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required.)
*source_mtime* is the last modified timestamp of the source file.
*source_size* is None or the size of the source file in bytes.
*name* is the name of the module being imported. It is used for logging.
*exc_details* is a dictionary passed to ImportError if it is raised, for
improved debugging.
An ImportError is raised if the bytecode is stale.
"""
if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF):
message = f'bytecode is stale for {name!r}'
_bootstrap._verbose_message('{}', message)
raise ImportError(message, **exc_details)
if (source_size is not None and
_unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)):
raise ImportError(f'bytecode is stale for {name!r}', **exc_details)
def _validate_hash_pyc(data, source_hash, name, exc_details):
"""Validate a hash-based pyc by checking the real source hash against the one in
the pyc header.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required.)
*source_hash* is the importlib.util.source_hash() of the source file.
*name* is the name of the module being imported. It is used for logging.
*exc_details* is a dictionary passed to ImportError if it is raised, for
improved debugging.
An ImportError is raised if the bytecode is stale.
"""
if data[8:16] != source_hash:
raise ImportError(
f'hash in bytecode doesn\'t match hash of source {name!r}',
**exc_details,
)
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as found in a pyc."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_bootstrap._verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path)
def _code_to_timestamp_pyc(code, mtime=0, source_size=0):
"Produce the data for a timestamp-based pyc."
data = bytearray(MAGIC_NUMBER)
data.extend(_pack_uint32(0))
data.extend(_pack_uint32(mtime))
data.extend(_pack_uint32(source_size))
data.extend(marshal.dumps(code))
return data
def _code_to_hash_pyc(code, source_hash, checked=True):
"Produce the data for a hash-based pyc."
data = bytearray(MAGIC_NUMBER)
flags = 0b1 | checked << 1
data.extend(_pack_uint32(flags))
assert len(source_hash) == 8
data.extend(source_hash)
data.extend(marshal.dumps(code))
return data
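# Illustrative note (editor's sketch): for the writers above, a timestamp pyc
# carries flags == 0, a checked hash-based pyc flags == 0b11, and an unchecked
# hash-based pyc flags == 0b01, matching the bits read back by _classify_pyc().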
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
import tokenize # To avoid bootstrap issues.
source_bytes_readline = _io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
# Module specifications #######################################################
_POPULATE = object()
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
else:
location = _os.fspath(location)
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = _bootstrap.ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
# Set submodule_search_paths appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
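# Illustrative usage (editor's sketch; the module name and path are
# hypothetical, and this helper is normally reached through
# importlib.util.spec_from_file_location):
#   spec = spec_from_file_location('mymod', '/tmp/mymod.py')
#   module = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(module)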
# Loaders #####################################################################
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry."""
REGISTRY_KEY = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}')
REGISTRY_KEY_DEBUG = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}\\Debug')
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except OSError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version='%d.%d' % sys.version_info[:2])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@classmethod
def find_spec(cls, fullname, path=None, target=None):
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_path_stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
spec = _bootstrap.spec_from_loader(fullname,
loader(fullname, filepath),
origin=filepath)
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry.
This method is deprecated. Use exec_module() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is not None:
return spec.loader
else:
return None
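# Illustrative note (editor's sketch; 'spam' is a hypothetical module name):
# for a release build, _search_registry('spam') on Python 3.9 queries the
# per-user, then per-machine, key
#   Software\Python\PythonCore\3.9\Modules\spam
# and returns the file path stored there, or None if the key does not exist.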
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def create_module(self, spec):
"""Use default semantics for module creation."""
def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_bootstrap._call_with_frames_removed(exec, code, module.__dict__)
def load_module(self, fullname):
"""This module is deprecated."""
return _bootstrap._load_module_shim(self, fullname)
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path (a str).
Raises OSError when the path cannot be handled.
"""
raise OSError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified
path (a str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises OSError when the path cannot be handled.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
The source path is needed in order to correctly transfer permissions
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError('source not available through get_data()',
name=fullname) from exc
return decode_source(source_bytes)
def source_to_code(self, data, path, *, _optimize=-1):
"""Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
return _bootstrap._call_with_frames_removed(compile, data, path, 'exec',
dont_inherit=True, optimize=_optimize)
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
source_bytes = None
source_hash = None
hash_based = False
check_source = True
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except OSError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
exc_details = {
'name': fullname,
'path': bytecode_path,
}
try:
flags = _classify_pyc(data, fullname, exc_details)
bytes_data = memoryview(data)[16:]
hash_based = flags & 0b1 != 0
if hash_based:
check_source = flags & 0b10 != 0
if (_imp.check_hash_based_pycs != 'never' and
(check_source or
_imp.check_hash_based_pycs == 'always')):
source_bytes = self.get_data(source_path)
source_hash = _imp.source_hash(
_RAW_MAGIC_NUMBER,
source_bytes,
)
_validate_hash_pyc(data, source_hash, fullname,
exc_details)
else:
_validate_timestamp_pyc(
data,
source_mtime,
st['size'],
fullname,
exc_details,
)
except (ImportError, EOFError):
pass
else:
_bootstrap._verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
if source_bytes is None:
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_bootstrap._verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
if hash_based:
if source_hash is None:
source_hash = _imp.source_hash(source_bytes)
data = _code_to_hash_pyc(code_object, source_hash, check_source)
else:
data = _code_to_timestamp_pyc(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
except NotImplementedError:
pass
return code_object
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
# The only reason for this method is for the name check.
# Issue #14857: Avoid the zero-argument form of super so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
if isinstance(self, (SourceLoader, ExtensionFileLoader)):
with _io.open_code(str(path)) as file:
return file.read()
else:
with _io.FileIO(path, 'r') as file:
return file.read()
# ResourceReader ABC API.
@_check_name
def get_resource_reader(self, module):
if self.is_package(module):
return self
return None
def open_resource(self, resource):
path = _path_join(_path_split(self.path)[0], resource)
return _io.FileIO(path, 'r')
def resource_path(self, resource):
if not self.is_resource(resource):
raise FileNotFoundError
path = _path_join(_path_split(self.path)[0], resource)
return path
def is_resource(self, name):
if path_sep in name:
return False
path = _path_join(_path_split(self.path)[0], name)
return _path_isfile(path)
def contents(self):
return iter(_os.listdir(_path_split(self.path)[0]))
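# Illustrative usage (editor's sketch; 'pkg' and 'data.txt' are hypothetical):
# for a package, the loader itself acts as the ResourceReader, e.g.
#   reader = loader.get_resource_reader('pkg')
#   reader.is_resource('data.txt')      # True if data.txt sits next to __init__
#   reader.open_resource('data.txt')    # binary FileIO opened from that directory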
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_bootstrap._verbose_message('could not create {!r}: {!r}',
parent, exc)
return
try:
_write_atomic(path, data, _mode)
_bootstrap._verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_bootstrap._verbose_message('could not create {!r}: {!r}', path,
exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
# Call _classify_pyc to do basic validation of the pyc but ignore the
# result. There's no source to check against.
exc_details = {
'name': fullname,
'path': path,
}
_classify_pyc(data, fullname, exc_details)
return _compile_bytecode(
memoryview(data)[16:],
name=fullname,
bytecode_path=path,
)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader(FileLoader, _LoaderBasics):
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
def create_module(self, spec):
"""Create an unitialized extension module"""
module = _bootstrap._call_with_frames_removed(
_imp.create_dynamic, spec)
_bootstrap._verbose_message('extension module {!r} loaded from {!r}',
spec.name, self.path)
return module
def exec_module(self, module):
"""Initialize an extension module"""
_bootstrap._call_with_frames_removed(_imp.exec_dynamic, module)
_bootstrap._verbose_message('extension module {!r} executed from {!r}',
self.name, self.path)
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
spec = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if spec is not None and spec.loader is None:
if spec.submodule_search_locations:
self._path = spec.submodule_search_locations
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __getitem__(self, index):
return self._recalculate()[index]
def __setitem__(self, index, path):
self._path[index] = path
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return '_NamespacePath({!r})'.format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
# We use this exclusively in module_from_spec() for backward-compatibility.
class _NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (namespace)>'.format(module.__name__)
def is_package(self, fullname):
return True
def get_source(self, fullname):
return ''
def get_code(self, fullname):
return compile('', '<string>', 'exec', dont_inherit=True)
def create_module(self, spec):
"""Use default semantics for module creation."""
def exec_module(self, module):
pass
def load_module(self, fullname):
"""Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
# The import system never calls this method.
_bootstrap._verbose_message('namespace module loaded with path {!r}',
self._path)
return _bootstrap._load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for name, finder in list(sys.path_importer_cache.items()):
if finder is None:
del sys.path_importer_cache[name]
elif hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sys.path_hooks for a finder for 'path'."""
if sys.path_hooks is not None and not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
try:
path = _os.getcwd()
except FileNotFoundError:
# Don't cache the failure as the cwd can easily change to
# a valid directory later on.
return None
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return _bootstrap.spec_from_loader(fullname, loader)
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""Try to find a spec for 'fullname' on sys.path or 'path'.
The search is based on sys.path_hooks and sys.path_importer_cache.
"""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a spec which
# can create the namespace package.
spec.origin = None
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
@classmethod
def find_distributions(cls, *args, **kwargs):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
from importlib.metadata import MetadataPathFinder
return MetadataPathFinder.find_distributions(*args, **kwargs)
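# Illustrative usage (editor's sketch; the module names and path are hypothetical):
#   PathFinder.find_spec('mypackage')                                   # searches sys.path
#   PathFinder.find_spec('mypackage.sub', path=['/path/to/mypackage'])  # searches the given path
# Each call returns a ModuleSpec (or None), resolved through sys.path_hooks and
# sys.path_importer_cache as described above.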
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a spec for the specified module.
Returns the matching spec, or None if not found.
"""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
# Check for a file w/ a proper suffix exists.
for suffix, loader_class in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_bootstrap._verbose_message('trying {}', full_path, verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path,
None, target)
if is_namespace:
_bootstrap._verbose_message('possible namespace for {}', base_path)
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
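# Illustrative note (not part of the bootstrap): the closure returned by
# FileFinder.path_hook() is what _install() below registers on sys.path_hooks.
# A user-level sketch of the same mechanism, using the public
# importlib.machinery names, would look like:
#
#   import sys
#   from importlib.machinery import FileFinder, SourceFileLoader, SOURCE_SUFFIXES
#   hook = FileFinder.path_hook((SourceFileLoader, SOURCE_SUFFIXES))
#   sys.path_hooks.append(hook)
#   finder = hook('/some/directory')      # raises ImportError for non-directories
#   spec = finder.find_spec('mymodule')   # a ModuleSpec, or None if not found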
# Import setup ###############################################################
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def _setup(_bootstrap_module):
"""Setup the path-based importers for importlib by importing needed
built-in modules and injecting them into the global namespace.
Other components are extracted from the core bootstrap module.
"""
global sys, _imp, _bootstrap
_bootstrap = _bootstrap_module
sys = _bootstrap.sys
_imp = _bootstrap._imp
# Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
if builtin_name not in sys.modules:
builtin_module = _bootstrap._builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = _bootstrap._builtin_from_name(builtin_os)
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
setattr(self_module, '_pathseps_with_colon', {f':{s}' for s in path_separators})
# Directly load the _thread module (needed during bootstrap).
thread_module = _bootstrap._builtin_from_name('_thread')
setattr(self_module, '_thread', thread_module)
# Directly load the _weakref module (needed during bootstrap).
weakref_module = _bootstrap._builtin_from_name('_weakref')
setattr(self_module, '_weakref', weakref_module)
# Directly load the winreg module (needed during bootstrap).
if builtin_os == 'nt':
winreg_module = _bootstrap._builtin_from_name('winreg')
setattr(self_module, '_winreg', winreg_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(_bootstrap_module):
"""Install the path-based import components."""
_setup(_bootstrap_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(PathFinder)
| 38.147112 | 105 | 0.624618 |
c371d88057187c656d9f769587a5e69d0b81b2f1 | 915 | py | Python | oscar/lib/python2.7/site-packages/phonenumbers/data/region_GL.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/phonenumbers/data/region_GL.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/phonenumbers/data/region_GL.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | """Auto-generated file, do not edit by hand. GL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GL = PhoneMetadata(id='GL', country_code=299, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-689]\\d{5}', possible_length=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:19|3[1-6]|6[14689]|8[14-79]|9\\d)\\d{4}', example_number='321000', possible_length=(6,)),
mobile=PhoneNumberDesc(national_number_pattern='[245][2-9]\\d{4}', example_number='221234', possible_length=(6,)),
toll_free=PhoneNumberDesc(national_number_pattern='80\\d{4}', example_number='801234', possible_length=(6,)),
voip=PhoneNumberDesc(national_number_pattern='3[89]\\d{4}', example_number='381234', possible_length=(6,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3')])
| 83.181818 | 149 | 0.71694 |
8657d90fe7092bbdb91cfe26101bae5ad4366000 | 808 | py | Python | migrations/versions/816ea3631582_add_topics.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 9 | 2021-01-12T07:28:30.000Z | 2021-12-30T09:27:04.000Z | migrations/versions/816ea3631582_add_topics.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 16 | 2021-03-28T16:31:42.000Z | 2022-03-21T00:18:30.000Z | migrations/versions/816ea3631582_add_topics.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 1 | 2021-07-18T20:49:19.000Z | 2021-07-18T20:49:19.000Z | """add topics
Revision ID: 816ea3631582
Revises: 37a124b0099b
Create Date: 2021-03-13 14:20:10.044131
"""
from alembic import op
import sqlalchemy as sa
import bot
# revision identifiers, used by Alembic.
revision = "816ea3631582"
down_revision = "37a124b0099b"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"topics",
sa.Column("content", sa.Text(), nullable=False),
sa.Column("last_synced_at", bot.database.TIMESTAMP(timezone=True), nullable=True),
sa.PrimaryKeyConstraint("content"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("topics")
# ### end Alembic commands ###
| 23.085714 | 90 | 0.674505 |
7d54fbcd31a227c9893bdf6b082f900c065d73a9 | 220 | py | Python | 210915/Q10951.py | JongGuk/BOJ | dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15 | [
"MIT"
] | null | null | null | 210915/Q10951.py | JongGuk/BOJ | dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15 | [
"MIT"
] | null | null | null | 210915/Q10951.py | JongGuk/BOJ | dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15 | [
"MIT"
] | null | null | null | '''
Two integers A and B will given, please print result of A+B.
Input cases consists with several lines.
'''
import sys
lines = sys.stdin.readlines()
for line in lines:
A, B = map(int, line.split())
print(A+B) | 20 | 60 | 0.677273 |
377e7350c6abf673eb2b63e3adb971b5490c0fe3 | 1,880 | py | Python | connectomics/data/augmentation/__init__.py | mouradbelo/pytorch_connectomics | bbed3a879ba2b4bcfa215eb2aba04b59533e180a | [
"MIT"
] | 1 | 2020-07-22T12:56:09.000Z | 2020-07-22T12:56:09.000Z | connectomics/data/augmentation/__init__.py | mouradbelo/pytorch_connectomics | bbed3a879ba2b4bcfa215eb2aba04b59533e180a | [
"MIT"
] | null | null | null | connectomics/data/augmentation/__init__.py | mouradbelo/pytorch_connectomics | bbed3a879ba2b4bcfa215eb2aba04b59533e180a | [
"MIT"
] | null | null | null | from .composition import Compose
from .augmentor import DataAugment
from .test_augmentor import TestAugmentor
# augmentation methods
from .warp import Elastic
from .grayscale import Grayscale
from .flip import Flip
from .rotation import Rotate
from .rescale import Rescale
from .misalign import MisAlignment
from .missing_section import MissingSection
from .missing_parts import MissingParts
__all__ = ['Compose',
'DataAugment',
'Elastic',
'Grayscale',
'Rotate',
'Rescale',
'MisAlignment',
'MissingSection',
'MissingParts',
'Flip',
'TestAugmentor']
def build_train_augmentor(cfg):
aug_list = []
#1. rotate
if cfg.AUGMENTOR.ROTATE:
aug_list.append(Rotate(p=cfg.AUGMENTOR.ROTATE_P))
#2. rescale
if cfg.AUGMENTOR.RESCALE:
aug_list.append(Rescale(p=cfg.AUGMENTOR.RESCALE_P))
#3. flip
if cfg.AUGMENTOR.FLIP:
aug_list.append(Flip(p=cfg.AUGMENTOR.FLIP_P, do_ztrans=cfg.AUGMENTOR.FLIP_DO_ZTRANS))
#4. elastic
if cfg.AUGMENTOR.ELASTIC:
aug_list.append(Elastic(alpha=cfg.AUGMENTOR.ELASTIC_ALPHA, sigma = cfg.AUGMENTOR.ELASTIC_SIGMA, p=cfg.AUGMENTOR.ELASTIC_P))
#5. grayscale
if cfg.AUGMENTOR.GRAYSCALE:
aug_list.append(Grayscale(p=cfg.AUGMENTOR.GRAYSCALE_P))
#6. missingparts
if cfg.AUGMENTOR.MISSINGPARTS:
aug_list.append(MissingParts(p=cfg.AUGMENTOR.MISSINGPARTS_P))
#7. missingsection
if cfg.AUGMENTOR.MISSINGSECTION:
aug_list.append(MissingSection(p=cfg.AUGMENTOR.MISSINGSECTION_P))
#8. misalignment
if cfg.AUGMENTOR.MISALIGNMENT:
aug_list.append(MisAlignment(p=cfg.AUGMENTOR.MISALIGNMENT_P, displacement=cfg.AUGMENTOR.MISALIGNMENT_DISPLACEMENT))
augmentor = Compose(aug_list, input_size = cfg.MODEL.INPUT_SIZE)
return augmentor
| 32.413793 | 131 | 0.704255 |
ab758a9ff77cc679fdb6f1d327c1e927e36f7ddb | 423 | py | Python | spikey/snn/__init__.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | 4 | 2021-02-25T20:53:41.000Z | 2022-01-18T15:27:07.000Z | spikey/snn/__init__.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | 5 | 2021-03-06T05:35:10.000Z | 2021-03-31T09:27:57.000Z | spikey/snn/__init__.py | SpikeyCNS/spikey | 03a49073491974eff01bc017fd8eadb822e13f0d | [
"MIT"
] | null | null | null | """
SNN __init__.py
"""
try:
import spikey.snn.input
import spikey.snn.neuron
import spikey.snn.readout
import spikey.snn.reward
import spikey.snn.synapse
import spikey.snn.weight
from spikey.snn.network import Network, RLNetwork, ActiveRLNetwork
from spikey.snn.modifier import Modifier, LinearDecay, DropOff
except ImportError as e:
raise ImportError(f"snn/__init__.py failed: {e}")
| 24.882353 | 70 | 0.732861 |
7334e809c1322db094a33f4a6c913d1cd0b7a466 | 292 | py | Python | misc/tempConversion.py | cmulliss/turtles-doing-things | 70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf | [
"CC0-1.0"
] | null | null | null | misc/tempConversion.py | cmulliss/turtles-doing-things | 70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf | [
"CC0-1.0"
] | null | null | null | misc/tempConversion.py | cmulliss/turtles-doing-things | 70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf | [
"CC0-1.0"
] | null | null | null | # program to convert degrees f to degrees c
# need to use (degF - 32) * 5/9
user = input('Hello, what is your name? ')
print('Hello', user)
degF = int(input('Enter a temperature in degrees F: '))
degC = (degF -32) * 5/9
print('{} ,degrees F converts to , {} ,degrees C'.format(degF, (degC)))
| 36.5 | 71 | 0.64726 |
efb2c233341fbbe4d8823cf277952d746dcc00dc | 1,087 | py | Python | pytuber/utils.py | Starz0r/pytuber | 5bb53edde6a39cedec48c4a8f41ba22db21d4727 | [
"MIT"
] | 8 | 2019-01-27T00:52:20.000Z | 2021-07-15T15:57:19.000Z | pytuber/utils.py | Starz0r/pytuber | 5bb53edde6a39cedec48c4a8f41ba22db21d4727 | [
"MIT"
] | 22 | 2019-01-25T14:57:08.000Z | 2021-12-13T19:55:04.000Z | pytuber/utils.py | Starz0r/pytuber | 5bb53edde6a39cedec48c4a8f41ba22db21d4727 | [
"MIT"
] | 4 | 2019-02-17T09:56:30.000Z | 2021-04-17T17:53:13.000Z | import contextlib
from datetime import datetime
from typing import Optional
import click
from yaspin import yaspin
from pytuber.storage import Registry
def magenta(text):
return click.style(str(text), fg="magenta")
@contextlib.contextmanager
def spinner(text):
sp = yaspin(text=text)
sp.start()
try:
yield sp
sp.green.ok("✔")
except Exception as e:
sp.red.fail("✘")
click.secho(str(e))
finally:
sp.stop()
def timestamp():
return int(datetime.utcnow().strftime("%s"))
def date(timestamp: Optional[int] = None):
if not timestamp:
return "-"
return datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M")
def init_registry(path: str, version: str):
Registry.from_file(path)
current_version = Registry.get("version", default="0")
if current_version == "0":
if Registry.exists("configuration", "youtube", "data"):
Registry.set(
"configuration", "youtube", "data", "quota_limit", 1000000
)
Registry.set("version", version)
| 21.74 | 74 | 0.633855 |
9ad0cec0c9a2cac5ee0655195c24575470255216 | 3,900 | py | Python | Algorithm.Python/Alphas/TripleLeverageETFPairVolatilityDecayAlpha.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 4 | 2020-03-30T06:00:05.000Z | 2020-06-29T02:51:25.000Z | Algorithm.Python/Alphas/TripleLeverageETFPairVolatilityDecayAlpha.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 30 | 2020-03-16T07:27:37.000Z | 2021-05-17T05:51:04.000Z | Algorithm.Python/Alphas/TripleLeverageETFPairVolatilityDecayAlpha.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 1 | 2020-03-10T03:12:12.000Z | 2020-03-10T03:12:12.000Z | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
from System import *
from QuantConnect import *
from QuantConnect.Data.UniverseSelection import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Portfolio import EqualWeightingPortfolioConstructionModel
from QuantConnect.Algorithm.Framework.Selection import ManualUniverseSelectionModel
from datetime import timedelta
#
# Leveraged ETFs (LETF) promise a fixed leverage ratio with respect to an underlying asset or an index.
# A Triple-Leveraged ETF allows speculators to amplify their exposure to the daily returns of an underlying index by a factor of 3.
#
# Increased volatility generally decreases the value of a LETF over an extended period of time as daily compounding is amplified.
#
# This alpha emits short-biased insight to capitalize on volatility decay for each listed pair of TL-ETFs, by rebalancing the
# ETFs with equal weights each day.
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open sourced so the community and client funds can see an example of an alpha.
#
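#
# Illustrative arithmetic (hypothetical returns, not produced by this algorithm):
# if the underlying index moves +10% one day and -10% the next, an unleveraged
# position ends at 1.10 * 0.90 = 0.99 of its starting value, while a 3x LETF ends
# at 1.30 * 0.70 = 0.91. Larger daily swings widen this compounding drag -- the
# decay that the short-biased insights below aim to capture.
#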
class TripleLeverageETFPairVolatilityDecayAlpha(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2018, 1, 1)
self.SetCash(100000)
# Set zero transaction fees
self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))
# 3X ETF pair tickers
ultraLong = Symbol.Create("UGLD", SecurityType.Equity, Market.USA)
ultraShort = Symbol.Create("DGLD", SecurityType.Equity, Market.USA)
# Manually curated universe
self.UniverseSettings.Resolution = Resolution.Daily
self.SetUniverseSelection(ManualUniverseSelectionModel([ultraLong, ultraShort]))
# Select the demonstration alpha model
self.SetAlpha(RebalancingTripleLeveragedETFAlphaModel(ultraLong, ultraShort))
## Set Equal Weighting Portfolio Construction Model
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
## Set Immediate Execution Model
self.SetExecution(ImmediateExecutionModel())
## Set Null Risk Management Model
self.SetRiskManagement(NullRiskManagementModel())
class RebalancingTripleLeveragedETFAlphaModel(AlphaModel):
'''
Rebalance a pair of 3x leveraged ETFs and predict that the value of both ETFs in each pair will decrease.
'''
def __init__(self, ultraLong, ultraShort):
# Giving an insight period 1 days.
self.period = timedelta(1)
self.magnitude = 0.001
self.ultraLong = ultraLong
self.ultraShort = ultraShort
self.Name = "RebalancingTripleLeveragedETFAlphaModel"
def Update(self, algorithm, data):
return Insight.Group(
[
Insight.Price(self.ultraLong, self.period, InsightDirection.Down, self.magnitude),
Insight.Price(self.ultraShort, self.period, InsightDirection.Down, self.magnitude)
] ) | 41.489362 | 163 | 0.753846 |
933c1474daa9456141cf505919944abbc827af85 | 3,301 | py | Python | backend/main.py | willshiao/dub-hacks-2020 | 081aa32b50d61ad171388951354678505e20fa1e | [
"MIT"
] | 1 | 2020-10-18T19:05:33.000Z | 2020-10-18T19:05:33.000Z | backend/main.py | willshiao/dub-hacks-2020 | 081aa32b50d61ad171388951354678505e20fa1e | [
"MIT"
] | null | null | null | backend/main.py | willshiao/dub-hacks-2020 | 081aa32b50d61ad171388951354678505e20fa1e | [
"MIT"
] | 1 | 2021-06-09T03:54:37.000Z | 2021-06-09T03:54:37.000Z | from os import path
from flask import Flask, request
from flask_cors import CORS
import youtube_dl
import ujson
from pose_extractor import PoseExtractor
import mysql.connector
from preprocessing import normalize
import re
VID_DIR='./videos'
YT_REGEX = re.compile(r'^.*(?:(?:youtu\.be\/|v\/|vi\/|u\/\w\/|embed\/)|(?:(?:watch)?\?v(?:i)?=|\&v(?:i)?=))([^#\&\?]*).*')
pe = PoseExtractor()
# DB behind ACL, kinda okay to expose pass
mydb = mysql.connector.connect(
host="10.67.16.3",
user="root",
password="oBY8AF6p8L3ZqLXNAeXv",
database='fitto'
)
insert_sql = 'INSERT INTO poses (timestamp, video_str, pose_info) VALUES (%s, %s, %s)'
app = Flask(__name__)
CORS(app)
@app.route('/')
def index():
return 'bleh'
# render_template('index.html')
@app.route('/session')
def hello():
return 'return session page'
@app.route('/download')
def download():
yt_url = input('URL here pls:')
ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s', 'format': 'bestvideo'})
with ydl:
result = ydl.extract_info(
yt_url,
download=True
)
if 'entries' in result:
# Can be a playlist or a list of videos
video = result['entries'][0]
else:
# Just a video
video = result
print(video)
video_url = video['url']
print(video_url)
@app.route('/video', methods=['POST'])
def video_route():
    if request.json is None:
        return 'ERROR: no JSON in request body'
    yt_url = request.json['url']
    print(yt_url)
if not yt_url.startswith('https://youtube.com/') and not yt_url.startswith('https://www.youtube.com/'):
return { 'success': False, 'message': 'Invalid URL' }
match = YT_REGEX.search(yt_url)
yt_id = match.group(1)
cursor = mydb.cursor()
cursor.execute('SELECT * FROM poses WHERE video_str=%s', (yt_id,))
sql_result = cursor.fetchall()
    if sql_result:
        return { 'success': True, 'message': 'Video already exists', 'videoUrl': yt_url}
ydl = youtube_dl.YoutubeDL({'outtmpl': path.join(VID_DIR, '%(id)s.%(ext)s'), 'format': 'mp4'})
with ydl:
result = ydl.extract_info(
yt_url,
download=True
)
# Can be a playlist or a list of videos
if 'entries' in result:
video = result['entries'][0]
else:
video = result
vid_id = result['id']
filename = path.join(VID_DIR, f'{vid_id}.mp4')
def process_frame(pose_scores, keypoint_scores, keypoint_coords, frame_num, fps, call_cnt):
seconds = frame_num / fps
normalized = normalize(pose_scores, keypoint_scores, keypoint_coords)
if not normalized:
return True
print(f'Inserting #{call_cnt}')
cursor.execute(insert_sql, (seconds, vid_id, ujson.dumps(normalized)))
if frame_num % 1000 == 0:
print('Committing...')
mydb.commit()
pe.get_poses_from_video(filename, process_frame, skip_every_frames=1, skip_first=0)
mydb.commit()
    print(f'Done getting poses for {vid_id}')
return { 'success': True, 'videoUrl': yt_url }
if __name__ == '__main__':
app.run(debug=True, port=3000, host='0.0.0.0')
| 30.850467 | 123 | 0.605877 |
69161d8ffb3e9acb284c0610f6f55f215abbec0a | 1,051 | py | Python | Error.tweets-try-except.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | 4 | 2019-05-04T00:33:25.000Z | 2021-05-29T20:37:59.000Z | Error.tweets-try-except.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | null | null | null | Error.tweets-try-except.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | 3 | 2020-05-05T13:14:28.000Z | 2022-02-03T16:18:37.000Z | #import pandas
import pandas as pd
#import twitter data to dataframe
tweets_df = pd.read_csv("tweets.csv")
# Define count_entries()
def count_entries(df, col_name='lang'):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Raise a ValueError if col_name is NOT in DataFrame
if col_name not in df.columns:
raise ValueError("The DataFrame does not have a " + col_name + " column.")
# Initialize an empty dictionary: cols_count
cols_count = {}
# Extract column from DataFrame: col
col = df[col_name]
# Iterate over the column in DataFrame
for entry in col:
# If entry is in cols_count, add 1
if entry in cols_count.keys():
cols_count[entry] += 1
# Else add the entry to cols_count, set the value to 1
else:
cols_count[entry] = 1
# Return the cols_count dictionary
return cols_count
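# A minimal illustration of the try/except pattern the filename refers to
# ('lang1' is a hypothetical, non-existent column used only to trigger the error):
try:
    count_entries(tweets_df, col_name='lang1')
except ValueError as e:
    print(e)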
# Call count_entries(): result1
result1 = count_entries(tweets_df, col_name="lang")
# Print result1
print(result1) | 26.275 | 82 | 0.666984 |
1c507404f6ee68c5084d8477d1dcb7b9a764ed52 | 1,548 | py | Python | integrationtest/vm/windows/suite_setup.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/windows/suite_setup.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/windows/suite_setup.py | bgerxx/woodpecker | fdc51245945cc9be4d1f028988079213eb99b2ad | [
"Apache-2.0"
] | null | null | null | '''
@author: SyZhao
'''
import os.path
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
def test():
    # If the test execution machine is not the same as the host machine, the deployment work needs to be split into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent), and SetupAction.run() cannot be called directly.
test_lib.setup_plan.deploy_test_agent()
test_lib.setup_plan.execute_plan_without_deploy_test_agent()
deploy_operations.deploy_initial_database(test_lib.deploy_config)
if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
if test_lib.lib_get_ha_selffencer_maxattempts() != None:
test_lib.lib_set_ha_selffencer_maxattempts('60')
test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
test_lib.lib_set_provision_storage_rate(6)
test_lib.lib_set_provision_memory_rate(3)
test_lib.lib_set_vm_numa('true')
test_util.test_pass('Suite Setup Success')
| 46.909091 | 221 | 0.802326 |
997dd3eaf452bc400eb1a45fb80c935e99bb1acb | 7,244 | py | Python | src/datadog_api_client/v1/model/monitor_overall_states.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v1/model/monitor_overall_states.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | src/datadog_api_client/v1/model/monitor_overall_states.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class MonitorOverallStates(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'ALERT': "Alert",
'IGNORED': "Ignored",
'NO_DATA': "No Data",
'OK': "OK",
'SKIPPED': "Skipped",
'UNKNOWN': "Unknown",
'WARN': "Warn",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""MonitorOverallStates - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
            args[0] (str): The different states your monitor can be in, must be one of ["Alert", "Ignored", "No Data", "OK", "Skipped", "Unknown", "Warn", ] # noqa: E501
        Keyword Args:
            value (str): The different states your monitor can be in, must be one of ["Alert", "Ignored", "No Data", "OK", "Skipped", "Unknown", "Warn", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
| 38.737968 | 172 | 0.569023 |
14528a7881d641b41a1e2d920c226594b0e5e6be | 794 | py | Python | tests/test_excel.py | ourresearch/jump-api | 5252da53656389f9bd53811929d5156ff8f7620c | [
"MIT"
] | 15 | 2019-11-07T09:57:56.000Z | 2022-01-03T22:45:28.000Z | tests/test_excel.py | ourresearch/jump-api | 5252da53656389f9bd53811929d5156ff8f7620c | [
"MIT"
] | 6 | 2021-03-19T23:15:14.000Z | 2021-09-23T16:46:19.000Z | tests/test_excel.py | ourresearch/jump-api | 5252da53656389f9bd53811929d5156ff8f7620c | [
"MIT"
] | 1 | 2021-12-17T05:50:47.000Z | 2021-12-17T05:50:47.000Z | import pytest
import os
import filecmp
from excel import convert_xls_to_xlsx, convert_spreadsheet_to_csv
def test_convert_xls_to_xlsx():
x = convert_xls_to_xlsx('tests/test_files/counter/counter4_jr1_2018_01.xls')
assert isinstance(x, str)
assert '.xlsx' in x
assert os.path.isfile(x)
def test_convert_spreadsheet_to_csv():
# FIXME: parsed=True is never used in the codebase, so just testing False for now
x = convert_spreadsheet_to_csv('tests/test_files/counter/counter4_jr1_2018_01.xlsx', parsed=False)
assert isinstance(x, list)
assert isinstance(x[0], str)
assert os.path.isfile(x[0])
# FIXME: The below should be True, but dates are messed up in the output csv
# assert filecmp.cmp(x[0], 'tests/test_files/counter/counter4_jr1_2018_01.csv')
| 37.809524 | 102 | 0.756927 |
884deee4ef5c4e088a59b8432155f75855f155e9 | 7,775 | py | Python | ingestion/tests/integration/ometa/test_ometa_dashboard_api.py | Shreshtha13/OpenMetadata | cd65037ac76a2d44ece2e3cf456789864ee363da | [
"Apache-2.0"
] | 864 | 2021-08-13T23:48:45.000Z | 2022-03-31T18:36:30.000Z | ingestion/tests/integration/ometa/test_ometa_dashboard_api.py | Shreshtha13/OpenMetadata | cd65037ac76a2d44ece2e3cf456789864ee363da | [
"Apache-2.0"
] | 2,701 | 2021-08-14T06:05:12.000Z | 2022-03-31T23:48:32.000Z | ingestion/tests/integration/ometa/test_ometa_dashboard_api.py | Shreshtha13/OpenMetadata | cd65037ac76a2d44ece2e3cf456789864ee363da | [
"Apache-2.0"
] | 144 | 2021-08-16T20:44:08.000Z | 2022-03-29T14:12:30.000Z | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenMetadata high-level API Dashboard test
"""
import uuid
from unittest import TestCase
from metadata.generated.schema.api.data.createDashboard import (
CreateDashboardEntityRequest,
)
from metadata.generated.schema.api.services.createDashboardService import (
CreateDashboardServiceEntityRequest,
)
from metadata.generated.schema.api.teams.createUser import CreateUserEntityRequest
from metadata.generated.schema.entity.data.dashboard import Dashboard
from metadata.generated.schema.entity.services.dashboardService import (
DashboardService,
DashboardServiceType,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
class OMetaDashboardTest(TestCase):
"""
Run this integration test with the local API available
Install the ingestion package before running the tests
"""
service_entity_id = None
server_config = MetadataServerConfig(api_endpoint="http://localhost:8585/api")
metadata = OpenMetadata(server_config)
assert metadata.health_check()
user = metadata.create_or_update(
data=CreateUserEntityRequest(name="random-user", email="random@user.com"),
)
owner = EntityReference(id=user.id, type="user")
service = CreateDashboardServiceEntityRequest(
name="test-service-dashboard",
serviceType=DashboardServiceType.Superset,
dashboardUrl="https://localhost:1000",
)
service_type = "dashboardService"
@classmethod
def setUpClass(cls) -> None:
"""
Prepare ingredients
"""
cls.service_entity = cls.metadata.create_or_update(data=cls.service)
cls.entity = Dashboard(
id=uuid.uuid4(),
name="test",
service=EntityReference(id=cls.service_entity.id, type=cls.service_type),
fullyQualifiedName="test-service-dashboard.test",
)
cls.create = CreateDashboardEntityRequest(
name="test",
service=EntityReference(id=cls.service_entity.id, type=cls.service_type),
)
@classmethod
def tearDownClass(cls) -> None:
"""
Clean up
"""
_id = str(
cls.metadata.get_by_name(
entity=Dashboard, fqdn="test-service-dashboard.test"
).id.__root__
)
service_id = str(
cls.metadata.get_by_name(
entity=DashboardService, fqdn="test-service-dashboard"
).id.__root__
)
cls.metadata.delete(entity=Dashboard, entity_id=_id)
cls.metadata.delete(entity=DashboardService, entity_id=service_id)
def test_create(self):
"""
We can create a Dashboard and we receive it back as Entity
"""
res = self.metadata.create_or_update(data=self.create)
self.assertEqual(res.name, self.entity.name)
self.assertEqual(res.service.id, self.entity.service.id)
self.assertEqual(res.owner, None)
def test_update(self):
"""
Updating it properly changes its properties
"""
res_create = self.metadata.create_or_update(data=self.create)
updated = self.create.dict(exclude_unset=True)
updated["owner"] = self.owner
updated_entity = CreateDashboardEntityRequest(**updated)
res = self.metadata.create_or_update(data=updated_entity)
# Same ID, updated algorithm
self.assertEqual(res.service.id, updated_entity.service.id)
self.assertEqual(res_create.id, res.id)
self.assertEqual(res.owner.id, self.user.id)
def test_get_name(self):
"""
We can fetch a Dashboard by name and get it back as Entity
"""
self.metadata.create_or_update(data=self.create)
res = self.metadata.get_by_name(
entity=Dashboard, fqdn=self.entity.fullyQualifiedName
)
self.assertEqual(res.name, self.entity.name)
def test_get_id(self):
"""
We can fetch a Dashboard by ID and get it back as Entity
"""
self.metadata.create_or_update(data=self.create)
# First pick up by name
res_name = self.metadata.get_by_name(
entity=Dashboard, fqdn=self.entity.fullyQualifiedName
)
# Then fetch by ID
res = self.metadata.get_by_id(entity=Dashboard, entity_id=res_name.id)
self.assertEqual(res_name.id, res.id)
def test_list(self):
"""
We can list all our Dashboards
"""
self.metadata.create_or_update(data=self.create)
res = self.metadata.list_entities(entity=Dashboard, limit=100)
# Fetch our test Database. We have already inserted it, so we should find it
data = next(
iter(ent for ent in res.entities if ent.name == self.entity.name), None
)
assert data
def test_delete(self):
"""
We can delete a Dashboard by ID
"""
self.metadata.create_or_update(data=self.create)
# Find by name
res_name = self.metadata.get_by_name(
entity=Dashboard, fqdn=self.entity.fullyQualifiedName
)
# Then fetch by ID
res_id = self.metadata.get_by_id(
entity=Dashboard, entity_id=str(res_name.id.__root__)
)
# Delete
self.metadata.delete(entity=Dashboard, entity_id=str(res_id.id.__root__))
# Then we should not find it
res = self.metadata.list_entities(entity=Dashboard)
assert not next(
iter(
ent
for ent in res.entities
if ent.fullyQualifiedName == self.entity.fullyQualifiedName
),
None,
)
def test_list_versions(self):
"""
test list dashboard entity versions
"""
self.metadata.create_or_update(data=self.create)
# Find by name
res_name = self.metadata.get_by_name(
entity=Dashboard, fqdn=self.entity.fullyQualifiedName
)
res = self.metadata.get_list_entity_versions(
entity=Dashboard, entity_id=res_name.id.__root__
)
assert res
def test_get_entity_version(self):
"""
test get dashboard entity version
"""
self.metadata.create_or_update(data=self.create)
# Find by name
res_name = self.metadata.get_by_name(
entity=Dashboard, fqdn=self.entity.fullyQualifiedName
)
res = self.metadata.get_entity_version(
entity=Dashboard, entity_id=res_name.id.__root__, version=0.1
)
# check we get the correct version requested and the correct entity ID
assert res.version.__root__ == 0.1
assert res.id == res_name.id
def test_get_entity_ref(self):
"""
test get EntityReference
"""
res = self.metadata.create_or_update(data=self.create)
entity_ref = self.metadata.get_entity_reference(
entity=Dashboard, fqdn=res.fullyQualifiedName
)
assert res.id == entity_ref.id
| 31.734694 | 85 | 0.653248 |
64b049c970a8018d6e899811ed6595dba2f1db5a | 792 | py | Python | folks/models/post.py | marinintim/folks | 2dce457c9d57da34626717667b942fa91f62385f | [
"MIT"
] | 4 | 2019-12-02T20:04:55.000Z | 2020-04-30T22:14:30.000Z | folks/models/post.py | marinintim/folks | 2dce457c9d57da34626717667b942fa91f62385f | [
"MIT"
] | null | null | null | folks/models/post.py | marinintim/folks | 2dce457c9d57da34626717667b942fa91f62385f | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, DateTime, String, ForeignKey
from sqlalchemy.orm import relationship
from datetime import datetime
from database import Base
class Post(Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key=True)
body = Column(String())
posted_at = Column(DateTime, index=True, default=datetime.utcnow)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship('User')
writer_feed_id = Column(Integer, ForeignKey('writer_feeds.id'))
writer_feed = relationship('WriterFeed')
hat_id = Column(Integer, ForeignKey('hats.id'))
hat = relationship('Hat')
attachments = relationship('Attachment', backref='post')
def __repr__(self):
return '<Post {} by {}>'.format(self.id, self.author.username)
| 36 | 70 | 0.713384 |
c3d7f78164c0aa9b802ee22368c2a254e8c91d6c | 8,298 | py | Python | dnnlib/tflib/autosummary.py | Di-Is/stylegan2-ada | c1228c08a27fda80e512cfecf3b10c3c93c8b6d3 | [
"BSD-Source-Code"
] | null | null | null | dnnlib/tflib/autosummary.py | Di-Is/stylegan2-ada | c1228c08a27fda80e512cfecf3b10c3c93c8b6d3 | [
"BSD-Source-Code"
] | null | null | null | dnnlib/tflib/autosummary.py | Di-Is/stylegan2-ada | c1228c08a27fda80e512cfecf3b10c3c93c8b6d3 | [
"BSD-Source-Code"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper for adding automatically tracked values to Tensorboard.
Autosummary creates an identity op that internally keeps track of the input
values and automatically shows up in TensorBoard. The reported value
represents an average over input components. The average is accumulated
constantly over time and flushed when save_summaries() is called.
Notes:
- The output tensor must be used as an input for something else in the
graph. Otherwise, the autosummary op will not get executed, and the average
value will not get accumulated.
- It is perfectly fine to include autosummaries with the same name in
several places throughout the graph, even if they are executed concurrently.
- It is ok to also pass in a python scalar or numpy array. In this case, it
is added to the average immediately.
"""
from collections import OrderedDict
import numpy as np
import tensorflow.compat.v1 as tensorflow
tf = tensorflow
tf.disable_v2_behavior()
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2
from . import tfutil
from .tfutil import TfExpression
from .tfutil import TfExpressionEx
# Enable "Custom scalars" tab in TensorBoard for advanced formatting.
# Disabled by default to reduce tfevents file size.
enable_custom_scalars = False
_dtype = tf.float64
_vars = OrderedDict() # name => [var, ...]
_immediate = OrderedDict() # name => update_op, update_value
_finalized = False
_merge_op = None
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
"""Internal helper for creating autosummary accumulators."""
assert not _finalized
name_id = name.replace("/", "_")
v = tf.cast(value_expr, _dtype)
if v.shape.is_fully_defined():
size = np.prod(v.shape.as_list())
size_expr = tf.constant(size, dtype=_dtype)
else:
size = None
size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))
if size == 1:
if v.shape.ndims != 0:
v = tf.reshape(v, [])
v = [size_expr, v, tf.square(v)]
else:
v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]
update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
if name in _vars:
_vars[name].append(var)
else:
_vars[name] = [var]
return update_op
def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None, condition: TfExpressionEx = True) -> TfExpressionEx:
"""Create a new autosummary.
Args:
name: Name to use in TensorBoard
value: TensorFlow expression or python value to track
passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.
Example use of the passthru mechanism:
n = autosummary('l2loss', loss, passthru=n)
This is a shorthand for the following code:
with tf.control_dependencies([autosummary('l2loss', loss)]):
n = tf.identity(n)
"""
tfutil.assert_tf_initialized()
name_id = name.replace("/", "_")
if tfutil.is_tf_expression(value):
with tf.name_scope("summary_" + name_id), tf.device(value.device):
condition = tf.convert_to_tensor(condition, name='condition')
update_op = tf.cond(condition, lambda: tf.group(_create_var(name, value)), tf.no_op)
with tf.control_dependencies([update_op]):
return tf.identity(value if passthru is None else passthru)
else: # python scalar or numpy array
assert not tfutil.is_tf_expression(passthru)
assert not tfutil.is_tf_expression(condition)
if condition:
if name not in _immediate:
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
update_value = tf.placeholder(_dtype)
update_op = _create_var(name, update_value)
_immediate[name] = update_op, update_value
update_op, update_value = _immediate[name]
tfutil.run(update_op, {update_value: value})
return value if passthru is None else passthru
def finalize_autosummaries() -> None:
"""Create the necessary ops to include autosummaries in TensorBoard report.
Note: This should be done only once per graph.
"""
global _finalized
tfutil.assert_tf_initialized()
if _finalized:
return None
_finalized = True
tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
# Create summary ops.
with tf.device(None), tf.control_dependencies(None):
for name, vars_list in _vars.items():
name_id = name.replace("/", "_")
with tfutil.absolute_name_scope("Autosummary/" + name_id):
moments = tf.add_n(vars_list)
moments /= moments[0]
with tf.control_dependencies([moments]): # read before resetting
reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
mean = moments[1]
std = tf.sqrt(moments[2] - tf.square(moments[1]))
tf.summary.scalar(name, mean)
if enable_custom_scalars:
tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)
# Setup layout for custom scalars.
layout = None
if enable_custom_scalars:
cat_dict = OrderedDict()
for series_name in sorted(_vars.keys()):
p = series_name.split("/")
cat = p[0] if len(p) >= 2 else ""
chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
if cat not in cat_dict:
cat_dict[cat] = OrderedDict()
if chart not in cat_dict[cat]:
cat_dict[cat][chart] = []
cat_dict[cat][chart].append(series_name)
categories = []
for cat_name, chart_dict in cat_dict.items():
charts = []
for chart_name, series_names in chart_dict.items():
series = []
for series_name in series_names:
series.append(layout_pb2.MarginChartContent.Series(
value=series_name,
lower="xCustomScalars/" + series_name + "/margin_lo",
upper="xCustomScalars/" + series_name + "/margin_hi"))
margin = layout_pb2.MarginChartContent(series=series)
charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
categories.append(layout_pb2.Category(title=cat_name, chart=charts))
layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
return layout
def save_summaries(file_writer, global_step=None):
"""Call FileWriter.add_summary() with all summaries in the default graph,
automatically finalizing and merging them on the first call.
"""
global _merge_op
tfutil.assert_tf_initialized()
if _merge_op is None:
layout = finalize_autosummaries()
if layout is not None:
file_writer.add_summary(layout)
with tf.device(None), tf.control_dependencies(None):
_merge_op = tf.summary.merge_all()
file_writer.add_summary(_merge_op.eval(), global_step)
| 42.336735 | 135 | 0.660521 |
c760e3fd90e023379edae411160d261785661078 | 5,192 | py | Python | n5k/calculator_base.py | c-d-leonard/N5K | 99d844621f2436aaf56fc98484e309043d4b7bd1 | [
"BSD-3-Clause"
] | null | null | null | n5k/calculator_base.py | c-d-leonard/N5K | 99d844621f2436aaf56fc98484e309043d4b7bd1 | [
"BSD-3-Clause"
] | null | null | null | n5k/calculator_base.py | c-d-leonard/N5K | 99d844621f2436aaf56fc98484e309043d4b7bd1 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
class N5KCalculatorBase(object):
name = 'Base'
needed_fields = ['output_prefix']
def __init__(self, fname_config):
if isinstance(fname_config, dict):
self.config = fname_config
else:
import yaml
with open(fname_config) as f:
self.config = yaml.safe_load(f)
self.nb_g = 10
self.nb_s = 5
if 'select_cl' in self.config:
self.nb_g = len(self.config['select_cl'])
if 'select_sh' in self.config:
self.nb_s = len(self.config['select_sh'])
self._check_config_sanity()
def _check_config_sanity(self):
for name in self.needed_fields:
if not self.config.get(name):
raise ValueError("You must provide %s"%(name))
def get_pk(self):
return np.load('input/pk.npz')
def get_background(self):
return np.load('input/background.npz')
def get_cosmological_parameters(self):
return {'Omega_m': 0.3156,
'Omega_b': 0.0492,
'w0': -1.0,
'h': 0.6727,
'A_s': 2.12107E-9,
'n_s': 0.9645}
def get_tracer_parameters(self):
# Per-bin galaxy bias
b_g = np.array([1.376695, 1.451179, 1.528404,
1.607983, 1.689579, 1.772899,
1.857700, 1.943754, 2.030887,
2.118943])
return {'b_g': b_g}
def get_tracer_dndzs(self):
filename = self.config.get('dndz_file', 'input/dNdzs_fullwidth.npz')
dNdz_file = np.load(filename)
z_sh = dNdz_file['z_sh']
dNdz_sh = dNdz_file['dNdz_sh']
z_cl = dNdz_file['z_cl']
dNdz_cl = dNdz_file['dNdz_cl']
if 'select_cl' in self.config:
dNdz_cl = np.array([dNdz_cl[:, i] for i in self.config['select_cl']]).T
if 'select_sh' in self.config:
dNdz_sh = np.array([dNdz_sh[:, i] for i in self.config['select_sh']]).T
return {'z_sh': z_sh, 'dNdz_sh': dNdz_sh,
'z_cl': z_cl, 'dNdz_cl': dNdz_cl}
def get_noise_biases(self):
from scipy.integrate import simps
# Lens sample: 40 gals/arcmin^2
ndens_c = 40.
# Source sample: 27 gals/arcmin^2
ndens_s = 27.
# Ellipticity scatter per component
e_rms = 0.28
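        # Shot-noise sketch for the spectra computed below: N_ell = 1 / nbar for
        # clustering and sigma_e**2 / nbar for shear, with nbar converted from
        # galaxies per arcmin^2 to galaxies per steradian via tosrad.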
ndic = self.get_tracer_dndzs()
nc_ints = np.array([simps(n, x=ndic['z_cl'])
for n in ndic['dNdz_cl'].T])
ns_ints = np.array([simps(n, x=ndic['z_sh'])
for n in ndic['dNdz_sh'].T])
nc_ints *= ndens_c / np.sum(nc_ints)
ns_ints *= ndens_s / np.sum(ns_ints)
tosrad = (180*60/np.pi)**2
nl_cl = 1./(nc_ints*tosrad)
nl_sh = e_rms**2/(ns_ints*tosrad)
return nl_cl, nl_sh
def get_tracer_kernels(self):
filename = self.config.get('kernel_file', 'input/kernels_fullwidth.npz')
d = np.load(filename)
kernels_cl = d['kernels_cl']
kernels_sh = d['kernels_sh']
if 'select_cl' in self.config:
kernels_cl = np.array([kernels_cl[i, :] for i in self.config['select_cl']])
if 'select_sh' in self.config:
kernels_sh = np.array([kernels_sh[i, :] for i in self.config['select_sh']])
return {'z_cl': d['z_cl'],
'chi_cl': d['chi_cl'],
'kernels_cl': kernels_cl,
'z_sh': d['z_sh'],
'chi_sh': d['chi_sh'],
'kernels_sh': kernels_sh}
def get_ells(self):
return np.unique(np.geomspace(2, 2000, 128).astype(int)).astype(float)
def get_nmodes_fullsky(self):
""" Returns the number of modes in each ell bin"""
ls = self.get_ells()
nmodes = list(ls[1:]**2-ls[:-1]**2)
lp = ls[-1]**2/ls[-2]
nmodes.append(lp**2-ls[-1]**2)
return np.array(nmodes)*0.5
def get_num_cls(self):
ngg = (self.nb_g * (self.nb_g + 1)) // 2
nss = (self.nb_s * (self.nb_s + 1)) // 2
ngs = self.nb_g * self.nb_s
return ngg, ngs, nss
def write_output(self):
ls = self.get_ells()
nl = len(ls)
ngg, ngs, nss = self.get_num_cls()
if self.cls_gg.shape != (ngg, nl):
raise ValueError("Incorrect G-G spectra shape")
if self.cls_gs.shape != (ngs, nl):
raise ValueError("Incorrect G-S spectra shape")
if self.cls_ss.shape != (nss, nl):
raise ValueError("Incorrect S-S spectra shape")
np.savez(self.config['output_prefix'] + '_clgg.npz',
ls=ls, cls=self.cls_gg)
np.savez(self.config['output_prefix'] + '_clgs.npz',
ls=ls, cls=self.cls_gs)
np.savez(self.config['output_prefix'] + '_clss.npz',
ls=ls, cls=self.cls_ss)
def teardown(self):
pass
def setup(self):
pass
def run(self):
nl = len(self.get_ells())
ngg, ngs, nss = self.get_num_cls()
self.cls_gg = np.zeros((ngg, nl))
self.cls_gs = np.zeros((ngs, nl))
self.cls_ss = np.zeros((nss, nl))
| 34.157895 | 87 | 0.540254 |
7756514fa4197b5a6c91505483790ac1c8a85d01 | 4,514 | py | Python | src/sagemaker/deprecations.py | eitansela/sagemaker-python-sdk | aa54102b5113b1d39bbbd4d9d341775f84641681 | [
"Apache-2.0"
] | 1 | 2021-07-22T00:23:51.000Z | 2021-07-22T00:23:51.000Z | src/sagemaker/deprecations.py | eitansela/sagemaker-python-sdk | aa54102b5113b1d39bbbd4d9d341775f84641681 | [
"Apache-2.0"
] | 24 | 2021-05-18T07:10:27.000Z | 2021-05-28T13:36:51.000Z | src/sagemaker/deprecations.py | eitansela/sagemaker-python-sdk | aa54102b5113b1d39bbbd4d9d341775f84641681 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Module for deprecation abstractions."""
from __future__ import absolute_import
import logging
import warnings
logger = logging.getLogger(__name__)
V2_URL = "https://sagemaker.readthedocs.io/en/stable/v2.html"
def _warn(msg):
"""Generic warning raiser referencing V2
Args:
        msg: The message to include in the warning.
"""
full_msg = f"{msg} in sagemaker>=2.\nSee: {V2_URL} for details."
warnings.warn(full_msg, DeprecationWarning, stacklevel=2)
logger.warning(full_msg)
def removed_warning(phrase):
"""Raise a warning for a no-op in sagemaker>=2
Args:
phrase: the prefix phrase of the warning message.
"""
_warn(f"{phrase} is a no-op")
def renamed_warning(phrase):
"""Raise a warning for a rename in sagemaker>=2
Args:
phrase: the prefix phrase of the warning message.
"""
_warn(f"{phrase} has been renamed")
def renamed_kwargs(old_name, new_name, value, kwargs):
"""Checks if the deprecated argument is in kwargs
Raises warning, if present.
Args:
old_name: name of deprecated argument
new_name: name of the new argument
value: value associated with new name, if supplied
kwargs: keyword arguments dict
Returns:
value of the keyword argument, if present
"""
if old_name in kwargs:
value = kwargs.get(old_name, value)
kwargs[new_name] = value
renamed_warning(old_name)
return value
def removed_arg(name, arg):
"""Checks if the deprecated argument is populated.
Raises warning, if not None.
Args:
name: name of deprecated argument
arg: the argument to check
"""
if arg is not None:
removed_warning(name)
def removed_kwargs(name, kwargs):
"""Checks if the deprecated argument is in kwargs
Raises warning, if present.
Args:
name: name of deprecated argument
kwargs: keyword arguments dict
"""
if name in kwargs:
removed_warning(name)
def removed_function(name):
"""A no-op deprecated function factory."""
def func(*args, **kwargs): # pylint: disable=W0613
removed_warning(f"The function {name}")
return func
def deprecated_function(func, name):
"""Wrap a function with a deprecation warning.
Args:
func: Function to wrap in a deprecation warning.
name: The name that has been deprecated.
Returns:
The modified function
"""
def deprecate(*args, **kwargs):
renamed_warning(f"The {name}")
return func(*args, **kwargs)
return deprecate
def deprecated_serialize(instance, name):
"""Modifies a serializer instance serialize method.
Args:
instance: Instance to modify serialize method.
name: The name that has been deprecated.
Returns:
The modified instance
"""
instance.serialize = deprecated_function(instance.serialize, name)
return instance
def deprecated_deserialize(instance, name):
"""Modifies a deserializer instance deserialize method.
Args:
instance: Instance to modify deserialize method.
name: The name that has been deprecated.
Returns:
The modified instance
"""
instance.deserialize = deprecated_function(instance.deserialize, name)
return instance
def deprecated_class(cls, name):
"""Returns a class based on super class with a deprecation warning.
Args:
cls: The class to derive with a deprecation warning on __init__
name: The name of the class.
Returns:
The modified class.
"""
class DeprecatedClass(cls):
"""Provides a warning for the class name."""
def __init__(self, *args, **kwargs):
"""Provides a warning for the class name."""
renamed_warning(f"The class {name}")
super(DeprecatedClass, self).__init__(*args, **kwargs)
return DeprecatedClass
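# Illustrative usage of the helpers above (all names here are hypothetical):
#
#   def create_model(self, entry_point=None, **kwargs):
#       entry_point = renamed_kwargs("script", "entry_point", entry_point, kwargs)
#       removed_kwargs("update_endpoint", kwargs)
#       ...
#
#   LegacyPredictor = deprecated_class(Predictor, "LegacyPredictor")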
| 25.794286 | 74 | 0.671688 |
6e9b2333ed8b6a85508655d296209459ea63e8ed | 1,001 | py | Python | travel/docs/Amadeus-master/pactravel-master/python-client/test/test_train_search_itinerary.py | shopglobal/api | 176e1858d3f93e8e7854ba194698b6b9825841da | [
"CC-BY-4.0"
] | null | null | null | travel/docs/Amadeus-master/pactravel-master/python-client/test/test_train_search_itinerary.py | shopglobal/api | 176e1858d3f93e8e7854ba194698b6b9825841da | [
"CC-BY-4.0"
] | 1 | 2021-06-01T22:04:28.000Z | 2021-06-01T22:04:28.000Z | travel/docs/Amadeus-master/pactravel-master/python-client/test/test_train_search_itinerary.py | shopglobal/api | 176e1858d3f93e8e7854ba194698b6b9825841da | [
"CC-BY-4.0"
] | null | null | null | # coding: utf-8
"""
Amadeus Travel Innovation Sandbox
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.train_search_itinerary import TrainSearchItinerary
class TestTrainSearchItinerary(unittest.TestCase):
""" TrainSearchItinerary unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testTrainSearchItinerary(self):
"""
Test TrainSearchItinerary
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.train_search_itinerary.TrainSearchItinerary()
pass
if __name__ == '__main__':
unittest.main()
| 22.244444 | 105 | 0.722278 |
ed2d295b2003c7fbb69716c1e27cfde6f36318ad | 690 | py | Python | src/nexpy/api/frills/functions/polynomial.py | tschoonj/nexpy | c95b0602f09eac55bd1d7eda8ddd729f42a2d5c5 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/nexpy/api/frills/functions/polynomial.py | tschoonj/nexpy | c95b0602f09eac55bd1d7eda8ddd729f42a2d5c5 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/nexpy/api/frills/functions/polynomial.py | tschoonj/nexpy | c95b0602f09eac55bd1d7eda8ddd729f42a2d5c5 | [
"BSD-3-Clause-Clear"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
#-----------------------------------------------------------------------------
function_name = 'Polynomial'
parameters = ['P0', 'P1', 'P2', 'P3', 'P4']
def values(x, p):
p0, p1, p2, p3, p4 = p
return p0 + x*(p1 + x*(p2 + x*(p3 + x*p4)))
def guess(x, y):
slope = (y[-1]-y[0]) / (x[-1]-x[0])
constant = y[0] - slope*x[0]
return constant, slope, 0.0, 0.0, 0.0
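# Minimal usage sketch (illustrative, not part of the original fit module):
# evaluate y = 1 + 2x + 3x^2 on a few points and seed a fit with guess().
if __name__ == '__main__':
    import numpy as np
    x = np.array([0.0, 1.0, 2.0])
    y = values(x, (1.0, 2.0, 3.0, 0.0, 0.0))  # -> [1., 6., 17.]
    print(guess(x, y))  # crude (constant, slope, 0, 0, 0) starting values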
| 31.363636 | 78 | 0.456522 |
b7b9faf7efa667269a2cf1c4aaaac0723056131f | 2,954 | py | Python | geopandas/tools/tests/test_tools.py | raphacosta27/geopandas | 2c22a26bd40ec48536026b160c54c6fe523d22d7 | [
"BSD-3-Clause"
] | 2,914 | 2015-01-01T14:27:43.000Z | 2022-03-31T22:26:39.000Z | geopandas/tools/tests/test_tools.py | raphacosta27/geopandas | 2c22a26bd40ec48536026b160c54c6fe523d22d7 | [
"BSD-3-Clause"
] | 2,040 | 2015-01-16T11:34:26.000Z | 2022-03-31T12:13:39.000Z | geopandas/tools/tests/test_tools.py | raphacosta27/geopandas | 2c22a26bd40ec48536026b160c54c6fe523d22d7 | [
"BSD-3-Clause"
] | 758 | 2015-01-21T20:23:32.000Z | 2022-03-31T17:22:53.000Z | from distutils.version import LooseVersion
from shapely.geometry import LineString, MultiPoint, Point
import pyproj
from pyproj import CRS
from geopandas import GeoSeries
from geopandas.tools import collect
from geopandas.tools.crs import epsg_from_crs, explicit_crs_from_epsg
import pytest
# pyproj 2.3.1 fixed a segfault for the case working in an environment with
# 'init' dicts (https://github.com/pyproj4/pyproj/issues/415)
PYPROJ_LT_231 = LooseVersion(pyproj.__version__) < LooseVersion("2.3.1")
class TestTools:
def setup_method(self):
self.p1 = Point(0, 0)
self.p2 = Point(1, 1)
self.p3 = Point(2, 2)
self.mpc = MultiPoint([self.p1, self.p2, self.p3])
self.mp1 = MultiPoint([self.p1, self.p2])
self.line1 = LineString([(3, 3), (4, 4)])
def test_collect_single(self):
result = collect(self.p1)
assert self.p1.equals(result)
def test_collect_single_force_multi(self):
result = collect(self.p1, multi=True)
expected = MultiPoint([self.p1])
assert expected.equals(result)
def test_collect_multi(self):
result = collect(self.mp1)
assert self.mp1.equals(result)
def test_collect_multi_force_multi(self):
result = collect(self.mp1)
assert self.mp1.equals(result)
def test_collect_list(self):
result = collect([self.p1, self.p2, self.p3])
assert self.mpc.equals(result)
def test_collect_GeoSeries(self):
s = GeoSeries([self.p1, self.p2, self.p3])
result = collect(s)
assert self.mpc.equals(result)
def test_collect_mixed_types(self):
with pytest.raises(ValueError):
collect([self.p1, self.line1])
def test_collect_mixed_multi(self):
with pytest.raises(ValueError):
collect([self.mpc, self.mp1])
@pytest.mark.skipif(PYPROJ_LT_231, reason="segfault")
def test_epsg_from_crs(self):
with pytest.warns(FutureWarning):
assert epsg_from_crs({"init": "epsg:4326"}) == 4326
assert epsg_from_crs({"init": "EPSG:4326"}) == 4326
assert epsg_from_crs("+init=epsg:4326") == 4326
@pytest.mark.skipif(PYPROJ_LT_231, reason="segfault")
def test_explicit_crs_from_epsg(self):
with pytest.warns(FutureWarning):
assert explicit_crs_from_epsg(epsg=4326) == CRS.from_epsg(4326)
assert explicit_crs_from_epsg(epsg="4326") == CRS.from_epsg(4326)
assert explicit_crs_from_epsg(crs={"init": "epsg:4326"}) == CRS.from_dict(
{"init": "epsg:4326"}
)
assert explicit_crs_from_epsg(crs="+init=epsg:4326") == CRS.from_proj4(
"+init=epsg:4326"
)
@pytest.mark.filterwarnings("ignore:explicit_crs_from_epsg:FutureWarning")
def test_explicit_crs_from_epsg__missing_input(self):
with pytest.raises(ValueError):
explicit_crs_from_epsg()
| 34.348837 | 86 | 0.659783 |
7e36ad78a0505b2b2f4fc2f3b69e3cd8b1fd9d0e | 5,526 | py | Python | progressive_billing/progressive_billing/progressive_billing.py | hafeeserp/Progressive-Billing | 8e6f38b5a76bf432629a331ee3df174fe5366b85 | [
"MIT"
] | null | null | null | progressive_billing/progressive_billing/progressive_billing.py | hafeeserp/Progressive-Billing | 8e6f38b5a76bf432629a331ee3df174fe5366b85 | [
"MIT"
] | null | null | null | progressive_billing/progressive_billing/progressive_billing.py | hafeeserp/Progressive-Billing | 8e6f38b5a76bf432629a331ee3df174fe5366b85 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe.utils import cint, flt
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.accounts.doctype.sales_invoice.pos import update_multi_mode_option
from erpnext.controllers.selling_controller import SellingController
from erpnext.accounts.utils import get_account_currency
from erpnext.stock.doctype.delivery_note.delivery_note import update_billed_amount_based_on_so
from erpnext.projects.doctype.timesheet.timesheet import get_projectwise_timesheet_data
from erpnext.assets.doctype.asset.depreciation \
import get_disposal_account_and_cost_center, get_gl_entries_on_asset_disposal
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos, get_delivery_note_serial_no
from erpnext.setup.doctype.company.company import update_company_current_month_sales
from erpnext.accounts.general_ledger import get_round_off_account_and_cost_center
from erpnext import get_company_currency, get_default_company
from erpnext.projects.doctype.project.project import Project
@frappe.whitelist()
def get_progressive_invoice_data(sales_order='', project='',test=''):
pgrsve_data = frappe.db.sql("""select sum(grand_total),sum(outstanding_amount) from `tabSales Invoice` where project_progressive=%s and sales_order=%s and docstatus=1""",(project,sales_order))[0]
if pgrsve_data:
return {'total_invoiced':pgrsve_data[0],'outstanding_amount':pgrsve_data[1]}
else:
return {'total_invoiced':0.00,'outstanding_amount':0.00}
@frappe.whitelist()
def get_item_data(sales_order='', project='',test=''):
task_data = frappe.db.sql("""select subject,progress,rate,qty,item_amount,uom from `tabTask` where project=%s""",project)
lst_item_data = []
default_uom = frappe.db.get_single_value('Stock Settings','stock_uom')
income_account = frappe.db.get_value('Company',get_default_company(),'default_income_account')
for task in task_data:
item_data = {}
sales_invoices = frappe.db.sql("""select name from `tabSales Invoice` where sales_order=%s and docstatus = 1""",sales_order)
invoice_list = []
if sales_invoices:
for invoice in sales_invoices:
invoice_list.append(invoice[0])
#frappe.msgprint(frappe.as_json(invoice_list))
#total_progress = frappe.db.sql("""select sum(progress) from `tabSales Invoice Item` where parent in (%s) and item_name=%s and sales_order=%s""" %(', '.join(["%s"]*len(invoice_list))), tuple(invoice_list),'%s','%s'),)[0][0]
#total_progress = frappe.db.sql("""select sum(progress) from `tabSales Invoice Item` where sales_order=%s and item_name=%s""",(sales_order,task[0]))[0][0]
#items = list(set([d for d in invoice_list]))
items = invoice_list
if items:
total_progress = frappe.db.sql("""select sum(progress) from `tabSales Invoice Item` where item_name=%s and sales_order=%s and parent in ({0})""".format(", ".join(["%s"] * len(items))), [task[0]] +[sales_order]+items)[0][0]
else:
total_progress = 0
item_data['subject']=task[0]
if total_progress:
item_data['current_progress']=task[1]
item_data['progress']=task[1]-total_progress
else:
item_data['current_progress']=0
item_data['progress']=task[1]
item_data['uom'] = task[5]
item_data['income_account'] = income_account
item_data['rate'] = task[2]
item_data['item_rate'] = task[4]
item_data['qty'] = task[3]
lst_item_data.append(item_data)
return tuple(lst_item_data)
@frappe.whitelist()
def make_project(source_name, target_doc=None):
def postprocess(source, doc):
doc.project_type = "External"
doc.project_name = source.name
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Project",
"validation": {
"docstatus": ["=", 1]
},
"field_map":{
"name" : "sales_order",
"base_grand_total" : "estimated_costing",
}
},
"Sales Order Item": {
"doctype": "Project Task",
"field_map": {
"item_name": "title",
"qty":"qty",
"amount":"rate",
"rate":"item_amount",
"uom":"uom"
},
}
}, target_doc, postprocess)
return doc
def validate_app(doc,method):
Project.validate = validate
def validate(self):
task_total = 0
for task in self.tasks:
task_total += task.rate
self.validate_project_name()
self.validate_dates()
self.validate_weights()
total_sales_amount = frappe.db.sql("""select grand_total from `tabSales Order` where name=%s""",self.sales_order)[0]
if total_sales_amount:
if task_total > total_sales_amount[0]:
frappe.throw(_("Task Total Can Not Be Greater Than Sales Order Amount"))
self.sync_tasks()
self.tasks = []
self.send_welcome_email()
'''total_sales_amount = frappe.db.sql("""select grand_total from `tabSales Order` where name=%s""",self.sales_order)[0]
if total_sales_amount:
if task_total > total_sales_amount[0]:
frappe.throw(_("Task Total Can Not Be Greater Than Sales Order Amount"))'''
| 42.183206 | 239 | 0.696526 |
36b6f40e317d94e4c15356713f64478915c17112 | 10,910 | py | Python | test/azure/low-level/Expected/AcceptanceTests/AzureSpecialsLowLevel/azurespecialpropertieslowlevel/rest/skip_url_encoding/_request_builders.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | null | null | null | test/azure/low-level/Expected/AcceptanceTests/AzureSpecialsLowLevel/azurespecialpropertieslowlevel/rest/skip_url_encoding/_request_builders.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | null | null | null | test/azure/low-level/Expected/AcceptanceTests/AzureSpecialsLowLevel/azurespecialpropertieslowlevel/rest/skip_url_encoding/_request_builders.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 1 | 2022-03-28T08:58:03.000Z | 2022-03-28T08:58:03.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.rest import HttpRequest
from msrest import Serializer
from ..._vendor import _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
_SERIALIZER = Serializer()
# fmt: off
def build_get_method_path_valid_request(
unencoded_path_param, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded path parameter with value 'path1/path2/path3'.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:param unencoded_path_param: Unencoded path parameter with value 'path1/path2/path3'.
:type unencoded_path_param: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/method/path/valid/{unencodedPathParam}'
path_format_arguments = {
"unencodedPathParam": _SERIALIZER.url("unencoded_path_param", unencoded_path_param, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_path_valid_request(
unencoded_path_param, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded path parameter with value 'path1/path2/path3'.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:param unencoded_path_param: Unencoded path parameter with value 'path1/path2/path3'.
:type unencoded_path_param: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/path/path/valid/{unencodedPathParam}'
path_format_arguments = {
"unencodedPathParam": _SERIALIZER.url("unencoded_path_param", unencoded_path_param, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_swagger_path_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded path parameter with value 'path1/path2/path3'.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword unencoded_path_param: An unencoded path parameter with value 'path1/path2/path3'. The
default value is "path1/path2/path3". Note that overriding this default value may result in
unsupported behavior.
:paramtype unencoded_path_param: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
unencoded_path_param = kwargs.pop('unencoded_path_param', "path1/path2/path3") # type: str
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/swagger/path/valid/{unencodedPathParam}'
path_format_arguments = {
"unencodedPathParam": _SERIALIZER.url("unencoded_path_param", unencoded_path_param, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_method_query_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded query parameter with value 'value1&q2=value2&q3=value3'.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword q1: Unencoded query parameter with value 'value1&q2=value2&q3=value3'.
:paramtype q1: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
q1 = kwargs.pop('q1') # type: str
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/method/query/valid'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['q1'] = _SERIALIZER.query("q1", q1, 'str', skip_quote=True)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_method_query_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded query parameter with value null.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword q1: Unencoded query parameter with value null.
:paramtype q1: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
q1 = kwargs.pop('q1', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/method/query/null'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if q1 is not None:
query_parameters['q1'] = _SERIALIZER.query("q1", q1, 'str', skip_quote=True)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_path_query_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded query parameter with value 'value1&q2=value2&q3=value3'.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword q1: Unencoded query parameter with value 'value1&q2=value2&q3=value3'.
:paramtype q1: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
q1 = kwargs.pop('q1') # type: str
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/path/query/valid'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['q1'] = _SERIALIZER.query("q1", q1, 'str', skip_quote=True)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_swagger_query_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get method with unencoded query parameter with value 'value1&q2=value2&q3=value3'.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword q1: An unencoded query parameter with value 'value1&q2=value2&q3=value3'. The default
value is "value1&q2=value2&q3=value3". Note that overriding this default value may result in
unsupported behavior.
:paramtype q1: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
q1 = kwargs.pop('q1', "value1&q2=value2&q3=value3") # type: str
accept = "application/json"
# Construct URL
url = '/azurespecials/skipUrlEncoding/swagger/query/valid'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['q1'] = _SERIALIZER.query("q1", q1, 'str', skip_quote=True)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
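# Illustrative usage sketch (not part of the generated file). `client` below is
# assumed to be the package's generated client, or any object exposing a
# `send_request` method:
#
#   request = build_get_method_query_valid_request(q1="value1&q2=value2&q3=value3")
#   response = client.send_request(request)
#   response.raise_for_status()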
| 35.888158 | 116 | 0.681027 |
352fbd3ba304831c09b1a6f8d6d73b5e52771b6f | 58 | py | Python | HackerRank Python Solutions/001. Say Hello World with Python.py | aravind-alpha/Hackerrank-Solutions | 489ea4760d0f67bf257d2f20320e24f1cba5ec11 | [
"MIT"
] | 1 | 2021-05-02T06:05:35.000Z | 2021-05-02T06:05:35.000Z | HackerRank Python Solutions/001. Say Hello World with Python.py | aravind-alpha/Hackerrank-Solutions | 489ea4760d0f67bf257d2f20320e24f1cba5ec11 | [
"MIT"
] | null | null | null | HackerRank Python Solutions/001. Say Hello World with Python.py | aravind-alpha/Hackerrank-Solutions | 489ea4760d0f67bf257d2f20320e24f1cba5ec11 | [
"MIT"
] | 2 | 2021-12-01T06:42:21.000Z | 2021-12-29T17:12:53.000Z | # Say "Hello,World!" with Python:
print("Hello, World!")
| 14.5 | 33 | 0.655172 |
ae2029dbb5d1cd2c9778b4125ff645bb4ad95980 | 7,944 | py | Python | http_server/manage_data_handler.py | JX-Wang/obtaining_domain_valid_dns | fac037fd0be6dd4f1a6ca1a9f1d5cd71aa25cd72 | [
"MIT"
] | 1 | 2019-07-02T02:44:55.000Z | 2019-07-02T02:44:55.000Z | http_server/manage_data_handler.py | JX-Wang/obtaining_domain_valid_dns | fac037fd0be6dd4f1a6ca1a9f1d5cd71aa25cd72 | [
"MIT"
] | null | null | null | http_server/manage_data_handler.py | JX-Wang/obtaining_domain_valid_dns | fac037fd0be6dd4f1a6ca1a9f1d5cd71aa25cd72 | [
"MIT"
] | null | null | null | # encoding:utf-8
"""
Master node control functionality
"""
import tornado.web
import hashlib
import time
import json
from system_parameter import *
from Logger import Logger
from async_fetch import async_fetch,async_post
from tornado import gen
logger = Logger(file_path='./query_log/',show_terminal=True) # logging configuration
class RespDomainResultHandler(tornado.web.RequestHandler):
"""
    Return the contents of the requested file, looked up by file name.
"""
def get(self, file_name):
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Disposition', 'attachment; filename=' + file_name)
with open("./verified_domain_data/" + file_name, "r") as f:
while True:
data = f.read(1024)
if not data:
break
self.write(data)
        self.finish() # remember to call finish()
class TaskConfirmHandler(tornado.web.RequestHandler):
"""
    Receive probe-completion requests and notify the remote server of the finished task id and the URL of the result file.
"""
def save_file(self, domain_ns, file_name):
"""
        Save the data to a local file under the given file name.
"""
path = './verified_domain_data/'
with open(path+file_name,'w') as fp:
fp.write(domain_ns)
@gen.coroutine
def post(self):
"""
        Handle the POST request sent when a probe task finishes: save the result to a local file, then send the file's URL to the remote server.
"""
param = self.request.body.decode("utf-8")
param = json.loads(param)
        print param
        file_name = param['file_name'] # result file name
        task_id = param['task_id'] # task id
        domain_ns = param['domain_ns'] # domain NS data
        task_type = param['task_type'] # task type, sec/query
        self.save_file(domain_ns, file_name) # save the domain NS data to a local file
        file_md5 = hashlib.md5(domain_ns.encode("utf-8")).hexdigest() # compute the md5 digest
        ip, port = read_server('../system.conf') # read the master server ip address
        remote_ip, remote_port = read_remote_ip('../system.conf') # remote server IP address
        remote_url = "http://{ip}:{port}/notify/{task_type}/result_list".format(ip=remote_ip, port=str(remote_port), task_type=task_type) # remote notification URL
        file_url = "http://{ip}:{port}/file/{file_name}".format(ip=ip, port=str(port), file_name=file_name) # URL where the result file is served
post_body = {
"id": task_id,
"time": time.time(),
"file_url": file_url,
"file_md5": file_md5
}
print remote_url
        for i in range(3): # retry at most 3 times
respond = yield async_post(remote_url,
json_data=post_body,
data_type="json")
if "ERROR" in respond:
                exception = respond[1]
                logger.logger.error('Failed to send the domain NS result file to the remote server: '+str(exception))
return
resp_code = respond['code']
if resp_code == 1:
                logger.logger.info('Remote server received the domain NS result file successfully')
break
else:
                logger.logger.warning('Remote server failed to receive the domain NS result file, attempt '+str(i)+'/3')
'''
try:
response = requests.post(url=remote_url, json=post_body)
except requests.exceptions.RequestException, e:
                logger.logger.error('Failed to send the domain NS result file to the remote server: '+str(e))
                return
            resp_code = json.loads(response.content)['code']
            if resp_code == 1: # 1 means the remote server received the file successfully
                logger.logger.info('Remote server received the domain NS result file successfully')
                break
            else:
                logger.logger.warning('Remote server failed to receive the domain NS result file, attempt '+str(i)+'/3')
'''
class RecvDomainRequestHandler(tornado.web.RequestHandler):
"""
    Receive realtime / non-realtime domain verification requests from the remote server.
"""
@gen.coroutine
def post(self, task_type):
param = self.request.body.decode('utf-8')
param = json.loads(param)
try:
file_url = param['file_url']
task_id = param['id']
request_time = param['time']
file_md5 = param['file_md5']
        except Exception, e: # failed to parse the request
            logger.logger.error('Request body does not match the expected format: '+str(e))
            self.write({'time': time.time(), 'code': 2}) # request body does not match the expected format
self.finish()
return
domain_data = yield async_fetch(file_url, "text")
if "ERROR" in domain_data:
exception = str(domain_data[1])
            logger.logger.error('Failed to fetch the domains to probe: ' + exception)
            self.write({'time': request_time, 'code': 2}) # fetch failed
self.finish()
return
'''
try:
            domain_data = requests.get(file_url).content # fetch the domain data to probe
        except Exception, e: # failed to fetch the domain data
            logger.logger.error('Failed to fetch the domains to probe: '+str(e))
            self.write({'time': request_time, 'code': 2}) # fetch failed
self.finish()
return
'''
        domain_md5 = hashlib.md5(domain_data.encode("utf-8")).hexdigest() # md5 digest of the fetched data
        # check whether the data matches the advertised checksum
if domain_md5 == file_md5:
if task_type in ('sec', 'query'):
                self.write({'time': request_time, 'code': 1}) # checksum matches
self.finish()
else:
                logger.logger.error('Invalid query type: ' + str(task_type))
self.write({'time': request_time, 'code': 2})
self.finish()
return
else:
            logger.logger.error('Checksum mismatch for the domain data')
            self.write({'time': request_time, 'code': 2}) # checksum mismatch
self.finish()
return
        original_file_name = file_url.split('/')[-1] # name of the remote file
        local_file_name = original_file_name + '_' + task_type + '_' + str(task_id) # file name for the local copy
        with open("./unverified_domain_data/" + local_file_name, "w") as f: # save the domains to be verified locally
f.writelines(domain_data)
if task_type == 'sec':
            periodic_domain_request(domain_data, task_id, local_file_name) # dispatch to the periodic query node
elif task_type == 'query':
            query_domain_request(domain_data, task_id, local_file_name) # dispatch to the realtime query node
@gen.coroutine
def query_domain_request(domains, task_id, file_name):
"""
    Pass the domains that need realtime querying to the realtime query node (http_client_realtime).
"""
    query_ip, port = read_client_realtime('../system.conf') # get the ip and port of the realtime probe node
url = 'http://' + query_ip + ':' + str(port)+'/domain_ns_realtime/'
request_data = {
'domains': domains,
'id': task_id,
'file_name': file_name
}
for i in range(3):
respond = yield async_post(url, json_data=request_data, data_type="str")
if respond == 'OK':
break
elif "ERROR" in respond:
excpetion = respond[1]
        logger.logger.error('Failed to send domain data to the realtime query node, attempt %s/3' % str(i))
'''
try:
respond = requests.post(url, json=request_data, timeout=5)
if respond.text == 'OK':
break
        logger.logger.error('Failed to send domain data to the realtime query node, attempt %s/3' % str(i))
    except requests.exceptions.RequestException, e:
        logger.logger.error('Failed to connect to the realtime node: ' + str(e))
'''
@gen.coroutine
def periodic_domain_request(domains, task_id, file_name):
"""
    Pass the domains that need periodic querying to the periodic query node (http_client_sec).
"""
    periodic_ip, port = read_client_periodic('../system.conf') # get the ip address and port of the periodic probe node
url = 'http://' + periodic_ip + ':' + str(port) +'/domain_ns_periodic/'
request_data = {
'domains': domains,
'id': task_id,
'file_name': file_name
}
for i in range(3):
respond = yield async_post(url, json_data=request_data, data_type="str")
if respond == 'OK':
break
elif "ERROR" in respond:
excpetion = respond[1]
        logger.logger.error('Failed to send domain data to the periodic query node, attempt %s/3' % str(i))
'''
try:
respond = requests.post(url, json=request_data, timeout=5)
if respond.text == 'OK':
break
        logger.logger.error('Failed to send domain data to the periodic query node, attempt %s/3' % str(i))
    except requests.exceptions.RequestException, e:
        logger.logger.error('Failed to connect to the periodic node: ' + str(e))
''' | 34.094421 | 148 | 0.55929 |
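# Illustrative only (not part of the original module): shape of the JSON body a
# probe worker would POST back to TaskConfirmHandler. The URL below is a
# placeholder assumption; the actual route is defined in the server setup.
#
#   import requests
#   requests.post('http://<master_ip>:<port>/<task_confirm_route>', json={
#       'file_name': 'domains_batch_001.txt',              # result file name
#       'task_id': 42,                                     # id of the finished task
#       'task_type': 'query',                              # 'sec' (periodic) or 'query' (realtime)
#       'domain_ns': 'example.com ns1.dns-example.net\n',  # domain NS results
#   })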
b93d11843e4ad40beda64350aed7be28a2fd85ab | 953 | py | Python | setup.py | maroba/physix | be8b6aabb8b784511906e4a0e531db10072137a5 | [
"MIT"
] | null | null | null | setup.py | maroba/physix | be8b6aabb8b784511906e4a0e531db10072137a5 | [
"MIT"
] | null | null | null | setup.py | maroba/physix | be8b6aabb8b784511906e4a0e531db10072137a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import exists
from setuptools import setup, find_packages
author = 'Matthias Baer'
email = 'matthias.r.baer@googlemail.com'
description = 'A Physics toolbox in Python'
name = 'physix'
year = '2022'
url = 'https://github.com/maroba/physix'
version = '0.0.2'
setup(
name=name,
author=author,
author_email=email,
url=url,
version=version,
packages=find_packages(),
package_dir={name: name},
include_package_data=True,
license='MIT',
description=description,
long_description=open('README.md').read() if exists('README.md') else '',
long_description_content_type="text/markdown",
install_requires=['sphinx', 'numpy', 'sympy', 'scipy'
],
python_requires=">=3.6",
classifiers=['Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
platforms=['ALL'],
)
| 27.228571 | 77 | 0.636936 |
0f7eaf2c49e0736fd3b67d09243e8fba6d038f8c | 6,296 | py | Python | docs/source/conf.py | RCTimms/rockets | 61099b44b306ab8dabf746c4aa6e1ad6d118823d | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | RCTimms/rockets | 61099b44b306ab8dabf746c4aa6e1ad6d118823d | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | RCTimms/rockets | 61099b44b306ab8dabf746c4aa6e1ad6d118823d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rockets documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'sphinx_copybutton',
]
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'rockets'
copyright = '2019, Ryan Timms'
author = 'Ryan Timms'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import rockets
# The short X.Y version.
version = rockets.__version__
# The full version, including alpha/beta/rc tags.
release = rockets.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'rockets'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rockets.tex', 'rockets Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rockets', 'rockets Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rockets', 'rockets Documentation',
author, 'rockets', 'Python package for designing rockets.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib': ('https://matplotlib.org', None),
}
| 31.014778 | 135 | 0.695362 |
86c9121072ecb48bdf98f6b415c37e862c6f6c5e | 2,761 | py | Python | custom_components/pvoutput_exporter/config_flow.py | jmbrunskill/ha-energy-pvoutput | b5a054121f80113f358eb7b24424e1d66e0de9e8 | [
"MIT"
] | null | null | null | custom_components/pvoutput_exporter/config_flow.py | jmbrunskill/ha-energy-pvoutput | b5a054121f80113f358eb7b24424e1d66e0de9e8 | [
"MIT"
] | null | null | null | custom_components/pvoutput_exporter/config_flow.py | jmbrunskill/ha-energy-pvoutput | b5a054121f80113f358eb7b24424e1d66e0de9e8 | [
"MIT"
] | null | null | null | """Config flow for PV Output Exporter."""
from __future__ import annotations
from typing import Any
from homeassistant.helpers.config_validation import boolean
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_NAME, CONF_API_KEY
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .const import CONF_SYSTEM_ID, DOMAIN
class PVOutputExporterFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for PVOutput Exporter."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> PVOutputExporterOptionFlowHandler:
"""Get the options flow for this handler."""
        return PVOutputExporterOptionFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
if user_input is not None:
return self.async_create_entry(
title=user_input[CONF_NAME],
data = {},
options={
CONF_API_KEY: user_input[CONF_API_KEY],
CONF_SYSTEM_ID: user_input[CONF_SYSTEM_ID]
},
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=self.hass.config.location_name
): str,
vol.Required(CONF_API_KEY, default=""): str,
vol.Required(CONF_SYSTEM_ID, default=""): str,
}
),
)
class PVOutputExporterOptionFlowHandler(OptionsFlow):
"""Handle options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(
CONF_API_KEY,
default=self.config_entry.options.get(CONF_API_KEY),
): str,
vol.Required(
CONF_SYSTEM_ID,
default=self.config_entry.options.get(CONF_SYSTEM_ID),
): str,
}
),
)
| 31.375 | 78 | 0.5795 |
fce0796ef4d9cf4b9c3c117385f798580905513d | 17,669 | py | Python | cliquetree.py | georgeAO/IterativeCoarsening | 0bf22fd74b93218fac6116c0a52332f36d29d26e | [
"MIT"
] | null | null | null | cliquetree.py | georgeAO/IterativeCoarsening | 0bf22fd74b93218fac6116c0a52332f36d29d26e | [
"MIT"
] | null | null | null | cliquetree.py | georgeAO/IterativeCoarsening | 0bf22fd74b93218fac6116c0a52332f36d29d26e | [
"MIT"
] | null | null | null | from copy import deepcopy
import networkx as nx
from networkx import NetworkXNoPath
'''
Copyright (c) 2016, cliquetree contributors,
Charalampos Mavroforakis <cmav@bu.edu>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
class CliqueTree:
"""Defines the data structure that will be used to decide if edge addition
respects graph chordality.
"""
def __init__(self):
self.G = nx.Graph()
self.cliquetree = nx.Graph()
self.node_in_cliques = {} # cliques in which the node participates in
self.nodes_in_clique = {} # the set of nodes in each clique
self.uid = 1
self.insertable = set()
self.deletable = set()
def __deepcopy__(self, memo):
obj = CliqueTree()
obj.G = deepcopy(self.G, memo)
obj.cliquetree = deepcopy(self.cliquetree, memo)
obj.node_in_cliques = deepcopy(self.node_in_cliques, memo)
obj.nodes_in_clique = deepcopy(self.nodes_in_clique, memo)
obj.uid = self.uid
obj.insertable = deepcopy(self.insertable, memo)
obj.deletable = deepcopy(self.deletable, memo)
return obj
def copy(self):
return deepcopy(self)
def _clique_is_maximal(self, nodes):
"""Returns True if the list of given nodes form a maximal clique
"""
nodeset = set(nodes)
if not nodeset and len(self.G) > 0:
# empty clique is not maximal, unless graph is empty
return False
checked_neighbors = set()
for x in nodes:
neighbors_x = set(self.G[x])
# check if nodeset is clique for x
if len(nodeset - neighbors_x) > 1:
return False
for neighbor in neighbors_x:
if neighbor in checked_neighbors or neighbor in nodeset:
continue
two_hop_neighbors = set(self.G[neighbor])
if len(two_hop_neighbors.intersection(nodeset)) == len(nodeset):
return False
else:
checked_neighbors.add(neighbor)
return True
def _add_clique_node(self, uid, nodes):
"""Adds a new node in the cliquetree that represents a given node set.
"""
self.cliquetree.add_node(uid)
if uid not in self.nodes_in_clique:
self.nodes_in_clique[uid] = set()
for node in nodes:
self.nodes_in_clique[uid].add(node)
if node not in self.node_in_cliques:
self.node_in_cliques[node] = set()
self.node_in_cliques[node].add(uid)
def add_edge(self, x, y, update_insertable=True):
"""Adds an edge to the clique tree and updates the data structures.
"""
# Start by checking if the edge can be inserted
# if not self.query_edge(e):
# return False
if self._edge(x, y) in self.G.edges():
return
if x in self.G:
neighbors_x = set(self.G[x])
else:
neighbors_x = set()
self.G.add_node(x)
if y in self.G:
neighbors_y = set(self.G[y])
else:
neighbors_y = set()
self.G.add_node(y)
K1 = None
if x in self.node_in_cliques:
for clq in self.node_in_cliques[x]:
K1 = clq
break
K2 = None
if y in self.node_in_cliques:
for clq in self.node_in_cliques[y]:
K2 = clq
break
changed_edges = []
if (K1 and not K2) or (not K1 and K2):
self._add_clique_node(self.uid, neighbors_x.intersection(neighbors_y).union(set([x, y])))
if K1 and not K2:
sep = self.nodes_in_clique[K1]\
.intersection(self.nodes_in_clique[self.uid])
self.cliquetree.add_edge(K1, self.uid, nodes=sep)
changed_edges.append((K1, self.uid))
elif not K1 and K2:
sep = self.nodes_in_clique[K2]\
.intersection(self.nodes_in_clique[self.uid])
self.cliquetree.add_edge(K2, self.uid, nodes=sep)
changed_edges.append((K2, self.uid))
elif K1 and K2:
Kx = None
Ky = None
# figure out Kx and Ky
try:
path = nx.shortest_path(self.cliquetree, source=K1, target=K2)
min_edge_weight = 1e100
min_edge = None
first_node = True
found_Kx = False
for clq1, clq2 in zip(path[:-1], path[1:]):
if first_node:
if x in self.nodes_in_clique[clq1]:
Kx = clq1
first_node = False
if not Ky:
if y in self.nodes_in_clique[clq2]:
Ky = clq2
if x in self.nodes_in_clique[clq2]:
Kx = clq2
else:
# first time to not find x in clq2, Kx = clq1
found_Kx = True
if found_Kx:
sep = self.cliquetree[clq1][clq2]['nodes']
if len(sep) < min_edge_weight:
min_edge_weight = len(sep)
min_edge = (clq1, clq2)
if found_Kx and Ky:
break
except NetworkXNoPath:
# The two nodes belong to disconnected components, so it
# is safe to add the edge.
# sep = self.nodes_in_clique[K1]\
# .intersection(self.nodes_in_clique[K2])
# self.cliquetree.add_edge(K1, K2, nodes=sep)
# changed_edges.append((K1, K2))
Kx = K1
Ky = K2
min_edge_weight = 0
Kx_nodes = self.nodes_in_clique[Kx]
Ky_nodes = self.nodes_in_clique[Ky]
I = Kx_nodes.intersection(Ky_nodes)
if Ky not in self.cliquetree[Kx]:
if min_edge_weight > len(I):
return False
if Ky in self.cliquetree[Kx] or (min_edge_weight == len(I) and
Ky not in self.cliquetree[Kx] and min_edge_weight > 0):
# replace min_edge with (Kx, Ky)
self.cliquetree.remove_edge(*min_edge)
c1, c2 = self._edge(Kx, Ky)
self.cliquetree.add_edge(c1, c2, nodes=I)
# Step 2
# Add the cliquetree node now, because we might have aborted above
self._add_clique_node(self.uid,
I.union(set([x, y])))
edge_to_remove = self._edge(Kx, Ky)
if Ky in self.cliquetree[Kx]:
self.cliquetree.remove_edge(*edge_to_remove)
to_remove = []
to_keep = []
for clq in [Kx, Ky]:
clq_nodes = self.nodes_in_clique[clq]
if len(clq_nodes) > len(I) + 1:
to_keep.append(clq)
else:
to_remove.append(clq)
for clq in to_remove:
# clq is not maximal in the new graph
for v in self.cliquetree[clq]:
if v == self.uid or v in [Kx, Ky]:
continue
sep = self.nodes_in_clique[v]\
.intersection(self.nodes_in_clique[self.uid])
self.cliquetree.add_edge(v, self.uid, nodes=sep)
c1, c2 = self._edge(v, clq)
self.cliquetree.remove_node(clq)
del self.nodes_in_clique[clq]
for v in self.node_in_cliques:
if clq in self.node_in_cliques[v]:
self.node_in_cliques[v].remove(clq)
for clq in to_keep:
sep = self.nodes_in_clique[clq]\
.intersection(self.nodes_in_clique[self.uid])
self.cliquetree.add_edge(clq, self.uid, nodes=sep)
changed_edges.append((clq, self.uid))
else:
# not K1 and not K2
self._add_clique_node(self.uid,
neighbors_x.intersection(neighbors_y)
.union(set([x, y])))
# Update the actual graph
self.G.add_edge(x, y)
# if (x, y) in self.insertable:
# self.insertable.remove((x, y))
self.uid += 1
self.insertable = set()
if update_insertable:
for v in self.G:
self.update_insertable(v)
return True
def update_insertable(self, v, stop_at=None):
"""Updates the insertable edges in the graph.
For early stopping, set stop_at to k. Then, the function will return
after when k edges have been added to the insertable set.
"""
K1 = 0
Kx = None
cliques_visited = set()
nodes_seen = []
min_weights = []
v_cliques = self.node_in_cliques[v]
for clq in self.node_in_cliques[v]:
K1 = clq
break
cliques_visited.add(K1)
for clq1, clq2, data in \
nx.dfs_labeled_edges(self.cliquetree, source=K1):
            if data == 'nontree' or (clq1 == K1 and clq2 == K1):
continue
clq_min, clq_max = self._edge(clq1, clq2)
sep = self.cliquetree[clq_min][clq_max]['nodes']
            if data == 'forward':
cliques_visited.add(clq2)
if clq1 in v_cliques and clq2 not in v_cliques:
Kx = clq1
Kx_nodes = self.nodes_in_clique[clq1]
if Kx:
w_e = len(sep)
if not min_weights or w_e <= min_weights[-1]:
# w(e) = w(x, y)
min_weights.append(w_e)
# is it a possible Ky?
Ky_nodes = self.nodes_in_clique[clq2]
if min_weights[-1] == len(Kx_nodes.intersection(Ky_nodes)):
for u in self.nodes_in_clique[clq2]:
if (not nodes_seen or u not in nodes_seen[-1]) \
and u not in self.G[v] and u != v:
# Ky for u
self.insertable.add(self._edge(u, v))
if u == v:
raise ValueError('u is equal to v')
if stop_at is not None and \
len(self.insertable) >= stop_at:
return
else:
min_weights.append(min_weights[-1])
if nodes_seen:
seen_previous = nodes_seen[-1]
else:
seen_previous = set()
nodes_seen.append(self.nodes_in_clique[clq2]
.union(seen_previous))
            elif data == 'reverse':
first_Kx = False
if clq1 in v_cliques and clq2 not in v_cliques:
Kx = None
Kx_nodes = None
first_Kx = True
if Kx is not None or first_Kx:
min_weights.pop()
nodes_seen.pop()
for clq in self.cliquetree:
# if clique is in another component, edge is insertable
if clq not in cliques_visited:
for u in self.nodes_in_clique[clq]:
self.insertable.add(self._edge(u, v))
if stop_at is not None and \
len(self.insertable) >= stop_at:
return
def update_deletable(self):
self.deletable = set()
for u_index, u in enumerate(self.G):
for v_index, v in enumerate(self.G):
if u_index >= v_index:
continue
if self._edge(u, v) in self.deletable:
continue
clq_u = self.node_in_cliques[u]
clq_v = self.node_in_cliques[v]
if len(clq_u.intersection(clq_v)) == 1:
self.deletable.add(self._edge(u, v))
def from_graph(self, G):
self.G = G.copy()
cliques = nx.clique.find_cliques(G)
cliquegraph = nx.clique.make_max_clique_graph(G)
clique_dict = {}
for v, clq in zip(cliquegraph.nodes(), cliques):
clique_dict[v] = clq
for u, v, data in cliquegraph.edges(data=True):
cliquegraph.remove_edge(u, v)
sep = set(clique_dict[u]).intersection(set(clique_dict[v]))
w = len(sep)
cliquegraph.add_edge(u, v, nodes=sep, weight=-w)
self.cliquetree = nx.minimum_spanning_tree(cliquegraph)
for v in self.G:
self.node_in_cliques[v] = set()
for v in clique_dict:
self.nodes_in_clique[v] = set()
for node in clique_dict[v]:
self.nodes_in_clique[v].add(node)
self.node_in_cliques[node].add(v)
self.uid = len(G) + 1
self.insertable = set()
for v in self.G:
self.update_insertable(v)
def remove_edge(self, u, v):
Kx = self.node_in_cliques[u].intersection(self.node_in_cliques[v])
if len(Kx) == 0:
raise ValueError('Edge (%s, %s) was not found in the graph.' %
(u, v))
if len(Kx) > 1:
raise ValueError('Edge (%s, %s) belongs to more than one cliques' %
(u, v))
(Kx, ) = Kx # get single element from the intersection
Kux_nodes = set(self.nodes_in_clique[Kx])
Kux_nodes.remove(v)
Kvx_nodes = set(self.nodes_in_clique[Kx])
Kvx_nodes.remove(u)
Nu = []
Nv = []
Nuv = []
Kux = None
Kvx = None
for clq in self.cliquetree[Kx]:
found_u = False
found_v = False
clq_nodes = self.nodes_in_clique[clq]
if u in clq_nodes:
found_u = True
Nu.append(clq)
# if Kux is subset of clq, replace Kux with clq
if Kux_nodes.issubset(clq_nodes):
Kux = clq
elif v in clq_nodes:
found_v = True
Nv.append(clq)
# if Kvx is subset of clq, replace Kux with clq
if Kvx_nodes.issubset(clq_nodes):
Kvx = clq
if not found_u and not found_v:
Nuv.append(clq)
# Add to Kux all the nodes in Nu and the Nuv
Nu.extend(Nuv)
if Kux is None:
# there is at least one neighbor of Kux and
# Kux has not been replaces by any of its neighbors
self._add_clique_node(self.uid, Kux_nodes)
Kux = self.uid
self.uid += 1
if Kvx is None:
self._add_clique_node(self.uid, Kvx_nodes)
Kvx = self.uid
self.uid += 1
for clq in Nu:
if clq == Kux:
continue
clq_min, clq_max = self._edge(clq, Kx)
sep = self.cliquetree[clq_min][clq_max]['nodes']
self.cliquetree.add_edge(clq, Kux, nodes=sep)
for clq in Nv:
if clq == Kvx:
continue
clq_min, clq_max = self._edge(clq, Kx)
sep = self.cliquetree[clq_min][clq_max]['nodes']
self.cliquetree.add_edge(clq, Kvx, nodes=sep)
# Add an edge between Kux and Kvx
if Kux is not None and Kvx is not None:
sep = self.nodes_in_clique[Kux]\
.intersection(self.nodes_in_clique[Kvx])
if len(sep) > 0:
# the edge deletion will not disconnect the tree
clq_min, clq_max = self._edge(Kux, Kvx)
self.cliquetree.add_edge(clq_min, clq_max, nodes=sep)
# Delete Kx
self.cliquetree.remove_node(Kx)
del self.nodes_in_clique[Kx]
for t in self.node_in_cliques:
if Kx in self.node_in_cliques[t]:
self.node_in_cliques[t].remove(Kx)
self.G.remove_edge(u, v)
self.insertable = set()
for t in self.G:
self.update_insertable(t)
def query_edge(self, x, y):
e = self._edge(x, y)
if not self.insertable or e in self.insertable:
return True
return False
def _edge(self, x, y):
return (min(x, y), max(x, y))
def clique_tostr(self, v):
return ', '.join(map(str, list(self.nodes_in_clique[v])))
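# Minimal usage sketch (illustrative): grow a graph while keeping it chordal.
if __name__ == '__main__':
    ct = CliqueTree()
    ct.add_edge(1, 2)
    ct.add_edge(2, 3)
    ct.add_edge(1, 3)             # triangle {1, 2, 3}
    ct.add_edge(3, 4)             # hang node 4 off the triangle
    print(sorted(ct.insertable))  # edges whose insertion keeps the graph chordal
    print(ct.query_edge(1, 4))    # True: (1, 4) would just close triangle {1, 3, 4}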
| 39.975113 | 101 | 0.513442 |
26ba81263fac141d57986de6591fb54730875f7f | 204 | py | Python | python/flareapps/django/homepage/urls.py | flarebyte/wonderful-bazar | 810514cd7d73505b11d738f8b84d91842d18d074 | [
"MIT"
] | null | null | null | python/flareapps/django/homepage/urls.py | flarebyte/wonderful-bazar | 810514cd7d73505b11d738f8b84d91842d18d074 | [
"MIT"
] | null | null | null | python/flareapps/django/homepage/urls.py | flarebyte/wonderful-bazar | 810514cd7d73505b11d738f8b84d91842d18d074 | [
"MIT"
] | null | null | null | from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^(en|fr)/$', 'flareteam.homepage.views.index'),
(r'^(en|fr)/([A-Za-z0-9-_]+).html$', 'flareteam.homepage.views.section'),
)
| 25.5 | 77 | 0.627451 |
3fb4f541451cf2432a0223b611c777a803943b55 | 2,306 | py | Python | examples/spend_p2sh_transaction.py | doersf/python-bitcoin-utils | ab558513aba706d0215463fffc615772a955a142 | [
"MIT"
] | null | null | null | examples/spend_p2sh_transaction.py | doersf/python-bitcoin-utils | ab558513aba706d0215463fffc615772a955a142 | [
"MIT"
] | null | null | null | examples/spend_p2sh_transaction.py | doersf/python-bitcoin-utils | ab558513aba706d0215463fffc615772a955a142 | [
"MIT"
] | 2 | 2021-09-20T23:55:53.000Z | 2021-12-14T16:06:25.000Z | # Copyright (C) 2018-2020 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
from bitcoinutils.setup import setup
from bitcoinutils.utils import to_satoshis
from bitcoinutils.transactions import Transaction, TxInput, TxOutput
from bitcoinutils.keys import P2pkhAddress, P2shAddress, PrivateKey
from bitcoinutils.script import Script
def main():
# always remember to setup the network
setup('testnet')
#
# This script spends from a P2SH address containing a P2PK script
#
# create transaction input from tx id of UTXO (contained 0.1 tBTC)
txin = TxInput('7db363d5a7fabb64ccce154e906588f1936f34481223ea8c1f2c935b0a0c945b', 0)
# secret key needed to spend P2PK that is wrapped by P2SH
p2pk_sk = PrivateKey('cRvyLwCPLU88jsyj94L7iJjQX5C2f8koG4G2gevN4BeSGcEvfKe9')
p2pk_pk = p2pk_sk.get_public_key().to_hex()
# create the redeem script - needed to sign the transaction
redeem_script = Script([p2pk_pk, 'OP_CHECKSIG'])
to_addr = P2pkhAddress('n4bkvTyU1dVdzsrhWBqBw8fEMbHjJvtmJR')
txout = TxOutput(to_satoshis('0.09'), to_addr.to_script_pub_key() )
# no change address - the remaining 0.01 tBTC will go to miners)
# create transaction from inputs/outputs -- default locktime is used
tx = Transaction([txin], [txout])
# print raw transaction
print("\nRaw unsigned transaction:\n" + tx.serialize())
# use the private key corresponding to the address that contains the
# UTXO we are trying to spend to create the signature for the txin -
# note that the redeem script is passed to replace the scriptSig
sig = p2pk_sk.sign_input(tx, 0, redeem_script )
#print(sig)
# set the scriptSig (unlocking script)
txin.script_sig = Script([sig, redeem_script.to_hex()])
signed_tx = tx.serialize()
# print raw signed transaction ready to be broadcasted
print("\nRaw signed transaction:\n" + signed_tx)
print("\nTxId:", tx.get_txid())
if __name__ == "__main__":
main()
| 35.476923 | 89 | 0.736774 |
d6a2294729639aa006555cc91e50ad834050bff4 | 1,035 | py | Python | scripts/checktest.py | iondbproject/utility-scripts | 36f10931ec831a6805feff1766000709060b9c3b | [
"Apache-2.0"
] | null | null | null | scripts/checktest.py | iondbproject/utility-scripts | 36f10931ec831a6805feff1766000709060b9c3b | [
"Apache-2.0"
] | null | null | null | scripts/checktest.py | iondbproject/utility-scripts | 36f10931ec831a6805feff1766000709060b9c3b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import argparse
import re
import os
test_files = []
for root, dirnames, filenames in os.walk("src"):
for filename in filenames:
if re.search(r"test_.*\.c(?:pp)?", filename):
test_files.append(os.path.join(root, filename))
for filename in test_files:
with open(filename, "r") as f:
contents = f.read()
test_defns = re.findall(r"void\n(.*)\(\n\s*planck_unit_test_t \*.*?\n\)", contents)
if not test_defns:
print("Note: {} didn't have any valid test defns".format(filename))
continue
suite_adds = re.findall(r"PLANCK_UNIT_ADD_TO_SUITE\(.*?, (.*?)\);", contents)
if not suite_adds:
print("Note: {} didn't have any valid suite defns".format(filename))
continue
defn_set = set(test_defns)
suite_set = set(suite_adds)
missing_set = defn_set - suite_set
print("[{}]".format(filename))
for missing in missing_set:
print("\t{}".format(missing))
| 30.441176 | 91 | 0.592271 |
1cdee373f46fe7596e75b60fcce7cd397b5c5f46 | 2,647 | py | Python | external/vcm/vcm/calc/histogram.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 1 | 2021-12-14T23:43:35.000Z | 2021-12-14T23:43:35.000Z | external/vcm/vcm/calc/histogram.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 195 | 2021-09-16T05:47:18.000Z | 2022-03-31T22:03:15.000Z | external/vcm/vcm/calc/histogram.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | null | null | null | from typing import Any, Hashable, Mapping, Tuple
import numpy as np
import xarray as xr
def histogram(da: xr.DataArray, **kwargs) -> Tuple[xr.DataArray, xr.DataArray]:
"""Compute histogram and return tuple of counts and bin widths.
Args:
da: input data
kwargs: optional parameters to pass on to np.histogram
Return:
counts, bin_widths tuple of xr.DataArrays. The coordinate of both arrays is
equal to the left side of the histogram bins.
"""
coord_name = f"{da.name}_bins" if da.name is not None else "bins"
count, bins = np.histogram(da, **kwargs)
coords: Mapping[Hashable, Any] = {coord_name: bins[:-1]}
width = bins[1:] - bins[:-1]
width_da = xr.DataArray(width, coords=coords, dims=[coord_name])
count_da = xr.DataArray(count, coords=coords, dims=[coord_name])
if "units" in da.attrs:
count_da[coord_name].attrs["units"] = da.units
width_da[coord_name].attrs["units"] = da.units
width_da.attrs["units"] = da.units
if "long_name" in da.attrs:
count_da[coord_name].attrs["long_name"] = da.long_name
return count_da, width_da
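# Usage sketch for histogram(): normalize the returned counts into a probability
# density. `da` is an illustrative placeholder for any xr.DataArray (it is not
# defined in this module), and the bin count is arbitrary.
#     counts, widths = histogram(da, bins=50)
#     density = counts / (counts.sum() * widths)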
def histogram2d(
x: xr.DataArray, y: xr.DataArray, **kwargs
) -> Tuple[xr.DataArray, xr.DataArray, xr.DataArray]:
"""Compute 2D histogram and return tuple of counts and bin widths.
Args:
x: input data
y: input data
kwargs: optional parameters to pass on to np.histogram
Return:
counts, x_bin_widths, y_bin_widths tuple of xr.DataArrays. The coordinate of all
arrays is equal to the left side of the histogram bins.
"""
xcoord_name = f"{x.name}_bins" if x.name is not None else "xbins"
ycoord_name = f"{y.name}_bins" if y.name is not None else "ybins"
count, xedges, yedges = np.histogram2d(
x.values.ravel(), y.transpose(*x.dims).values.ravel(), **kwargs
)
xcoord: Mapping[Hashable, Any] = {xcoord_name: xedges[:-1]}
ycoord: Mapping[Hashable, Any] = {ycoord_name: yedges[:-1]}
xwidth = xedges[1:] - xedges[:-1]
ywidth = yedges[1:] - yedges[:-1]
xwidth_da = xr.DataArray(xwidth, coords=xcoord, dims=[xcoord_name])
ywidth_da = xr.DataArray(ywidth, coords=ycoord, dims=[ycoord_name])
count_da = xr.DataArray(
count, coords={**xcoord, **ycoord}, dims=[xcoord_name, ycoord_name]
)
if "units" in x.attrs:
xwidth_da[xcoord_name].attrs["units"] = x.units
xwidth_da.attrs["units"] = x.units
if "units" in y.attrs:
ywidth_da[ycoord_name].attrs["units"] = y.units
ywidth_da.attrs["units"] = y.units
return count_da, xwidth_da, ywidth_da
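# Usage sketch for histogram2d(): joint histogram of two co-located fields.
# `x_da` and `y_da` are illustrative placeholders for two xr.DataArrays sharing
# the same dims; neither is defined in this module.
#     counts, x_widths, y_widths = histogram2d(x_da, y_da, bins=30)
#     joint_density = counts / (counts.sum() * x_widths * y_widths)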
| 38.362319 | 88 | 0.656215 |
9d7253b2b76140f5ec0c964c08163e12d001eaef | 898 | py | Python | PyInstaller/hooks/hook-PIL.py | IASEngineers/pyinstaller | 5a3c8585b1f4147da7ef4d823b8194baddedbb1f | [
"Apache-2.0"
] | 1 | 2020-07-21T15:25:22.000Z | 2020-07-21T15:25:22.000Z | PyInstaller/hooks/hook-PIL.py | IASEngineers/pyinstaller | 5a3c8585b1f4147da7ef4d823b8194baddedbb1f | [
"Apache-2.0"
] | null | null | null | PyInstaller/hooks/hook-PIL.py | IASEngineers/pyinstaller | 5a3c8585b1f4147da7ef4d823b8194baddedbb1f | [
"Apache-2.0"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# This hook was tested with Pillow 2.9.0 (Maintained fork of PIL):
# https://pypi.python.org/pypi/Pillow
# Ignore tkinter to prevent inclusion of Tcl/Tk library and other GUI
# libraries.
# Assume that if people are really using tkinter in their application, they
# will also import it directly and thus PyInstaller bundles the right GUI
# library.
excludedimports = ['tkinter', 'PyQt5']
| 40.818182 | 78 | 0.631403 |
fd18e2ca6ce0992ce83d2ea4f861c40596d885bb | 1,633 | py | Python | tests/_site/myauth/models.py | hlongmore/django-oscar | 21da1301d41597d470d4292678718e37556ee186 | [
"BSD-3-Clause"
] | null | null | null | tests/_site/myauth/models.py | hlongmore/django-oscar | 21da1301d41597d470d4292678718e37556ee186 | [
"BSD-3-Clause"
] | null | null | null | tests/_site/myauth/models.py | hlongmore/django-oscar | 21da1301d41597d470d4292678718e37556ee186 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import re
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core import validators
from django.contrib.auth.models import BaseUserManager
from oscar.apps.customer.abstract_models import AbstractUser
class CustomUserManager(BaseUserManager):
def create_user(self, username, email, password):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=CustomUserManager.normalize_email(email),
username=username,
is_active=True,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password=password)
u.is_admin = True
u.is_staff = True
u.save(using=self._db)
return u
class User(AbstractUser):
"""
Custom user based on Oscar's AbstractUser
"""
username = models.CharField(
_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile(r'^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
extra_field = models.CharField(
_('Nobody needs me'), max_length=5, blank=True)
objects = CustomUserManager()
class Meta:
app_label = 'myauth'
| 28.155172 | 106 | 0.630741 |
74b02368d2191173f12880c22637d1e1343f5c07 | 654 | py | Python | backend/edw/management/commands/rebuild_datamart.py | MMotionMan/django-edw | 0f686429d29e0f40409a3b2318664973b2844c08 | [
"BSD-3-Clause"
] | 4 | 2019-09-18T05:51:12.000Z | 2020-10-23T08:50:00.000Z | backend/edw/management/commands/rebuild_datamart.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 10 | 2020-04-29T11:46:44.000Z | 2022-03-11T23:38:27.000Z | backend/edw/management/commands/rebuild_datamart.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 13 | 2020-04-09T07:49:48.000Z | 2022-03-02T07:06:28.000Z | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
"""
``rebuild_datamart2``
---------------------
``rebuild_datamart2`` rebuilds your mptt pointers. Only use in emergencies.
"""
from django.core.management.base import BaseCommand
from edw.models.data_mart import DataMartModel
class Command(BaseCommand):
help = "Run this manually to rebuild your mptt pointers. Only use in emergencies."
def handle(self, **options):
#print("Rebuilding MPTT pointers for DataMartModel")
DataMartModel._tree_manager.rebuild2()
| 29.727273 | 86 | 0.547401 |
d6ede213b46e64cc5702bc3f1ce660b5f07fefe4 | 116 | py | Python | data_analysis/Matplotlib/11Graficos_a_partir_do_numpy.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | data_analysis/Matplotlib/11Graficos_a_partir_do_numpy.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | data_analysis/Matplotlib/11Graficos_a_partir_do_numpy.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
plt.scatter(np.arange(50), np.random.randn(50))
plt.show()
| 19.333333 | 48 | 0.724138 |
7b0653be2e337d78718d855ba9446c7b9670282b | 833 | py | Python | robot_ws/src/nssc_web_interface/setup.py | NS-Robotics/NSRA2 | 86dbd504ed268fa951c61b010924bea6faff5a43 | [
"BSD-3-Clause"
] | null | null | null | robot_ws/src/nssc_web_interface/setup.py | NS-Robotics/NSRA2 | 86dbd504ed268fa951c61b010924bea6faff5a43 | [
"BSD-3-Clause"
] | null | null | null | robot_ws/src/nssc_web_interface/setup.py | NS-Robotics/NSRA2 | 86dbd504ed268fa951c61b010924bea6faff5a43 | [
"BSD-3-Clause"
] | null | null | null | import os
from glob import glob
from setuptools import setup
package_name = 'nssc_web_interface'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'templates'), glob('templates/*.html')),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='ros2',
maintainer_email='noa.sendlhofer@gmail.com',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'nssc_web_interface = nssc_web_interface.nssc_web_interface:main',
],
},
)
| 27.766667 | 85 | 0.642257 |
2737609ffae0ceb0c12acbd5ac914869ae6aff18 | 379 | py | Python | ELAB02/02-11.py | tawanchaiii/01204111_63 | edf1174f287f5174d93729d9b5c940c74d3b6553 | [
"WTFPL"
] | null | null | null | ELAB02/02-11.py | tawanchaiii/01204111_63 | edf1174f287f5174d93729d9b5c940c74d3b6553 | [
"WTFPL"
] | null | null | null | ELAB02/02-11.py | tawanchaiii/01204111_63 | edf1174f287f5174d93729d9b5c940c74d3b6553 | [
"WTFPL"
] | null | null | null | import math
l = [0,0,0,0,0]
l[0] = int(input("Input a: "))
l[1] = int(input("Input b: "))
l[2] = int(input("Input c: "))
l[3] = int(input("Input d: "))
l[4] = int(input("Input e: "))
mean = float((l[0]+l[1]+l[2]+l[3]+l[4])/5)
print(f"mean: {mean:.3f}")
# accumulate squared deviations for the population standard deviation
sq_sum = 0
for i in range(5):
    sq_sum += (l[i]-mean)*(l[i]-mean)
want = float(math.sqrt(sq_sum/5))
print(f"sd: {want:.3f}") | 27.071429 | 43 | 0.522427 |
0d51b01fc3373f35d4d6aec22d640550b50ac42a | 5,095 | py | Python | aucc/main.py | FlameOfIgnis/avell-unofficial-control-center | e87652b8b6def1aa2544149735761aab7fa1cc53 | [
"MIT"
] | 1 | 2021-07-07T10:20:10.000Z | 2021-07-07T10:20:10.000Z | aucc/main.py | ignis-sec/avell-unofficial-control-center | e87652b8b6def1aa2544149735761aab7fa1cc53 | [
"MIT"
] | null | null | null | aucc/main.py | ignis-sec/avell-unofficial-control-center | e87652b8b6def1aa2544149735761aab7fa1cc53 | [
"MIT"
] | null | null | null | """
Copyright (c) 2019, Rodrigo Gomes.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 22, 2019
@author: @rodgomesc
"""
import argparse
import sys, os
from aucc.core.handler import DeviceHandler
import time
from aucc.core.colors import (get_mono_color_vector,
get_h_alt_color_vector,
get_v_alt_color_vector,
_colors_available)
# style template: (0x08, 0x02, STYLE_ID, DELAY_TIME, BRIGHTNESS, 0x08, ROTATION, 0x00)
light_style = {
'rainbow': 0x05,
'reactive': 0x04,
'raindrop': 0x0A,
'marquee': 0x09,
'aurora': 0x0E,
'pulse': 0x02,
'wave': 0x03,
'drop': 0x06,
'firework': 0x11
}
# keyboard brightness has 4 variations: 0x08, 0x16, 0x24, 0x32
brightness_map = {
1: 0x08,
2: 0x16,
3: 0x24,
4: 0x32
}
class ControlCenter(DeviceHandler):
def __init__(self, vendor_id, product_id):
super(ControlCenter, self).__init__(vendor_id, product_id)
self.brightness = None
def disable_keyboard(self):
self.ctrl_write(0x08, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
def keyboard_style(self,style,delay, rotation, brightness):
self.ctrl_write(0x08, 0x02, light_style[style], delay, brightness, 0x08, rotation, 0x00)
def adjust_brightness(self, brightness=None):
if brightness:
self.brightness = brightness
self.ctrl_write(0x08, 0x02, 0x33, 0x00,
brightness_map[self.brightness], 0x00, 0x00, 0x00)
else:
self.adjust_brightness(4)
def color_scheme_setup(self):
self.ctrl_write(0x12, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00)
def mono_color_setup(self, color_scheme):
if self.brightness:
self.color_scheme_setup()
color_vector = get_mono_color_vector(color_scheme)
self.bulk_write(times=8, payload=color_vector)
else:
self.adjust_brightness()
self.mono_color_setup(color_scheme)
def h_alt_color_setup(self, color_scheme_a, color_scheme_b):
self.color_scheme_setup()
color_vector = get_h_alt_color_vector(color_scheme_a, color_scheme_b)
self.bulk_write(times=8, payload=color_vector)
def v_alt_color_setup(self, color_scheme_a, color_scheme_b):
self.color_scheme_setup()
color_vector = get_v_alt_color_vector(color_scheme_a, color_scheme_b)
self.bulk_write(times=8, payload=color_vector)
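# Usage sketch mirroring main() below; the vendor/product ids are the values
# hard-coded there, while the brightness level and color are illustrative choices:
#     control = ControlCenter(vendor_id=0x048d, product_id=0xce00)
#     control.adjust_brightness(3)
#     control.mono_color_setup('teal')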
def main():
from elevate import elevate
if not os.geteuid() == 0:
elevate()
control = ControlCenter(vendor_id=0x048d, product_id=0xce00)
parser = argparse.ArgumentParser(
description="Supply at least one of the options [-c|-H|-V|-s|-d]. "
"Colors available: "
"[red|green|blue|teal|pink|purple|white|yellow|orange]")
parser.add_argument('-c', '--color', help='Single color')
parser.add_argument('-b', '--brightness', help='1, 2, 3 or 4')
parser.add_argument('-H', '--h-alt', nargs=2,
help='Horizontal alternating colors')
parser.add_argument('-V', '--v-alt', nargs=2,
help='Vertical alternating colors')
parser.add_argument('-s', '--style',
                        help='one of (rainbow, reactive, raindrop, marquee, aurora, pulse, wave, drop, firework)')
parser.add_argument('-S', '--speed',
help='style speed, only to be used with -s (0-5)')
parser.add_argument('-r', '--rotation',
help='style rotation, only to be used with -s, (1-4)')
parser.add_argument('-sd', '--styleDebug',
help='style byte directly from parameter')
parser.add_argument('-d', '--disable', action='store_true',
help='turn keyboard backlight off'),
parsed = parser.parse_args()
if parsed.disable:
control.disable_keyboard()
if parsed.brightness:
control.adjust_brightness(int(parsed.brightness))
if parsed.color:
control.mono_color_setup(parsed.color)
elif parsed.h_alt:
control.h_alt_color_setup(*parsed.h_alt)
elif parsed.v_alt:
control.v_alt_color_setup(*parsed.v_alt)
elif parsed.style:
speed=3
brightness=0x32
rotation=1
if parsed.speed and int(parsed.speed) <=5 and int(parsed.speed) >=0:
speed=5-int(parsed.speed)
if parsed.rotation and int(parsed.rotation) <=4 and int(parsed.rotation) >=0:
rotation=int(parsed.rotation)
if parsed.brightness and int(parsed.brightness) <=4 and int(parsed.brightness) >=1:
brightness=brightness_map[int(parsed.brightness)]
control.keyboard_style(parsed.style,speed,rotation, brightness)
elif parsed.styleDebug:
control.keyboard_styleDebug(parsed.styleDebug)
else:
print("Invalid or absent command")
if __name__ == "__main__":
main()
| 34.659864 | 96 | 0.627478 |
ca945a1a6c7a4509dcbc0efa32fe9421f372f222 | 429 | py | Python | gammapy/extern/__init__.py | aaguasca/gammapy | b1a4e9dbaeec23b3eaca1c874752e92432920a42 | [
"BSD-3-Clause"
] | null | null | null | gammapy/extern/__init__.py | aaguasca/gammapy | b1a4e9dbaeec23b3eaca1c874752e92432920a42 | [
"BSD-3-Clause"
] | null | null | null | gammapy/extern/__init__.py | aaguasca/gammapy | b1a4e9dbaeec23b3eaca1c874752e92432920a42 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains `extern` code, i.e. code that we just copied
here into the `gammapy.extern` package, because we wanted to use it,
but not have an extra dependency (these are single-file external packages).
* ``xmltodict.py`` for easily converting XML from / to Python dicts
Origin: https://github.com/martinblech/xmltodict/blob/master/xmltodict.py
"""
| 42.9 | 75 | 0.757576 |
de03538555f79009d83e86957b8c66d02c1db359 | 865 | py | Python | Models/PowerPlantsLoad/dummy_data/SPG StandortDaten/model.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | [
"MIT"
] | 2 | 2018-05-31T15:02:08.000Z | 2018-07-11T11:02:44.000Z | Models/PowerPlantsLoad/dummy_data/SPG StandortDaten/model.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | [
"MIT"
] | null | null | null | Models/PowerPlantsLoad/dummy_data/SPG StandortDaten/model.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | [
"MIT"
] | null | null | null | from pyjamas_core import Supermodel
from pyjamas_core.util import Input, Output, Property
import numpy as np
# define the model class and inherit from class "Supermodel"
class Model(Supermodel):
# model constructor
def __init__(self, id, name: str):
# instantiate supermodel
super(Model, self).__init__(id, name)
# define outputs
self.outputs['standorte'] = Output('StandorteSPG')
# define persistent variables
self.kw_data = None
async def func_birth(self):
self.standorte = {"Baden": {"Lat": 47.47256, "Lon": 8.30850},
"Brugg": {"Lat": 47.48420, "Lon": 8.20706},
"Olten": {"Lat": 47.35212, "Lon": 7.90801}}
async def func_peri(self, prep_to_peri=None):
# set output
self.set_output("standorte", self.standorte)
| 26.212121 | 69 | 0.611561 |
0a7165880908fc970966d400ece92e844791b149 | 1,135 | py | Python | asyncapi_schema_pydantic/v2_3_0/stomp_bindings.py | albertnadal/asyncapi-schema-pydantic | 83966bdc11f2d465a10b52cec5ff79d18fa6f5fe | [
"MIT"
] | null | null | null | asyncapi_schema_pydantic/v2_3_0/stomp_bindings.py | albertnadal/asyncapi-schema-pydantic | 83966bdc11f2d465a10b52cec5ff79d18fa6f5fe | [
"MIT"
] | null | null | null | asyncapi_schema_pydantic/v2_3_0/stomp_bindings.py | albertnadal/asyncapi-schema-pydantic | 83966bdc11f2d465a10b52cec5ff79d18fa6f5fe | [
"MIT"
] | null | null | null | from pydantic import BaseModel, Extra
class StompChannelBinding(BaseModel):
"""
This document defines how to describe STOMP-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class StompMessageBinding(BaseModel):
"""
This document defines how to describe STOMP-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class StompOperationBinding(BaseModel):
"""
This document defines how to describe STOMP-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class StompServerBinding(BaseModel):
"""
This document defines how to describe STOMP-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
| 22.7 | 85 | 0.705727 |
deee073b8462bea284029639d15b97d251300e93 | 2,037 | py | Python | data/munge.py | datamade/ihs-price-index | 2ccab93b10277ce75e26196c541f0ee4855ae4cb | [
"MIT"
] | 2 | 2015-12-06T22:52:23.000Z | 2016-09-12T12:43:33.000Z | data/munge.py | datamade/ihs-price-index | 2ccab93b10277ce75e26196c541f0ee4855ae4cb | [
"MIT"
] | 1 | 2015-05-05T14:45:51.000Z | 2015-05-05T21:44:08.000Z | data/munge.py | datamade/ihs-price-index | 2ccab93b10277ce75e26196c541f0ee4855ae4cb | [
"MIT"
] | null | null | null | import csv
file_prefix = "2021_q2_"
with open(file_prefix + 'price_index_by_quarter.csv', 'r') as f:
reader = csv.reader(f)
puma_ids = next(reader)[1:]
puma_ids = [i[1:].zfill(5) for i in puma_ids]
puma_names = next(reader)[1:]
rotated_rows = [[] for i in range(len(puma_ids) + 1)]
for row in reader:
for cell_index, cell in enumerate(row):
rotated_rows[cell_index].append(cell)
quarters = rotated_rows.pop(0)
baseline_index = quarters.index('2000Q1')
changes = []
for row in rotated_rows:
changeified_row = []
for index, cell in enumerate(row):
if index == baseline_index:
changeified_row.append(0)
else:
change = (float(cell) - float(row[baseline_index]))
changeified_row.append(change)
changeified_row = ['%.1f' % float(i) for i in changeified_row]
changes.append(changeified_row)
rows_plus_names = []
for index, row in enumerate(changes):
row = [puma_ids[index], puma_names[index]] + row
rows_plus_names.append(row)
row_mapper = {}
for row in rows_plus_names:
row_mapper[row[0]] = row
rows_with_summary = []
with open(file_prefix + 'price_index_summary.csv', 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
puma_id = row[0][1:].zfill(5)
try:
full_row = row_mapper[puma_id]
# ignore the first 2 columns and chop off the % sign
full_row.extend([i[:-1] for i in row[2:]])
rows_with_summary.append(full_row)
except KeyError:
break
summary_header = ['Change Since 2000',
'Change Peak to Current',
'Change Bottom to Current',
'Year-over-year change',
'Median Sales Price']
header = ['PumaID', 'Name'] + quarters + summary_header
with open('cook_puma_trend_by_quarter.csv', 'w') as outp:
writer = csv.writer(outp)
writer.writerow(header)
writer.writerows(rows_with_summary)
print("done!")
| 26.454545 | 66 | 0.619539 |
2bef3498033e9c3237664648f2b178c301c6914f | 31 | py | Python | starfish/image/__init__.py | joshmoore/starfish | db38168da35f01ad1541f67aebe49a083d12e224 | [
"MIT"
] | 1 | 2018-10-07T03:53:43.000Z | 2018-10-07T03:53:43.000Z | starfish/image/__init__.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | null | null | null | starfish/image/__init__.py | Xiaojieqiu/starfish | 426480fcfeda4b8b1eb9371a818ff20275ac898d | [
"MIT"
] | 1 | 2019-03-12T23:39:55.000Z | 2019-03-12T23:39:55.000Z | from ._stack import ImageStack
| 15.5 | 30 | 0.83871 |
243054f132f7c76494576331f85042a76565a3bf | 3,079 | py | Python | Video_Summarization/helpers/init_helper.py | AlexanderSlav/Automatic-Soccer-Highlights-Generation | 073e95776052034a327a102e5291234983965ad2 | [
"MIT"
] | null | null | null | Video_Summarization/helpers/init_helper.py | AlexanderSlav/Automatic-Soccer-Highlights-Generation | 073e95776052034a327a102e5291234983965ad2 | [
"MIT"
] | 1 | 2021-05-30T06:43:53.000Z | 2021-06-02T16:49:07.000Z | Video_Summarization/helpers/init_helper.py | AlexanderSlav/Automatic-Soccer-Highlights-Generation | 073e95776052034a327a102e5291234983965ad2 | [
"MIT"
] | null | null | null | import argparse
import logging
import random
from pathlib import Path
import numpy as np
import torch
def set_random_seed(seed: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def init_logger(log_dir: str, log_file: str) -> None:
logger = logging.getLogger()
format_str = r'[%(asctime)s] %(message)s'
logging.basicConfig(
level=logging.INFO,
datefmt=r'%Y/%m/%d %H:%M:%S',
format=format_str
)
log_dir = Path(log_dir)
log_dir.mkdir(parents=True, exist_ok=True)
fh = logging.FileHandler(str(log_dir / log_file))
fh.setFormatter(logging.Formatter(format_str))
logger.addHandler(fh)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# model type
parser.add_argument('model', type=str,
choices=('anchor-based', 'anchor-free'))
# resume training from ckpt
parser.add_argument('--saved_ckpt', type=str)
# training & evaluation
parser.add_argument('--device', type=str, default='cuda',
choices=('cuda', 'cpu'))
parser.add_argument('--seed', type=int, default=12345)
parser.add_argument('--splits', type=str, nargs='+', required=True)
parser.add_argument('--max-epoch', type=int, default=300)
parser.add_argument('--model-dir', type=str, default='../models/model')
parser.add_argument('--log-file', type=str, default='log.txt')
parser.add_argument('--lr', type=float, default=5e-5)
parser.add_argument('--weight-decay', type=float, default=1e-5)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--nms-thresh', type=float, default=0.5)
# common model config
parser.add_argument('--base-model', type=str, default='attention',
choices=['attention', 'lstm', 'linear', 'bilstm',
'gcn'])
parser.add_argument('--num-head', type=int, default=8)
parser.add_argument('--num-feature', type=int, default=1024)
parser.add_argument('--num-hidden', type=int, default=128)
# anchor based
parser.add_argument('--neg-sample-ratio', type=float, default=2.0)
parser.add_argument('--incomplete-sample-ratio', type=float, default=1.0)
parser.add_argument('--pos-iou-thresh', type=float, default=0.6)
parser.add_argument('--neg-iou-thresh', type=float, default=0.0)
parser.add_argument('--incomplete-iou-thresh', type=float, default=0.3)
parser.add_argument('--anchor-scales', type=int, nargs='+',
default=[4, 8, 16, 32])
# anchor free
parser.add_argument('--lambda-ctr', type=float, default=1.0)
parser.add_argument('--cls-loss', type=str, default='focal',
choices=['focal', 'cross-entropy'])
parser.add_argument('--reg-loss', type=str, default='soft-iou',
choices=['soft-iou', 'smooth-l1'])
return parser
def get_arguments() -> argparse.Namespace:
parser = get_parser()
args = parser.parse_args()
return args
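# Usage sketch (assumption: the split file path and epoch count are illustrative;
# any values accepted by the parser above work):
#     args = get_parser().parse_args(
#         ['anchor-based', '--splits', 'splits/tvsum.yml', '--max-epoch', '100'])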
| 36.223529 | 77 | 0.639818 |
308e65dea7d89a57767e6362c59586e0bf108e5b | 11,193 | py | Python | splash/download.py | disruptiveplanets/splash | 8ac3ccdb8afde87de2c5b83de5c6768d4353b7e7 | [
"MIT"
] | 2 | 2021-02-05T04:47:33.000Z | 2021-04-25T02:19:18.000Z | splash/download.py | disruptiveplanets/splash | 8ac3ccdb8afde87de2c5b83de5c6768d4353b7e7 | [
"MIT"
] | 9 | 2020-07-22T00:29:44.000Z | 2020-10-12T14:12:01.000Z | splash/download.py | disruptiveplanets/splash | 8ac3ccdb8afde87de2c5b83de5c6768d4353b7e7 | [
"MIT"
] | 1 | 2020-08-01T11:58:41.000Z | 2020-08-01T11:58:41.000Z | import os
import requests
import glob
import numpy as np
from os import path
from tqdm import tqdm
from astropy.io import fits
from astropy.io import fits
from io import BytesIO
from .Functions import GetID
def get_file_from_url(url, user, password):
resp = requests.get(url, auth=(user, password))
return BytesIO(resp.content)
def DownloadData(SpNumber, GAIAID="", user="", password=""):
'''
    This function downloads Artemis data processed with the Cambridge pipeline from the Cambridge server.
Input
----------
SpNumber: string
SPECULOOS target number such as SP0025+5422
GAIAID: integer
GAIA ID corresponding to the SPECULOOS Target
user: string
Username to access the Cambridge data
password: string
password to access the Cambridge data
'''
if "TRAPPIST" in SpNumber:
SpName = "Sp2306-0502"
GAIAID = 2635476908753563008
elif "TOI-736" in SpNumber:
SpName = "TOI-736"
GAIAID = 3562427951852172288
elif not(GAIAID):
SpName=SpNumber
GAIAID = GetID(SpNumber,IdType="SPECULOOS")
else:
SpName=SpNumber
#Construct the path
url = "http://www.mrao.cam.ac.uk/SPECULOOS/speculoos-portal/php/get_observations.php?id=%s&date=&filter=&telescope=" %GAIAID
resp = requests.get(url, auth=(user, password))
assert (
resp.status_code == 200
), "Wrong username or password used to access data, please check it with Peter"
assert (
resp.content != b"null" and resp.content != b"\r\nnull"
), "Your request is not matching any available data in the Cambridge archive. To see available data, please check http://www.mrao.cam.ac.uk/SPECULOOS/portal_v2/"
Content = eval(resp.content)[0]
CompletePath = []
DateValues = []
FilterValues = []
BaseLocation = "http://www.mrao.cam.ac.uk/SPECULOOS"
for Item in Content:
DateValues.append(Item['date'])
FilterValues.append(Item['filter'])
if "ARTEMIS" in Item['telescope'].upper() and int(DateValues[-1])<20200927:
version="v2_01"
else:
version="v2"
ConstructedPath = os.path.join(BaseLocation,Item['telescope'],"output",version,DateValues[-1],SpName,"lightcurves")
CompletePath.append(ConstructedPath)
if len(DateValues)<1:
print("Error downloading the data")
return 0
elif len(DateValues)>300:
print("Too many data file found")
return 0
else:
print("Found %d different nights of observation. Downloading now." %len(DateValues))
#Clean the TempFolder
os.system("rm -rf TempFolder/*")
for Path, Filter, Date in tqdm(zip(CompletePath, FilterValues, DateValues)):
if not(os.path.exists("TempFolder")):
os.system("mkdir TempFolder")
urlGet3 = os.path.join(Path, "%s_%s_%s_3_MCMC" %(GAIAID, Filter, Date))
urlGet4 = os.path.join(Path, "%s_%s_%s_4_MCMC" %(GAIAID, Filter, Date))
urlGet5 = os.path.join(Path, "%s_%s_%s_5_MCMC" %(GAIAID, Filter, Date))
urlGet6 = os.path.join(Path, "%s_%s_%s_6_MCMC" %(GAIAID, Filter, Date))
urlGet7 = os.path.join(Path, "%s_%s_%s_7_MCMC" %(GAIAID, Filter, Date))
urlGet8 = os.path.join(Path, "%s_%s_%s_8_MCMC" %(GAIAID, Filter, Date))
rGET3 = requests.get(urlGet3, auth=(user, password))
rGET4 = requests.get(urlGet4, auth=(user, password))
rGET5 = requests.get(urlGet5, auth=(user, password))
rGET6 = requests.get(urlGet6, auth=(user, password))
rGET7 = requests.get(urlGet7, auth=(user, password))
rGET8 = requests.get(urlGet8, auth=(user, password))
SaveFileName3 = "TempFolder/%s_%s_SPC_ap3.txt" %(str(SpNumber), Date)
SaveFileName4 = "TempFolder/%s_%s_SPC_ap4.txt" %(str(SpNumber), Date)
SaveFileName5 = "TempFolder/%s_%s_SPC_ap5.txt" %(str(SpNumber), Date)
SaveFileName6 = "TempFolder/%s_%s_SPC_ap6.txt" %(str(SpNumber), Date)
SaveFileName7 = "TempFolder/%s_%s_SPC_ap7.txt" %(str(SpNumber), Date)
SaveFileName8 = "TempFolder/%s_%s_SPC_ap8.txt" %(str(SpNumber), Date)
if not("TITLE" in rGET3.text.upper()):
with open(SaveFileName3,'w') as f:
f.write(rGET3.text)
if not("TITLE" in rGET4.text.upper()):
with open(SaveFileName4,'w') as f:
f.write(rGET4.text)
if not("TITLE" in rGET5.text.upper()):
with open(SaveFileName5,'w') as f:
f.write(rGET5.text)
if not("TITLE" in rGET6.text.upper()):
with open(SaveFileName6,'w') as f:
f.write(rGET6.text)
if not("TITLE" in rGET7.text.upper()):
with open(SaveFileName7,'w') as f:
f.write(rGET7.text)
if not("TITLE" in rGET8.text.upper()):
with open(SaveFileName8,'w') as f:
f.write(rGET8.text)
CombineData(SpNumber)
return 1
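# Usage sketch (assumption: the credentials are placeholders for a valid
# Cambridge-archive account; the target id follows the docstring example):
#     DownloadData('SP0025+5422', user='someuser', password='secret')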
def CombineData(SpNumber):
'''
Combines the data in the TempFolder when downloaded
'''
Parameters= "BJDMID, FLUX, DX, DY, FWHM, FWHM_X, FWHM_Y, SKY, AIRMASS"
for Aper in range(3,9):
CurrentFileList = glob.glob("TempFolder/*ap%s.txt" %Aper)
if len(CurrentFileList)<1:
continue
AllData = []
for FileItem in CurrentFileList:
try:
DataText = np.genfromtxt(FileItem, skip_header=1)
X,Y = np.shape(DataText)
CurrentData = np.empty((X, 9))
CurrentData[:,0] = DataText[:,1]
except:
continue
CurrentData[:,1] = DataText[:,3]
CurrentData[:,2] = DataText[:,6]
CurrentData[:,3] = DataText[:,7]
CurrentData[:,4] = DataText[:,8]
CurrentData[:,5] = DataText[:,9]
CurrentData[:,6] = DataText[:,10]
CurrentData[:,7] = DataText[:,11]
CurrentData[:,8] = DataText[:,12]
AllData.extend(CurrentData)
AllData = np.array(AllData)
AllTime = AllData[:,0]
AllData = AllData[np.argsort(AllTime)]
if not(os.path.exists("data")):
os.system("mkdir data")
np.savetxt("data/%s_%sAp.txt" %(SpNumber, Aper), AllData, header=Parameters)
os.system("rm -rf TempFolder")
def DownloadFitsData(SpNumber, GAIAID="", user="", password=""):
'''
    This function downloads Artemis data processed with the Cambridge pipeline from the Cambridge server.
Input
----------
SpNumber: string
SPECULOOS target number such as SP0025+5422
user: string
Username to access the Cambridge data
password: string
password to access the Cambridge data
'''
if "TRAPPIST-1" in SpNumber:
SpName = "Sp2306-0502"
GAIAID = 2635476908753563008
elif "TOI-736" in SpNumber:
SpName = "TOI-736"
GAIAID = 3562427951852172288
else:
SpName = SpNumber
GAIAID = GetID(SpNumber,IdType="SPECULOOS")
#Construct the path
url = "http://www.mrao.cam.ac.uk/SPECULOOS/speculoos-portal/php/get_observations.php?id=%s&date=&filter=&telescope=" %GAIAID
resp = requests.get(url, auth=(user, password))
assert (
resp.status_code == 200
), "Wrong username or password used to access data, please check it with Peter"
assert (
resp.content != b"null" and resp.content != b"\r\nnull"
), "Your request is not matching any available data in the Cambridge archive. To see available data, please check http://www.mrao.cam.ac.uk/SPECULOOS/portal_v2/"
print("The value of GAIAID is:", GAIAID)
Content = eval(resp.content)[0]
CompletePath = []
DateValues = []
FilterValues = []
BaseLocation = "http://www.mrao.cam.ac.uk/SPECULOOS"
if len(Content)<1:
print("Error downloading the data")
return 0
for Item in Content:
DateValues.append(Item['date'])
FilterValues.append(Item['filter'])
if "ARTEMIS" in Item['telescope'].upper() and int(DateValues[-1])<20200927:
version="v2_01"
else:
version="v2"
ConstructedPath = os.path.join(BaseLocation,Item['telescope'],"output",version,DateValues[-1],SpName)
CompletePath.append(ConstructedPath)
os.system("rm -rf TempFolder/*")
for Path, Filter, Date in zip(CompletePath, FilterValues, DateValues):
print("Downloading Date:", Date)
if not(os.path.exists("TempFolder")):
os.system("mkdir TempFolder")
if not(os.path.exists("TempFolder/%s" %str(SpName))):
os.system("mkdir TempFolder/%s" %str(SpName))
Catalogue = Path+"/1_initial-catalogue.fits"
urlGet3 = Path+"/%s_%s_%s_3_diff.fits" %(GAIAID, Filter, Date)
urlGet4 = Path+"/%s_%s_%s_4_diff.fits" %(GAIAID, Filter, Date)
urlGet5 = Path+"/%s_%s_%s_5_diff.fits" %(GAIAID, Filter, Date)
urlGet6 = Path+"/%s_%s_%s_6_diff.fits" %(GAIAID, Filter, Date)
urlGet7 = Path+"/%s_%s_%s_7_diff.fits" %(GAIAID, Filter, Date)
urlGet8 = Path+"/%s_%s_%s_8_diff.fits" %(GAIAID, Filter, Date)
rGET3 = requests.get(urlGet3, auth=(user, password))
rGET4 = requests.get(urlGet4, auth=(user, password))
rGET5 = requests.get(urlGet5, auth=(user, password))
rGET6 = requests.get(urlGet6, auth=(user, password))
rGET7 = requests.get(urlGet7, auth=(user, password))
rGET8 = requests.get(urlGet8, auth=(user, password))
SaveFileName0 = "TempFolder/%s/Catalogue.fits" %str(SpNumber)
SaveFileName3 = "TempFolder/%s/%s_%s_SPC_3.fits" %(str(SpNumber), Filter, Date)
SaveFileName4 = "TempFolder/%s/%s_%s_SPC_4.fits" %(str(SpNumber), Filter, Date)
SaveFileName5 = "TempFolder/%s/%s_%s_SPC_5.fits" %(str(SpNumber), Filter, Date)
SaveFileName6 = "TempFolder/%s/%s_%s_SPC_6.fits" %(str(SpNumber), Filter, Date)
SaveFileName7 = "TempFolder/%s/%s_%s_SPC_7.fits" %(str(SpNumber), Filter, Date)
SaveFileName8 = "TempFolder/%s/%s_%s_SPC_8.fits" %(str(SpNumber), Filter, Date)
if not(os.path.exists(SaveFileName0)):
rGETCat = requests.get(Catalogue, auth=(user, password))
with open(SaveFileName0,'w') as f:
if len(rGETCat.text)>200:
f.write(rGETCat.text)
with open(SaveFileName3,'w') as f:
if len(rGET3.text)>200:
f.write(rGET3.text)
with open(SaveFileName4,'w') as f:
if len(rGET4.text)>200:
f.write(rGET4.text)
with open(SaveFileName5,'w') as f:
if len(rGET5.text)>200:
f.write(rGET5.text)
with open(SaveFileName6,'w') as f:
if len(rGET6.text)>200:
f.write(rGET6.text)
with open(SaveFileName7,'w') as f:
if len(rGET7.text)>200:
f.write(rGET7.text)
with open(SaveFileName8,'w') as f:
if len(rGET8.text)>200:
f.write(rGET8.text)
print("data Saved File for %s" %Date)
| 34.869159 | 165 | 0.605736 |
0c8e0123894cf9a73fd6b0855d5fb2dc054f9682 | 7,738 | py | Python | shareseq.py | YichaoOU/SHARE_seq_pipeline | d7e142d35aa90c33d3c53f2919f4f04862392935 | [
"MIT"
] | null | null | null | shareseq.py | YichaoOU/SHARE_seq_pipeline | d7e142d35aa90c33d3c53f2919f4f04862392935 | [
"MIT"
] | null | null | null | shareseq.py | YichaoOU/SHARE_seq_pipeline | d7e142d35aa90c33d3c53f2919f4f04862392935 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import argparse
import logging
import Colorer
import pandas as pd
import subprocess
import yaml
import datetime
import getpass
from utils import collision_boxplot
"""
main script to stitch together share-seq analysis pipeline
"""
username = getpass.getuser()
current_file_base_name = __file__.split("/")[-1].split(".")[0]
def my_args():
mainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
mainParser.add_argument('-j',"--jid", help="enter a job ID, which is used to make a new directory. Every output will be moved into this folder.", default=current_file_base_name+'_'+username+"_"+str(datetime.date.today()))
mainParser.add_argument('-f1',"--sample_barcode", help="input config file,tsv: label, sample_barcode, ATAC/RNA", required=True)
mainParser.add_argument('-f2',"--cell_barcode", help="a list of barcode sequences", required=True)
mainParser.add_argument('-r1', help="input undetermined R1 fastq.gz", required=True)
mainParser.add_argument('-r2', help="input undetermined R2 fastq.gz", required=True)
# mainParser.add_argument('-r2', help="input undetermined R2 fastq.gz", required=True)
mainParser.add_argument('-n',"--num_mismatch", help="number of mismatch allowed", default=1,type=int)
mainParser.add_argument("--collision_threshold", help="max mapping rate as collision", default=0.8,type=float)
mainParser.add_argument("--min_reads_per_cell", help="minimal number of reads per cell", default=100,type=float)
mainParser.add_argument("--collision", help="map to hybrid genome and calculate collision rate",action='store_true')
mainParser.add_argument("--filter_polyT", help="polyT reads may not be noise",action='store_true')
genome=mainParser.add_argument_group(title='Genome Info')
genome.add_argument('-g','--genome', help="genome version, must match key in genome config yaml file", default='hg38',type=str)
genome.add_argument('--genome_config', help="genome config file specifing: index file, black list, chrom size and effectiveGenomeSize", default='genome.yaml',type=str)
##------- add parameters above ---------------------
args = mainParser.parse_args()
return args
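# Invocation sketch (assumption: the file names are placeholders; -f1/-f2/-r1/-r2
# are the required arguments defined above):
#     shareseq.py -f1 sample_barcodes.tsv -f2 cell_barcodes.txt \
#         -r1 Undetermined_R1.fastq.gz -r2 Undetermined_R2.fastq.gz -g hg38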
def main():
args = my_args()
with open(args.genome_config, 'r') as f:
genome = yaml.load(f,Loader=yaml.FullLoader)
# genome = parse_yaml(args.genome_config)
src="/home/yli11/Tools/SHARE_seq_pipeline"
# print (genome)
df = pd.read_csv(args.sample_barcode,sep="\t",header=None)
# step 1: demultiplexing
command = f"{src}/share_seq_step1_demultiplex.py -r1 {args.r1} -r2 {args.r2} -b {args.sample_barcode} -n {args.num_mismatch}"
logging.info("Running sample demultiplexing...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
# exit()
# step 2: rename fastq, add cell barcode to read name, proper format for UMI-tools
for label,_,data_type in df.values:
command = f"{src}/share_seq_step2_rename_fastq.py -r1 {label}.R1.fastq.gz -r2 {label}.R2.fastq.gz --sample_ID {label} --barcode_list {args.cell_barcode} --error {args.num_mismatch} --revcomp"
logging.info(f"Reformatting fastq read name for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
# step 3: extract UMI and match polyT for RNA-seq
for label,_,data_type in df.values:
if str(data_type).upper() != "RNA":
continue
command = f"umi_tools extract --bc-pattern=NNNNNNNNNN --stdin {label}.matched.R2.fastq.gz --stdout {label}.matched.R2.extract --read2-in {label}.matched.R1.fastq.gz --read2-out {label}.matched.R1.extract"
logging.info(f"UMI-tools extract UMI for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
if args.filter_polyT:
command = f"cutadapt --overlap 6 -G ^TTTTTT --no-trim --untrimmed-output {label}.noPolyT.R1.fastq.gz --untrimmed-paired-output {label}.noPolyT.R2.fastq.gz -e 0.2 -o {label}.matched.R1.fastq.gz -p {label}.matched.R2.fastq.gz {label}.matched.R1.extract {label}.matched.R2.extract;rm {label}.matched.R1.extract {label}.matched.R2.extract"
logging.info(f"cutadapt match polyT for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
else:
command = f"mv {label}.matched.R2.extract {label}.matched.R2.fastq;gzip {label}.matched.R2.fastq;mv {label}.matched.R1.extract {label}.matched.R1.fastq;gzip {label}.matched.R1.fastq"
logging.info(f"User choose not to filter reads based on polyT (this is default). Renaming fastq files...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
# exit()
# step 4: if collision mode, map to hybrid genome
if args.collision:
for label,_,data_type in df.values:
if str(data_type).upper() == "RNA":
command = f"{src}/STAR_mapping.sh {genome['hybrid']['STAR']} {label}.matched.R1.fastq.gz {label}.matched.R2.fastq.gz {label} {genome['hybrid']['gtf']}"
logging.info(f"STAR mapping for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
if str(data_type).upper() == "ATAC":
command = f"{src}/BWA_mapping.sh {genome['hybrid']['BWA']} {label}.matched.R1.fastq.gz {label}.matched.R2.fastq.gz {label}"
logging.info(f"BWA mapping for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
command = f"{src}/step4_calculate_collision_rate_hybrid.py --table {label}.total_number_reads.tsv --reads {label}.R1.bed --threshold {args.collision_threshold}"
logging.info(f"step4_calculate_collision_rate_hybrid for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
command = f"module load R/3.5.1;Rscript {src}/draw_collision_figure.R {label}.for_collision_plot.tsv {label}_collision.pdf {args.collision_threshold}"
logging.info(f"draw_collision_figure for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
collision_boxplot(f"{label}.for_collision_plot.tsv",label,cutoff=args.min_reads_per_cell,threshold=args.collision_threshold)
exit()
# step 5: using HemTools map the final fastq to the genome by BWA or STAR
for label,_,data_type in df.values:
if str(data_type).upper() == "RNA":
command = f"{src}/STAR_mapping.sh {genome[args.genome]['STAR']} {label}.matched.R1.fastq.gz {label}.matched.R2.fastq.gz {label} {genome[args.genome]['gtf']} {genome[args.genome]['rseqc_bed']}"
logging.info(f"STAR mapping for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
if str(data_type).upper() == "ATAC":
command = f"{src}/BWA_mapping.sh {genome[args.genome]['BWA']} {label}.matched.R1.fastq.gz {label}.matched.R2.fastq.gz {label} {genome[args.genome]['rseqc_bed']} {args.genome}"
logging.info(f"BWA mapping for {label} ...")
logging.info(command)
subprocess.call(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
# output Organization
os.system(f"mkdir -p {args.jid}")
for label,_,data_type in df.values:
os.system(f"mkdir {args.jid}/{label};mv {label}* {args.jid}/{label}/")
if __name__ == "__main__":
main()
| 46.059524 | 339 | 0.71543 |
6d401b9a0ff7c132c503be5a8596db98977a6649 | 1,500 | py | Python | PaddleNLP/examples/language_model/rnnlm/args.py | weiwei1115/models | e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3 | [
"Apache-2.0"
] | 1 | 2021-02-24T14:03:55.000Z | 2021-02-24T14:03:55.000Z | PaddleNLP/examples/language_model/rnnlm/args.py | weiwei1115/models | e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3 | [
"Apache-2.0"
] | null | null | null | PaddleNLP/examples/language_model/rnnlm/args.py | weiwei1115/models | e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3 | [
"Apache-2.0"
] | 1 | 2021-06-09T01:50:13.000Z | 2021-06-09T01:50:13.000Z | import argparse
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--data_path",
type=str,
default=None,
help="all the data for train,valid,test")
parser.add_argument('--batch_size', type=int, default=20, help='batch size')
parser.add_argument(
'--hidden_size', type=int, default=650, help='hidden_size')
parser.add_argument('--num_steps', type=int, default=35, help='num steps')
parser.add_argument('--num_layers', type=int, default=2, help='num_layers')
parser.add_argument(
'--max_grad_norm', type=float, default=5.0, help='max grad norm')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout')
parser.add_argument(
'--epoch_start_decay', type=int, default=6, help='epoch_start_decay')
parser.add_argument('--max_epoch', type=int, default=39, help='max_epoch')
parser.add_argument('--lr_decay', type=float, default=0.8, help='lr_decay')
parser.add_argument('--base_lr', type=float, default=1.0, help='base_lr')
parser.add_argument(
'--init_scale', type=float, default=0.05, help='init_scale')
parser.add_argument(
"--init_from_ckpt",
type=str,
default=None,
help="The path of checkpoint to be loaded.")
parser.add_argument(
"--n_gpu",
type=int,
default=1,
help="number of gpus to use, 0 for cpu.")
args = parser.parse_args()
return args
| 39.473684 | 80 | 0.649333 |
05f1968bbdb6de0ef4483e930a395162959b2f45 | 1,074 | py | Python | checking/unittest_coverage.py | jwhonce/container-check | 8c0751359994d93025cd996cec0633ec7a330bd7 | [
"Apache-2.0"
] | null | null | null | checking/unittest_coverage.py | jwhonce/container-check | 8c0751359994d93025cd996cec0633ec7a330bd7 | [
"Apache-2.0"
] | null | null | null | checking/unittest_coverage.py | jwhonce/container-check | 8c0751359994d93025cd996cec0633ec7a330bd7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import unittest
import coverage
import coverage.version
OPTIONS = {
'data_file': '.coverage',
'branch': True,
'include': [
'checking/*',
'container_check',
],
'omit': [
'*.sh',
'*/test_*.py',
]
}
if __name__ == '__main__':
"""Run unit tests with coverage."""
cov = coverage.coverage(**OPTIONS)
try:
cov.start()
suite = unittest.TestLoader().discover('checking')
result = unittest.TextTestRunner(verbosity=1).run(suite)
if result.wasSuccessful():
cov.stop()
print('\nCoverage Summary:\n')
cov.report(show_missing=True)
# writing html report is optional depending on /host mounts
try:
covdir = '/host/var/tmp/coverage'
cov.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
except IOError:
pass
sys.exit(0)
finally:
cov.erase()
sys.exit(1)
| 22.851064 | 71 | 0.537244 |
5498611438e237d62ee0df192447c6b38ddaee21 | 5,762 | py | Python | seleniumwire/thirdparty/mitmproxy/contrib/kaitaistruct/tls_client_hello.py | KozminMoci/selenium-wire | 063c44ab42ac5e53e28c8a8c49c9ae7036bd878b | [
"MIT"
] | 975 | 2018-06-23T10:50:42.000Z | 2022-03-31T00:56:03.000Z | seleniumwire/thirdparty/mitmproxy/contrib/kaitaistruct/tls_client_hello.py | KozminMoci/selenium-wire | 063c44ab42ac5e53e28c8a8c49c9ae7036bd878b | [
"MIT"
] | 492 | 2018-07-30T12:49:51.000Z | 2022-03-31T12:46:56.000Z | seleniumwire/thirdparty/mitmproxy/contrib/kaitaistruct/tls_client_hello.py | KozminMoci/selenium-wire | 063c44ab42ac5e53e28c8a8c49c9ae7036bd878b | [
"MIT"
] | 149 | 2018-08-29T06:53:12.000Z | 2022-03-31T09:23:56.000Z | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import array
import struct
import zlib
from enum import Enum
from kaitaistruct import BytesIO, KaitaiStream, KaitaiStruct
from kaitaistruct import __version__ as ks_version
from pkg_resources import parse_version
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class TlsClientHello(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.version = self._root.Version(self._io, self, self._root)
self.random = self._root.Random(self._io, self, self._root)
self.session_id = self._root.SessionId(self._io, self, self._root)
self.cipher_suites = self._root.CipherSuites(self._io, self, self._root)
self.compression_methods = self._root.CompressionMethods(self._io, self, self._root)
if self._io.is_eof() == True:
self.extensions = [None] * (0)
for i in range(0):
self.extensions[i] = self._io.read_bytes(0)
if self._io.is_eof() == False:
self.extensions = self._root.Extensions(self._io, self, self._root)
class ServerName(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.name_type = self._io.read_u1()
self.length = self._io.read_u2be()
self.host_name = self._io.read_bytes(self.length)
class Random(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.gmt_unix_time = self._io.read_u4be()
self.random = self._io.read_bytes(28)
class SessionId(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.len = self._io.read_u1()
self.sid = self._io.read_bytes(self.len)
class Sni(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.list_length = self._io.read_u2be()
self.server_names = []
while not self._io.is_eof():
self.server_names.append(self._root.ServerName(self._io, self, self._root))
class CipherSuites(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.len = self._io.read_u2be()
self.cipher_suites = [None] * (self.len // 2)
for i in range(self.len // 2):
self.cipher_suites[i] = self._io.read_u2be()
class CompressionMethods(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.len = self._io.read_u1()
self.compression_methods = self._io.read_bytes(self.len)
class Alpn(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ext_len = self._io.read_u2be()
self.alpn_protocols = []
while not self._io.is_eof():
self.alpn_protocols.append(self._root.Protocol(self._io, self, self._root))
class Extensions(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.len = self._io.read_u2be()
self.extensions = []
while not self._io.is_eof():
self.extensions.append(self._root.Extension(self._io, self, self._root))
class Version(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.major = self._io.read_u1()
self.minor = self._io.read_u1()
class Protocol(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.strlen = self._io.read_u1()
self.name = self._io.read_bytes(self.strlen)
class Extension(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.type = self._io.read_u2be()
self.len = self._io.read_u2be()
_on = self.type
if _on == 0:
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.Sni(io, self, self._root)
elif _on == 16:
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.Alpn(io, self, self._root)
else:
self.body = self._io.read_bytes(self.len)
| 40.865248 | 118 | 0.598924 |
10bcbfb2897d145ce356bfb4bf240893b92d69a5 | 3,417 | py | Python | history_of_changes/callback2-for-MTA-ent_extMidtown-Map-works.py | kakun45/MTADashVisualization | 7edac4de650c54671356a11ac91d1c1f477d33bc | [
"MIT"
] | null | null | null | history_of_changes/callback2-for-MTA-ent_extMidtown-Map-works.py | kakun45/MTADashVisualization | 7edac4de650c54671356a11ac91d1c1f477d33bc | [
"MIT"
] | null | null | null | history_of_changes/callback2-for-MTA-ent_extMidtown-Map-works.py | kakun45/MTADashVisualization | 7edac4de650c54671356a11ac91d1c1f477d33bc | [
"MIT"
] | null | null | null | import dash
import numpy as np
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
import folium
# get help from installed module:
# in terminal
# import dash_html_components as html
# print(help(html.Div))
# load a file
df = pd.read_csv("_59THST_midtown_timebins_sum_grater10_sort.csv")
# Dash Slider objects only work with int/float values,
# so dates must first be converted to ints (e.g. Unix/epoch timestamps),
# and the slider marks can then be mapped to any date string format you wish.
# df['epoch_dt'] = df['TIME'].astype(np.int64) // 1e9
# launch app
app = dash.Dash()
# from https://dash.plot.ly/dash-core-components/dropdown
# Create a Dash layout that contains a Graph component
hour_options = []
for hour in df["HODBIN2"].unique():
hour_options.append({"label": str(hour), "value": hour})
daymap = {
"Sunday": 0,
"Monday": 1,
"Tuesday": 2,
"Wednesdays": 3,
"Thursday": 4,
"Friday": 5,
"Saturday": 6,
}
weekdays = list(df["WEEKDAY"])
app.layout = html.Div(
[
html.H1("Midtown, NYC: MTA Entries vs Exsits 10/12/2019 - 10/18/2019"),
html.Iframe(
id="map",
srcDoc=open("Map4.html", "r").read(),
width="100%",
height="300",
style={"border": "none"},
),
dcc.Graph(id="graph"),
dcc.Slider(
id="hour-slider",
min=df["HODBIN2"].min(),
max=df["HODBIN2"].max(),
value=df["HODBIN2"].min(),
marks={
str(time): str(time) + " o'clock" for time in df["HODBIN2"].unique()
},
step=None,
),
]
)
@app.callback(Output("graph", "figure"), [Input("hour-slider", "value")])
def update_figure(selected_time):
filtered_df = df[df["HODBIN2"] == selected_time]
traces = []
for station_name in filtered_df["station_id"].unique():
df_by_station = filtered_df[filtered_df["station_id"] == station_name]
traces.append(
go.Scatter(
y=df_by_station["ENTRIES_diff_sum"],
x=df_by_station["WEEKDAY"],
text=df_by_station["HODBIN2"],
mode="lines+markers",
opacity=0.8,
marker={"size": 7, "line": {"width": 0.5, "color": "blue"}},
name=station_name + " (ENTRIES)",
)
)
for station_name in filtered_df["station_id"].unique():
df_by_station = filtered_df[filtered_df["station_id"] == station_name]
traces.append(
go.Scatter( # maybe use Bar
y=df_by_station["EXITS_diff_sum"],
x=df_by_station["WEEKDAY"],
text=df_by_station["HODBIN2"],
mode="lines",
opacity=0.6,
marker={"size": 9, "line": {"width": 0.5, "color": "red"}},
name=station_name + " (EXITS)",
)
)
return {
"data": traces,
"layout": go.Layout(
yaxis={
"type": "log",
"title": "Midtown: Number of People through Entries & Exits",
},
xaxis={"title": "Weekday"},
hovermode="closest",
),
}
if __name__ == "__main__":
app.run_server(debug=True)
| 29.205128 | 84 | 0.558092 |
1bb889e69c910f14533c7281feee69da1b72328f | 2,100 | py | Python | glue_vispy_viewers/extern/vispy/visuals/spectrogram.py | jzuhone/glue-vispy-viewers | d940705f4ba95f8d7a9a74d37fb68c71080b490a | [
"BSD-2-Clause"
] | 3 | 2018-05-09T17:55:53.000Z | 2019-07-22T09:14:41.000Z | glue_vispy_viewers/extern/vispy/visuals/spectrogram.py | jzuhone/glue-vispy-viewers | d940705f4ba95f8d7a9a74d37fb68c71080b490a | [
"BSD-2-Clause"
] | 19 | 2015-06-16T14:33:22.000Z | 2015-07-27T21:18:15.000Z | graphViz/vispy/visuals/spectrogram.py | onecklam/ethereum-graphviz | 6993accf0cb85e23013bf7ae6b04145724a6dbd2 | [
"Apache-2.0"
] | 1 | 2017-09-29T01:24:47.000Z | 2017-09-29T01:24:47.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .image import ImageVisual
from ..util.fourier import stft, fft_freqs
from ..ext.six import string_types
class SpectrogramVisual(ImageVisual):
"""Calculate and show a spectrogram
Parameters
----------
x : array-like
1D signal to operate on. ``If len(x) < n_fft``, x will be
zero-padded to length ``n_fft``.
n_fft : int
Number of FFT points. Much faster for powers of two.
step : int | None
Step size between calculations. If None, ``n_fft // 2``
will be used.
fs : float
The sample rate of the data.
window : str | None
Window function to use. Can be ``'hann'`` for Hann window, or None
for no windowing.
color_scale : {'linear', 'log'}
Scale to apply to the result of the STFT.
``'log'`` will use ``10 * log10(power)``.
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
"""
def __init__(self, x, n_fft=256, step=None, fs=1., window='hann',
color_scale='log', cmap='cubehelix', clim='auto'):
self._n_fft = int(n_fft)
self._fs = float(fs)
if not isinstance(color_scale, string_types) or \
color_scale not in ('log', 'linear'):
raise ValueError('color_scale must be "linear" or "log"')
data = stft(x, self._n_fft, step, self._fs, window)
data = np.abs(data)
data = 20 * np.log10(data) if color_scale == 'log' else data
super(SpectrogramVisual, self).__init__(data, clim=clim, cmap=cmap)
@property
def freqs(self):
"""The spectrogram frequencies"""
return fft_freqs(self._n_fft, self._fs)
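# Usage sketch (assumptions: `signal` is a 1-D numpy array of samples recorded at
# 44.1 kHz, and the visual is attached to a vispy scene elsewhere; both are
# illustrative):
#     spec = SpectrogramVisual(signal, n_fft=512, fs=44100., color_scale='log')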
| 36.842105 | 79 | 0.561905 |
56a17950e89d3f9f213ebc9414d520df9373dff2 | 6,431 | py | Python | boilerplate/mail.py | ikcam/django-boilerplate | d8253665d74f0f18cf9a5fd46772598a60f20c5c | [
"Apache-2.0"
] | 5 | 2016-10-02T04:57:10.000Z | 2019-08-12T22:22:39.000Z | boilerplate/mail.py | ikcam/django-boilerplate | d8253665d74f0f18cf9a5fd46772598a60f20c5c | [
"Apache-2.0"
] | null | null | null | boilerplate/mail.py | ikcam/django-boilerplate | d8253665d74f0f18cf9a5fd46772598a60f20c5c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.utils.text import slugify
from django.utils import translation
class SendEmail(object):
"""
Send an emails or several emails easily.
**Example**
::
email = SendEmail(
to='user@example.com',
template_name_suffix='recover-account',
subject='Recover your account',
is_html=True
)
email.set_from_email('no-reply@example.com')
email.set_from_name('No Reply')
email.set_language('es')
email.add_context_data('protocol', 'https')
email.add_context_data('domain', 'example.com')
email.add_context_data('uid', uid)
email.add_context_data('token', token)
email.add_context_data('object', user)
email.add_context_data('site_name', 'Boilerplate - Make it easy')
email.send()
"""
context_data = {}
files = list()
from_name = None
from_email = None
language = None
headers = {}
template_name = None
content = None
def __init__(
self, to, subject, is_html=False, template_name_suffix=None,
content=None
):
if isinstance(to, list):
self.to = to
else:
self.to = list()
self.to.append(to)
self.content = content
self.template_name_suffix = template_name_suffix
self.subject = subject
self.is_html = is_html
def add_file(self, file):
self.files.append(file)
def add_context_data(self, key, value):
"""
Add a key-value to the context data of the email
**Parameters**:
:key: A valid key name
:value: Can be an object or any kind of value
"""
self.context_data.update({
str(key): value
})
def add_header(self, key, value):
"""
Add a key-value to the header of the email
**Parameters**:
:key: A valid key name
:value: Can be an object or any kind of value
"""
self.headers.update({
str(key): value
})
def set_from_email(self, from_email=None):
"""
Set the email sender
**Parameters**:
:from_email: String, a valid email
"""
if from_email:
self.from_email = str(from_email)
else:
self.from_email = settings.SERVER_EMAIL
def set_from_name(self, from_name=None):
"""
        Set the sender name
**Parameters**:
:from_name: String, a valid name
"""
if from_name:
self.from_name = str(from_name)
else:
self.from_name = settings.SERVER_EMAIL
def set_language(self, language):
"""
        Set the email language
**Parameters**:
:language: String, a valid value
"""
language = slugify(language)
if language not in dict(settings.LANGUAGES):
raise Exception("Invalid language.")
self.language = language
def set_template_name_suffix(self, template_name_suffix=None):
"""
Set the email template name suffix
**Parameters**:
:template_name_suffix: String: the name of the template without the
extension
"""
if template_name_suffix:
self.template_name_suffix = str(template_name_suffix)
def set_template_name(self, template_name=None):
"""
Set the email template name
**Parameters**:
:template_name: String: the name of the template without the
extension
"""
if template_name:
self.template_name = template_name
else:
if not self.template_name_suffix:
self.set_template_name_suffix()
if self.template_name_suffix:
self.template_name = 'mail/' + self.template_name_suffix
else:
self.template_name = None
def get_content(self):
return self.content
def get_from_email(self):
if not self.from_email:
self.set_from_email()
return self.from_email
def get_from_name(self):
if not self.from_name:
self.set_from_name()
return self.from_name
def get_template_name_suffix(self):
if not self.template_name_suffix:
self.set_template_name_suffix()
return self.template_name_suffix
def get_template_name(self):
if not self.template_name:
self.set_template_name()
return self.template_name
def get_context_data(self, **kwargs):
self.context_data.update({
'email': self
})
return self.context_data
def send(self, fail_silently=True, test=False):
template_name = self.get_template_name()
content = self.get_content()
if not template_name and not content:
raise Exception(
"You need to set the `template_name_suffix` or `content`."
)
if self.language:
translation.activate(self.language)
if template_name:
plain_template = get_template(template_name + '.txt')
plain_content = plain_template.render(self.get_context_data())
if self.is_html:
html_template = get_template(template_name + '.html')
html_content = html_template.render(self.get_context_data())
elif content:
plain_content = content
if self.is_html:
html_content = content
if test:
return plain_content
msg = EmailMultiAlternatives(
subject=self.subject,
body=plain_content,
from_email='%s <%s>' % (
self.get_from_name(), self.get_from_email()
),
to=self.to,
headers=self.headers,
)
if self.is_html:
msg.attach_alternative(html_content, 'text/html')
if self.files:
for name, file_content, content_type in self.files:
msg.attach(name, file_content, content_type)
return msg.send(fail_silently=fail_silently)
| 27.600858 | 79 | 0.582336 |
52f4cdf6eac797b36019bf2e64955a6f08acf931 | 441 | py | Python | setup.py | grundprinzip/pyxplorer | 34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2 | [
"BSD-2-Clause"
] | 10 | 2015-01-10T00:54:56.000Z | 2021-08-14T17:01:37.000Z | setup.py | grundprinzip/pyxplorer | 34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2 | [
"BSD-2-Clause"
] | 1 | 2016-09-08T15:09:52.000Z | 2016-09-08T15:09:52.000Z | setup.py | grundprinzip/pyxplorer | 34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2 | [
"BSD-2-Clause"
] | 1 | 2015-04-24T19:21:03.000Z | 2015-04-24T19:21:03.000Z | from distutils.core import setup
setup(
name='pyxplorer',
version='0.1.0',
author='Martin Grund',
author_email='grundprinzip+pip@gmail.com',
packages=['pyxplorer'],
url='http://github.com/grundprinzip/pyxplorer',
license='LICENSE',
description='Simple Big Data Profiling',
long_description=open('README.rst').read(),
install_requires=[
"snakebite",
"pyhs2",
"pandas"
],
)
| 23.210526 | 51 | 0.630385 |
3054667af2bdade775bc12dbbadc8b999f2adab4 | 1,662 | py | Python | atom/FileZillaPortable_feed.py | click3/SubProjects | bda2a7124c4d5d34576610154793504a61790ca0 | [
"BSL-1.0"
] | null | null | null | atom/FileZillaPortable_feed.py | click3/SubProjects | bda2a7124c4d5d34576610154793504a61790ca0 | [
"BSL-1.0"
] | null | null | null | atom/FileZillaPortable_feed.py | click3/SubProjects | bda2a7124c4d5d34576610154793504a61790ca0 | [
"BSL-1.0"
] | null | null | null | #!/usr/bin/python3.2
# -*- coding: utf-8 -*-
from FeedUpdate import FeedUpdate, FeedUpdateData
import re
import datetime
class FileZillaPortableData(FeedUpdateData):
def getCheckUrl():
return 'http://portableapps.com/apps/internet/filezilla_portable'
def __init__(self):
super().__init__()
self.__updateExist = False
self.__title = ''
self.__url = ''
self.__isError = False
def setFeed(self, feed):
super().setFeed(feed)
def setBody(self, body):
super().setBody(body)
assert(isinstance(body, str))
p = re.compile(r'<strong>Version ((?:\d+\.?)+)</strong>', re.DOTALL)
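        # Illustrative match (hypothetical page snippet): the pattern above would
        # match '<strong>Version 3.55.1</strong>' and capture '3.55.1'.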
result = p.search(body)
if (result == None):
self.__isError = True
return
version = result.group(1)
title = self.__class__.__name__.split("Data")[0] + version
entrys = super().getFeed().getEntry()
if (entrys[len(entrys)-1]['title'] == title):
return
self.__updateExist = True
self.__title = title
self.__url = self.__class__.getCheckUrl()
def updateExist(self):
return self.__updateExist
def getTitle(self):
return self.__title
def getUrl(self):
return self.__url
def getSummary(self):
return ""
def getUpdated(self):
return datetime.datetime.utcnow()
def isError(self):
return self.__isError
def main():
return FeedUpdate(__file__, 'http://portableapps.com/apps/internet/filezilla_portable').run()
if __name__ == '__main__':
main()
| 25.96875 | 98 | 0.582431 |
a434b25eea998b9c5edecc3c77be6f17f57b7f71 | 1,537 | py | Python | tgtypes/models/chat_permissions.py | autogram/tgtypes | 90f8d0d35d3c372767508e56c20777635e128e38 | [
"MIT"
] | null | null | null | tgtypes/models/chat_permissions.py | autogram/tgtypes | 90f8d0d35d3c372767508e56c20777635e128e38 | [
"MIT"
] | null | null | null | tgtypes/models/chat_permissions.py | autogram/tgtypes | 90f8d0d35d3c372767508e56c20777635e128e38 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Optional
from ._base import MutableTelegramObject
class ChatPermissions(MutableTelegramObject):
"""
Describes actions that a non-administrator user is allowed to take in a chat.
Source: https://core.telegram.org/bots/api#chatpermissions
"""
can_send_messages: Optional[bool] = None
"""True, if the user is allowed to send text messages, contacts, locations and venues"""
can_send_media_messages: Optional[bool] = None
"""True, if the user is allowed to send audios, documents, photos, videos, video notes and
voice notes, implies can_send_messages"""
can_send_polls: Optional[bool] = None
"""True, if the user is allowed to send polls, implies can_send_messages"""
can_send_other_messages: Optional[bool] = None
"""True, if the user is allowed to send animations, games, stickers and use inline bots,
implies can_send_media_messages"""
can_add_web_page_previews: Optional[bool] = None
"""True, if the user is allowed to add web page previews to their messages, implies
can_send_media_messages"""
can_change_info: Optional[bool] = None
"""True, if the user is allowed to change the chat title, photo and other settings. Ignored in
public supergroups"""
can_invite_users: Optional[bool] = None
"""True, if the user is allowed to invite new users to the chat"""
can_pin_messages: Optional[bool] = None
"""True, if the user is allowed to pin messages. Ignored in public supergroups"""
| 43.914286 | 98 | 0.732596 |
e46b23aa7e7f7893bfc633cb2b315f2978165abc | 915 | py | Python | test.py | ning1875/falcon-dashboard | c04e625c49358a278d5f1663a8055627eeb56334 | [
"Apache-2.0"
] | 1 | 2020-07-09T00:40:42.000Z | 2020-07-09T00:40:42.000Z | test.py | ning1875/falcon-dashboard | c04e625c49358a278d5f1663a8055627eeb56334 | [
"Apache-2.0"
] | null | null | null | test.py | ning1875/falcon-dashboard | c04e625c49358a278d5f1663a8055627eeb56334 | [
"Apache-2.0"
] | null | null | null | #-*- coding:utf-8 -*-
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
base_dir = os.path.dirname(os.path.abspath(__file__))
activate_this = '%s/env/bin/activate_this.py' % base_dir
execfile(activate_this, dict(__file__=activate_this))
import sys
sys.path.insert(0, base_dir)
from rrd import app
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8081, debug=True) | 32.678571 | 74 | 0.746448 |
2c5c503d35db26c03d9e2b33d42d97ef9240c8f7 | 15,806 | py | Python | ds4ml/utils.py | Mykrass/data-synthesis-for-machine-learning | df56959424f73d7797b70dcbf9cc5105a2a2365f | [
"Apache-2.0"
] | 1 | 2020-12-17T09:21:07.000Z | 2020-12-17T09:21:07.000Z | ds4ml/utils.py | Mykrass/data-synthesis-for-machine-learning | df56959424f73d7797b70dcbf9cc5105a2a2365f | [
"Apache-2.0"
] | null | null | null | ds4ml/utils.py | Mykrass/data-synthesis-for-machine-learning | df56959424f73d7797b70dcbf9cc5105a2a2365f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Utility functions for data synthesis. Including:
input/output,
machine learning,
...
"""
import argparse
import csv
import logging
import numpy as np
import os
import hashlib
from string import ascii_lowercase
from pandas import Series, DataFrame
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------
# Utilities for Input/Output
def has_header(path, encoding='utf-8', sep=','):
"""
Auto-detect if csv file has header.
"""
from pandas import read_csv
def _offset_stream():
from io import StringIO
if isinstance(path, StringIO):
path.seek(0)
_offset_stream()
df0 = read_csv(path, header=None, nrows=10, skipinitialspace=True,
encoding=encoding, sep=sep)
_offset_stream()
df1 = read_csv(path, nrows=10, skipinitialspace=True, encoding=encoding, sep=sep)
# If the column is numerical, its dtype is different without/with header
# TODO how about categorical columns
_offset_stream()
return tuple(df0.dtypes) != tuple(df1.dtypes)
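# Illustrative idea behind the dtype check above (hypothetical data): for a file
# like "age,income\n34,1200", reading with header=None yields object dtypes (the
# header row mixes with the numbers), while the inferred-header read yields int64
# columns, so the dtype tuples differ and a header is detected.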
def read_data_from_csv(path, na_values=None, header=None, sep=","):
"""
    Read a data set from a csv or other delimited text file, and remove empty
    columns (those where all values are null).
"""
from pandas import read_csv
try:
header = header or ('infer' if has_header(path, sep=sep) else None)
df = read_csv(path, skipinitialspace=True, na_values=na_values,
header=header, sep=sep, float_precision='high')
except (UnicodeDecodeError, NameError):
header = header or ('infer' if has_header(path, encoding='latin1',
sep=sep) else None)
df = read_csv(path, skipinitialspace=True, na_values=na_values,
header=header, encoding='latin1', sep=sep,
float_precision='high')
# Remove columns with empty active domain, i.e., all values are missing.
before_attrs = set(df.columns)
    df = df.dropna(axis=1, how='all')
after_attrs = set(df.columns)
if len(before_attrs) > len(after_attrs):
print(
            f'Empty columns are removed, including {before_attrs - after_attrs}.')
# Remove rows with all empty values
    df = df.dropna(axis=0, how='all')
if header is None:
df = df.rename(lambda x: f'#{x}', axis='columns')
return df
def write_csv(file, data):
""" write data to csv files """
# if data is not a list of list, make it:
if isinstance(data, list) and not isinstance(data[0], list):
data = [data]
with open(file, 'a') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(data)
f.close()
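# Illustrative calls: write_csv('rows.csv', ['id', 'name']) appends one row,
# while write_csv('rows.csv', [[1, 'a'], [2, 'b']]) appends two rows.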
def file_name(path):
""" Return the file name without extension from a path. """
return os.path.splitext(os.path.basename(path))[0]
def str_to_list(val, separator=','):
"""
Split one string to a list by separator.
"""
if val is None:
return None
return val.split(separator)
# ---------------------------------------------------------------
# Utilities for Plotting
def _compress_svg_data(svg: str):
import re
value = re.sub(r'\n', ' ', svg)
value = re.sub(r' {2,}', ' ', value)
value = re.sub(r'<style type="text/css">.*</style>', '', value)
value = re.sub(r'<!--(.*?)-->', '', value)
value = re.sub(r'<\?xml.*\?>', '', value)
value = re.sub(r'<!DOCTYPE.*dtd">', '', value)
return value.strip()
def _prepare_for_cjk_characters(chars):
""" If input string has Chinese, Japanese, and Korean characters, set
specific font for them. """
has_cjk = False
for c in chars:
if any([start <= ord(c) <= end for start, end in [
(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)
]]):
has_cjk = True
break
if has_cjk:
import matplotlib
matplotlib.rcParams['font.family'] = ['Microsoft Yahei']
def plot_confusion_matrix(matrix: DataFrame, title='',
ylabel='Predicted', xlabel='Actual',
otype='string', path=None, cmap='Blues',
vrange=None):
"""
Plot a confusion matrix to show predict and actual values.
Parameters
----------
matrix : pandas.DataFrame
the matrix to plot
title : str
title of image
ylabel : str
label in y-axis of image
xlabel : str
label in x-axis of image
otype : str
output type, support 'string' (default), 'file', 'show'
path : str
output path for 'file' type, default is 'matrix_2_3.svg' ((2, 3) is the
shape of matrix).
cmap : str
color
vrange : 2-tuple
manually set range of matrix
"""
import matplotlib.pyplot as plt
_prepare_for_cjk_characters(''.join(map(str, matrix.columns)))
figsize = (6.4, 4.8)
n_columns = matrix.columns.size
if n_columns > 9:
from math import ceil
width = 6.4 + ceil((n_columns - 9) / 2) * 0.8
height = width * figsize[1] / figsize[0]
figsize = (width, height)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
if vrange is None:
vrange = (matrix.values.min(), matrix.values.max())
exp = len(str(vrange[1])) - 1
if exp > 2:
matrix = matrix.div(10 ** exp)
vrange = (vrange[0] / (10 ** exp), vrange[1] / (10 ** exp))
im = ax.imshow(matrix.values, cmap=cmap, vmin=vrange[0], vmax=vrange[1])
ax.set(xticks=np.arange(matrix.shape[1]),
yticks=np.arange(matrix.shape[0]),
xticklabels=matrix.columns,
yticklabels=matrix.index,
title=title,
ylabel=ylabel,
xlabel=xlabel)
ax.tick_params(which='both', length=0)
for edge, spine in ax.spines.items():
spine.set_visible(False)
# Loop over data dimensions and create text annotations.
thresh = (vrange[0] + vrange[1]) / 2.
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
if exp > 2:
text = '{:.2f}'.format(matrix.iloc[i, j])
else:
text = matrix.iloc[i, j]
ax.text(j, i, text,
ha="center", va="center",
color="white" if matrix.iloc[i, j] > thresh else "black")
from ds4ml.metrics import error_rate
ax.set_title('Rate: {:.1%}'.format(error_rate(matrix)), fontsize=10)
cbar = ax.figure.colorbar(im, ax=ax, pad=0.03, aspect=30)
cbar.outline.set_visible(False)
if exp > 2:
cbar.ax.text(0, -0.1, f'(e+{exp})')
cbar.ax.tick_params(which='both', length=0)
fig.autofmt_xdate()
fig.tight_layout()
plt.subplots_adjust()
try:
if otype == 'string':
from io import StringIO
img = StringIO()
plt.savefig(img, format='svg', bbox_inches='tight')
return _compress_svg_data(img.getvalue())
elif otype == 'file':
if path is None:
path = 'matrix_{}_{}.svg'.format(matrix.shape[0],
matrix.shape[1])
plt.savefig(path, bbox_inches='tight')
return path
elif otype == 'show':
plt.show()
finally:
plt.close()
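# Illustrative usage (a sketch with a hypothetical 2x2 matrix; otype defaults to
# 'string', so SVG markup is returned):
#   cm = DataFrame([[50, 3], [4, 43]], index=['neg', 'pos'], columns=['neg', 'pos'])
#   svg = plot_confusion_matrix(cm, xlabel='Actual', ylabel='Predicted')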
def plot_histogram(bins, heights, otype='string', path=None,
x_label='', y_label='Frequency'):
"""
    Plot paired bars (e.g. raw vs. synthesized frequencies) over the same bins.
    Note: because pyplot auto-scales its diagrams, this function adjusts the
    figure width and tick layout for different numbers of bins.
"""
import matplotlib.pyplot as plt
_prepare_for_cjk_characters(''.join(map(str, bins)))
length = len(bins)
fig_width = 6.4
ticks = 22
if length < 5:
# make 7 bars to show the data
# insert 0 or '' to the center of array
# e.g. [3, 4] => [3, 0, 4], ['1', '2', '3'] => ['1', '', '2', '', '3'], ...
bins = list(map(str, bins))
bins = ',,'.join(bins).split(',')
heights = np.insert(heights, list(range(1, length)), 0, axis=1)
# pad 0 or '' to array in the begin and end
pad = (7 - len(bins)) // 2
bins = tuple([''] * pad + bins + [''] * pad)
heights = np.append(np.insert(heights, [0], [0] * pad, axis=1),
np.zeros((len(heights), pad), dtype=int), axis=1)
length = 7
else:
# TODO: if count of bins is bigger than 33, and it is categorical, how?
if length >= 60:
bins = bins[:60]
heights = heights[:, :60]
length = 60
bins = tuple(map(str, bins))
length_ = [8, 12, 16, 20, 32, 42, 48, 60]
ticks_ = [22, 30, 36, 48, 76, 96, 108, 172]
width_ = [6.4, 9.6, 11.2, 11.2, 11.2, 14, 14, 15]
idx = len([i for i in length_ if length > i])
ticks = ticks_[idx]
fig_width = width_[idx]
x = np.arange(length)
fig = plt.figure(figsize=(fig_width, 4.8))
ax = fig.add_subplot(111)
width = length / ticks
diff = width / 2 + 0.01
ax.bar(x - diff, heights[0], width=width, color='#38bbe8')
ax.bar(x + diff, heights[1], width=width, color='#ffd56e')
ax.legend(['raw', 'synth'])
fig.autofmt_xdate()
# fig.tight_layout(pad=1.5)
plt.xticks(x, bins, fontsize=10)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.subplots_adjust()
try:
if otype == 'string':
from io import StringIO
img = StringIO()
plt.savefig(img, format='svg', bbox_inches='tight')
return _compress_svg_data(img.getvalue())
elif otype == 'file':
if path is None:
path = 'histogram_{}_{}.svg'.format(len(bins), len(heights))
plt.savefig(path, bbox_inches='tight')
return path
elif otype == 'show':
plt.show()
finally:
plt.close()
def plot_heatmap(data, title='', otype='string', path=None, cmap='Blues'):
"""
Plot a heatmap to show pairwise correlations (e.g. mutual information).
Parameters see <code>plot_confusion_matrix</code>
"""
import matplotlib.pyplot as plt
_prepare_for_cjk_characters(''.join(data.columns))
fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(data.values, cmap=cmap)
ax.set_title(title, fontsize=10)
ticks = np.arange(len(data.columns))
ax.set_xticks(ticks)
ax.set_xticklabels(data.columns)
ax.set_yticks(ticks)
ax.set_yticklabels(data.index)
ax.tick_params(which='both', length=0)
for edge, spine in ax.spines.items():
spine.set_visible(False)
# set color bar in the right
cbar = ax.figure.colorbar(im, ax=ax, pad=0.03, aspect=30)
cbar.outline.set_visible(False)
cbar.ax.tick_params(which='both', length=0)
fig.autofmt_xdate()
fig.tight_layout()
plt.subplots_adjust()
try:
if otype == 'string':
from io import StringIO
img = StringIO()
plt.savefig(img, format='svg', facecolor='#ebebeb',
bbox_inches='tight')
return _compress_svg_data(img.getvalue())
elif otype == 'file':
if path is None:
path = 'heatmap_{}_{}.svg'.format(data.shape[0], data.shape[1])
plt.savefig(path, facecolor='#ebebeb', bbox_inches='tight')
return path
elif otype == 'show':
plt.show()
finally:
plt.close()
# ---------------------------------------------------------------
# Utilities for Metrics, ML
def train_and_predict(x_train, y_train, x_test):
"""
    Train an SVM classifier on <x_train, y_train> and predict labels for x_test.
    TODO: do some analysis and then choose the SVM classifier and its parameters
"""
from sklearn.svm import SVC
classifier = SVC(gamma='scale')
classifier.fit(x_train, y_train)
result = classifier.predict(x_test)
return result
def mutual_information(child: Series, parents: DataFrame):
"""
Mutual information of child (Series) and parents (DataFrame) distributions
"""
from sklearn.metrics import mutual_info_score
if parents.shape[1] == 1:
parents = parents.iloc[:, 0]
else:
parents = parents.apply(lambda x: ' '.join(x.array), axis=1)
return mutual_info_score(child, parents)
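# Note on the join above (illustrative): with more than one parent column, each
# parents row such as ('a', 'x') is concatenated to the string key 'a x' before
# sklearn's mutual_info_score is applied.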
def normalize_distribution(frequencies):
frequencies = np.array(frequencies, dtype=float)
frequencies = frequencies.clip(0)
total = frequencies.sum()
if total > 0:
if np.isinf(total):
return normalize_distribution(np.isinf(frequencies))
else:
return frequencies / total
else:
return np.full_like(frequencies, 1 / frequencies.size)
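# Illustrative values: normalize_distribution([2, 0, 2]) -> [0.5, 0.0, 0.5];
# an all-zero input falls back to a uniform distribution.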
def normalize_range(start, stop, bins=20):
"""
    Return evenly spaced values within a given interval, using a dynamically
    calculated step so that the number of bins stays close to 20. For integer
    intervals the smallest step is 1; for float intervals it is 0.5.
"""
from math import ceil, floor
if isinstance(start, int) and isinstance(stop, int):
step = ceil((stop - start) / bins)
else:
start = floor(start)
stop = ceil(stop)
step = (stop - start) / bins
step = ceil(step) if ceil(step) == round(step) else round(step) + 0.5
stop = step * (bins + 1) + start
return np.arange(start, stop, step)
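# Illustrative values: normalize_range(0, 100) picks an integer step of 5 and
# returns the 21 bin edges 0, 5, ..., 100.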
def is_datetime(value: str):
    """
    Detect whether a value is a datetime. Excludes weekday and month literals
    (e.g. 'mon', 'jan') that `dateutil.parser` would otherwise accept.
    """
    from dateutil.parser import parse
literals = {'mon', 'monday', 'tue', 'tuesday', 'wed', 'wednesday', 'thu',
'thursday', 'fri', 'friday', 'sat', 'saturday', 'sun', 'sunday',
'jan', 'january', 'feb', 'february', 'mar', 'march', 'apr',
'april', 'may', 'jun', 'june', 'jul', 'july', 'aug', 'august',
'sep', 'sept', 'september', 'oct', 'october', 'nov', 'november',
'dec', 'december'}
try:
value = value.lower()
if value in literals:
return False
parse(value)
return True
except ValueError:
return False
except AttributeError:
return False
def randomize_string(length):
return ''.join(np.random.choice(list(ascii_lowercase), size=length))
def pseudonymise_string(value):
""" pseudonymise a string by RIPEMD-160 hashes """
return hashlib.new('ripemd160', str(value).encode('utf-8')).hexdigest()
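# Illustrative property: the mapping is deterministic, e.g. pseudonymise_string(42)
# and pseudonymise_string('42') yield the same 40-character hex digest, so joins
# on pseudonymised keys are preserved.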
# ---------------------------------------------------------------
# Utilities for arguments parser
class CustomFormatter(argparse.HelpFormatter):
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
# change to
# -s, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
# parts.append('%s %s' % (option_string, args_string))
parts.append('%s' % option_string)
parts[-1] += ' %s' % args_string
return ', '.join(parts)
| 33.275789 | 85 | 0.571239 |
cc8a1197119fa3de0ab9b943795fe310280b6e1d | 1,839 | py | Python | app/main/views.py | Krasivaya/News-Highlight | 4cd53ad52603170f8b62b70a73d0916c5db0dc03 | [
"MIT"
] | null | null | null | app/main/views.py | Krasivaya/News-Highlight | 4cd53ad52603170f8b62b70a73d0916c5db0dc03 | [
"MIT"
] | null | null | null | app/main/views.py | Krasivaya/News-Highlight | 4cd53ad52603170f8b62b70a73d0916c5db0dc03 | [
"MIT"
] | null | null | null | from flask import render_template, request, redirect, url_for
from . import main
from ..request import get_sources, view_source, get_by_category, get_by_language, search_article, get_top_headlines
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to best News Website Online'
sort_type = request.args.get('sortBy_query')
news_sources = get_sources()
top_headlines = get_top_headlines()
Categories = ['business', 'entertainment', 'general', 'health', 'science', 'sports', 'technology']
NewCategories = []
for Category in Categories:
cat = get_by_category(Category)
NewCategories.append(cat)
Languages = ['en', 'ar', 'de', 'es', 'fr', 'he', 'it', 'nl', 'no', 'pt', 'ru', 'se', 'ud', 'zh']
NewLanguages = []
for Language in Languages:
lan = get_by_language(Language)
NewLanguages.append(lan)
return render_template('index.html', sources = news_sources, title = title, Categories = NewCategories, Languages = NewLanguages, sort_type = sort_type, headlines = top_headlines)
@main.route('/source/<id>')
def source(id):
news_articles = view_source(id)
title = id
searched_name = request.args.get('search_query')
if searched_name:
return redirect(url_for('main.search', name = searched_name, id = id))
else:
return render_template('source.html', title = title, news = news_articles)
@main.route('/search/<id>/<name>')
def search(id, name):
'''
View function to display the search results
'''
name_list = name.split(" ")
name_format = "+".join(name_list)
search_articles = search_article(name_format, id)
title = f'search results for {name}'
return render_template('search.html', news = search_articles)
| 34.055556 | 183 | 0.672648 |
4e2782db41a6c3e7319966f3aacaa5a92631cf5c | 2,344 | py | Python | RenderVideo.py | apichlkostner/CarND-Vehicle-Detection | a3937b19b5e8e36da01cb5c15437e952c70a766a | [
"MIT"
] | null | null | null | RenderVideo.py | apichlkostner/CarND-Vehicle-Detection | a3937b19b5e8e36da01cb5c15437e952c70a766a | [
"MIT"
] | null | null | null | RenderVideo.py | apichlkostner/CarND-Vehicle-Detection | a3937b19b5e8e36da01cb5c15437e952c70a766a | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import numpy as np
import cv2
from CameraCalibration import CalibrateCamera
from ProcessImage import ProcessImage
from ProcessImageLane import ProcessImageLane
from moviepy.editor import VideoFileClip
import glob
import matplotlib.image as mpimg
def main():
if (len(sys.argv) > 1) and isinstance(sys.argv[1], str):
filename = sys.argv[1]
else:
filename = 'test_video.mp4'
print('Processing file ' + filename)
white_output = 'processed_videos/' + filename
clip1 = VideoFileClip('source_videos/' + filename)#.subclip(27, 30)
#################################
# Lane detection
#################################
# calculate matrices for perspective transformation
target_left_x = 300
target_right_x = 1002
target_top_y = 0
target_bottom_y =690
src_points = np.float32([[283, 664], [548, 480], [736, 480], [1019, 664]])
dst_points = np.float32([[target_left_x, target_bottom_y], [target_left_x, target_top_y],
[target_right_x, target_top_y], [target_right_x, target_bottom_y]])
# transformation to bird's eye view
M = cv2.getPerspectiveTransform(src_points, dst_points)
# transformation back to normal view
Mi = cv2.getPerspectiveTransform(dst_points, src_points)
# calculate or load camera calibration
calCam = CalibrateCamera.load()
if calCam == None:
images = glob.glob('camera_cal/calibration*.jpg')
calCam = CalibrateCamera()
calCam.findCorners(images, (9, 6))
calCam.calibrateCamera()
calCam.write()
# class which will process the images, initialize with image size and
# transformation matrices
ld = ProcessImageLane()
ld.fit((720, 1280), M, Mi, calCam=calCam)
#################################
# Vehicle detection
#################################
# class to detect the vehicles
# for a quick merge with the lane detection the lane detection pipeline is added
vd = ProcessImage()
vd.fit(ld)
if False:
image = mpimg.imread('test.jpg')
vd.process_image(image)
return
else:
white_clip = clip1.fl_image(vd.process_image) # color images
white_clip.write_videofile(white_output, audio=False)
if __name__ == "__main__":
main()
| 30.441558 | 99 | 0.637372 |
3eb3924f5d14b00a279d7802406fbab7f95e5864 | 5,767 | py | Python | egg/core/callbacks.py | chan0park/EGG | d7d7238da35eac0306b6bf13c47017a6c0c0b67e | [
"MIT"
] | null | null | null | egg/core/callbacks.py | chan0park/EGG | d7d7238da35eac0306b6bf13c47017a6c0c0b67e | [
"MIT"
] | null | null | null | egg/core/callbacks.py | chan0park/EGG | d7d7238da35eac0306b6bf13c47017a6c0c0b67e | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Dict, Any, Union, NamedTuple
import pathlib
import torch
from egg.core.util import get_summary_writer
class Callback:
trainer: 'Trainer'
def on_train_begin(self, trainer_instance: 'Trainer'):
self.trainer = trainer_instance
def on_train_end(self):
pass
def on_test_begin(self):
pass
def on_test_end(self, loss: float, logs: Dict[str, Any] = None):
pass
def on_epoch_begin(self):
pass
def on_epoch_end(self, loss: float, logs: Dict[str, Any] = None):
pass
class ConsoleLogger(Callback):
def __init__(self, print_train_loss=False, as_json=False, print_test_loss=True):
self.print_train_loss = print_train_loss
self.as_json = as_json
self.epoch_counter = 0
self.print_test_loss = print_test_loss
def on_test_end(self, loss: float, logs: Dict[str, Any] = None):
if self.print_test_loss:
if self.as_json:
dump = dict(mode='test', epoch=self.epoch_counter, loss=self._get_metric(loss))
for k, v in logs.items():
dump[k] = self._get_metric(v)
output_message = json.dumps(dump)
else:
output_message = f'test: epoch {self.epoch_counter}, loss {loss:.4f}, {logs}'
print(output_message, flush=True)
def on_epoch_end(self, loss: float, logs: Dict[str, Any] = None):
self.epoch_counter += 1
if self.print_train_loss:
if self.as_json:
dump = dict(mode='train', epoch=self.epoch_counter, loss=self._get_metric(loss))
for k, v in logs.items():
dump[k] = self._get_metric(v)
output_message = json.dumps(dump)
else:
output_message = f'train: epoch {self.epoch_counter}, loss {loss:.4f}, {logs}'
print(output_message, flush=True)
def _get_metric(self, metric: Union[torch.Tensor, float]) -> float:
if torch.is_tensor(metric) and metric.dim() > 1:
return metric.mean().item()
elif torch.is_tensor(metric):
return metric.item()
elif type(metric) == float:
return metric
else:
raise TypeError('Metric must be either float or torch.Tensor')
class TensorboardLogger(Callback):
def __init__(self, writer=None):
if writer:
self.writer = writer
else:
self.writer = get_summary_writer()
self.epoch_counter = 0
def on_test_end(self, loss: float, logs: Dict[str, Any] = None):
self.writer.add_scalar(tag=f'test/loss', scalar_value=loss, global_step=self.epoch_counter)
for k, v in logs.items():
self.writer.add_scalar(tag=f'test/{k}', scalar_value=v, global_step=self.epoch_counter)
def on_epoch_end(self, loss: float, logs: Dict[str, Any] = None):
self.writer.add_scalar(tag=f'train/loss', scalar_value=loss, global_step=self.epoch_counter)
for k, v in logs.items():
self.writer.add_scalar(tag=f'train/{k}', scalar_value=v, global_step=self.epoch_counter)
self.epoch_counter += 1
def on_train_end(self):
self.writer.close()
class TemperatureUpdater(Callback):
def __init__(self, agent, decay=0.9, minimum=0.1, update_frequency=1):
self.agent = agent
assert hasattr(agent, 'temperature'), 'Agent must have a `temperature` attribute'
assert not isinstance(agent.temperature, torch.nn.Parameter), \
'When using TemperatureUpdater, `temperature` cannot be trainable'
self.decay = decay
self.minimum = minimum
self.update_frequency = update_frequency
self.epoch_counter = 0
def on_epoch_end(self, loss: float, logs: Dict[str, Any] = None):
if self.epoch_counter % self.update_frequency == 0:
self.agent.temperature = max(self.minimum, self.agent.temperature * self.decay)
self.epoch_counter += 1
class Checkpoint(NamedTuple):
epoch: int
model_state_dict: Dict[str, Any]
optimizer_state_dict: Dict[str, Any]
class CheckpointSaver(Callback):
def __init__(
self,
checkpoint_path: Union[str, pathlib.Path],
checkpoint_freq: int = 1,
prefix: str = ''
):
self.checkpoint_path = pathlib.Path(checkpoint_path)
self.checkpoint_freq = checkpoint_freq
self.prefix = prefix
self.epoch_counter = 0
def on_epoch_end(self, loss: float, logs: Dict[str, Any] = None):
if self.checkpoint_freq > 0 and (self.epoch_counter % self.checkpoint_freq == 0):
filename = f'{self.prefix}_{self.epoch_counter}' if self.prefix else str(self.epoch_counter)
self.save_checkpoint(filename=filename)
self.epoch_counter += 1
def on_train_end(self):
self.save_checkpoint(filename=f'{self.prefix}_final' if self.prefix else 'final')
def save_checkpoint(self, filename: str):
"""
Saves the game, agents, and optimizer states to the checkpointing path under `<number_of_epochs>.tar` name
"""
self.checkpoint_path.mkdir(exist_ok=True, parents=True)
path = self.checkpoint_path / f'{filename}.tar'
torch.save(self.get_checkpoint(), path)
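        # Illustrative layout (derived from the naming above): checkpoints land in
        # checkpoint_path as '<prefix>_<epoch>.tar' during training and
        # '<prefix>_final.tar' (or 'final.tar' without a prefix) at the end.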
def get_checkpoint(self):
return Checkpoint(epoch=self.epoch_counter,
model_state_dict=self.trainer.game.state_dict(),
optimizer_state_dict=self.trainer.optimizer.state_dict())
| 35.598765 | 114 | 0.636726 |
0f09fceea11d4f9551d3e15b28783e7a412790d8 | 259 | py | Python | turtlebot_controller/src/turtlebot_controller_node.py | IhabMohamed/deep_motion_planning | 6512f651bafbb56710ddbae501a5b4c22d56ac66 | [
"BSD-3-Clause"
] | 17 | 2020-01-29T16:25:31.000Z | 2022-03-06T13:04:13.000Z | turtlebot_controller/src/turtlebot_controller_node.py | IhabMohamed/deep_motion_planning | 6512f651bafbb56710ddbae501a5b4c22d56ac66 | [
"BSD-3-Clause"
] | 1 | 2020-07-05T21:21:55.000Z | 2020-08-13T08:36:09.000Z | turtlebot_controller/src/turtlebot_controller_node.py | ethz-asl/deep_motion_planning | 6512f651bafbb56710ddbae501a5b4c22d56ac66 | [
"BSD-3-Clause"
] | 10 | 2020-01-30T05:45:12.000Z | 2021-11-20T11:38:14.000Z | #!/usr/bin/env python2
import rospy
from turtlebot_controller import TurtlebotController
def main():
rospy.init_node('turtlebot_controller')
with TurtlebotController() as controller:
rospy.spin()
if __name__ == "__main__":
main()
| 17.266667 | 52 | 0.706564 |
c2536e24fee0000997995324e066323d561809cc | 589 | py | Python | client.py | eshnil2000/crypto_trading | 491f5f99907f9e5082fdfb3d5a6c07cbd137966c | [
"MIT"
] | null | null | null | client.py | eshnil2000/crypto_trading | 491f5f99907f9e5082fdfb3d5a6c07cbd137966c | [
"MIT"
] | null | null | null | client.py | eshnil2000/crypto_trading | 491f5f99907f9e5082fdfb3d5a6c07cbd137966c | [
"MIT"
] | null | null | null | import socket
import json
# Create a socket (SOCK_STREAM means a TCP socket)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Connect to server and send data
sock.connect(("localhost", 8080))
order = {'id': 0, 'type': 'limit', 'side': 'sell', 'price': 99, 'quantity': 12}
serialized_order = json.dumps(order).encode('utf-8')
sock.sendall(serialized_order)
# Receive data from the server and shut down
received = str(sock.recv(1024), "utf-8")
finally:
sock.close()
print("Sent: {}".format(order))
print("Received: {}".format(received)) | 29.45 | 83 | 0.66893 |
5693eb28373257e98ee4a44beda83c4a6baeb227 | 3,823 | py | Python | src/saltext/vmware/modules/tag.py | dmurphy18/salt-ext-modules-vmware | 93342bfed2261c2f41232ff14d6cc0fb731b4d49 | [
"Apache-2.0"
] | null | null | null | src/saltext/vmware/modules/tag.py | dmurphy18/salt-ext-modules-vmware | 93342bfed2261c2f41232ff14d6cc0fb731b4d49 | [
"Apache-2.0"
] | null | null | null | src/saltext/vmware/modules/tag.py | dmurphy18/salt-ext-modules-vmware | 93342bfed2261c2f41232ff14d6cc0fb731b4d49 | [
"Apache-2.0"
] | 1 | 2021-12-15T02:46:59.000Z | 2021-12-15T02:46:59.000Z | # Copyright 2021 VMware, Inc.
# SPDX-License: Apache-2.0
import logging
import salt.exceptions
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.connect as connect
log = logging.getLogger(__name__)
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_tag"
__func_alias__ = {"list_": "list"}
def __virtual__():
if not HAS_PYVMOMI:
return False, "Unable to import pyVmomi module."
return __virtualname__
def create(tag_name, category_id, description=""):
"""
Create a new tag.
tag_name
Name of tag.
category_id
(string) Category ID of type: com.vmware.cis.tagging.Tag.
description
(optional) Description for the tag being created.
"""
data = {
"create_spec": {"category_id": category_id, "description": description, "name": tag_name}
}
response = connect.request(
"/rest/com/vmware/cis/tagging/tag", "POST", body=data, opts=__opts__, pillar=__pillar__
)
response = response["response"].json()
return {"tag": response["value"]}
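# Illustrative CLI usage (a sketch; the category ID below is a placeholder):
#   salt '*' vmware_tag.create tag_name='nightly' category_id='<category id>' description='CI tag'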
def get(tag_id):
"""
Returns info on given tag.
tag_id
(string) Tag ID of type: com.vmware.cis.tagging.Tag.
"""
url = f"/rest/com/vmware/cis/tagging/tag/id:{tag_id}"
response = connect.request(url, "GET", opts=__opts__, pillar=__pillar__)
response = response["response"].json()
return {"tag": response["value"]}
def update(tag_id, tag_name=None, description=None):
"""
Updates give tag.
tag_id
(string) Tag ID of type: com.vmware.cis.tagging.Tag.
tag_name
Name of tag.
description
(optional) Description for the tag being created.
"""
spec = {"update_spec": {}}
if tag_name:
spec["update_spec"]["name"] = tag_name
if description:
spec["update_spec"]["description"] = description
url = f"/rest/com/vmware/cis/tagging/tag/id:{tag_id}"
response = connect.request(url, "PATCH", body=spec, opts=__opts__, pillar=__pillar__)
if response["response"].status_code == 200:
return {"tag": "updated"}
return {
"tag": "failed to update",
"status_code": response["response"].status_code,
"reason": response["response"].reason,
}
def delete(tag_id):
"""
Delete given tag.
tag_id
(string) Tag ID of type: com.vmware.cis.tagging.Tag.
"""
url = f"/rest/com/vmware/cis/tagging/tag/id:{tag_id}"
response = connect.request(url, "DELETE", opts=__opts__, pillar=__pillar__)
if response["response"].status_code == 200:
return {"tag": "deleted"}
    return {
        "tag": "failed to delete",
        "status_code": response["response"].status_code,
        "reason": response["response"].reason,
    }
def list_():
"""
Lists IDs for all the tags on a given vCenter.
"""
response = connect.request(
"/rest/com/vmware/cis/tagging/tag", "GET", opts=__opts__, pillar=__pillar__
)
response = response["response"].json()
return {"tags": response["value"]}
def list_category():
"""
Lists IDs for all the categories on a given vCenter.
"""
response = connect.request(
"/rest/com/vmware/cis/tagging/category", "GET", opts=__opts__, pillar=__pillar__
)
response = response["response"].json()
return {"categories": response["value"]}
def get_category(category_id):
"""
Returns info on given category.
category_id
(string) Category ID of type: com.vmware.cis.tagging.Category.
"""
url = f"/rest/com/vmware/cis/tagging/category/id:{category_id}"
response = connect.request(url, "GET", opts=__opts__, pillar=__pillar__)
response = response["response"].json()
return {"category": response["value"]}
| 26.365517 | 97 | 0.640073 |
ad1fa7a11796a464041015d8d2521e5ee94f7c81 | 25,578 | py | Python | apps/jobbrowser/src/jobbrowser/views.py | bopopescu/Hue-4 | 127a23f563611b0e8dc0dd35ad393cbaff8a64c6 | [
"Apache-2.0"
] | 1 | 2018-08-01T05:10:26.000Z | 2018-08-01T05:10:26.000Z | apps/jobbrowser/src/jobbrowser/views.py | bopopescu/Hue-4 | 127a23f563611b0e8dc0dd35ad393cbaff8a64c6 | [
"Apache-2.0"
] | null | null | null | apps/jobbrowser/src/jobbrowser/views.py | bopopescu/Hue-4 | 127a23f563611b0e8dc0dd35ad393cbaff8a64c6 | [
"Apache-2.0"
] | 1 | 2020-07-25T19:27:13.000Z | 2020-07-25T19:27:13.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import string
import time
import urllib2
import urlparse
from urllib import quote_plus
from lxml import html
from django.http import HttpResponseRedirect
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.log.access import access_log_level
from desktop.lib.rest.http_client import RestException
from desktop.lib.rest.resource import Resource
from desktop.lib.django_util import JsonResponse, render_json, render, copy_query_dict
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.lib.exceptions import MessageException
from desktop.lib.exceptions_renderable import PopupException
from desktop.views import register_status_bar_view
from hadoop import cluster
from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundException, ThriftJobState
from hadoop.yarn.clients import get_log_client
from hadoop.yarn import resource_manager_api as resource_manager_api
LOG = logging.getLogger(__name__)
try:
from beeswax.hive_site import hiveserver2_impersonation_enabled
except:
LOG.warn('Hive is not enabled')
def hiveserver2_impersonation_enabled(): return True
from jobbrowser.conf import LOG_OFFSET, SHARE_JOBS
from jobbrowser.api import get_api, ApplicationNotRunning, JobExpired
from jobbrowser.models import Job, JobLinkage, Tracker, Cluster, can_view_job, LinkJobLogs, can_kill_job
from jobbrowser.yarn_models import Application
LOG_OFFSET_BYTES = LOG_OFFSET.get()
def check_job_permission(view_func):
"""
Ensure that the user has access to the job.
Assumes that the wrapped function takes a 'jobid' param named 'job'.
"""
def decorate(request, *args, **kwargs):
jobid = kwargs['job']
try:
job = get_job(request, job_id=jobid)
except ApplicationNotRunning, e:
LOG.warn('Job %s has not yet been accepted by the RM, will poll for status.' % jobid)
return job_not_assigned(request, jobid, request.path)
if not SHARE_JOBS.get() and not request.user.is_superuser \
and job.user != request.user.username and not can_view_job(request.user.username, job):
raise PopupException(_("You don't have permission to access job %(id)s.") % {'id': jobid})
kwargs['job'] = job
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def get_job(request, job_id):
try:
job = get_api(request.user, request.jt).get_job(jobid=job_id)
except ApplicationNotRunning, e:
if e.job.get('state', '').lower() == 'accepted':
rm_api = resource_manager_api.get_resource_manager(request.user)
job = Application(e.job, rm_api)
else:
raise e # Job has not yet been accepted by RM
except JobExpired, e:
raise PopupException(_('Job %s has expired.') % job_id, detail=_('Cannot be found on the History Server.'))
except Exception, e:
msg = 'Could not find job %s.'
LOG.exception(msg % job_id)
raise PopupException(_(msg) % job_id, detail=e)
return job
def apps(request):
return render('job_browser.mako', request, {
'is_embeddable': request.GET.get('is_embeddable', False),
'is_mini': request.GET.get('is_mini', False),
'hiveserver2_impersonation_enabled': hiveserver2_impersonation_enabled()
})
def job_not_assigned(request, jobid, path):
if request.GET.get('format') == 'json':
result = {'status': -1, 'message': ''}
try:
get_api(request.user, request.jt).get_job(jobid=jobid)
result['status'] = 0
except ApplicationNotRunning, e:
result['status'] = 1
except Exception, e:
result['message'] = _('Error polling job %s: %s') % (jobid, e)
return JsonResponse(result, encoder=JSONEncoderForHTML)
else:
return render('job_not_assigned.mako', request, {'jobid': jobid, 'path': path})
def jobs(request):
user = request.POST.get('user', request.user.username)
state = request.POST.get('state')
text = request.POST.get('text')
retired = request.POST.get('retired')
time_value = request.POST.get('time_value', 7)
time_unit = request.POST.get('time_unit', 'days')
if request.POST.get('format') == 'json':
try:
# Limit number of jobs to be 1000
jobs = get_api(request.user, request.jt).get_jobs(user=request.user, username=user, state=state, text=text,
retired=retired, limit=1000, time_value=int(time_value), time_unit=time_unit)
except Exception, ex:
ex_message = str(ex)
if 'Connection refused' in ex_message or 'standby RM' in ex_message:
raise PopupException(_('Resource Manager cannot be contacted or might be down.'))
elif 'Could not connect to' in ex_message:
raise PopupException(_('Job Tracker cannot be contacted or might be down.'))
else:
raise PopupException(ex)
json_jobs = {
'jobs': [massage_job_for_json(job, request) for job in jobs],
}
return JsonResponse(json_jobs, encoder=JSONEncoderForHTML)
return render('jobs.mako', request, {
'request': request,
'state_filter': state,
'user_filter': user,
'text_filter': text,
'retired': retired,
'filtered': not (state == 'all' and user == '' and text == ''),
'is_yarn': cluster.is_yarn(),
'hiveserver2_impersonation_enabled': hiveserver2_impersonation_enabled()
})
def massage_job_for_json(job, request=None, user=None):
job = {
'id': job.jobId,
'shortId': job.jobId_short,
'name': hasattr(job, 'jobName') and job.jobName or '',
'status': job.status,
'yarnStatus': hasattr(job, 'yarnStatus') and job.yarnStatus or '',
'url': job.jobId and reverse('jobbrowser.views.single_job', kwargs={'job': job.jobId}) or '',
'logs': job.jobId and reverse('jobbrowser.views.job_single_logs', kwargs={'job': job.jobId}) or '',
'queueName': hasattr(job, 'queueName') and job.queueName or _('N/A'),
'priority': hasattr(job, 'priority') and job.priority or _('N/A'),
'user': job.user,
'isRetired': job.is_retired,
'isMR2': job.is_mr2,
'progress': hasattr(job, 'progress') and job.progress or '',
'mapProgress': hasattr(job, 'mapProgress') and job.mapProgress or '',
'reduceProgress': hasattr(job, 'reduceProgress') and job.reduceProgress or '',
'setupProgress': hasattr(job, 'setupProgress') and job.setupProgress or '',
'cleanupProgress': hasattr(job, 'cleanupProgress') and job.cleanupProgress or '',
'desiredMaps': job.desiredMaps,
'desiredReduces': job.desiredReduces,
'applicationType': hasattr(job, 'applicationType') and job.applicationType or None,
'mapsPercentComplete': int(job.maps_percent_complete) if job.maps_percent_complete else '',
'finishedMaps': job.finishedMaps,
'finishedReduces': job.finishedReduces,
'reducesPercentComplete': int(job.reduces_percent_complete) if job.reduces_percent_complete else '',
'jobFile': hasattr(job, 'jobFile') and job.jobFile or '',
'launchTimeMs': hasattr(job, 'launchTimeMs') and job.launchTimeMs or 0,
'launchTimeFormatted': hasattr(job, 'launchTimeFormatted') and job.launchTimeFormatted or '',
'startTimeMs': hasattr(job, 'startTimeMs') and job.startTimeMs or 0,
'startTimeFormatted': hasattr(job, 'startTimeFormatted') and job.startTimeFormatted or '',
'finishTimeMs': hasattr(job, 'finishTimeMs') and job.finishTimeMs or 0,
'finishTimeFormatted': hasattr(job, 'finishTimeFormatted') and job.finishTimeFormatted or '',
'durationFormatted': hasattr(job, 'durationFormatted') and job.durationFormatted or '',
'durationMs': hasattr(job, 'durationInMillis') and job.durationInMillis or 0,
'canKill': can_kill_job(job, request.user if request else user),
'killUrl': job.jobId and reverse('jobbrowser.views.kill_job', kwargs={'job': job.jobId}) or '',
'diagnostics': hasattr(job, 'diagnostics') and job.diagnostics or '',
}
return job
def massage_task_for_json(task):
task = {
'id': task.taskId,
'shortId': task.taskId_short,
'url': task.taskId and reverse('jobbrowser.views.single_task', kwargs={'job': task.jobId, 'taskid': task.taskId}) or '',
'logs': task.taskAttemptIds and reverse('jobbrowser.views.single_task_attempt_logs', kwargs={'job': task.jobId, 'taskid': task.taskId, 'attemptid': task.taskAttemptIds[-1]}) or '',
'type': task.taskType
}
return task
def single_spark_job(request, job):
if request.REQUEST.get('format') == 'json':
json_job = {
'job': massage_job_for_json(job, request)
}
return JsonResponse(json_job, encoder=JSONEncoderForHTML)
else:
return render('job.mako', request, {
'request': request,
'job': job
})
@check_job_permission
def single_job(request, job):
def cmp_exec_time(task1, task2):
return cmp(task1.execStartTimeMs, task2.execStartTimeMs)
if job.applicationType == 'SPARK':
return single_spark_job(request, job)
failed_tasks = job.filter_tasks(task_states=('failed',))
failed_tasks.sort(cmp_exec_time)
recent_tasks = job.filter_tasks(task_states=('running', 'succeeded',))
recent_tasks.sort(cmp_exec_time, reverse=True)
if request.REQUEST.get('format') == 'json':
json_failed_tasks = [massage_task_for_json(task) for task in failed_tasks]
json_recent_tasks = [massage_task_for_json(task) for task in recent_tasks]
json_job = {
'job': massage_job_for_json(job, request),
'failedTasks': json_failed_tasks,
'recentTasks': json_recent_tasks
}
return JsonResponse(json_job, encoder=JSONEncoderForHTML)
return render('job.mako', request, {
'request': request,
'job': job,
'failed_tasks': failed_tasks and failed_tasks[:5] or [],
'recent_tasks': recent_tasks and recent_tasks[:5] or [],
})
@check_job_permission
def job_counters(request, job):
return render("counters.html", request, {"counters": job.counters})
@access_log_level(logging.WARN)
@check_job_permission
def kill_job(request, job):
if request.method != "POST":
raise Exception(_("kill_job may only be invoked with a POST (got a %(method)s).") % {'method': request.method})
if not can_kill_job(job, request.user):
raise PopupException(_("Kill operation is forbidden."))
try:
job.kill()
except Exception, e:
LOG.exception('Killing job')
raise PopupException(e)
cur_time = time.time()
api = get_api(request.user, request.jt)
while time.time() - cur_time < 15:
try:
job = api.get_job(jobid=job.jobId)
except Exception, e:
LOG.warn('Failed to get job with ID %s: %s' % (job.jobId, e))
else:
if job.status not in ["RUNNING", "QUEUED"]:
if request.REQUEST.get("next"):
return HttpResponseRedirect(request.REQUEST.get("next"))
elif request.REQUEST.get("format") == "json":
return JsonResponse({'status': 0}, encoder=JSONEncoderForHTML)
else:
raise MessageException("Job Killed")
time.sleep(1)
raise Exception(_("Job did not appear as killed within 15 seconds."))
@check_job_permission
def job_attempt_logs(request, job, attempt_index=0):
return render("job_attempt_logs.mako", request, {
"attempt_index": attempt_index,
"job": job,
"log_offset": LOG_OFFSET_BYTES
})
@check_job_permission
def job_attempt_logs_json(request, job, attempt_index=0, name='syslog', offset=LOG_OFFSET_BYTES):
"""For async log retrieval as Yarn servers are very slow"""
log_link = None
response = {'status': -1}
try:
jt = get_api(request.user, request.jt)
app = jt.get_application(job.jobId)
if app['applicationType'] == 'MAPREDUCE':
if app['finalStatus'] in ('SUCCEEDED', 'FAILED', 'KILLED'):
attempt_index = int(attempt_index)
if not job.job_attempts['jobAttempt']:
response = {'status': 0, 'log': _('Job has no tasks')}
else:
attempt = job.job_attempts['jobAttempt'][attempt_index]
log_link = attempt['logsLink']
# Reformat log link to use YARN RM, replace node addr with node ID addr
log_link = log_link.replace(attempt['nodeHttpAddress'], attempt['nodeId'])
elif app['state'] == 'RUNNING':
log_link = app['amContainerLogs']
except (KeyError, RestException), e:
raise KeyError(_("Cannot find job attempt '%(id)s'.") % {'id': job.jobId}, e)
except Exception, e:
raise Exception(_("Failed to get application for job %s: %s") % (job.jobId, e))
if log_link:
link = '/%s/' % name
params = {
'doAs': request.user.username
}
if offset != 0:
params['start'] = offset
root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
api_resp = None
try:
api_resp = root.get(link, params=params)
log = html.fromstring(api_resp, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
response['status'] = 0
response['log'] = LinkJobLogs._make_hdfs_links(log)
except Exception, e:
response['log'] = _('Failed to retrieve log: %s' % e)
try:
debug_info = '\nLog Link: %s' % log_link
if api_resp:
debug_info += '\nHTML Response: %s' % response
response['debug'] = debug_info
LOG.error(debug_info)
except:
LOG.exception('failed to create debug info')
return JsonResponse(response)
@check_job_permission
def job_single_logs(request, job, offset=LOG_OFFSET_BYTES):
"""
Try to smartly detect the most useful task attempt (e.g. Oozie launcher, failed task) and get its MR logs.
"""
def cmp_exec_time(task1, task2):
return cmp(task1.execStartTimeMs, task2.execStartTimeMs)
task = None
failed_tasks = job.filter_tasks(task_states=('failed',))
failed_tasks.sort(cmp_exec_time)
if failed_tasks:
task = failed_tasks[0]
if not task.taskAttemptIds and len(failed_tasks) > 1: # In some cases the last task ends up without any attempt
task = failed_tasks[1]
else:
task_states = ['running', 'succeeded']
if job.is_mr2:
task_states.append('scheduled')
recent_tasks = job.filter_tasks(task_states=task_states, task_types=('map', 'reduce',))
recent_tasks.sort(cmp_exec_time, reverse=True)
if recent_tasks:
task = recent_tasks[0]
if task is None or not task.taskAttemptIds:
raise PopupException(_("No tasks found for job %(id)s.") % {'id': job.jobId})
params = {'job': job.jobId, 'taskid': task.taskId, 'attemptid': task.taskAttemptIds[-1], 'offset': offset}
if request.GET.get('format') == 'link':
return JsonResponse(params)
else:
return single_task_attempt_logs(request, **params)
@check_job_permission
def tasks(request, job):
"""
We get here from /jobs/job/tasks?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
tasktype=<type> - Type can be one of hadoop.job_tracker.VALID_TASK_TYPES
("map", "reduce", "job_cleanup", "job_setup")
taskstate=<state> - State can be one of hadoop.job_tracker.VALID_TASK_STATES
("succeeded", "failed", "running", "pending", "killed")
tasktext=<text> - Where <text> is a string matching info on the task
"""
ttypes = request.GET.get('tasktype')
tstates = request.GET.get('taskstate')
ttext = request.GET.get('tasktext')
pagenum = int(request.GET.get('page', 1))
pagenum = pagenum > 0 and pagenum or 1
filters = {
'task_types': ttypes and set(ttypes.split(',')) or None,
'task_states': tstates and set(tstates.split(',')) or None,
'task_text': ttext,
'pagenum': pagenum,
}
jt = get_api(request.user, request.jt)
task_list = jt.get_tasks(job.jobId, **filters)
filter_params = copy_query_dict(request.GET, ('tasktype', 'taskstate', 'tasktext')).urlencode()
return render("tasks.mako", request, {
'request': request,
'filter_params': filter_params,
'job': job,
'task_list': task_list,
'tasktype': ttypes,
'taskstate': tstates,
'tasktext': ttext
})
@check_job_permission
def single_task(request, job, taskid):
jt = get_api(request.user, request.jt)
job_link = jt.get_job_link(job.jobId)
task = job_link.get_task(taskid)
return render("task.mako", request, {
'task': task,
'joblnk': job_link
})
@check_job_permission
def single_task_attempt(request, job, taskid, attemptid):
jt = get_api(request.user, request.jt)
job_link = jt.get_job_link(job.jobId)
task = job_link.get_task(taskid)
try:
attempt = task.get_attempt(attemptid)
except (KeyError, RestException), e:
raise PopupException(_("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)
return render("attempt.mako", request, {
"attempt": attempt,
"taskid": taskid,
"joblnk": job_link,
"task": task
})
@check_job_permission
def single_task_attempt_logs(request, job, taskid, attemptid, offset=LOG_OFFSET_BYTES):
jt = get_api(request.user, request.jt)
job_link = jt.get_job_link(job.jobId)
task = job_link.get_task(taskid)
try:
attempt = task.get_attempt(attemptid)
except (KeyError, RestException), e:
raise KeyError(_("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)
first_log_tab = 0
try:
# Add a diagnostic log
if job_link.is_mr2:
diagnostic_log = attempt.diagnostics
else:
diagnostic_log = ", ".join(task.diagnosticMap[attempt.attemptId])
logs = [diagnostic_log]
# Add remaining logs
logs += [section.strip() for section in attempt.get_task_log(offset=offset)]
log_tab = [i for i, log in enumerate(logs) if log]
if log_tab:
first_log_tab = log_tab[0]
except TaskTrackerNotFoundException:
# Four entries,
# for diagnostic, stdout, stderr and syslog
logs = [_("Failed to retrieve log. TaskTracker not found.")] * 4
except urllib2.URLError:
logs = [_("Failed to retrieve log. TaskTracker not ready.")] * 4
context = {
"attempt": attempt,
"taskid": taskid,
"joblnk": job_link,
"task": task,
"logs": logs,
"first_log_tab": first_log_tab,
}
if request.GET.get('format') == 'python':
return context
else:
context['logs'] = [LinkJobLogs._make_links(log) for i, log in enumerate(logs)]
if request.GET.get('format') == 'json':
response = {
"logs": context['logs'],
"isRunning": job.status.lower() in ('running', 'pending', 'prep')
}
return JsonResponse(response)
else:
return render("attempt_logs.mako", request, context)
@check_job_permission
def task_attempt_counters(request, job, taskid, attemptid):
"""
We get here from /jobs/jobid/tasks/taskid/attempts/attemptid/counters
(phew!)
"""
job_link = JobLinkage(request.jt, job.jobId)
task = job_link.get_task(taskid)
attempt = task.get_attempt(attemptid)
counters = {}
if attempt:
counters = attempt.counters
return render("counters.html", request, {'counters':counters})
@access_log_level(logging.WARN)
def kill_task_attempt(request, attemptid):
"""
We get here from /jobs/jobid/tasks/taskid/attempts/attemptid/kill
TODO: security
"""
ret = request.jt.kill_task_attempt(request.jt.thriftattemptid_from_string(attemptid))
return render_json({})
def trackers(request):
"""
We get here from /trackers
"""
trackers = get_tasktrackers(request)
return render("tasktrackers.mako", request, {'trackers':trackers})
def single_tracker(request, trackerid):
jt = get_api(request.user, request.jt)
try:
tracker = jt.get_tracker(trackerid)
except Exception, e:
raise PopupException(_('The tracker could not be contacted.'), detail=e)
return render("tasktracker.mako", request, {'tracker':tracker})
def container(request, node_manager_http_address, containerid):
jt = get_api(request.user, request.jt)
try:
tracker = jt.get_tracker(node_manager_http_address, containerid)
except Exception, e:
# TODO: add a redirect of some kind
raise PopupException(_('The container disappears as soon as the job finishes.'), detail=e)
return render("container.mako", request, {'tracker':tracker})
def clusterstatus(request):
"""
We get here from /clusterstatus
"""
return render("clusterstatus.html", request, Cluster(request.jt))
def queues(request):
"""
We get here from /queues
"""
return render("queues.html", request, { "queuelist" : request.jt.queues()})
@check_job_permission
def set_job_priority(request, job):
"""
We get here from /jobs/job/setpriority?priority=PRIORITY
"""
priority = request.GET.get("priority")
jid = request.jt.thriftjobid_from_string(job.jobId)
request.jt.set_job_priority(jid, ThriftJobPriority._NAMES_TO_VALUES[priority])
return render_json({})
CONF_VARIABLE_REGEX = r"\$\{(.+)\}"
def make_substitutions(conf):
"""
Substitute occurrences of ${foo} with conf[foo], recursively, in all the values
of the conf dict.
Note that the Java code may also substitute Java properties in, which
this code does not have.
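For example (illustrative), {'a': 'x', 'b': '${a}/y'} becomes {'a': 'x', 'b': 'x/y'}.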
"""
r = re.compile(CONF_VARIABLE_REGEX)
def sub(s, depth=0):
# Malformed / malicious confs could make this loop infinitely
if depth > 100:
logging.warn("Max recursion depth exceeded when substituting jobconf value: %s" % s)
return s
m = r.search(s)
if m:
for g in [g for g in m.groups() if g in conf]:
substr = "${%s}" % g
s = s.replace(substr, sub(conf[g], depth+1))
return s
for k, v in conf.items():
conf[k] = sub(v)
return conf
##################################
## Helper functions
def get_shorter_id(hadoop_job_id):
return "_".join(hadoop_job_id.split("_")[-2:])
def format_counter_name(s):
"""
Makes counter/config names human readable:
FOOBAR_BAZ -> "Foobar Baz"
foo_barBaz -> "Foo Bar Baz"
"""
def splitCamels(s):
""" Convert "fooBar" to "foo bar" """
return re.sub(r'[a-z][A-Z]',
lambda x: x.group(0)[0] + " " + x.group(0)[1].lower(),
s)
return string.capwords(re.sub('_', ' ', splitCamels(s)).lower())
def get_state_link(request, option=None, val='', VALID_OPTIONS = ("state", "user", "text", "taskstate")):
"""
constructs the query string for the state of the current query for the jobs page.
pass in the request, and an optional option/value pair; these are used for creating
links to turn on the filter, while preserving the other present settings.
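For example (illustrative), if the current query already has ?state=running, get_state_link(request, 'user', 'bob') might yield 'state=running&user=bob'.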
"""
states = []
val = quote_plus(val)
assert option is None or option in VALID_OPTIONS
states = dict()
for o in VALID_OPTIONS:
if o in request.GET:
states[o] = request.GET[o]
if option is not None:
states[option] = val
return "&".join([ "%s=%s" % (key, quote_plus(value)) for key, value in states.iteritems() ])
## All Unused below
# DEAD?
def dock_jobs(request):
username = request.user.username
matching_jobs = get_job_count_by_state(request, username)
return render("jobs_dock_info.mako", request, {
'jobs': matching_jobs
}, force_template=True)
register_status_bar_view(dock_jobs)
def get_tasktrackers(request):
"""
Return a ThriftTaskTrackerStatusList object containing all task trackers
"""
return [ Tracker(tracker) for tracker in request.jt.all_task_trackers().trackers]
def get_single_job(request, jobid):
"""
Returns the job which matches jobid.
"""
return Job.from_id(jt=request.jt, jobid=jobid)
def get_job_count_by_state(request, username):
"""
Returns the number of completed, running, and failed jobs for a user.
"""
res = {
'completed': 0,
'running': 0,
'failed': 0,
'killed': 0,
'all': 0
}
jobcounts = request.jt.get_job_count_by_user(username)
res['completed'] = jobcounts.nSucceeded
res['running'] = jobcounts.nPrep + jobcounts.nRunning
res['failed'] = jobcounts.nFailed
res['killed'] = jobcounts.nKilled
res['all'] = res['completed'] + res['running'] + res['failed'] + res['killed']
return res
def jobbrowser(request):
"""
A jobbrowser.jsp-alike overview of jobs, queues and cluster status.
"""
# TODO(bc): Is this view even reachable?
def check_job_state(state):
return lambda job: job.status == state
status = request.jt.cluster_status()
alljobs = [] #get_matching_jobs(request)
runningjobs = filter(check_job_state('RUNNING'), alljobs)
completedjobs = filter(check_job_state('COMPLETED'), alljobs)
failedjobs = filter(check_job_state('FAILED'), alljobs)
killedjobs = filter(check_job_state('KILLED'), alljobs)
jobqueues = request.jt.queues()
return render("jobbrowser.html", request, {
"clusterstatus" : status,
"queues" : jobqueues,
"alljobs" : alljobs,
"runningjobs" : runningjobs,
"failedjobs" : failedjobs,
"killedjobs" : killedjobs,
"completedjobs" : completedjobs
})
| 34.013298 | 184 | 0.688756 |
4c28baed59ac5a77b405f356eff00bcab5e6c48c | 2,438 | py | Python | src/foreign_if/python/UT/src/dt/test_006.py | XpressAI/frovedis | bda0f2c688fb832671c5b542dd8df1c9657642ff | [
"BSD-2-Clause"
] | 63 | 2018-06-21T14:11:59.000Z | 2022-03-30T11:24:36.000Z | src/foreign_if/python/UT/src/dt/test_006.py | XpressAI/frovedis | bda0f2c688fb832671c5b542dd8df1c9657642ff | [
"BSD-2-Clause"
] | 5 | 2018-09-22T14:01:53.000Z | 2021-12-27T16:11:05.000Z | src/foreign_if/python/UT/src/dt/test_006.py | XpressAI/frovedis | bda0f2c688fb832671c5b542dd8df1c9657642ff | [
"BSD-2-Clause"
] | 12 | 2018-08-23T15:59:44.000Z | 2022-02-20T06:47:22.000Z | #!/usr/bin/env python
from frovedis.exrpc.server import FrovedisServer
from frovedis.mllib.tree import DecisionTreeClassifier
from frovedis.mllib.tree import DecisionTreeRegressor
import sys
import numpy as np
import pandas as pd
# Objective : predict when only a single element is passed
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
print 'Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")'
quit()
FrovedisServer.initialize(argvs[1])
mat = np.asmatrix([[1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0]],dtype=np.float64)
lbl = np.array([0.0, 1.0, 1.0, 0.0],dtype=np.float64)
# fitting input matrix and label on DecisionTree Classifier object
dtc1 = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None,
min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,
max_features=None, random_state=None, max_leaf_nodes=1,
min_impurity_decrease=0.0,
class_weight=None, presort=False, verbose = 0)
dtc = dtc1.fit(mat,lbl)
dtc.debug_print()
# predicting on train model
print("predicting on DecisionTree classifier model: ")
dtcm = dtc.predict(mat[2:3])
print dtcm
print("Accuracy score for predicted DecisionTree Classifier model")
print dtc.score(mat,lbl)
# fitting input matrix and label on DecisionTree Regressor object
dtr1 = DecisionTreeRegressor(criterion='mse', splitter='best',
max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features=None, random_state=None,
max_leaf_nodes=1, min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False, verbose = 0)
lbl1 = np.array([1.2,0.3,1.1,1.9])
dtr = dtr1.fit(mat,lbl1)
dtr.debug_print()
# predicting on train model
print("predicting on DecisionTree Regressor model: ")
dtrm = dtr.predict(mat[2:3])
print dtrm
print("Root mean square for predicted DecisionTree Regressor model")
print dtr.score(mat,lbl1)
if (lbl[2] == dtcm) and (lbl1[2] == dtrm):
print("Status: Passed")
else:
print("Status: Failed")
#clean-up
dtc.release()
dtr.release()
FrovedisServer.shut_down()
| 34.338028 | 142 | 0.685808 |
5beeacd74eb3e3c0a362d8c3eed734d2c62b9c04 | 13,381 | py | Python | flink-python/pyflink/datastream/__init__.py | huyuanfeng2018/flink | b3a9dcbd65719c742fe4907ec17de396b188d378 | [
"Apache-2.0"
] | null | null | null | flink-python/pyflink/datastream/__init__.py | huyuanfeng2018/flink | b3a9dcbd65719c742fe4907ec17de396b188d378 | [
"Apache-2.0"
] | null | null | null | flink-python/pyflink/datastream/__init__.py | huyuanfeng2018/flink | b3a9dcbd65719c742fe4907ec17de396b188d378 | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Entry point classes of Flink DataStream API:
- :class:`StreamExecutionEnvironment`:
The context in which a streaming program is executed.
- :class:`DataStream`:
Represents a stream of elements of the same type. A DataStream can be transformed
into another DataStream by applying a transformation.
- :class:`KeyedStream`:
Represents a :class:`DataStream` where elements are partitioned by key using a
provided KeySelector.
- :class:`WindowedStream`:
Represents a data stream where elements are grouped by key, and for each
key, the stream of elements is split into windows based on a WindowAssigner. Window emission
is triggered based on a Trigger.
- :class:`ConnectedStreams`:
Represent two connected streams of (possibly) different data types. Connected
streams are useful for cases where operations on one stream directly affect the operations on
the other stream, usually via shared state between the streams.
Functions used to transform a :class:`DataStream` into another :class:`DataStream`:
- :class:`MapFunction`:
Performs a map transformation of a :class:`DataStream` at element wise.
- :class:`CoMapFunction`:
Performs a map transformation over two connected streams.
- :class:`FlatMapFunction`:
Performs a flatmap transformation of a :class:`DataStream` which produces zero, one, or more
elements for each input element.
- :class:`CoFlatMapFunction`:
Performs a flatmap transformation over two connected streams.
- :class:`FilterFunction`:
A filter function is a predicate applied individually to each record.
- :class:`ReduceFunction`:
Combines groups of elements to a single value.
- :class:`ProcessFunction`:
Similar to :class:`FlatMapFunction`, except that it could access the current timestamp and
watermark in :class:`ProcessFunction`.
- :class:`KeyedProcessFunction`:
Similar to :class:`ProcessFunction`, except that it was applied to a :class:`KeyedStream` and
could register event-time and processing-time timers.
- :class:`CoProcessFunction`:
Similar to :class:`CoFlatMapFunction`, except that it could access the current timestamp and
watermark in :class:`CoProcessFunction`.
- :class:`KeyedCoProcessFunction`:
Similar to :class:`CoProcessFunction`, except that it was applied to a keyed
:class:`ConnectedStreams` and could register event-time and processing-time timers.
- :class:`WindowFunction`:
Base interface for functions that are evaluated over keyed (grouped) windows.
- :class:`ProcessWindowFunction`:
Similar to :class:`WindowFunction`, except that it could access a context for retrieving extra
information such as the current timestamp, the watermark, etc.
- :class:`AggregateFunction`:
Base class for a user-defined aggregate function.
- :class:`RuntimeContext`:
Contains information about the context in which functions are executed. Each
parallel instance of the function will have a context through which it can access static
contextual information (such as the current parallelism), etc.
Classes to define window:
- :class:`Window`:
A grouping of elements into finite buckets.
- :class:`TimeWindow`:
A grouping of elements according to a time interval from start (inclusive) to end (exclusive).
- :class:`CountWindow`:
A grouping of elements according to element count from start (inclusive) to end (exclusive).
- :class:`WindowAssigner`:
Assigns zero or more :class:`Window` to an element.
- :class:`MergingWindowAssigner`:
A :class:`WindowAssigner` that can merge windows.
- :class:`TriggerResult`:
Result type for trigger methods. This determines what happens with the window, for example
whether the window function should be called, or the window should be discarded.
- :class:`Trigger`:
Determines when a pane of a window should be evaluated to emit the results for that
part of the window.
Classes to define the behavior of checkpoint and state backend:
- :class:`CheckpointingMode`:
Defines what consistency guarantees the system gives in the presence of failures.
- :class:`CheckpointConfig`:
Configuration that captures all checkpointing related settings.
- :class:`StateBackend`:
Base class of the state backends which define how the state of a streaming application is
stored locally within the cluster. Different state backends store their state in different
fashions, and use different data structures to hold the state of a running application.
- :class:`HashMapStateBackend`:
Holds the working state in the memory (JVM heap) of the TaskManagers and
checkpoints based on the configured :class:`CheckpointStorage`.
- :class:`EmbeddedRocksDBStateBackend`:
Stores its state in an embedded `RocksDB` instance. This state backend can store very large
state that exceeds memory and spills to local disk.
- :class:`CustomStateBackend`:
A wrapper of customized java state backend.
- :class:`JobManagerCheckpointStorage`:
Checkpoints state directly to the JobManager's memory (hence the name), but savepoints will
be persisted to a file system.
- :class:`FileSystemCheckpointStorage`:
Checkpoints state as files to a file system. Each checkpoint individually will store all its
files in a subdirectory that includes the checkpoint number, such as
`hdfs://namenode:port/flink-checkpoints/chk-17/`.
- :class:`CustomCheckpointStorage`:
A wrapper of customized java checkpoint storage.
Classes for state operations:
- :class:`state.ValueState`:
Interface for partitioned single-value state. The value can be retrieved or updated.
- :class:`state.ListState`:
Interface for partitioned list state in Operations. The state is accessed and modified by
user functions, and checkpointed consistently by the system as part of the distributed
snapshots.
- :class:`state.MapState`:
Interface for partitioned key-value state. The key-value pair can be added, updated and
retrieved.
- :class:`state.ReducingState`:
Interface for reducing state. Elements can be added to the state, they will be combined using
a :class:`ReduceFunction`. The current state can be inspected.
- :class:`state.AggregatingState`:
Interface for aggregating state, based on an :class:`AggregateFunction`. Elements that are
added to this type of state will be eagerly pre-aggregated using a given AggregateFunction.
- :class:`state.StateTtlConfig`:
Configuration of state TTL logic.
Classes to define source & sink:
- :class:`connectors.FlinkKafkaConsumer`:
A streaming data source that pulls a parallel data stream from Apache Kafka.
- :class:`connectors.FlinkKafkaProducer`:
A streaming data sink to produce data into a Kafka topic.
- :class:`connectors.FileSource`:
A unified data source that reads files - both in batch and in streaming mode.
This source supports all (distributed) file systems and object stores that can be accessed via
the Flink's FileSystem class.
- :class:`connectors.FileSink`:
A unified sink that emits its input elements to FileSystem files within buckets. This
sink achieves exactly-once semantics for both BATCH and STREAMING.
- :class:`connectors.NumberSequenceSource`:
A data source that produces a sequence of numbers (longs). This source is useful for testing
and for cases that just need a stream of N events of any kind.
- :class:`connectors.JdbcSink`:
A data sink to produce data into an external storage using JDBC.
- :class:`connectors.StreamingFileSink`:
Sink that emits its input elements to files within buckets. This is integrated with the
checkpointing mechanism to provide exactly once semantics.
- :class:`connectors.RMQSource`:
A streaming data source that pulls a parallel data stream from RabbitMQ.
- :class:`connectors.RMQSink`:
A Sink for publishing data into RabbitMQ.
Other important classes:
- :class:`TimeCharacteristic`:
Defines how the system determines time for time-dependent order and operations that depend
on time (such as time windows).
- :class:`TimeDomain`:
Specifies whether a firing timer is based on event time or processing time.
- :class:`KeySelector`:
The extractor takes an object and returns the deterministic key for that object.
- :class:`Partitioner`:
Function to implement a custom partition assignment for keys.
- :class:`SinkFunction`:
Interface for implementing user defined sink functionality.
- :class:`SourceFunction`:
Interface for implementing user defined source functionality.
- :class:`OutputTag`:
Tag with a name and type for identifying side output of an operator
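Example: a minimal, illustrative sketch of how these entry points fit together
(the collection contents and job name below are placeholders, not part of the API)::

    from pyflink.datastream import StreamExecutionEnvironment

    env = StreamExecutionEnvironment.get_execution_environment()
    ds = env.from_collection(["hello", "world"])
    ds.map(lambda s: s.upper()).print()
    env.execute("illustrative_job")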
"""
from pyflink.datastream.checkpoint_config import CheckpointConfig, ExternalizedCheckpointCleanup
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.data_stream import DataStream, KeyedStream, WindowedStream, \
ConnectedStreams, DataStreamSink
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.functions import (MapFunction, CoMapFunction, FlatMapFunction,
CoFlatMapFunction, ReduceFunction, RuntimeContext,
KeySelector, FilterFunction, Partitioner, SourceFunction,
SinkFunction, CoProcessFunction, KeyedProcessFunction,
KeyedCoProcessFunction, AggregateFunction, WindowFunction,
ProcessWindowFunction)
from pyflink.datastream.slot_sharing_group import SlotSharingGroup, MemorySize
from pyflink.datastream.state_backend import (StateBackend, MemoryStateBackend, FsStateBackend,
RocksDBStateBackend, CustomStateBackend,
PredefinedOptions, HashMapStateBackend,
EmbeddedRocksDBStateBackend)
from pyflink.datastream.checkpoint_storage import (CheckpointStorage, JobManagerCheckpointStorage,
FileSystemCheckpointStorage,
CustomCheckpointStorage)
from pyflink.datastream.stream_execution_environment import StreamExecutionEnvironment
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.datastream.time_domain import TimeDomain
from pyflink.datastream.functions import ProcessFunction
from pyflink.datastream.timerservice import TimerService
from pyflink.datastream.window import Window, TimeWindow, CountWindow, WindowAssigner, \
MergingWindowAssigner, TriggerResult, Trigger
from pyflink.datastream.output_tag import OutputTag
__all__ = [
'StreamExecutionEnvironment',
'DataStream',
'KeyedStream',
'WindowedStream',
'ConnectedStreams',
'DataStreamSink',
'MapFunction',
'CoMapFunction',
'FlatMapFunction',
'CoFlatMapFunction',
'ReduceFunction',
'FilterFunction',
'ProcessFunction',
'KeyedProcessFunction',
'CoProcessFunction',
'KeyedCoProcessFunction',
'WindowFunction',
'ProcessWindowFunction',
'AggregateFunction',
'RuntimeContext',
'TimerService',
'CheckpointingMode',
'CheckpointConfig',
'ExternalizedCheckpointCleanup',
'StateBackend',
'HashMapStateBackend',
'EmbeddedRocksDBStateBackend',
'CustomStateBackend',
'MemoryStateBackend',
'RocksDBStateBackend',
'FsStateBackend',
'PredefinedOptions',
'CheckpointStorage',
'JobManagerCheckpointStorage',
'FileSystemCheckpointStorage',
'CustomCheckpointStorage',
'RuntimeExecutionMode',
'Window',
'TimeWindow',
'CountWindow',
'WindowAssigner',
'MergingWindowAssigner',
'TriggerResult',
'Trigger',
'TimeCharacteristic',
'TimeDomain',
'KeySelector',
'Partitioner',
'SourceFunction',
'SinkFunction',
'SlotSharingGroup',
'MemorySize',
'OutputTag'
]
| 49.194853 | 100 | 0.709439 |
cc306bf4ec8a08308ac109b488c9a68e4896b143 | 1,575 | py | Python | codechain/rpc/devel.py | foriequal0/codechain-primitives-python | 8e020196be7beeb327b4e540aa2dfc193f34e40e | [
"ISC"
] | 11 | 2018-08-22T09:42:54.000Z | 2019-11-30T07:19:42.000Z | codechain/rpc/devel.py | foriequal0/codechain-primitives-python | 8e020196be7beeb327b4e540aa2dfc193f34e40e | [
"ISC"
] | 38 | 2019-07-22T06:13:39.000Z | 2021-06-02T00:43:21.000Z | codechain/rpc/devel.py | foriequal0/codechain-primitives-python | 8e020196be7beeb327b4e540aa2dfc193f34e40e | [
"ISC"
] | 5 | 2019-07-24T19:13:00.000Z | 2020-03-18T12:13:27.000Z | from typing import Union
from jsonrpcclient.requests import Request
class Devel:
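"""Thin wrapper around the devel_* JSON-RPC methods: each call builds a Request, sends it through the injected client and returns the JSON result."""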
def __init__(self, client):
self.client = client
def get_state_trie_keys(self, offset: int, limit: int):
payload = Request("devel_getStateTrieKeys", offset, limit)
response = self.client.send(payload)
return response.data.result
def get_state_trie_value(self, key: str):
payload = Request("devel_getStateTrieValue", key)
response = self.client.send(payload)
return response.data.result
def start_sealing(self):
payload = Request("devel_startSealing")
response = self.client.send(payload)
return response.data.result
def stop_sealing(self):
payload = Request("devel_stopSealing")
response = self.client.send(payload)
return response.data.result
def get_block_sync_peers(self):
payload = Request("devel_getBlockSyncPeers")
response = self.client.send(payload)
return response.data.result
def test_tps(self, count: int, seed: int, option: str):
if (
option != "payOnly"
and option != "transferSingle"
and option != "transferMultiple"
and option != "payOrTransfer"
):
raise ValueError(
f"option should be one of payOnly | transferSingle | transferMultiple | payOrTransfer"
)
payload = Request("devel_testTPS", [count, seed, option])
response = self.client.send(payload)
return response.data.result
| 28.636364 | 102 | 0.63746 |
eea95aa529ae7c989528fb319c2cf791290a1de0 | 986 | py | Python | gethands.py | iamSwaps/Hand-Track-Sketcher | 24de737b7d07715573db04eb277e5c24273f8a1b | [
"MIT"
] | null | null | null | gethands.py | iamSwaps/Hand-Track-Sketcher | 24de737b7d07715573db04eb277e5c24273f8a1b | [
"MIT"
] | null | null | null | gethands.py | iamSwaps/Hand-Track-Sketcher | 24de737b7d07715573db04eb277e5c24273f8a1b | [
"MIT"
] | null | null | null | import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
def gethan(camwidth, camheight, camera):
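# Reads one frame, mirrors it, and returns the index fingertip position in pixels, or (0, 0) if no hand is detected.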
success,frame = camera.read()
if not success:
print("Alert ! Camera disconnected")
exit()
else:
frame=cv2.flip(frame, 1)
#cv2.imshow('image',frame)
results = hands.process(frame)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks( frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
ind=hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]
pixel=mp_drawing._normalized_to_pixel_coordinates(ind.x, ind.y, camwidth, camheight)
return pixel
else:
return (0, 0)
| 32.866667 | 100 | 0.624746 |
f1184f26693b117149b892e0a1283dfc43ff6c0d | 1,712 | py | Python | misc/perturb_ic.py | wy2136/wython | 0eaa9db335d57052806ae956afe6a34705407628 | [
"MIT"
] | 1 | 2022-03-21T21:24:40.000Z | 2022-03-21T21:24:40.000Z | misc/perturb_ic.py | wy2136/wython | 0eaa9db335d57052806ae956afe6a34705407628 | [
"MIT"
] | null | null | null | misc/perturb_ic.py | wy2136/wython | 0eaa9db335d57052806ae956afe6a34705407628 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Wenchang Yang (wenchang@princeton.edu)
# Mon Feb 21 13:19:58 EST 2022
# perturb IC by adding small "random" numbers to the temperature field of tile1
if __name__ == '__main__':
import sys
from misc.timer import Timer
tt = Timer('start ' + ' '.join(sys.argv))
import sys, os.path, os, glob, datetime
import xarray as xr, numpy as np, pandas as pd, matplotlib.pyplot as plt
#more imports
#
if __name__ == '__main__':
tt.check('end import')
#
#start from here
#ifile = 'fv_core.res.tile1.nc'
#ofile = 'tmp.nc'
#perturb_num = 0 # usually given as ensemble member # so that each ensemble member has unique perturbation dependent on this number
ifile, ofile, perturb_num = sys.argv[1:4]
perturb_num = float(perturb_num)
ds = xr.open_dataset(ifile)
ds['T'] += np.sin(ds['T'] + perturb_num) * 1e-10 #temperature field perturbation
if 'checksum' in ds['T'].attrs: # delete the checksum attr so no checksum is written by fv_io_restart; otherwise the model fails its checksum check
del ds['T'].attrs['checksum']
encoding = {vname:{'_FillValue': None} for vname in list(ds.variables)}
ds.to_netcdf(ofile, encoding=encoding)
print('[saved]:', ofile)
if __name__ == '__main__':
from wyconfig import * #my plot settings
ds0 = xr.open_dataset(ifile)
ds = xr.open_dataset(ofile)
da = ds['T'] - ds0['T']
da.isel(Time=0, zaxis_1=0).plot(robust=True)
#savefig
if len(sys.argv)>1 and 'savefig' in sys.argv[1:]:
figname = __file__.replace('.py', f'.png')
if 'overwritefig' in sys.argv[1:]:
wysavefig(figname, overwritefig=True)
else:
wysavefig(figname)
tt.check(f'**Done**')
print()
plt.show()
| 33.568627 | 135 | 0.668224 |
8e33f038084388860b412cd07aa0af34506fa566 | 6,096 | py | Python | src/streetscope/streetscope_visualization.py | ddeangelis/predictive-modeling-tech | 986163ac0d94cc2bdd8b66ab4da631cea76b51f5 | [
"MIT"
] | 1 | 2020-07-19T08:58:30.000Z | 2020-07-19T08:58:30.000Z | src/streetscope/streetscope_visualization.py | ddeangelis/predictive-modeling-tech | 986163ac0d94cc2bdd8b66ab4da631cea76b51f5 | [
"MIT"
] | null | null | null | src/streetscope/streetscope_visualization.py | ddeangelis/predictive-modeling-tech | 986163ac0d94cc2bdd8b66ab4da631cea76b51f5 | [
"MIT"
] | null | null | null | '''
File name: streetscope_visualization.py
Author: Tyche Analytics Co.
'''
from streetscope import StreetScope
import pandas as pd
from collections import Counter, defaultdict
from matplotlib import pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsRegressor as KNN
from tqdm import *
def load_amenity_df():
a_df = pd.read_csv("amenity_coords.csv")
a_df.columns = [col.strip() for col in a_df.columns]
return a_df
def produce_visualization():
N = 10000
a_df = load_amenity_df()
lat_lons = a_df[['lat', 'lon']].sample(N)
streetscope = StreetScope()
counts = []
for i, (lat, lon) in lat_lons.iterrows():
print(lat, lon)
count = streetscope.lookup_lat_lon(lat, lon)
count = defaultdict(int, {k.lower():v for (k, v) in count.items()})
counts.append(count)
all_counts = defaultdict(int)
for count in counts:
for k, v in count.items():
all_counts[k] += v
print("total amenity types:", len(all_counts))
sorted_counts = sorted(all_counts.items(), key=lambda kv:kv[1], reverse=True)
sorted_types = [k for (k,v) in sorted_counts]
plt.plot(sorted(all_counts.values(), reverse=True))
plt.loglog()
plt.xlabel("Amenity Rank", fontsize='large')
plt.ylabel("Amenity Count", fontsize='large')
a_types = ["place_of_worship", "school", "bar", "motorcycle_parking", "tattoo"]
for a_type in a_types:
x, y = (sorted_types.index(a_type), all_counts[a_type])
plt.plot(x, y, 'o', color='b')
plt.annotate(a_type, (x*1.1, y*1.1))
plt.title("Amenity Count/Rank Distribution, with Selected Examples", fontsize='large')
plt.savefig("amenity_frequency_rank_plot.png", dpi=300)
plt.close()
top_amenities = sorted_types[:100]
M = np.log10(np.matrix([[count[atype] for atype in top_amenities] for count in counts])+1)
pca = PCA(n_components=10)
X = pca.fit_transform(M)
plt.plot(pca.explained_variance_ratio_[:9], color='b', label='Variance')
plt.plot(range(8, 10), pca.explained_variance_ratio_[8:10], linestyle='--', color='b')
plt.plot(np.cumsum(pca.explained_variance_ratio_[:9]), color='g',
label='Cumulative variance')
s = sum(pca.explained_variance_ratio_[:8])
plt.plot(range(8, 10), s + np.cumsum(pca.explained_variance_ratio_[8:10]),
linestyle='--', color='g')
#plt.semilogy()
plt.ylim(0, 1)
plt.xlabel("Principal Components (Truncated)", fontsize='large')
plt.ylabel("% Variance Explained", fontsize='large')
plt.legend()
plt.title("Explained Variance by Principle Component")
plt.savefig("pca_explained_variance.png", dpi=300)
plt.close()
plt.scatter(X[:,0], X[:,1], s=1)
for i, amen in enumerate(top_amenities[:5]):
x, y = pca.components_[:2,i]
zoom = 10
x_offset = 0.25
plt.arrow(0, 0, x*zoom, y*zoom, color='r', head_width=0.10)
plt.annotate(amen, (x*zoom + x_offset, y*zoom), fontsize='large')
plt.xlabel("First Principal Component", fontsize='large')
plt.ylabel("Second Principal Component", fontsize='large')
plt.title("Locations in Reduced Amenity Space with Selected Loadings", fontsize='large')
plt.xlim(-2, 10)
plt.ylim(-3.5, 10)
plt.savefig("reduced_amenity_space.png", dpi=300)
# loss analysis
lat_lons, addresses = analyze_addresses()
loc_amenities = get_amenities(lat_lons, streetscope)
locs = [loc for loc, a in loc_amenities.items() if a
and all(pd.notnull(df[df.Location==loc].LossCost2))]
loc_amenities = {loc:counts for loc, counts in loc_amenities.items() if loc in locs}
loc_df = df[df.Location.isin(locs)]
loc_ys = np.array([loc_df[loc_df.Location == loc].LossCost2.mean() for loc in locs])
loc_M = np.log10(np.array([[loc_amenities[loc][atype] for atype in top_amenities]
for loc in locs]) + 1)
loc_X = pca.transform(loc_M)
plt.scatter(loc_X[:,0], loc_X[:,1], c=loc_ys, cmap='jet')
for i, amen in enumerate(top_amenities[:5]):
x, y = pca.components_[:2,i]
zoom = 10
x_offset = 0.25
plt.arrow(0, 0, x*zoom, y*zoom, color='r', head_width=0.10)
plt.annotate(amen, (x*zoom + x_offset, y*zoom), fontsize='large')
plt.xlabel("First Principal Component", fontsize='large')
plt.ylabel("Second Principal Component", fontsize='large')
plt.title("MCD Locations in Reduced Amenity Space", fontsize='large')
plt.xlim(-2, 10)
plt.ylim(-3.5, 10)
plt.colorbar(label='LossCost2')
plt.savefig("reduced_amenity_space_w_locs.png", dpi=300)
plt.close()
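# fit a k-nearest-neighbour regression of the loss metric on the first two principal components, used for the interpolated map below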
knn = KNN(n_neighbors=50)
knn.fit(loc_X[:,:2], loc_ys)
x = loc_X[:,0]
y = loc_X[:,1]
z = knn.predict(loc_X[:,:2])
n = 30j
extent = (min(x), max(x), min(y), max(y))
xs,ys = np.mgrid[extent[0]:extent[1]:n, extent[2]:extent[3]:n]
resampled = griddata(x, y, z, xs, ys, interp='linear')
plt.imshow(resampled.T, extent=extent)
cbar=plt.colorbar(label='LossCost2')
for i, amen in enumerate(top_amenities[:5]):
x, y = pca.components_[:2,i]
zoom = 10
x_offset = 0.25
plt.arrow(0, 0, x*zoom, y*zoom, color='r', head_width=0.10)
plt.annotate(amen, (x*zoom + x_offset, y*zoom), fontsize='large')
plt.xlabel("First Principal Component", fontsize='large')
plt.ylabel("Second Principal Component", fontsize='large')
plt.title("K-Nearest Neighbors Regression", fontsize='large')
plt.xlim(-2, 10)
plt.ylim(-3.5, 10)
plt.savefig("knn.png", dpi=300)
plt.close()
from matplotlib.mlab import griddata
def extract_isocontour(x, y, z):
N = 30j
extent = (min(x), max(x), min(y), max(y))
xs,ys = np.mgrid[extent[0]:extent[1]:N, extent[2]:extent[3]:N]
resampled = griddata(x, y, z, xs, ys, interp='linear')
plt.imshow(resampled.T, extent=extent)
plt.xlim([x.min(),x.max()])
plt.ylim([y.min(),y.max()])
cbar=plt.colorbar()
plt.show()
| 38.582278 | 94 | 0.640748 |
fea4b48816337819425e0e84e6e5d6f28dcc283f | 1,815 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/alexa_hosted/hosted_skill_runtime.py | rivamarco/alexa-apis-for-python | 62e3a9057a26003e836fa09aa12a2e1c8b62d6e0 | [
"Apache-2.0"
] | 2 | 2021-10-30T06:52:48.000Z | 2021-11-16T12:34:16.000Z | ask-smapi-model/ask_smapi_model/v1/skill/alexa_hosted/hosted_skill_runtime.py | Shreyas-vgr/alexa-apis-for-python | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/alexa_hosted/hosted_skill_runtime.py | Shreyas-vgr/alexa-apis-for-python | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class HostedSkillRuntime(Enum):
"""
Hosted skill lambda runtime
Allowed enum values: [nodejs8_10, python3_7]
"""
nodejs8_10 = "nodejs8.10"
python3_7 = "python3.7"
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {self.name: self.value}
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.value)
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, HostedSkillRuntime):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 27.089552 | 96 | 0.649587 |
7e27f971fcf060902e1f5ae70668020210d77c18 | 35 | py | Python | lms/Domain/Teachers/__init__.py | bobrovskayaa/LMS | adac324977f5255d15c0ebfeef69995bd9dc8f26 | [
"MIT"
] | null | null | null | lms/Domain/Teachers/__init__.py | bobrovskayaa/LMS | adac324977f5255d15c0ebfeef69995bd9dc8f26 | [
"MIT"
] | 1 | 2018-12-21T21:09:30.000Z | 2018-12-21T21:09:30.000Z | lms/Domain/Teachers/__init__.py | bobrovskayaa/LMS | adac324977f5255d15c0ebfeef69995bd9dc8f26 | [
"MIT"
] | 3 | 2018-12-18T13:37:24.000Z | 2019-02-12T17:33:39.000Z | from .teacher_model import Teacher
| 17.5 | 34 | 0.857143 |
38e363bd0dd712e4bf391ca804575302246610ce | 278 | py | Python | ABC/abc001-abc050/abc024/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc001-abc050/abc024/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc001-abc050/abc024/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# AtCoder Beginner Contest
if __name__ == '__main__':
a, b, c, k = list(map(int, input().split()))
s, t = list(map(int, input().split()))
total = a * s + b * t
if s + t >= k:
total -= c * (s + t)
print(total)
| 19.857143 | 49 | 0.460432 |
84f49540f2ba78a8326878d29c2bfe45d8b99ca5 | 2,066 | py | Python | devicely/muse.py | DigitalBiomarkerDiscoveryPipeline/devicely | 9773fead4d3969a32ca2760b8db4ae728c4d5d50 | [
"MIT"
] | 13 | 2020-07-13T22:26:25.000Z | 2022-03-18T17:40:56.000Z | devicely/muse.py | DigitalBiomarkerDiscoveryPipeline/devicely | 9773fead4d3969a32ca2760b8db4ae728c4d5d50 | [
"MIT"
] | 26 | 2020-11-29T11:11:09.000Z | 2022-01-12T11:34:19.000Z | devicely/muse.py | DigitalBiomarkerDiscoveryPipeline/devicely | 9773fead4d3969a32ca2760b8db4ae728c4d5d50 | [
"MIT"
] | 5 | 2021-07-26T11:01:38.000Z | 2022-02-22T18:23:57.000Z | """
Module to process Muse data from the mind monitor application
"""
import os
import random
import numpy as np
import pandas as pd
class MuseReader:
"""
Parses, timeshifts and writes data generated by the Muse S headband using the Mind Monitor application.
Attributes
----------
data : DataFrame
dataframe of the read data
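Example (illustrative): MuseReader('muse.csv').timeshift() reads a recording and shifts its timestamps by a random offset into the past.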
"""
def __init__(self, path):
"""
Parse the csv file at the specified path into a dataframe.
Parameters
----------
path : str
path to the csv file
"""
self.data = pd.read_csv(path, parse_dates=['TimeStamp'], index_col='TimeStamp')
def write(self, path):
"""
Writes the dataframe back into a csv file.
Parameters
----------
path : str
path to the write file.
"""
self.data.reset_index().to_csv(path, index=False)
def timeshift(self, shift='random'):
"""
Shifts the index column 'TimeStamp'.
Parameters
----------
shift : None/'random', pd.Timestamp or pd.Timedelta
If shift is not specified, shifts the data by a random time interval
between one month and two years to the past.
If shift is a timedelta, shifts the data by that timedelta.
If shift is a timestamp, shifts the data such that the earliest entry
is at that timestamp and the remaining values keep the same
time distance to the first entry.
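For example (illustrative), timeshift(pd.Timedelta('-30 days')) moves every timestamp 30 days into the past, while timeshift() picks a random offset between one month and two years.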
"""
if shift == 'random':
one_month = pd.Timedelta('- 30 days').value
two_years = pd.Timedelta('- 730 days').value
random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))
self.timeshift(random_timedelta)
if isinstance(shift, pd.Timestamp):
timedeltas = self.data.index - self.data.index[0]
self.data.index = shift + timedeltas
if isinstance(shift, pd.Timedelta):
self.data.index += shift
| 27.918919 | 107 | 0.590029 |
924583d8796ebee51a455942c936b981040f84cd | 31,604 | py | Python | burger/toppings/blockstates.py | extremeheat/Burger | fdff962aeb1aa0351fc222e005af3fa9248345fb | [
"MIT"
] | null | null | null | burger/toppings/blockstates.py | extremeheat/Burger | fdff962aeb1aa0351fc222e005af3fa9248345fb | [
"MIT"
] | null | null | null | burger/toppings/blockstates.py | extremeheat/Burger | fdff962aeb1aa0351fc222e005af3fa9248345fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
from .topping import Topping
from jawa.constants import *
from jawa.util.descriptor import method_descriptor, field_descriptor
import traceback
import six
import six.moves
# EnumFacing.Plane. Needed because this is also a predicate, which is used
# to get certain facings
class Plane:
def __init__(self, directions):
self.directions = directions
HORIZONTAL = Plane(["NORTH", "EAST", "SOUTH", "WEST"])
VERTICAL = Plane(["UP", "DOWN"])
PLANES = { "HORIZONTAL": HORIZONTAL, "VERTICAL": VERTICAL }
# Classes that represent predicates in various versions
PREDICATE_CLASSES = ("com/google/common/base/Predicate", "java/util/function/Predicate")
class BlockStateTopping(Topping):
"""Gets tile entity (block entity) types."""
PROVIDES = [
"blocks.states"
]
DEPENDS = [
"blocks",
"version.data",
"version.is_flattened",
"identify.blockstatecontainer",
"identify.sounds.list",
"identify.enumfacing.plane"
]
@staticmethod
def act(aggregate, classloader, verbose=False):
if "blockstatecontainer" not in aggregate["classes"]:
if verbose:
print("blockstatecontainer not found; skipping blockstates")
return
is_flattened = aggregate["version"]["is_flattened"]
blockstatecontainer = aggregate["classes"]["blockstatecontainer"]
block_cf = classloader[aggregate["classes"]["block.superclass"]]
plane = aggregate["classes"]["enumfacing.plane"]
# Part 1: build up a list of property fields, by class. Also build a set of property types.
# 18w19a and above use a builder to register states; before that they just directly returned a container.
# Note that blockstatecontainer is the builder class in 18w19a.
is_18w19a = aggregate["version"]["data"] >= 1484
is_protected = lambda m: m.access_flags.acc_protected
if is_18w19a:
base_method = block_cf.methods.find_one(returns="V", args="L" + blockstatecontainer + ";", f=is_protected)
else:
base_method = block_cf.methods.find_one(returns="L" + blockstatecontainer + ";", args="", f=is_protected)
def matches(other_method):
return (other_method.name.value == base_method.name.value and
other_method.descriptor.value == base_method.descriptor.value)
_property_types = set()
# Properties that are used by each block class
properties_by_class = {}
def process_class(name):
"""
Gets the properties for the given block class, checking the parent
class if none are defined. Returns the properties, and also adds
them to properties_by_class
"""
if name in properties_by_class:
# Caching - avoid reading the same class multiple times
return properties_by_class[name]
cf = classloader[name]
method = cf.methods.find_one(f=matches)
if not method:
properties = process_class(cf.super_.name.value)
properties_by_class[name] = properties
return properties
properties = None
if_pos = None
stack = []
for ins in method.code.disassemble():
# This could _almost_ just be checking for getstatic, but
# brewing stands use an array of properties as the field,
# so we need some stupid extra logic.
if ins == "new":
assert not is_18w19a # In 18w19a this should be a parameter
const = ins.operands[0]
type_name = const.name.value
assert type_name == blockstatecontainer
stack.append(object())
elif ins == "aload" and ins.operands[0].value == 1:
assert is_18w19a # The parameter is only used in 18w19a and above
stack.append(object())
elif ins in ("sipush", "bipush"):
stack.append(ins.operands[0].value)
elif ins in ("anewarray", "newarray"):
length = stack.pop()
val = [None] * length
stack.append(val)
elif ins == "getstatic":
const = ins.operands[0]
prop = {
"field_name": const.name_and_type.name.value
}
desc = field_descriptor(const.name_and_type.descriptor.value)
_property_types.add(desc.name)
stack.append(prop)
elif ins == "aaload":
index = stack.pop()
array = stack.pop()
prop = array.copy()
prop["array_index"] = index
stack.append(prop)
elif ins == "aastore":
value = stack.pop()
index = stack.pop()
array = stack.pop()
array[index] = value
elif ins == "dup":
stack.append(stack[-1])
elif ins == "invokespecial":
const = ins.operands[0]
assert const.name_and_type.name == "<init>"
desc = method_descriptor(const.name_and_type.descriptor.value)
assert len(desc.args) == 2
# Normally this constructor call would return nothing, but
# in this case we'd rather remove the object it's called on
# and keep the properties array (its parameter)
arg = stack.pop()
stack.pop() # Block
stack.pop() # Invocation target
stack.append(arg)
elif ins == "invokevirtual":
# Two possibilities (both only present pre-flattening):
# 1. It's isDouble() for a slab. Two different sets of
# properties in that case.
# 2. It's getTypeProperty() for flowers. Only one
# set of properties, but other hacking is needed.
# We can differentiate these cases based off of the return
# type.
# There is a third option post 18w19a:
# 3. It's calling the state container's register method.
# We can check this just by the type.
const = ins.operands[0]
desc = method_descriptor(const.name_and_type.descriptor.value)
if const.class_.name == blockstatecontainer:
# Case 3.
assert properties == None
properties = stack.pop()
assert desc.returns.name == blockstatecontainer
# Don't pop anything, since we'd just pop and re-add the builder
elif desc.returns.name == "boolean":
# Case 2.
properties = [None, None]
stack.pop() # Target object
# XXX shouldn't something be returned here?
else:
# Case 1.
# Assume that the return type is the base interface
# for properties
stack.pop() # Target object
stack.append(None)
elif ins == "ifeq":
assert if_pos is None
if_pos = ins.pos + ins.operands[0].value
elif ins == "pop":
stack.pop()
elif ins == "areturn":
assert not is_18w19a # In 18w19a we don't return a container
if if_pos == None:
assert properties == None
properties = stack.pop()
else:
assert isinstance(properties, list)
index = 0 if ins.pos < if_pos else 1
assert properties[index] == None
properties[index] = stack.pop()
elif ins == "return":
assert is_18w19a # We only return void in 18w19a
elif ins == "aload":
assert ins.operands[0].value == 0 # Should be aload_0 (this)
stack.append(object())
elif verbose:
print("%s createBlockState contains unimplemented ins %s" % (name, ins))
if properties is None:
# If we never set properties, warn; however, this is normal for
# the base implementation in Block in 18w19a
if verbose and name != aggregate["classes"]["block.superclass"]:
print("Didn't find anything that set properties for %s" % name)
properties = []
properties_by_class[name] = properties
return properties
for block in six.itervalues(aggregate["blocks"]["block"]):
cls = block["class"]
try:
process_class(cls)
except:
if verbose:
print("Failed to process properties for %s (for %s)" % (cls, block["text_id"]))
traceback.print_exc()
properties_by_class[cls] = []
assert len(_property_types) == 4
property_types = {}
for type in _property_types:
cf = classloader[type]
if cf.super_.name.value in _property_types:
property_types[type] = "direction"
else:
attribute = cf.attributes.find_one(name='Signature')
signature = attribute.signature.value
# Somewhat ugly behavior until an actual parser is added for these
if "Enum" in signature:
property_types[type] = "enum"
elif "Integer" in signature:
property_types[type] = "int"
elif "Boolean" in signature:
property_types[type] = "bool"
elif verbose:
print("Unknown property type %s with signature %s" % (type, signature))
# Part 2: figure out what each field is.
is_enum_cache = {}
def is_enum(cls):
"""
Checks if the given class is an enum.
This needs to be recursive due to inner classes for enums.
"""
if cls in is_enum_cache:
return is_enum_cache[cls]
if cls not in classloader:
is_enum_cache[cls] = False
return False
cf = classloader[cls]
super = cf.super_.name.value
if super == "java/lang/Enum":
is_enum_cache[cls] = True
elif super == "java/lang/Object":
is_enum_cache[cls] = False
else:
is_enum_cache[cls] = is_enum(super)
return is_enum_cache[cls]
fields_by_class = {}
def find_field(cls, field_name):
"""
cls: name of the class
field_name: name of the field to find. If None, returns all fields
"""
if cls in fields_by_class:
if field_name is not None:
if field_name not in fields_by_class[cls] and verbose:
print("Requested field %s.%s but that wasn't found last time" % (cls, field_name))
return fields_by_class[cls][field_name]
else:
return fields_by_class[cls]
elif cls == aggregate["classes"].get("sounds.list"):
# If we already know what the sounds list class is, just ignore it
# as going through it would take a while for no reason
return object()
cf = classloader[cls]
fields_by_class[cls] = {}
super_name = cf.super_.name.value
if not super_name.startswith("java/lang"):
# Add fields from superclass
fields_by_class[cls].update(find_field(super_name, None))
init = cf.methods.find_one(name="<clinit>")
if not init:
if field_name is not None:
return fields_by_class[cls][field_name]
else:
return fields_by_class[cls]
stack = []
locals = {}
# After certain calls, we're no longer storing properties.
# But, we still want to assign values for remaining fields;
# go through and put None in, only looking at putstatic.
ignore_remaining = False
for ins in init.code.disassemble():
if ins == "putstatic":
const = ins.operands[0]
name = const.name_and_type.name.value
if ignore_remaining:
value = None
else:
value = stack.pop()
if isinstance(value, dict):
if "declared_in" not in value:
# If there's already a declared_in, this is a field
# loaded with getstatic, and we don't want to change
# the true location of it
value["declared_in"] = cls
if value["class"] == plane:
# Convert to an instance of Plane
# Now is the easiest time to do this, and for
# Plane itself it doesn't matter since it's never
# used on the stack
assert "enum_name" in value
assert value["enum_name"] in PLANES
value = PLANES[value["enum_name"]]
fields_by_class[cls][name] = value
elif ignore_remaining:
continue
elif ins == "getstatic":
const = ins.operands[0]
target = const.class_.name.value
type = field_descriptor(const.name_and_type.descriptor.value).name
name = const.name_and_type.name.value
if not target.startswith("java/"):
stack.append(find_field(target, name))
else:
stack.append(object())
elif ins in ("ldc", "ldc_w", "ldc2_w"):
const = ins.operands[0]
if isinstance(const, ConstantClass):
stack.append("%s.class" % const.name.value)
elif isinstance(const, String):
stack.append(const.string.value)
else:
stack.append(const.value)
elif ins.mnemonic.startswith("dconst"):
stack.append(float(ins.mnemonic[-1]))
elif ins in ("bipush", "sipush"):
stack.append(ins.operands[0].value)
elif ins == "aconst_null":
stack.append(None)
elif ins in ("anewarray", "newarray"):
length = stack.pop()
stack.append([None] * length)
elif ins in ("aaload", "iaload"):
index = stack.pop()
array = stack.pop()
prop = array[index].copy()
prop["array_index"] = index
stack.append(prop)
elif ins in ("aastore", "iastore"):
value = stack.pop()
index = stack.pop()
array = stack.pop()
array[index] = value
elif ins == "arraylength":
array = stack.pop()
stack.append(len(array))
elif ins == "dup":
stack.append(stack[-1])
elif ins == "invokedynamic":
# Try to get the class that's being created
const = ins.operands[0]
desc = method_descriptor(const.name_and_type.descriptor.value)
stack.append({"dynamic_class": desc.returns.name, "class": cls})
elif ins.mnemonic.startswith("invoke"):
const = ins.operands[0]
desc = method_descriptor(const.name_and_type.descriptor.value)
num_args = len(desc.args)
args = [stack.pop() for _ in six.moves.range(num_args)]
args.reverse()
if ins == "invokestatic":
if const.class_.name.value.startswith("com/google/"):
# Call to e.g. Maps.newHashMap, beyond what we
# care about
ignore_remaining = True
continue
obj = None
else:
obj = stack.pop()
if desc.returns.name in property_types:
prop = {
"class": desc.returns.name,
"type": property_types[desc.returns.name],
"args": args
}
stack.append(prop)
elif const.name_and_type.name == "<init>":
if obj["is_enum"]:
obj["enum_name"] = args[0]
obj["enum_ordinal"] = args[1]
else:
obj["args"] = args
elif const.name_and_type.name == "values":
# Enum values
fields = find_field(const.class_.name.value, None)
stack.append([fld for fld in fields
if isinstance(fld, dict) and fld["is_enum"]])
elif desc.returns.name != "void":
if isinstance(obj, Plane):
# One special case, where EnumFacing.Plane is used
# to get a list of directions
stack.append(obj.directions)
elif (isinstance(obj, dict) and obj["is_enum"] and
desc.returns.name == "int"):
# Assume it's the enum ordinal, even if it really
# isn't
stack.append(obj["enum_ordinal"])
else:
o = object()
stack.append(o)
elif ins in ("istore", "lstore", "fstore", "dstore", "astore"):
# Store other than array store
locals[ins.operands[0].value] = stack.pop()
elif ins in ("iload", "lload", "fload", "dload", "aload"):
# Load other than array load
stack.append(locals[ins.operands[0].value])
elif ins == "new":
const = ins.operands[0]
type_name = const.name.value
obj = {
"class": type_name,
"is_enum": is_enum(type_name)
}
stack.append(obj)
elif ins == "checkcast":
# We don't have type information, so no checking or casting
pass
elif ins == "return":
break
elif ins == "if_icmpge":
# Code in stairs that loops over state combinations for hitboxes
break
elif verbose:
print("%s initializer contains unimplemented ins %s" % (cls, ins))
if field_name is not None:
return fields_by_class[cls][field_name]
else:
return fields_by_class[cls]
# Part 3: convert those fields into actual well-formed properties.
# Property handlers.
def base_handle_property(prop):
field = prop["field"]
args = field["args"]
assert len(args) >= 1
assert isinstance(args[0], six.string_types)
ret = {
"type": field["type"],
"name": args[0],
"field_name": prop["field_name"]
}
if "array_index" in prop:
ret["array_index"] = prop["array_index"]
else:
# Unfortunately we don't have a declared_in field for arrays at this time
ret["declared_in"] = field["declared_in"]
return ret
def handle_boolean_property(prop):
ret = base_handle_property(prop)
assert len(prop["field"]["args"]) == 1
ret["num_values"] = 2
return ret
def handle_int_property(prop):
ret = base_handle_property(prop)
args = prop["field"]["args"]
assert len(args) == 3
assert isinstance(args[1], int)
assert isinstance(args[2], int)
ret["num_values"] = args[2] - args[1] + 1
ret["min"] = args[1]
ret["max"] = args[2]
return ret
def handle_enum_property(prop):
ret = base_handle_property(prop)
args = prop["field"]["args"]
assert len(args) in (2, 3)
assert isinstance(args[1], six.string_types)
assert args[1].endswith(".class") # Should be a class
class_name = args[1][:-len(".class")]
ret["enum_class"] = class_name
if len(args) == 2:
values = [c["enum_name"] for c
in six.itervalues(find_field(class_name, None))
if isinstance(c, dict) and c["is_enum"]]
elif isinstance(args[2], list):
values = [c["enum_name"] for c in args[2]]
elif isinstance(args[2], dict):
# Possibly a predicate (used for powered and activator rails)
if "dynamic_class" in args[2]:
predicate_type = args[2]["dynamic_class"]
predicate_class = args[2]["dynamic_class"]
else:
cf = classloader[args[2]["class"]]
if len(cf.interfaces) == 1:
predicate_type = cf.interfaces[0].name.value
predicate_class = args[2]["class"]
else:
if verbose:
print("Could not find predicate class for args %s and interfaces %s" % (args, cf.interfaces))
predicate_type = None
predicate_class = None
if predicate_type in PREDICATE_CLASSES:
ret["predicate"] = predicate_class
# Will be trimmed later
values = [c["enum_name"] for c
in six.itervalues(find_field(class_name, None))
if isinstance(c, dict) and c["is_enum"]]
elif verbose:
print("Unhandled args for %s" % prop)
values = []
else:
# Regular Collection (unused)
if verbose:
print("Unhandled args for %s" % prop)
values = []
ret["values"] = values
ret["num_values"] = len(values)
return ret
def handle_direction_property(prop):
ret = base_handle_property(prop)
args = prop["field"]["args"]
assert len(args) in (1, 2)
if len(args) == 1:
# No restrictions
values = ["DOWN", "UP", "NORTH", "SOUTH", "EAST", "WEST"]
elif isinstance(args[1], list):
if isinstance(args[1][0], str):
# A Plane's facings
values = args[1]
else:
# Fields
values = [c["enum_name"] for c in args[1]]
elif isinstance(args[1], Plane):
# Plane used as a predicate
values = args[1].directions
elif isinstance(args[1], dict):
# Possibly a predicate (used for hoppers)
if "dynamic_class" in args[1]:
predicate_type = args[1]["dynamic_class"]
predicate_class = args[1]["dynamic_class"]
else:
cf = classloader[args[1]["class"]]
if len(cf.interfaces) == 1:
predicate_type = cf.interfaces[0].name.value
predicate_class = args[1]["class"]
else:
if verbose:
print("Could not find predicate class for args %s and interfaces %s" % (args, cf.interfaces))
predicate_type = None
predicate_class = None
if predicate_type in PREDICATE_CLASSES:
ret["predicate"] = predicate_class
# Will be filled in later
values = ["DOWN", "UP", "NORTH", "SOUTH", "EAST", "WEST"]
elif verbose:
print("Unhandled args for %s" % prop)
values = []
else:
# Regular Collection (unused)
if verbose:
print("Unhandled args for %s" % prop)
values = []
ret["values"] = values
ret["num_values"] = len(values)
return ret
property_handlers = {
'bool': handle_boolean_property,
'int': handle_int_property,
'enum': handle_enum_property,
'direction': handle_direction_property
}
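    # Rough sketch of the outcome (added for clarity; not in the original
    # source): the keys of property_handlers match the field "type" values
    # attached earlier, and process_property below stores the handler's result
    # under property["data"], e.g. {"type": "bool", "name": "powered",
    # "field_name": ..., "num_values": 2}.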
def process_property(property):
field_name = property["field_name"]
try:
field = find_field(cls, field_name)
if "array_index" in property:
field = field[property["array_index"]]
property["field"] = field
property["data"] = property_handlers[field["type"]](property)
        except Exception:
if verbose:
print("Failed to handle property %s (declared %s.%s)" % (property, cls, field_name))
traceback.print_exc()
property["data"] = None
for cls, properties in six.iteritems(properties_by_class):
for property in properties:
if isinstance(property, dict):
process_property(property)
elif isinstance(property, list):
# Slabs
for real_property in property:
process_property(real_property)
            elif property is None:
# Manual handling
pass
elif verbose:
print("Skipping odd property %s (declared in %s)" % (property, cls))
# Part 4: attach that information to blocks.
state_id = 0
for block_id in aggregate["blocks"]["ordered_blocks"]:
block = aggregate["blocks"]["block"][block_id]
block["num_states"] = 1
properties = properties_by_class[block["class"]]
if len(properties) != 0 and isinstance(properties[0], list) and "slab" in block_id:
# Convert the double-list of properties for slabs to just 1
if "double" in block["text_id"]:
properties = properties[1]
else:
properties = properties[1]
block["states"] = []
for prop in properties:
            if prop is None:
# Manually handle a few properties
if block_id == "yellow_flower":
prop = { "data": {
"type": "enum",
"name": "type",
# no field_name
# no enum_class
"values": ["DANDELION"],
"num_values": 1
}}
elif block_id == "red_flower":
prop = { "data": {
"type": "enum",
"name": "type",
# no field_name
# no enum_class
"values": ["POPPY", "BLUE_ORCHID", "ALLIUM", "HOUSTONIA", "RED_TULIP", "ORANGE_TULIP", "WHITE_TULIP", "PINK_TULIP", "OXEYE_DAISY"],
"num_values": 9
}}
else:
if verbose:
print("Skipping missing prop for %s" % block_id)
continue
if not isinstance(prop, dict) or not isinstance(prop["data"], dict):
if verbose:
print("Skipping bad prop %s for %s" % (prop, block_id))
continue
if "predicate" in prop["data"]:
data = prop["data"].copy()
# Fun times... guess what the predicate does,
# based off of the block
if block_id == "hopper":
predicate = lambda v: v != "UP"
elif block_id in ("powered_rail", "activator_rail", "golden_rail", "detector_rail"):
predicate = lambda v: v not in ("NORTH_EAST", "NORTH_WEST", "SOUTH_EAST", "SOUTH_WEST")
elif prop["field"]["declared_in"] == aggregate["blocks"]["block"]["torch"]["class"]:
# Pre-flattening
predicate = lambda v: v != "DOWN"
elif block_id == "leaves" or block_id == "log":
predicate = lambda v: v in ("OAK", "BIRCH", "SPRUCE", "JUNGLE")
elif block_id == "leaves2" or block_id == "log2":
predicate = lambda v: v in ("DARK_OAK", "ACACIA")
else:
if verbose:
print("Unhandled predicate for prop %s for %s" % (prop, block_id))
predicate = lambda v: False
data["values"] = [v for v in data["values"] if predicate(v)]
data["num_values"] = len(data["values"])
else:
data = prop["data"]
block["states"].append(data)
block["num_states"] *= data["num_values"]
if not is_flattened:
# Each block is allocated 16 states for metadata pre-flattening
block["num_states"] = 16
block["min_state_id"] = state_id
state_id += block["num_states"]
block["max_state_id"] = state_id - 1
 | 44.764873 | 159 | 0.475604 |
0819c37c9c6687975bd4fd571864eaeb12f86c7b | 1,234 | py | Python | cajas/chains/migrations/0002_auto_20190408_1614.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | ["MIT"] | null | null | null | cajas/chains/migrations/0002_auto_20190408_1614.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | ["MIT"] | null | null | null | cajas/chains/migrations/0002_auto_20190408_1614.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | ["MIT"] | null | null | null |
# Generated by Django 2.0.9 on 2019-04-08 16:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('office', '0001_initial'),
('chains', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userplace',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='related_chains', to=settings.AUTH_USER_MODEL, verbose_name='Usuario'),
),
migrations.AddField(
model_name='chainplace',
name='chain',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_places', to='chains.Chain', verbose_name='Cadena'),
),
migrations.AddField(
model_name='chain',
name='office',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_chaines', to='office.OfficeCountry', verbose_name='Oficina'),
),
]
 | 35.257143 | 189 | 0.657212 |
04569807ff23385a13213c4d302d578019a8ae2b | 1,953 | py | Python | ch05/two_layer_net.py | care0717/deep-learning-from-scratch | 0d8eaae1636a47b6da27930ba857bfa3fa7d355c | ["MIT"] | null | null | null | ch05/two_layer_net.py | care0717/deep-learning-from-scratch | 0d8eaae1636a47b6da27930ba857bfa3fa7d355c | ["MIT"] | null | null | null | ch05/two_layer_net.py | care0717/deep-learning-from-scratch | 0d8eaae1636a47b6da27930ba857bfa3fa7d355c | ["MIT"] | null | null | null |
import numpy as np
from common.differential_function import numerical_gradient
from common.layer import Affine, Relu, SoftmaxWithLoss
from collections import OrderedDict
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
self.params = {'W1': weight_init_std * np.random.randn(input_size, hidden_size), 'b1': np.zeros(hidden_size),
'W2': weight_init_std * np.random.randn(hidden_size, output_size), 'b2': np.zeros(output_size)}
self.layers = OrderedDict()
self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
self.layers['Relu1'] = Relu()
self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
t = np.argmax(t, axis=1)
return np.sum(y == t) / float(x.shape[0])
def numerical_gradient(self, x, t):
def loss_w(w): return self.loss(x, t)
return {'W1': numerical_gradient(loss_w, self.params['W1']), 'b1': numerical_gradient(loss_w, self.params['b1']),
'W2': numerical_gradient(loss_w, self.params['W2']), 'b2': numerical_gradient(loss_w, self.params['b2'])}
def gradient(self, x, t):
self.loss(x, t)
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
return {
'W1': self.layers['Affine1'].dW, 'b1': self.layers['Affine1'].db,
'W2': self.layers['Affine2'].dW, 'b2': self.layers['Affine2'].db
}
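# Illustrative usage sketch (added for this write-up; not part of the original
# file). It assumes MNIST-like shapes and random data; the book's training
# scripts use the real MNIST dataset and the common.* helper modules instead.
if __name__ == '__main__':
    net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
    x = np.random.rand(3, 784)                # mini-batch of 3 fake images
    t = np.eye(10)[np.random.choice(10, 3)]   # one-hot labels for 10 classes
    grads = net.gradient(x, t)                # backprop gradients
    for key in ('W1', 'b1', 'W2', 'b2'):
        net.params[key] -= 0.1 * grads[key]   # one plain SGD step
    print('loss after one step:', net.loss(x, t))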
 | 35.509091 | 121 | 0.605735 |
57b274e89c1319a1c7080e0d4133c9592ddd7686 | 18,405 | py | Python | Contents/Libraries/Shared/rebulk/pattern.py | jippo015/Sub-Zero.bundle | 734e0f7128c05c0f639e11e7dfc77daa1014064b | ["MIT"] | 1,553 | 2015-11-09T02:17:06.000Z | 2022-03-31T20:24:52.000Z | Contents/Libraries/Shared/rebulk/pattern.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | ["MIT"] | 691 | 2015-11-05T21:32:26.000Z | 2022-03-17T10:52:45.000Z | Contents/Libraries/Shared/rebulk/pattern.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | ["MIT"] | 162 | 2015-11-06T19:38:55.000Z | 2022-03-16T02:42:41.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abstract pattern class definition along with various implementations (regexp, string, functional)
"""
# pylint: disable=super-init-not-called,wrong-import-position
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from . import debug
from .loose import call, ensure_list, ensure_dict
from .match import Match
from .remodule import re, REGEX_AVAILABLE
from .utils import find_all, is_iterable, get_first_defined
@six.add_metaclass(ABCMeta)
class Pattern(object):
"""
Definition of a particular pattern to search for.
"""
def __init__(self, name=None, tags=None, formatter=None, value=None, validator=None, children=False, every=False,
private_parent=False, private_children=False, private=False, private_names=None, ignore_names=None,
marker=False, format_all=False, validate_all=False, disabled=lambda context: False, log_level=None,
properties=None, post_processor=None, **kwargs):
"""
:param name: Name of this pattern
:type name: str
:param tags: List of tags related to this pattern
:type tags: list[str]
:param formatter: dict (name, func) of formatter to use with this pattern. name is the match name to support,
and func a function(input_string) that returns the formatted string. A single formatter function can also be
        passed as a shortcut for {None: formatter}. The returned formatted string will be set in Match.value property.
:type formatter: dict[str, func] || func
:param value: dict (name, value) of value to use with this pattern. name is the match name to support,
and value an object for the match value. A single object value can also be
passed as a shortcut for {None: value}. The value with be set in Match.value property.
:type value: dict[str, object] || object
:param validator: dict (name, func) of validator to use with this pattern. name is the match name to support,
and func a function(match) that returns the a boolean. A single validator function can also be
passed as a shortcut for {None: validator}. If return value is False, match will be ignored.
:param children: generates children instead of parent
:type children: bool
:param every: generates both parent and children.
:type every: bool
        :param private: flag this pattern as being private.
:type private: bool
:param private_parent: force return of parent and flag parent matches as private.
:type private_parent: bool
:param private_children: force return of children and flag children matches as private.
:type private_children: bool
:param private_names: force return of named matches as private.
:type private_names: bool
:param ignore_names: drop some named matches after validation.
:type ignore_names: bool
        :param marker: flag this pattern as being a marker.
        :type marker: bool
        :param format_all: if True, pattern will format every match in the hierarchy (even matches not yielded).
        :type format_all: bool
        :param validate_all: if True, pattern will validate every match in the hierarchy (even matches not yielded).
        :type validate_all: bool
:param disabled: if True, this pattern is disabled. Can also be a function(context).
:type disabled: bool|function
        :param log_level: log level associated with this pattern
        :type log_level: int
        :param post_processor: post-processing function
        :type post_processor: func
"""
# pylint:disable=too-many-locals,unused-argument
self.name = name
self.tags = ensure_list(tags)
self.formatters, self._default_formatter = ensure_dict(formatter, lambda x: x)
self.values, self._default_value = ensure_dict(value, None)
self.validators, self._default_validator = ensure_dict(validator, lambda match: True)
self.every = every
self.children = children
self.private = private
self.private_names = private_names if private_names else []
self.ignore_names = ignore_names if ignore_names else []
self.private_parent = private_parent
self.private_children = private_children
self.marker = marker
self.format_all = format_all
self.validate_all = validate_all
if not callable(disabled):
self.disabled = lambda context: disabled
else:
self.disabled = disabled
self._log_level = log_level
self._properties = properties
self.defined_at = debug.defined_at()
if not callable(post_processor):
self.post_processor = None
else:
self.post_processor = post_processor
@property
def log_level(self):
"""
Log level for this pattern.
:return:
:rtype:
"""
return self._log_level if self._log_level is not None else debug.LOG_LEVEL
def _yield_children(self, match):
"""
        Does this match have children
:param match:
:type match:
:return:
:rtype:
"""
return match.children and (self.children or self.every)
def _yield_parent(self):
"""
        Should the parent match be yielded by this pattern
:return:
:rtype:
"""
return not self.children or self.every
def _match_parent(self, match, yield_parent):
"""
Handle a parent match
:param match:
:type match:
:param yield_parent:
:type yield_parent:
:return:
:rtype:
"""
if not match or match.value == "":
return False
pattern_value = get_first_defined(self.values, [match.name, '__parent__', None],
self._default_value)
if pattern_value:
match.value = pattern_value
if yield_parent or self.format_all:
match.formatter = get_first_defined(self.formatters, [match.name, '__parent__', None],
self._default_formatter)
if yield_parent or self.validate_all:
validator = get_first_defined(self.validators, [match.name, '__parent__', None],
self._default_validator)
if validator and not validator(match):
return False
return True
def _match_child(self, child, yield_children):
"""
Handle a children match
:param child:
:type child:
:param yield_children:
:type yield_children:
:return:
:rtype:
"""
if not child or child.value == "":
return False
pattern_value = get_first_defined(self.values, [child.name, '__children__', None],
self._default_value)
if pattern_value:
child.value = pattern_value
if yield_children or self.format_all:
child.formatter = get_first_defined(self.formatters, [child.name, '__children__', None],
self._default_formatter)
if yield_children or self.validate_all:
validator = get_first_defined(self.validators, [child.name, '__children__', None],
self._default_validator)
if validator and not validator(child):
return False
return True
def matches(self, input_string, context=None, with_raw_matches=False):
"""
Computes all matches for a given input
:param input_string: the string to parse
:type input_string: str
:param context: the context
:type context: dict
        :param with_raw_matches: if True, also return the raw (unfiltered) matches
        :type with_raw_matches: bool
:return: matches based on input_string for this pattern
:rtype: iterator[Match]
"""
# pylint: disable=too-many-branches
matches = []
raw_matches = []
for pattern in self.patterns:
yield_parent = self._yield_parent()
match_index = -1
for match in self._match(pattern, input_string, context):
match_index += 1
match.match_index = match_index
raw_matches.append(match)
yield_children = self._yield_children(match)
if not self._match_parent(match, yield_parent):
continue
validated = True
for child in match.children:
if not self._match_child(child, yield_children):
validated = False
break
if validated:
if self.private_parent:
match.private = True
if self.private_children:
for child in match.children:
child.private = True
if yield_parent or self.private_parent:
matches.append(match)
if yield_children or self.private_children:
for child in match.children:
child.match_index = match_index
matches.append(child)
matches = self._matches_post_process(matches)
self._matches_privatize(matches)
self._matches_ignore(matches)
if with_raw_matches:
return matches, raw_matches
return matches
def _matches_post_process(self, matches):
"""
Post process matches with user defined function
:param matches:
:type matches:
:return:
:rtype:
"""
if self.post_processor:
return self.post_processor(matches, self)
return matches
def _matches_privatize(self, matches):
"""
Mark matches included in private_names with private flag.
:param matches:
:type matches:
:return:
:rtype:
"""
if self.private_names:
for match in matches:
if match.name in self.private_names:
match.private = True
def _matches_ignore(self, matches):
"""
Ignore matches included in ignore_names.
:param matches:
:type matches:
:return:
:rtype:
"""
if self.ignore_names:
for match in list(matches):
if match.name in self.ignore_names:
matches.remove(match)
@abstractproperty
def patterns(self): # pragma: no cover
"""
List of base patterns defined
:return: A list of base patterns
:rtype: list
"""
pass
@property
def properties(self):
"""
        Property names and values that can be retrieved by this pattern.
:return:
:rtype:
"""
if self._properties:
return self._properties
return {}
@abstractproperty
def match_options(self): # pragma: no cover
"""
dict of default options for generated Match objects
:return: **options to pass to Match constructor
:rtype: dict
"""
pass
@abstractmethod
def _match(self, pattern, input_string, context=None): # pragma: no cover
"""
Computes all matches for a given pattern and input
:param pattern: the pattern to use
:param input_string: the string to parse
:type input_string: str
:param context: the context
:type context: dict
:return: matches based on input_string for this pattern
:rtype: iterator[Match]
"""
pass
def __repr__(self):
defined = ""
if self.defined_at:
defined = "@%s" % (self.defined_at,)
return "<%s%s:%s>" % (self.__class__.__name__, defined, self.__repr__patterns__)
@property
def __repr__patterns__(self):
return self.patterns
class StringPattern(Pattern):
"""
Definition of one or many strings to search for.
"""
def __init__(self, *patterns, **kwargs):
super(StringPattern, self).__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@property
def patterns(self):
return self._patterns
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
for index in find_all(input_string, pattern, **self._kwargs):
yield Match(index, index + len(pattern), pattern=self, input_string=input_string, **self._match_kwargs)
class RePattern(Pattern):
"""
Definition of one or many regular expression pattern to search for.
"""
def __init__(self, *patterns, **kwargs):
super(RePattern, self).__init__(**kwargs)
self.repeated_captures = REGEX_AVAILABLE
if 'repeated_captures' in kwargs:
self.repeated_captures = kwargs.get('repeated_captures')
if self.repeated_captures and not REGEX_AVAILABLE: # pragma: no cover
raise NotImplementedError("repeated_capture is available only with regex module.")
self.abbreviations = kwargs.get('abbreviations', [])
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
self._children_match_kwargs = filter_match_kwargs(kwargs, children=True)
self._patterns = []
for pattern in patterns:
if isinstance(pattern, six.string_types):
if self.abbreviations and pattern:
for key, replacement in self.abbreviations:
pattern = pattern.replace(key, replacement)
pattern = call(re.compile, pattern, **self._kwargs)
elif isinstance(pattern, dict):
if self.abbreviations and 'pattern' in pattern:
for key, replacement in self.abbreviations:
pattern['pattern'] = pattern['pattern'].replace(key, replacement)
pattern = re.compile(**pattern)
elif hasattr(pattern, '__iter__'):
pattern = re.compile(*pattern)
self._patterns.append(pattern)
@property
def patterns(self):
return self._patterns
@property
def __repr__patterns__(self):
return [pattern.pattern for pattern in self.patterns]
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
names = dict((v, k) for k, v in pattern.groupindex.items())
for match_object in pattern.finditer(input_string):
start = match_object.start()
end = match_object.end()
main_match = Match(start, end, pattern=self, input_string=input_string, **self._match_kwargs)
if pattern.groups:
for i in range(1, pattern.groups + 1):
name = names.get(i, main_match.name)
if self.repeated_captures:
for start, end in match_object.spans(i):
child_match = Match(start, end, name=name, parent=main_match, pattern=self,
input_string=input_string, **self._children_match_kwargs)
main_match.children.append(child_match)
else:
start, end = match_object.span(i)
if start > -1 and end > -1:
child_match = Match(start, end, name=name, parent=main_match, pattern=self,
input_string=input_string, **self._children_match_kwargs)
main_match.children.append(child_match)
yield main_match
class FunctionalPattern(Pattern):
"""
Definition of one or many functional pattern to search for.
"""
def __init__(self, *patterns, **kwargs):
super(FunctionalPattern, self).__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@property
def patterns(self):
return self._patterns
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
ret = call(pattern, input_string, context, **self._kwargs)
if ret:
if not is_iterable(ret) or isinstance(ret, dict) \
or (is_iterable(ret) and hasattr(ret, '__getitem__') and isinstance(ret[0], int)):
args_iterable = [ret]
else:
args_iterable = ret
for args in args_iterable:
if isinstance(args, dict):
options = args
options.pop('input_string', None)
options.pop('pattern', None)
if self._match_kwargs:
options = self._match_kwargs.copy()
options.update(args)
yield Match(pattern=self, input_string=input_string, **options)
else:
kwargs = self._match_kwargs
if isinstance(args[-1], dict):
kwargs = dict(kwargs)
kwargs.update(args[-1])
args = args[:-1]
yield Match(*args, pattern=self, input_string=input_string, **kwargs)
def filter_match_kwargs(kwargs, children=False):
"""
Filters out kwargs for Match construction
:param kwargs:
:type kwargs: dict
:param children:
:type children: Flag to filter children matches
:return: A filtered dict
:rtype: dict
"""
kwargs = kwargs.copy()
for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'):
if key in kwargs:
del kwargs[key]
if children:
for key in ('name',):
if key in kwargs:
del kwargs[key]
return kwargs
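# Minimal usage sketch (added here for illustration; not part of the original
# module). Assuming the rebulk package is importable, the concrete pattern
# classes above are queried through Pattern.matches(), e.g.:
#
#     from rebulk.pattern import StringPattern, RePattern
#
#     for match in StringPattern("magic", name="word").matches("black magic"):
#         print(match.start, match.end, match.name)   # -> 6 11 word
#
#     for match in RePattern(r"\d{4}", name="year").matches("made in 1984"):
#         print(match.value)                          # -> 1984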
 | 37.561224 | 118 | 0.59348 |
0d5a7d77aa03075cb7bc56ede1a8a2bfa30b7c64 | 1,259 | py | Python | python/test/mocks.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | ["CC-BY-3.0"] | null | null | null | python/test/mocks.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | ["CC-BY-3.0"] | null | null | null | python/test/mocks.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | ["CC-BY-3.0"] | null | null | null |
# Fake Vector 3
from typing import Iterator, Any
class Comparable:
def __eq__(self, other):
        return self.d == other.d
def __str__(self):
if isinstance(self.d, tuple):
return str(self.d)
elif isinstance(self.d, list):
return str([str(item) for item in self.d])
elif isinstance(self.d, dict):
return str({k: str(v) for k, v in self.d.items()})
class Vector3(Comparable):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
# for testing input data
self.d = (x, y, z)
def __iter__(self) -> Iterator[Any]:
return self.d.__iter__()
class Array(Comparable, list):
def __init__(self, arr):
self.d = arr
def __iter__(self) -> Iterator[Any]:
return self.d.__iter__()
class Dictionary(Comparable, dict):
def __init__(self, d):
self.d = d
def items(self):
return self.d.items()
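# Quick sanity example (added for illustration; not part of the original file):
# these mocks compare by their wrapped data, so Vector3(1, 2, 3) == Vector3(1, 2, 3)
# is True and str(Vector3(1, 2, 3)) yields "(1, 2, 3)".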
class PyBridgeNode:
EVENT_PLAYER_MOVE_SUCCESS = "player_move_success"
def __init__(self):
pass
def receive(self, action_name, *args):
pass
def broadcast(self, signal_name, *args):
        pass
 | 20.639344 | 62 | 0.570294 |
c93ee75f3e07ed6bc2aa8d8e2e9d8c15c9c1a478 | 58,776 | py | Python | test/data.py | kaushikacharya/PyStanfordDependencies | 43d8f38a19e40087f273330087918c87df6d4d8f | ["Apache-2.0"] | 69 | 2015-01-04T02:15:10.000Z | 2021-09-04T04:16:55.000Z | test/data.py | kaushikacharya/PyStanfordDependencies | 43d8f38a19e40087f273330087918c87df6d4d8f | ["Apache-2.0"] | 27 | 2015-01-08T03:38:18.000Z | 2020-12-21T13:57:24.000Z | test/data.py | kaushikacharya/PyStanfordDependencies | 43d8f38a19e40087f273330087918c87df6d4d8f | ["Apache-2.0"] | 19 | 2015-07-05T11:12:20.000Z | 2020-07-11T16:54:20.000Z |
# this file contains all the string data (inputs and outputs) for tests
# the SD trees were originally produced on SD 3.4.1 but they work up
# to (at least) SD 3.5.2. the UD trees were produced using UD 3.5.2.
# tests now require SD/UD 3.5.2 (and thus Java 1.8). downside of this
# is that we can't test JPype on older versions of SD since it can only
# be (safely) initialized once.
class trees_sd:
tree1 = '(S1 (NP (DT a) (NN cow)))'
tree1_out = '''
Token(index=1, form='a', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='cow', cpos='NN', pos='NN', head=0, deprel='root')
'''.strip()
tree2 = '(S1 (NP (NP (NP (DT A) (NN cat)) (CC and) (NP (DT a) ' \
'(NN mouse))) (. .)))'
tree2_out_basic = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='cat', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='a', cpos='DT', pos='DT', head=5, deprel='det')
Token(index=5, form='mouse', cpos='NN', pos='NN', head=2, deprel='conj')
Token(index=6, form='.', cpos='.', pos='.', head=2, deprel='punct')'''.strip()
tree2_out_collapsed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='cat', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=4, form='a', cpos='DT', pos='DT', head=5, deprel='det')
Token(index=5, form='mouse', cpos='NN', pos='NN', head=2, deprel='conj_and')
Token(index=6, form='.', cpos='.', pos='.', head=2, deprel='punct')'''.strip()
tree2_out_CCprocessed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='cat', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=4, form='a', cpos='DT', pos='DT', head=5, deprel='det')
Token(index=5, form='mouse', cpos='NN', pos='NN', head=2, deprel='conj_and')
Token(index=6, form='.', cpos='.', pos='.', head=2, deprel='punct')'''.strip()
tree2_out_collapsedTree = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='cat', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=4, form='a', cpos='DT', pos='DT', head=5, deprel='det')
Token(index=5, form='mouse', cpos='NN', pos='NN', head=2, deprel='conj_and')
Token(index=6, form='.', cpos='.', pos='.', head=2, deprel='punct')'''.strip()
tree3 = '(S1 (NP (DT some) (JJ blue) (NN moose)))'
tree3_out = '''
Token(index=1, form='some', cpos='DT', pos='DT', head=3, deprel='det')
Token(index=2, form='blue', cpos='JJ', pos='JJ', head=3, deprel='amod')
Token(index=3, form='moose', cpos='NN', pos='NN', head=0, deprel='root')
'''.strip()
tree4 = '(S1 (NP (NP (DT A) (NN burrito)) (PP (IN with) (NP (NP ' + \
'(NNS beans)) (CONJP (CC but) (RB not)) (NP (NN chicken)))) (. .)))'
tree4_out_basic = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=3, form='with', cpos='IN', pos='IN', head=2, deprel='prep')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=3, deprel='pobj')
Token(index=5, form='but', cpos='CC', pos='CC', head=6, deprel='cc')
Token(index=6, form='not', cpos='RB', pos='RB', head=4, deprel='cc')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree4_out_collapsed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=2, deprel='prep_with')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj_negcc')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree4_out_CCprocessed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=2, deprel='prep_with')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=2, deprel='prep_with')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj_negcc')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree4_out_collapsedTree = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=2, deprel='prep_with')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj_negcc')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5 = '''
(S1 (S (NP (NNP Ed))
(VP (VBZ cooks)
(CC and)
(VBZ sells)
(NP (NP (NNS burritos))
(PP (IN with)
(NP (NNS beans) (CONJP (CC but) (RB not)) (NN rice)))))
(. .)))
'''.strip()
tree5_out_basic = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=5, deprel='prep')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=6, deprel='pobj')
Token(index=8, form='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_collapsed = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj_and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='prep_with')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj_negcc')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_CCprocessed = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=4, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj_and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='prep_with')
Token(index=10, form='rice', cpos='NN', pos='NN', head=5, deprel='prep_with')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj_negcc')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_collapsedTree = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj_and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='prep_with')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj_negcc')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_collapsedTree_no_punct = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj_and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='prep_with')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj_negcc')
'''.strip()
tree5_out_collapsedTree_erased = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=0, deprel='erased')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj_and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=0, deprel='erased')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='prep_with')
Token(index=8, form='but', cpos='CC', pos='CC', head=0, deprel='erased')
Token(index=9, form='not', cpos='RB', pos='RB', head=0, deprel='erased')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj_negcc')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_collapsedTree_erased_no_punct = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=0, deprel='erased')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj_and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=0, deprel='erased')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='prep_with')
Token(index=8, form='but', cpos='CC', pos='CC', head=0, deprel='erased')
Token(index=9, form='not', cpos='RB', pos='RB', head=0, deprel='erased')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj_negcc')
'''.strip()
tree5_out_basic_lemmas = '''
Token(index=1, form='Ed', lemma='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', lemma='cook', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', lemma='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', lemma='sell', cpos='VBZ', pos='VBZ', head=2, deprel='conj')
Token(index=5, form='burritos', lemma='burrito', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', lemma='with', cpos='IN', pos='IN', head=5, deprel='prep')
Token(index=7, form='beans', lemma='bean', cpos='NNS', pos='NNS', head=6, deprel='pobj')
Token(index=8, form='but', lemma='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', lemma='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', lemma='rice', cpos='NN', pos='NN', head=7, deprel='conj')
Token(index=11, form='.', lemma='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
# tests -NONE- handling
tree6 = '''
( (S
(S-TPC-1
(NP-SBJ (PRP He) )
(ADVP (RB also) )
(VP (VBZ is)
(NP-PRD (DT a) (NN consensus) (NN manager) )))
(, ,)
(NP-SBJ (NNS insiders) )
(VP (VBP say)
(SBAR (-NONE- 0)
(S (-NONE- *T*-1) )))
(. .) ))
'''
tree6_out = '''
Token(index=1, form='He', cpos='PRP', pos='PRP', head=6, deprel='nsubj')
Token(index=2, form='also', cpos='RB', pos='RB', head=6, deprel='advmod')
Token(index=3, form='is', cpos='VBZ', pos='VBZ', head=6, deprel='cop')
Token(index=4, form='a', cpos='DT', pos='DT', head=6, deprel='det')
Token(index=5, form='consensus', cpos='NN', pos='NN', head=6, deprel='nn')
Token(index=6, form='manager', cpos='NN', pos='NN', head=9, deprel='ccomp')
Token(index=7, form=',', cpos=',', pos=',', head=9, deprel='punct')
Token(index=8, form='insiders', cpos='NNS', pos='NNS', head=9, deprel='nsubj')
Token(index=9, form='say', cpos='VBP', pos='VBP', head=0, deprel='root')
Token(index=10, form='.', cpos='.', pos='.', head=9, deprel='punct')
'''.strip()
# tests weird \/ handling
tree7 = '''(S1 (NP
(NP (NNP PRIME) (NNP RATE) )
(: :)
(NP (CD 10) (CD 1\/2) (NN %) )
(. .) ))'''
tree7_out = '''
Token(index=1, form='PRIME', cpos='NNP', pos='NNP', head=2, deprel='nn')
Token(index=2, form='RATE', cpos='NNP', pos='NNP', head=0, deprel='root')
Token(index=3, form=':', cpos=':', pos=':', head=2, deprel='punct')
Token(index=4, form='10', cpos='CD', pos='CD', head=6, deprel='num')
Token(index=5, form='1/2', cpos='CD', pos='CD', head=6, deprel='num')
Token(index=6, form='%', cpos='NN', pos='NN', head=2, deprel='dep')
Token(index=7, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree8 = '''
(ROOT (S (NP (NNS Visitors)) (VP (MD can) (VP (VB reach) (NP (PRP it)) (ADVP (RB only)) (PP (PP (IN under) (NP (JJ strict) (JJ military) (NN escort))) (CC and) (PP (IN with) (NP (NP (JJ prior) (NN permission)) (PP (IN from) (NP (DT the) (NNP Pentagon)))))) (, ,) (PP (IN aboard) (NP (NP (JJ special) (JJ small) (NN shuttle) (NNS flights)) (SBAR (WHNP (WDT that)) (S (VP (VBP reach) (NP (DT the) (NN base)) (PP (IN by) (NP (NP (DT a) (JJ circuitous) (NN flight)) (PP (IN from) (NP (DT the) (NNP United) (NNPS States)))))))))))) (. .)))
'''
tree8_out = '''
Token(index=1, form='Visitors', cpos='NNS', pos='NNS', head=3, deprel='nsubj')
Token(index=2, form='can', cpos='MD', pos='MD', head=3, deprel='aux')
Token(index=3, form='reach', cpos='VB', pos='VB', head=0, deprel='root')
Token(index=4, form='it', cpos='PRP', pos='PRP', head=3, deprel='dobj')
Token(index=5, form='only', cpos='RB', pos='RB', head=3, deprel='advmod')
Token(index=6, form='under', cpos='IN', pos='IN', head=3, deprel='prep')
Token(index=7, form='strict', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=8, form='military', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=9, form='escort', cpos='NN', pos='NN', head=6, deprel='pobj')
Token(index=10, form='and', cpos='CC', pos='CC', head=6, deprel='cc')
Token(index=11, form='with', cpos='IN', pos='IN', head=6, deprel='conj')
Token(index=12, form='prior', cpos='JJ', pos='JJ', head=13, deprel='amod')
Token(index=13, form='permission', cpos='NN', pos='NN', head=11, deprel='pobj')
Token(index=14, form='from', cpos='IN', pos='IN', head=13, deprel='prep')
Token(index=15, form='the', cpos='DT', pos='DT', head=16, deprel='det')
Token(index=16, form='Pentagon', cpos='NNP', pos='NNP', head=14, deprel='pobj')
Token(index=17, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=18, form='aboard', cpos='IN', pos='IN', head=3, deprel='prep')
Token(index=19, form='special', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=20, form='small', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=21, form='shuttle', cpos='NN', pos='NN', head=22, deprel='nn')
Token(index=22, form='flights', cpos='NNS', pos='NNS', head=18, deprel='pobj')
Token(index=23, form='that', cpos='WDT', pos='WDT', head=24, deprel='nsubj')
Token(index=24, form='reach', cpos='VBP', pos='VBP', head=22, deprel='rcmod')
Token(index=25, form='the', cpos='DT', pos='DT', head=26, deprel='det')
Token(index=26, form='base', cpos='NN', pos='NN', head=24, deprel='dobj')
Token(index=27, form='by', cpos='IN', pos='IN', head=24, deprel='prep')
Token(index=28, form='a', cpos='DT', pos='DT', head=30, deprel='det')
Token(index=29, form='circuitous', cpos='JJ', pos='JJ', head=30, deprel='amod')
Token(index=30, form='flight', cpos='NN', pos='NN', head=27, deprel='pobj')
Token(index=31, form='from', cpos='IN', pos='IN', head=30, deprel='prep')
Token(index=32, form='the', cpos='DT', pos='DT', head=34, deprel='det')
Token(index=33, form='United', cpos='NNP', pos='NNP', head=34, deprel='nn')
Token(index=34, form='States', cpos='NNPS', pos='NNPS', head=31, deprel='pobj')
Token(index=35, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
tree8_out_collapsed = '''
Token(index=1, form='Visitors', cpos='NNS', pos='NNS', head=3, deprel='nsubj')
Token(index=2, form='can', cpos='MD', pos='MD', head=3, deprel='aux')
Token(index=3, form='reach', cpos='VB', pos='VB', head=0, deprel='root')
Token(index=3, form='reach', cpos='VB', pos='VB', head=3, deprel='conj_and', extra={'dep_is_copy': 1})
Token(index=4, form='it', cpos='PRP', pos='PRP', head=3, deprel='dobj')
Token(index=5, form='only', cpos='RB', pos='RB', head=3, deprel='advmod')
Token(index=7, form='strict', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=8, form='military', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=9, form='escort', cpos='NN', pos='NN', head=3, deprel='prep_under')
Token(index=12, form='prior', cpos='JJ', pos='JJ', head=13, deprel='amod')
Token(index=13, form='permission', cpos='NN', pos='NN', head=3, deprel='prep_with', extra={'gov_is_copy': 1})
Token(index=15, form='the', cpos='DT', pos='DT', head=16, deprel='det')
Token(index=16, form='Pentagon', cpos='NNP', pos='NNP', head=13, deprel='prep_from')
Token(index=17, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=19, form='special', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=20, form='small', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=21, form='shuttle', cpos='NN', pos='NN', head=22, deprel='nn')
Token(index=22, form='flights', cpos='NNS', pos='NNS', head=3, deprel='prep_aboard')
Token(index=22, form='flights', cpos='NNS', pos='NNS', head=24, deprel='nsubj')
Token(index=24, form='reach', cpos='VBP', pos='VBP', head=22, deprel='rcmod')
Token(index=25, form='the', cpos='DT', pos='DT', head=26, deprel='det')
Token(index=26, form='base', cpos='NN', pos='NN', head=24, deprel='dobj')
Token(index=28, form='a', cpos='DT', pos='DT', head=30, deprel='det')
Token(index=29, form='circuitous', cpos='JJ', pos='JJ', head=30, deprel='amod')
Token(index=30, form='flight', cpos='NN', pos='NN', head=24, deprel='prep_by')
Token(index=32, form='the', cpos='DT', pos='DT', head=34, deprel='det')
Token(index=33, form='United', cpos='NNP', pos='NNP', head=34, deprel='nn')
Token(index=34, form='States', cpos='NNPS', pos='NNPS', head=30, deprel='prep_from')
Token(index=35, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
tree9 = '''(ROOT (S (NP (NP (DT A) (NN total)) (PP (IN of) (NP (NP
(QP (CD 17) (CD million)) (JJ metric) (NNS tons)) (PP (IN of) (NP
(NNS potatoes)))))) (VP (VBD was) (VP (VBN produced) (, ,) (SBAR (WHNP
(WDT which)) (S (VP (VBD was) (ADJP (NP (CD 14) (NN %)) (JJR less)
(PP (PP (IN than) (NP (NP (NP (JJ last) (NN year)) (PRN (-LRB- -LRB-)
(NP (NP (CD 106) (NNS quintals)) (PP (IN per) (NP (NN hectare))))
(-RRB- -RRB-))) (, ,) (CC and) (NP (NP (QP (CD 5.4) (CD million))
(JJ metric) (NNS tons)) (PP (IN of) (NP (NNS vegetables)))))) (, ,)
(CC or) (ADVP (NP (CD 2.2) (NN %)) (RBR more)) (PP (IN than) (PP (IN
on) (NP (DT the) (JJ same) (NN date)) (NP (JJ last) (NN year))))))
(PRN (-LRB- -LRB-) (NP (NP (JJ 116) (NNS quintals)) (PP (IN per)
(NP (NN hectare)))) (-RRB- -RRB-))))))) (. .)))'''
tree9_out = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='total', cpos='NN', pos='NN', head=11, deprel='nsubjpass')
Token(index=3, form='of', cpos='IN', pos='IN', head=2, deprel='prep')
Token(index=4, form='17', cpos='CD', pos='CD', head=5, deprel='number')
Token(index=5, form='million', cpos='CD', pos='CD', head=7, deprel='num')
Token(index=6, form='metric', cpos='JJ', pos='JJ', head=7, deprel='amod')
Token(index=7, form='tons', cpos='NNS', pos='NNS', head=3, deprel='pobj')
Token(index=8, form='of', cpos='IN', pos='IN', head=7, deprel='prep')
Token(index=9, form='potatoes', cpos='NNS', pos='NNS', head=8, deprel='pobj')
Token(index=10, form='was', cpos='VBD', pos='VBD', head=11, deprel='auxpass')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=0, deprel='root')
Token(index=12, form=',', cpos=',', pos=',', head=11, deprel='punct')
Token(index=13, form='which', cpos='WDT', pos='WDT', head=17, deprel='nsubj')
Token(index=14, form='was', cpos='VBD', pos='VBD', head=17, deprel='cop')
Token(index=15, form='14', cpos='CD', pos='CD', head=16, deprel='num')
Token(index=16, form='%', cpos='NN', pos='NN', head=17, deprel='npadvmod')
Token(index=17, form='less', cpos='JJR', pos='JJR', head=11, deprel='ccomp')
Token(index=18, form='than', cpos='IN', pos='IN', head=17, deprel='prep')
Token(index=19, form='last', cpos='JJ', pos='JJ', head=20, deprel='amod')
Token(index=20, form='year', cpos='NN', pos='NN', head=18, deprel='pobj')
Token(index=21, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=23, deprel='punct')
Token(index=22, form='106', cpos='CD', pos='CD', head=23, deprel='num')
Token(index=23, form='quintals', cpos='NNS', pos='NNS', head=20, deprel='dep')
Token(index=24, form='per', cpos='IN', pos='IN', head=23, deprel='prep')
Token(index=25, form='hectare', cpos='NN', pos='NN', head=24, deprel='pobj')
Token(index=26, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=23, deprel='punct')
Token(index=27, form=',', cpos=',', pos=',', head=20, deprel='punct')
Token(index=28, form='and', cpos='CC', pos='CC', head=20, deprel='cc')
Token(index=29, form='5.4', cpos='CD', pos='CD', head=30, deprel='number')
Token(index=30, form='million', cpos='CD', pos='CD', head=32, deprel='num')
Token(index=31, form='metric', cpos='JJ', pos='JJ', head=32, deprel='amod')
Token(index=32, form='tons', cpos='NNS', pos='NNS', head=20, deprel='conj')
Token(index=33, form='of', cpos='IN', pos='IN', head=32, deprel='prep')
Token(index=34, form='vegetables', cpos='NNS', pos='NNS', head=33, deprel='pobj')
Token(index=35, form=',', cpos=',', pos=',', head=18, deprel='punct')
Token(index=36, form='or', cpos='CC', pos='CC', head=18, deprel='cc')
Token(index=37, form='2.2', cpos='CD', pos='CD', head=38, deprel='num')
Token(index=38, form='%', cpos='NN', pos='NN', head=39, deprel='npadvmod')
Token(index=39, form='more', cpos='RBR', pos='RBR', head=18, deprel='conj')
Token(index=40, form='than', cpos='IN', pos='IN', head=18, deprel='conj')
Token(index=41, form='on', cpos='IN', pos='IN', head=40, deprel='pcomp')
Token(index=42, form='the', cpos='DT', pos='DT', head=44, deprel='det')
Token(index=43, form='same', cpos='JJ', pos='JJ', head=44, deprel='amod')
Token(index=44, form='date', cpos='NN', pos='NN', head=41, deprel='pobj')
Token(index=45, form='last', cpos='JJ', pos='JJ', head=46, deprel='amod')
Token(index=46, form='year', cpos='NN', pos='NN', head=41, deprel='tmod')
Token(index=47, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=49, deprel='punct')
Token(index=48, form='116', cpos='JJ', pos='JJ', head=49, deprel='amod')
Token(index=49, form='quintals', cpos='NNS', pos='NNS', head=17, deprel='dep')
Token(index=50, form='per', cpos='IN', pos='IN', head=49, deprel='prep')
Token(index=51, form='hectare', cpos='NN', pos='NN', head=50, deprel='pobj')
Token(index=52, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=49, deprel='punct')
Token(index=53, form='.', cpos='.', pos='.', head=11, deprel='punct')
'''.strip()
tree9_out_collapsed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='total', cpos='NN', pos='NN', head=11, deprel='nsubjpass')
Token(index=4, form='17', cpos='CD', pos='CD', head=5, deprel='number')
Token(index=5, form='million', cpos='CD', pos='CD', head=7, deprel='num')
Token(index=6, form='metric', cpos='JJ', pos='JJ', head=7, deprel='amod')
Token(index=7, form='tons', cpos='NNS', pos='NNS', head=2, deprel='prep_of')
Token(index=9, form='potatoes', cpos='NNS', pos='NNS', head=7, deprel='prep_of')
Token(index=10, form='was', cpos='VBD', pos='VBD', head=11, deprel='auxpass')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=0, deprel='root')
Token(index=12, form=',', cpos=',', pos=',', head=11, deprel='punct')
Token(index=13, form='which', cpos='WDT', pos='WDT', head=17, deprel='nsubj')
Token(index=14, form='was', cpos='VBD', pos='VBD', head=17, deprel='cop')
Token(index=15, form='14', cpos='CD', pos='CD', head=16, deprel='num')
Token(index=16, form='%', cpos='NN', pos='NN', head=17, deprel='npadvmod')
Token(index=17, form='less', cpos='JJR', pos='JJR', head=11, deprel='ccomp')
Token(index=19, form='last', cpos='JJ', pos='JJ', head=20, deprel='amod')
Token(index=20, form='year', cpos='NN', pos='NN', head=17, deprel='prep_than')
Token(index=21, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=23, deprel='punct')
Token(index=22, form='106', cpos='CD', pos='CD', head=23, deprel='num')
Token(index=23, form='quintals', cpos='NNS', pos='NNS', head=20, deprel='dep')
Token(index=25, form='hectare', cpos='NN', pos='NN', head=23, deprel='prep_per')
Token(index=26, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=23, deprel='punct')
Token(index=27, form=',', cpos=',', pos=',', head=20, deprel='punct')
Token(index=29, form='5.4', cpos='CD', pos='CD', head=30, deprel='number')
Token(index=30, form='million', cpos='CD', pos='CD', head=32, deprel='num')
Token(index=31, form='metric', cpos='JJ', pos='JJ', head=32, deprel='amod')
Token(index=32, form='tons', cpos='NNS', pos='NNS', head=20, deprel='conj_and')
Token(index=34, form='vegetables', cpos='NNS', pos='NNS', head=32, deprel='prep_of')
Token(index=35, form=',', cpos=',', pos=',', head=17, deprel='punct')
Token(index=37, form='2.2', cpos='CD', pos='CD', head=38, deprel='num')
Token(index=38, form='%', cpos='NN', pos='NN', head=39, deprel='npadvmod')
Token(index=39, form='more', cpos='RBR', pos='RBR', head=17, deprel='conj')
Token(index=41, form='on', cpos='IN', pos='IN', head=17, deprel='pcomp')
Token(index=42, form='the', cpos='DT', pos='DT', head=44, deprel='det')
Token(index=43, form='same', cpos='JJ', pos='JJ', head=44, deprel='amod')
Token(index=44, form='date', cpos='NN', pos='NN', head=41, deprel='pobj')
Token(index=45, form='last', cpos='JJ', pos='JJ', head=46, deprel='amod')
Token(index=46, form='year', cpos='NN', pos='NN', head=41, deprel='tmod')
Token(index=47, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=49, deprel='punct')
Token(index=48, form='116', cpos='JJ', pos='JJ', head=49, deprel='amod')
Token(index=49, form='quintals', cpos='NNS', pos='NNS', head=17, deprel='dep')
Token(index=51, form='hectare', cpos='NN', pos='NN', head=49, deprel='prep_per')
Token(index=52, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=49, deprel='punct')
Token(index=53, form='.', cpos='.', pos='.', head=11, deprel='punct')
'''.strip()
tree10 = r'''
(ROOT (NP (NP (NNP Hanoi) (, ,) (NNP May) (CD 13)) (PRN (-LRB- -LRB-) (NP (NNP VNA)) (-RRB- -RRB-)) (: --) (NP (NP (NNP Vietnam)) (SBAR (S (VP (VBZ has) (VP (VBN produced) (NP (NP (DT a) (NN variety)) (PP (IN of) (NP (NNS drugs)))) (S (VP (TO to) (VP (VB control) (NP (NNS HIV\/AIDS)) (PP (IN in) (NP (NP (NNS patients)) (VP (VBG suffering) (PP (IN with) (NP (DT the) (NN disease)))))))))))))) (. .)))
'''.strip()
tree10_out = '''
Token(index=1, form='Hanoi', cpos='NNP', pos='NNP', head=3, deprel='nn')
Token(index=2, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=3, form='May', cpos='NNP', pos='NNP', head=0, deprel='root')
Token(index=4, form='13', cpos='CD', pos='CD', head=3, deprel='num')
Token(index=5, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=6, deprel='punct')
Token(index=6, form='VNA', cpos='NNP', pos='NNP', head=3, deprel='appos')
Token(index=7, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=6, deprel='punct')
Token(index=8, form='--', cpos=':', pos=':', head=3, deprel='punct')
Token(index=9, form='Vietnam', cpos='NNP', pos='NNP', head=3, deprel='dep')
Token(index=10, form='has', cpos='VBZ', pos='VBZ', head=11, deprel='aux')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=9, deprel='rcmod')
Token(index=12, form='a', cpos='DT', pos='DT', head=13, deprel='det')
Token(index=13, form='variety', cpos='NN', pos='NN', head=11, deprel='dobj')
Token(index=14, form='of', cpos='IN', pos='IN', head=13, deprel='prep')
Token(index=15, form='drugs', cpos='NNS', pos='NNS', head=14, deprel='pobj')
Token(index=16, form='to', cpos='TO', pos='TO', head=17, deprel='aux')
Token(index=17, form='control', cpos='VB', pos='VB', head=11, deprel='vmod')
Token(index=18, form='HIV/AIDS', cpos='NNS', pos='NNS', head=17, deprel='dobj')
Token(index=19, form='in', cpos='IN', pos='IN', head=17, deprel='prep')
Token(index=20, form='patients', cpos='NNS', pos='NNS', head=19, deprel='pobj')
Token(index=21, form='suffering', cpos='VBG', pos='VBG', head=20, deprel='vmod')
Token(index=22, form='with', cpos='IN', pos='IN', head=21, deprel='prep')
Token(index=23, form='the', cpos='DT', pos='DT', head=24, deprel='det')
Token(index=24, form='disease', cpos='NN', pos='NN', head=22, deprel='pobj')
Token(index=25, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
tree10_out_collapsed = '''
Token(index=1, form='Hanoi', cpos='NNP', pos='NNP', head=3, deprel='nn')
Token(index=2, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=3, form='May', cpos='NNP', pos='NNP', head=0, deprel='root')
Token(index=4, form='13', cpos='CD', pos='CD', head=3, deprel='num')
Token(index=5, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=6, deprel='punct')
Token(index=6, form='VNA', cpos='NNP', pos='NNP', head=3, deprel='appos')
Token(index=7, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=6, deprel='punct')
Token(index=8, form='--', cpos=':', pos=':', head=3, deprel='punct')
Token(index=9, form='Vietnam', cpos='NNP', pos='NNP', head=3, deprel='dep')
Token(index=10, form='has', cpos='VBZ', pos='VBZ', head=11, deprel='aux')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=9, deprel='rcmod')
Token(index=12, form='a', cpos='DT', pos='DT', head=13, deprel='det')
Token(index=13, form='variety', cpos='NN', pos='NN', head=11, deprel='dobj')
Token(index=15, form='drugs', cpos='NNS', pos='NNS', head=13, deprel='prep_of')
Token(index=16, form='to', cpos='TO', pos='TO', head=17, deprel='aux')
Token(index=17, form='control', cpos='VB', pos='VB', head=11, deprel='vmod')
Token(index=18, form='HIV/AIDS', cpos='NNS', pos='NNS', head=17, deprel='dobj')
Token(index=20, form='patients', cpos='NNS', pos='NNS', head=17, deprel='prep_in')
Token(index=21, form='suffering', cpos='VBG', pos='VBG', head=20, deprel='vmod')
Token(index=23, form='the', cpos='DT', pos='DT', head=24, deprel='det')
Token(index=24, form='disease', cpos='NN', pos='NN', head=21, deprel='prep_with')
Token(index=25, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
@classmethod
def get_basic_test_trees(self):
return ((self.tree1, self.tree1_out),
(self.tree2, self.tree2_out_basic),
(self.tree3, self.tree3_out),
(self.tree4, self.tree4_out_basic),
(self.tree5, self.tree5_out_basic),
(self.tree6, self.tree6_out),
(self.tree7, self.tree7_out),
(self.tree8, self.tree8_out),
(self.tree9, self.tree9_out),
(self.tree10, self.tree10_out))
@classmethod
def get_repr_test_trees(self):
return ((self.tree2,
dict(basic=self.tree2_out_basic,
collapsed=self.tree2_out_collapsed,
CCprocessed=self.tree2_out_CCprocessed,
collapsedTree=self.tree2_out_collapsedTree)),
(self.tree4,
dict(basic=self.tree4_out_basic,
collapsed=self.tree4_out_collapsed,
CCprocessed=self.tree4_out_CCprocessed,
collapsedTree=self.tree4_out_collapsedTree)),
(self.tree5,
dict(basic=self.tree5_out_basic,
collapsed=self.tree5_out_collapsed,
CCprocessed=self.tree5_out_CCprocessed,
collapsedTree=self.tree5_out_collapsedTree)),
(self.tree8, dict(collapsed=self.tree8_out_collapsed)),
(self.tree9, dict(collapsed=self.tree9_out_collapsed)),
(self.tree10, dict(collapsed=self.tree10_out_collapsed)))
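    # Hedged sketch (added for illustration) of how these (tree, expected) pairs
    # are meant to be consumed; `sd` is a hypothetical converter built from the
    # StanfordDependencies package, which is not shown in this file:
    #
    #     for tree, expected in trees_sd.get_basic_test_trees():
    #         tokens = sd.convert_tree(tree)
    #         assert '\n'.join(repr(tok) for tok in tokens) == expected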
# UD trees are similar to SD trees, but some parts are overridden
class trees_ud(trees_sd):
tree2_out_collapsed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='cat', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='a', cpos='DT', pos='DT', head=5, deprel='det')
Token(index=5, form='mouse', cpos='NN', pos='NN', head=2, deprel='conj:and')
Token(index=6, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree2_out_collapsedTree = tree2_out_collapsed
tree2_out_CCprocessed = tree2_out_collapsed
tree4_out_basic = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=3, form='with', cpos='IN', pos='IN', head=4, deprel='case')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=2, deprel='nmod')
Token(index=5, form='but', cpos='CC', pos='CC', head=6, deprel='cc')
Token(index=6, form='not', cpos='RB', pos='RB', head=4, deprel='cc')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree4_out_collapsed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=3, form='with', cpos='IN', pos='IN', head=4, deprel='case')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=2, deprel='nmod:with')
Token(index=5, form='but', cpos='CC', pos='CC', head=6, deprel='cc')
Token(index=6, form='not', cpos='RB', pos='RB', head=4, deprel='cc')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj:negcc')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree4_out_CCprocessed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='burrito', cpos='NN', pos='NN', head=0, deprel='root')
Token(index=3, form='with', cpos='IN', pos='IN', head=4, deprel='case')
Token(index=4, form='beans', cpos='NNS', pos='NNS', head=2, deprel='nmod:with')
Token(index=5, form='but', cpos='CC', pos='CC', head=6, deprel='cc')
Token(index=6, form='not', cpos='RB', pos='RB', head=4, deprel='cc')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=2, deprel='nmod:with')
Token(index=7, form='chicken', cpos='NN', pos='NN', head=4, deprel='conj:negcc')
Token(index=8, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree4_out_collapsedTree = tree4_out_collapsed
tree5_out_basic = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='nmod')
Token(index=8, form='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_collapsed = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj:and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='nmod:with')
Token(index=8, form='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj:negcc')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_CCprocessed = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=4, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj:and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='nmod:with')
Token(index=8, form='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', cpos='NN', pos='NN', head=5, deprel='nmod:with')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj:negcc')
Token(index=11, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree5_out_collapsedTree = tree5_out_collapsed
tree5_out_collapsedTree_no_punct = '''
Token(index=1, form='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', cpos='VBZ', pos='VBZ', head=2, deprel='conj:and')
Token(index=5, form='burritos', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=7, form='beans', cpos='NNS', pos='NNS', head=5, deprel='nmod:with')
Token(index=8, form='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', cpos='NN', pos='NN', head=7, deprel='conj:negcc')
'''.strip()
# nothing gets erased in UD
tree5_out_collapsedTree_erased = tree5_out_collapsedTree
tree5_out_collapsedTree_erased_no_punct = tree5_out_collapsedTree_no_punct
tree5_out_basic_lemmas = '''
Token(index=1, form='Ed', lemma='Ed', cpos='NNP', pos='NNP', head=2, deprel='nsubj')
Token(index=2, form='cooks', lemma='cook', cpos='VBZ', pos='VBZ', head=0, deprel='root')
Token(index=3, form='and', lemma='and', cpos='CC', pos='CC', head=2, deprel='cc')
Token(index=4, form='sells', lemma='sell', cpos='VBZ', pos='VBZ', head=2, deprel='conj')
Token(index=5, form='burritos', lemma='burrito', cpos='NNS', pos='NNS', head=2, deprel='dobj')
Token(index=6, form='with', lemma='with', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=7, form='beans', lemma='bean', cpos='NNS', pos='NNS', head=5, deprel='nmod')
Token(index=8, form='but', lemma='but', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=9, form='not', lemma='not', cpos='RB', pos='RB', head=7, deprel='cc')
Token(index=10, form='rice', lemma='rice', cpos='NN', pos='NN', head=7, deprel='conj')
Token(index=11, form='.', lemma='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree6_out = '''
Token(index=1, form='He', cpos='PRP', pos='PRP', head=6, deprel='nsubj')
Token(index=2, form='also', cpos='RB', pos='RB', head=6, deprel='advmod')
Token(index=3, form='is', cpos='VBZ', pos='VBZ', head=6, deprel='cop')
Token(index=4, form='a', cpos='DT', pos='DT', head=6, deprel='det')
Token(index=5, form='consensus', cpos='NN', pos='NN', head=6, deprel='compound')
Token(index=6, form='manager', cpos='NN', pos='NN', head=9, deprel='ccomp')
Token(index=7, form=',', cpos=',', pos=',', head=9, deprel='punct')
Token(index=8, form='insiders', cpos='NNS', pos='NNS', head=9, deprel='nsubj')
Token(index=9, form='say', cpos='VBP', pos='VBP', head=0, deprel='root')
Token(index=10, form='.', cpos='.', pos='.', head=9, deprel='punct')
'''.strip()
tree7_out = '''
Token(index=1, form='PRIME', cpos='NNP', pos='NNP', head=2, deprel='compound')
Token(index=2, form='RATE', cpos='NNP', pos='NNP', head=0, deprel='root')
Token(index=3, form=':', cpos=':', pos=':', head=2, deprel='punct')
Token(index=4, form='10', cpos='CD', pos='CD', head=6, deprel='nummod')
Token(index=5, form='1/2', cpos='CD', pos='CD', head=6, deprel='nummod')
Token(index=6, form='%', cpos='NN', pos='NN', head=2, deprel='dep')
Token(index=7, form='.', cpos='.', pos='.', head=2, deprel='punct')
'''.strip()
tree8_out = '''
Token(index=1, form='Visitors', cpos='NNS', pos='NNS', head=3, deprel='nsubj')
Token(index=2, form='can', cpos='MD', pos='MD', head=3, deprel='aux')
Token(index=3, form='reach', cpos='VB', pos='VB', head=0, deprel='root')
Token(index=4, form='it', cpos='PRP', pos='PRP', head=3, deprel='dobj')
Token(index=5, form='only', cpos='RB', pos='RB', head=3, deprel='advmod')
Token(index=6, form='under', cpos='IN', pos='IN', head=9, deprel='case')
Token(index=7, form='strict', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=8, form='military', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=9, form='escort', cpos='NN', pos='NN', head=3, deprel='nmod')
Token(index=10, form='and', cpos='CC', pos='CC', head=9, deprel='cc')
Token(index=11, form='with', cpos='IN', pos='IN', head=13, deprel='case')
Token(index=12, form='prior', cpos='JJ', pos='JJ', head=13, deprel='amod')
Token(index=13, form='permission', cpos='NN', pos='NN', head=9, deprel='conj')
Token(index=14, form='from', cpos='IN', pos='IN', head=16, deprel='case')
Token(index=15, form='the', cpos='DT', pos='DT', head=16, deprel='det')
Token(index=16, form='Pentagon', cpos='NNP', pos='NNP', head=13, deprel='nmod')
Token(index=17, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=18, form='aboard', cpos='IN', pos='IN', head=22, deprel='case')
Token(index=19, form='special', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=20, form='small', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=21, form='shuttle', cpos='NN', pos='NN', head=22, deprel='compound')
Token(index=22, form='flights', cpos='NNS', pos='NNS', head=3, deprel='nmod')
Token(index=23, form='that', cpos='WDT', pos='WDT', head=24, deprel='nsubj')
Token(index=24, form='reach', cpos='VBP', pos='VBP', head=22, deprel='acl:relcl')
Token(index=25, form='the', cpos='DT', pos='DT', head=26, deprel='det')
Token(index=26, form='base', cpos='NN', pos='NN', head=24, deprel='dobj')
Token(index=27, form='by', cpos='IN', pos='IN', head=30, deprel='case')
Token(index=28, form='a', cpos='DT', pos='DT', head=30, deprel='det')
Token(index=29, form='circuitous', cpos='JJ', pos='JJ', head=30, deprel='amod')
Token(index=30, form='flight', cpos='NN', pos='NN', head=24, deprel='nmod')
Token(index=31, form='from', cpos='IN', pos='IN', head=34, deprel='case')
Token(index=32, form='the', cpos='DT', pos='DT', head=34, deprel='det')
Token(index=33, form='United', cpos='NNP', pos='NNP', head=34, deprel='compound')
Token(index=34, form='States', cpos='NNPS', pos='NNPS', head=30, deprel='nmod')
Token(index=35, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
tree8_out_collapsed = '''
Token(index=1, form='Visitors', cpos='NNS', pos='NNS', head=3, deprel='nsubj')
Token(index=2, form='can', cpos='MD', pos='MD', head=3, deprel='aux')
Token(index=3, form='reach', cpos='VB', pos='VB', head=0, deprel='root')
Token(index=3, form='reach', cpos='VB', pos='VB', head=3, deprel='conj:and', extra={'dep_is_copy': 1})
Token(index=4, form='it', cpos='PRP', pos='PRP', head=3, deprel='dobj')
Token(index=5, form='only', cpos='RB', pos='RB', head=3, deprel='advmod')
Token(index=6, form='under', cpos='IN', pos='IN', head=9, deprel='case')
Token(index=7, form='strict', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=8, form='military', cpos='JJ', pos='JJ', head=9, deprel='amod')
Token(index=9, form='escort', cpos='NN', pos='NN', head=3, deprel='nmod:under')
Token(index=10, form='and', cpos='CC', pos='CC', head=3, deprel='cc')
Token(index=11, form='with', cpos='IN', pos='IN', head=13, deprel='case')
Token(index=12, form='prior', cpos='JJ', pos='JJ', head=13, deprel='amod')
Token(index=13, form='permission', cpos='NN', pos='NN', head=3, deprel='nmod:with', extra={'gov_is_copy': 1})
Token(index=14, form='from', cpos='IN', pos='IN', head=16, deprel='case')
Token(index=15, form='the', cpos='DT', pos='DT', head=16, deprel='det')
Token(index=16, form='Pentagon', cpos='NNP', pos='NNP', head=13, deprel='nmod:from')
Token(index=17, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=18, form='aboard', cpos='IN', pos='IN', head=22, deprel='case')
Token(index=19, form='special', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=20, form='small', cpos='JJ', pos='JJ', head=22, deprel='amod')
Token(index=21, form='shuttle', cpos='NN', pos='NN', head=22, deprel='compound')
Token(index=22, form='flights', cpos='NNS', pos='NNS', head=3, deprel='nmod:aboard')
Token(index=22, form='flights', cpos='NNS', pos='NNS', head=24, deprel='nsubj')
Token(index=23, form='that', cpos='WDT', pos='WDT', head=22, deprel='ref')
Token(index=24, form='reach', cpos='VBP', pos='VBP', head=22, deprel='acl:relcl')
Token(index=25, form='the', cpos='DT', pos='DT', head=26, deprel='det')
Token(index=26, form='base', cpos='NN', pos='NN', head=24, deprel='dobj')
Token(index=27, form='by', cpos='IN', pos='IN', head=30, deprel='case')
Token(index=28, form='a', cpos='DT', pos='DT', head=30, deprel='det')
Token(index=29, form='circuitous', cpos='JJ', pos='JJ', head=30, deprel='amod')
Token(index=30, form='flight', cpos='NN', pos='NN', head=24, deprel='nmod:by')
Token(index=31, form='from', cpos='IN', pos='IN', head=34, deprel='case')
Token(index=32, form='the', cpos='DT', pos='DT', head=34, deprel='det')
Token(index=33, form='United', cpos='NNP', pos='NNP', head=34, deprel='compound')
Token(index=34, form='States', cpos='NNPS', pos='NNPS', head=30, deprel='nmod:from')
Token(index=35, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
tree9_out = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='total', cpos='NN', pos='NN', head=11, deprel='nsubjpass')
Token(index=3, form='of', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=4, form='17', cpos='CD', pos='CD', head=5, deprel='compound')
Token(index=5, form='million', cpos='CD', pos='CD', head=7, deprel='nummod')
Token(index=6, form='metric', cpos='JJ', pos='JJ', head=7, deprel='amod')
Token(index=7, form='tons', cpos='NNS', pos='NNS', head=2, deprel='nmod')
Token(index=8, form='of', cpos='IN', pos='IN', head=9, deprel='case')
Token(index=9, form='potatoes', cpos='NNS', pos='NNS', head=7, deprel='nmod')
Token(index=10, form='was', cpos='VBD', pos='VBD', head=11, deprel='auxpass')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=0, deprel='root')
Token(index=12, form=',', cpos=',', pos=',', head=11, deprel='punct')
Token(index=13, form='which', cpos='WDT', pos='WDT', head=17, deprel='nsubj')
Token(index=14, form='was', cpos='VBD', pos='VBD', head=17, deprel='cop')
Token(index=15, form='14', cpos='CD', pos='CD', head=16, deprel='nummod')
Token(index=16, form='%', cpos='NN', pos='NN', head=17, deprel='nmod:npmod')
Token(index=17, form='less', cpos='JJR', pos='JJR', head=11, deprel='ccomp')
Token(index=18, form='than', cpos='IN', pos='IN', head=20, deprel='case')
Token(index=19, form='last', cpos='JJ', pos='JJ', head=20, deprel='amod')
Token(index=20, form='year', cpos='NN', pos='NN', head=17, deprel='nmod')
Token(index=21, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=23, deprel='punct')
Token(index=22, form='106', cpos='CD', pos='CD', head=23, deprel='nummod')
Token(index=23, form='quintals', cpos='NNS', pos='NNS', head=20, deprel='dep')
Token(index=24, form='per', cpos='IN', pos='IN', head=25, deprel='case')
Token(index=25, form='hectare', cpos='NN', pos='NN', head=23, deprel='nmod')
Token(index=26, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=23, deprel='punct')
Token(index=27, form=',', cpos=',', pos=',', head=20, deprel='punct')
Token(index=28, form='and', cpos='CC', pos='CC', head=20, deprel='cc')
Token(index=29, form='5.4', cpos='CD', pos='CD', head=30, deprel='compound')
Token(index=30, form='million', cpos='CD', pos='CD', head=32, deprel='nummod')
Token(index=31, form='metric', cpos='JJ', pos='JJ', head=32, deprel='amod')
Token(index=32, form='tons', cpos='NNS', pos='NNS', head=20, deprel='conj')
Token(index=33, form='of', cpos='IN', pos='IN', head=34, deprel='case')
Token(index=34, form='vegetables', cpos='NNS', pos='NNS', head=32, deprel='nmod')
Token(index=35, form=',', cpos=',', pos=',', head=20, deprel='punct')
Token(index=36, form='or', cpos='CC', pos='CC', head=20, deprel='cc')
Token(index=37, form='2.2', cpos='CD', pos='CD', head=38, deprel='nummod')
Token(index=38, form='%', cpos='NN', pos='NN', head=39, deprel='nmod:npmod')
Token(index=39, form='more', cpos='RBR', pos='RBR', head=20, deprel='conj')
Token(index=40, form='than', cpos='IN', pos='IN', head=44, deprel='case')
Token(index=41, form='on', cpos='IN', pos='IN', head=44, deprel='case')
Token(index=42, form='the', cpos='DT', pos='DT', head=44, deprel='det')
Token(index=43, form='same', cpos='JJ', pos='JJ', head=44, deprel='amod')
Token(index=44, form='date', cpos='NN', pos='NN', head=20, deprel='conj')
Token(index=45, form='last', cpos='JJ', pos='JJ', head=46, deprel='amod')
Token(index=46, form='year', cpos='NN', pos='NN', head=44, deprel='nmod:tmod')
Token(index=47, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=49, deprel='punct')
Token(index=48, form='116', cpos='JJ', pos='JJ', head=49, deprel='amod')
Token(index=49, form='quintals', cpos='NNS', pos='NNS', head=17, deprel='dep')
Token(index=50, form='per', cpos='IN', pos='IN', head=51, deprel='case')
Token(index=51, form='hectare', cpos='NN', pos='NN', head=49, deprel='nmod')
Token(index=52, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=49, deprel='punct')
Token(index=53, form='.', cpos='.', pos='.', head=11, deprel='punct')
'''.strip()
tree9_out_collapsed = '''
Token(index=1, form='A', cpos='DT', pos='DT', head=2, deprel='det')
Token(index=2, form='total', cpos='NN', pos='NN', head=11, deprel='nsubjpass')
Token(index=3, form='of', cpos='IN', pos='IN', head=7, deprel='case')
Token(index=4, form='17', cpos='CD', pos='CD', head=5, deprel='compound')
Token(index=5, form='million', cpos='CD', pos='CD', head=7, deprel='nummod')
Token(index=6, form='metric', cpos='JJ', pos='JJ', head=7, deprel='amod')
Token(index=7, form='tons', cpos='NNS', pos='NNS', head=2, deprel='nmod:of')
Token(index=8, form='of', cpos='IN', pos='IN', head=9, deprel='case')
Token(index=9, form='potatoes', cpos='NNS', pos='NNS', head=7, deprel='nmod:of')
Token(index=10, form='was', cpos='VBD', pos='VBD', head=11, deprel='auxpass')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=0, deprel='root')
Token(index=12, form=',', cpos=',', pos=',', head=11, deprel='punct')
Token(index=13, form='which', cpos='WDT', pos='WDT', head=17, deprel='nsubj')
Token(index=14, form='was', cpos='VBD', pos='VBD', head=17, deprel='cop')
Token(index=15, form='14', cpos='CD', pos='CD', head=16, deprel='nummod')
Token(index=16, form='%', cpos='NN', pos='NN', head=17, deprel='nmod:npmod')
Token(index=17, form='less', cpos='JJR', pos='JJR', head=11, deprel='ccomp')
Token(index=17, form='less', cpos='JJR', pos='JJR', head=17, deprel='conj:and', extra={'dep_is_copy': 3})
Token(index=17, form='less', cpos='JJR', pos='JJR', head=17, deprel='conj:and', extra={'dep_is_copy': 4})
Token(index=17, form='less', cpos='JJR', pos='JJR', head=17, deprel='conj:or', extra={'dep_is_copy': 1})
Token(index=17, form='less', cpos='JJR', pos='JJR', head=17, deprel='conj:or', extra={'dep_is_copy': 2})
Token(index=18, form='than', cpos='IN', pos='IN', head=20, deprel='case')
Token(index=19, form='last', cpos='JJ', pos='JJ', head=20, deprel='amod')
Token(index=20, form='year', cpos='NN', pos='NN', head=17, deprel='nmod:than')
Token(index=21, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=23, deprel='punct')
Token(index=22, form='106', cpos='CD', pos='CD', head=23, deprel='nummod')
Token(index=23, form='quintals', cpos='NNS', pos='NNS', head=20, deprel='dep')
Token(index=24, form='per', cpos='IN', pos='IN', head=25, deprel='case')
Token(index=25, form='hectare', cpos='NN', pos='NN', head=23, deprel='nmod:per')
Token(index=26, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=23, deprel='punct')
Token(index=27, form=',', cpos=',', pos=',', head=20, deprel='punct')
Token(index=28, form='and', cpos='CC', pos='CC', head=17, deprel='cc')
Token(index=29, form='5.4', cpos='CD', pos='CD', head=30, deprel='compound')
Token(index=30, form='million', cpos='CD', pos='CD', head=32, deprel='nummod')
Token(index=31, form='metric', cpos='JJ', pos='JJ', head=32, deprel='amod')
Token(index=32, form='tons', cpos='NNS', pos='NNS', head=20, deprel='conj')
Token(index=33, form='of', cpos='IN', pos='IN', head=34, deprel='case')
Token(index=34, form='vegetables', cpos='NNS', pos='NNS', head=32, deprel='nmod:of')
Token(index=35, form=',', cpos=',', pos=',', head=20, deprel='punct')
Token(index=36, form='or', cpos='CC', pos='CC', head=17, deprel='cc')
Token(index=37, form='2.2', cpos='CD', pos='CD', head=38, deprel='nummod')
Token(index=38, form='%', cpos='NN', pos='NN', head=39, deprel='nmod:npmod')
Token(index=39, form='more', cpos='RBR', pos='RBR', head=20, deprel='conj')
Token(index=40, form='than', cpos='IN', pos='IN', head=44, deprel='case')
Token(index=41, form='on', cpos='IN', pos='IN', head=44, deprel='case')
Token(index=42, form='the', cpos='DT', pos='DT', head=44, deprel='det')
Token(index=43, form='same', cpos='JJ', pos='JJ', head=44, deprel='amod')
Token(index=44, form='date', cpos='NN', pos='NN', head=17, deprel='nmod:on', extra={'gov_is_copy': 1})
Token(index=45, form='last', cpos='JJ', pos='JJ', head=46, deprel='amod')
Token(index=46, form='year', cpos='NN', pos='NN', head=44, deprel='nmod:tmod')
Token(index=47, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=49, deprel='punct')
Token(index=48, form='116', cpos='JJ', pos='JJ', head=49, deprel='amod')
Token(index=49, form='quintals', cpos='NNS', pos='NNS', head=17, deprel='dep')
Token(index=50, form='per', cpos='IN', pos='IN', head=51, deprel='case')
Token(index=51, form='hectare', cpos='NN', pos='NN', head=49, deprel='nmod:per')
Token(index=52, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=49, deprel='punct')
Token(index=53, form='.', cpos='.', pos='.', head=11, deprel='punct')
'''.strip()
tree10_out = '''
Token(index=1, form='Hanoi', cpos='NNP', pos='NNP', head=3, deprel='compound')
Token(index=2, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=3, form='May', cpos='NNP', pos='NNP', head=0, deprel='root')
Token(index=4, form='13', cpos='CD', pos='CD', head=3, deprel='nummod')
Token(index=5, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=6, deprel='punct')
Token(index=6, form='VNA', cpos='NNP', pos='NNP', head=3, deprel='appos')
Token(index=7, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=6, deprel='punct')
Token(index=8, form='--', cpos=':', pos=':', head=3, deprel='punct')
Token(index=9, form='Vietnam', cpos='NNP', pos='NNP', head=3, deprel='dep')
Token(index=10, form='has', cpos='VBZ', pos='VBZ', head=11, deprel='aux')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=9, deprel='acl:relcl')
Token(index=12, form='a', cpos='DT', pos='DT', head=13, deprel='det')
Token(index=13, form='variety', cpos='NN', pos='NN', head=11, deprel='dobj')
Token(index=14, form='of', cpos='IN', pos='IN', head=15, deprel='case')
Token(index=15, form='drugs', cpos='NNS', pos='NNS', head=13, deprel='nmod')
Token(index=16, form='to', cpos='TO', pos='TO', head=17, deprel='mark')
Token(index=17, form='control', cpos='VB', pos='VB', head=11, deprel='advcl')
Token(index=18, form='HIV/AIDS', cpos='NNS', pos='NNS', head=17, deprel='dobj')
Token(index=19, form='in', cpos='IN', pos='IN', head=20, deprel='case')
Token(index=20, form='patients', cpos='NNS', pos='NNS', head=17, deprel='nmod')
Token(index=21, form='suffering', cpos='VBG', pos='VBG', head=20, deprel='acl')
Token(index=22, form='with', cpos='IN', pos='IN', head=24, deprel='case')
Token(index=23, form='the', cpos='DT', pos='DT', head=24, deprel='det')
Token(index=24, form='disease', cpos='NN', pos='NN', head=21, deprel='nmod')
Token(index=25, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
tree10_out_collapsed = '''
Token(index=1, form='Hanoi', cpos='NNP', pos='NNP', head=3, deprel='compound')
Token(index=2, form=',', cpos=',', pos=',', head=3, deprel='punct')
Token(index=3, form='May', cpos='NNP', pos='NNP', head=0, deprel='root')
Token(index=4, form='13', cpos='CD', pos='CD', head=3, deprel='nummod')
Token(index=5, form='-LRB-', cpos='-LRB-', pos='-LRB-', head=6, deprel='punct')
Token(index=6, form='VNA', cpos='NNP', pos='NNP', head=3, deprel='appos')
Token(index=7, form='-RRB-', cpos='-RRB-', pos='-RRB-', head=6, deprel='punct')
Token(index=8, form='--', cpos=':', pos=':', head=3, deprel='punct')
Token(index=9, form='Vietnam', cpos='NNP', pos='NNP', head=3, deprel='dep')
Token(index=10, form='has', cpos='VBZ', pos='VBZ', head=11, deprel='aux')
Token(index=11, form='produced', cpos='VBN', pos='VBN', head=9, deprel='acl:relcl')
Token(index=12, form='a', cpos='DT', pos='DT', head=13, deprel='det')
Token(index=13, form='variety', cpos='NN', pos='NN', head=11, deprel='dobj')
Token(index=14, form='of', cpos='IN', pos='IN', head=15, deprel='case')
Token(index=15, form='drugs', cpos='NNS', pos='NNS', head=13, deprel='nmod:of')
Token(index=16, form='to', cpos='TO', pos='TO', head=17, deprel='mark')
Token(index=17, form='control', cpos='VB', pos='VB', head=11, deprel='advcl')
Token(index=18, form='HIV/AIDS', cpos='NNS', pos='NNS', head=17, deprel='dobj')
Token(index=19, form='in', cpos='IN', pos='IN', head=20, deprel='case')
Token(index=20, form='patients', cpos='NNS', pos='NNS', head=17, deprel='nmod:in')
Token(index=21, form='suffering', cpos='VBG', pos='VBG', head=20, deprel='acl')
Token(index=22, form='with', cpos='IN', pos='IN', head=24, deprel='case')
Token(index=23, form='the', cpos='DT', pos='DT', head=24, deprel='det')
Token(index=24, form='disease', cpos='NN', pos='NN', head=21, deprel='nmod:with')
Token(index=25, form='.', cpos='.', pos='.', head=3, deprel='punct')
'''.strip()
| 65.96633 | 538 | 0.618535 |
e900d9eab39c289c4938a44d6d29db9918a2e2dc | 932 | py | Python | harmoni_actuators/harmoni_tts/test/test_tts_client.py | interaction-lab/HARMONI | 9c88019601a983a1739744919a95247a997d3bb1 | [
"MIT"
] | 7 | 2020-09-02T06:31:21.000Z | 2022-02-18T21:16:44.000Z | harmoni_actuators/harmoni_tts/test/test_tts_client.py | micolspitale93/HARMONI | cf6a13fb85e3efb4e421dbfd4555359c0a04acaa | [
"MIT"
] | 61 | 2020-05-15T16:46:32.000Z | 2021-07-28T17:44:49.000Z | harmoni_actuators/harmoni_tts/test/test_tts_client.py | micolspitale93/HARMONI | cf6a13fb85e3efb4e421dbfd4555359c0a04acaa | [
"MIT"
] | 3 | 2020-10-05T23:01:29.000Z | 2022-03-02T11:53:34.000Z | import os
import unittest
from harmoni_tts.local_tts_client import TtsClient
tts_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
content_dir = os.path.abspath(os.path.join(tts_dir, "../../../../model/tts"))
tts_config = os.path.join(content_dir, "config.json")
tts_model = os.path.join(content_dir, "tts_model.pth.tar")
vocoder_config = os.path.join(content_dir, "config_vocoder.json")
vocoder_model = os.path.join(content_dir, "vocoder_model.pth.tar")
scale_stats_path = os.path.join(tts_dir, "scale_stats.npy")
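# NOTE: the model and config paths above are resolved relative to the directory the tests are run from.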
class TestTtsClient(unittest.TestCase):
def setUp(self):
self.client = TtsClient(
tts_config,
tts_model,
vocoder_config,
vocoder_model,
scale_stats_path
)
def test_get_audio(self):
_, _, _, waveform = self.client.get_audio("Hello")
assert len(waveform) > 0
if __name__ == "__main__":
unittest.main()
| 28.242424 | 77 | 0.670601 |
016baf0c1f68a2dd93e3a844956608c34a57b0a0 | 996 | py | Python | problems/test_0883.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | 1 | 2017-06-17T23:47:17.000Z | 2017-06-17T23:47:17.000Z | problems/test_0883.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | problems/test_0883.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | import unittest
import utils
# O(n^2) time. O(n) space. Iteration.
class Solution:
def projectionArea(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
result = 0
max_by_row = [0] * len(grid)
max_by_col = [0] * len(grid)
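        # Top view adds 1 per non-zero cell; front/side views add the tallest stack in each row and column.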
for row, heights in enumerate(grid):
for col, height in enumerate(heights):
if height:
result += 1
max_by_row[row] = max(max_by_row[row], height)
max_by_col[col] = max(max_by_col[col], height)
return result + sum(max_by_row) + sum(max_by_col)
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().projectionArea(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| 26.210526 | 68 | 0.560241 |
e7e1536d78eeb94df95e5d18c0aa833362bcd371 | 2,755 | py | Python | nation_flags/scrape.py | masayang/nation_flags | 0abe72212d613071ebee4816a46913c0ac812b18 | [
"MIT"
] | null | null | null | nation_flags/scrape.py | masayang/nation_flags | 0abe72212d613071ebee4816a46913c0ac812b18 | [
"MIT"
] | 1 | 2021-03-31T18:36:08.000Z | 2021-03-31T18:36:08.000Z | nation_flags/scrape.py | masayang/nation_flags | 0abe72212d613071ebee4816a46913c0ac812b18 | [
"MIT"
] | null | null | null | import requests
from lxml import html
import json
import re
from settings import config
import urllib2
import time
def get_english_content():
return requests.get(config['ENGLISH_URL'])
def get_japanese_content():
return requests.get(config['JAPANESE_URL'])
def get_dom_tree(content):
return html.fromstring(content)
def get_english_dom_tree():
return get_dom_tree(get_english_content().content)
def get_japanese_dom_tree():
return get_dom_tree(get_japanese_content().content)
def get_nation_name(img_src):
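    # The alt text reads 'Flag of <country>'; slicing off the first 8 characters leaves the country name.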
return img_src.attrib['alt'][8:].strip()
def get_flag_url(img_src):
return img_src.attrib['src']
def get_flag(flag_url):
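    # Extract the 'Flag_of_<country>.svg' filename from the thumbnail URL and strip variant suffixes
    # so the same filename can be matched on the Japanese page.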
flag = re.search('/(?P<flag>Flag_of.*svg)/', flag_url).group('flag')
flag = flag.replace("_%28converted%29", "")
flag = flag.replace("_%28civil%29", "")
flag = flag.replace("_%28state%29", "")
flag = flag.replace("_%28Pantone%29", "")
if flag == 'Flag_of_Transnistria.svg':
flag = 'Flag_of_Transnistria_%28state%29.svg'
return flag
def get_nation_name_j(j_tree, flag):
img_src_j = j_tree.xpath('//img[contains(@src, "{}")]'.format(flag))
return img_src_j[0].attrib['alt'][:-2].encode('utf-8')
def get_wikipedia_url(nation_name_e):
nation_name_e = nation_name_e.replace("the ", "", 1).replace(" ", "_")
return "{}{}".format(config['WIKIPEDIA_BASE_URL'], nation_name_e)
def get_wikipedia_url_j(nation_name_j):
return "{}{}".format(config['WIKIPEDIA_BASE_URL_J'], urllib2.quote(nation_name_j))
def get_nations():
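    # Pair each flag on the English list page with its entry on the Japanese page via the shared SVG filename.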
e_tree = get_english_dom_tree()
j_tree = get_japanese_dom_tree()
nations = []
for img_src in e_tree.xpath('//*[@id="mw-content-text"]//td/a/img'):
nation_name = get_nation_name(img_src)
flag_url = get_flag_url(img_src)
flag = get_flag(flag_url)
nation_name_j = get_nation_name_j(j_tree, flag)
nations.append({
"nation_name_e": nation_name,
"png_url": "http:{}".format(flag_url),
"nation_name_j": nation_name_j,
"wikipedia_url_e": get_wikipedia_url(nation_name),
"wikipedia_url_j": get_wikipedia_url_j(nation_name_j)
})
return nations
if __name__ == '__main__':
for nation in get_nations():
try:
urllib2.urlopen(nation['png_url'])
except Exception, e:
print(str(e), nation['png_url'])
time.sleep(1)
try:
            urllib2.urlopen(nation['wikipedia_url_e'])
        except Exception, e:
            print(str(e), nation['wikipedia_url_e'])
time.sleep(1)
try:
urllib2.urlopen(nation['wikipedia_url_j'])
except Exception, e:
print(str(e), nation['wikipedia_url_j'])
time.sleep(1)
| 30.955056 | 86 | 0.654809 |
0a7d8689b3fe3052b87a00196daa457721118020 | 284 | py | Python | app/common/management/commands/app_list.py | MrPeker/acikkaynak-service | 21c3f2faaa84342d2fa95709293bc84d1e2a23ae | [
"Apache-2.0"
] | 5 | 2021-02-28T22:29:13.000Z | 2021-11-29T00:24:28.000Z | app/common/management/commands/app_list.py | MrPeker/acikkaynak-service | 21c3f2faaa84342d2fa95709293bc84d1e2a23ae | [
"Apache-2.0"
] | null | null | null | app/common/management/commands/app_list.py | MrPeker/acikkaynak-service | 21c3f2faaa84342d2fa95709293bc84d1e2a23ae | [
"Apache-2.0"
] | 3 | 2021-03-03T19:56:30.000Z | 2021-03-06T22:10:35.000Z | from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
for app_name in settings.INSTALLED_APPS:
if app_name.startswith("app."):
print(app_name[4:])
| 28.4 | 51 | 0.676056 |
53c39871b8218de218a2c259c538e9bafefd39c6 | 1,810 | py | Python | tests/common/test_run/sqrt_run.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | tests/common/test_run/sqrt_run.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/sqrt_run.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
sqrt run define
"""
import numpy as np
from akg.utils import kernel_exec as utils
from test_op import sqrt
from tensorio import compare_tensor
from gen_random import random_gaussian
def sqrt_run(shape, dtype, attrs):
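    # Compile the sqrt kernel; in tuning mode return the module (plus generated test data when requested),
    # otherwise launch it and compare the result against numpy.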
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(sqrt.sqrt, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, input, output = gen_data(dtype, shape)
return mod, expect, (input, output)
else:
return mod
else:
expect, input, output = gen_data(dtype, shape)
mod = utils.op_build_test(sqrt.sqrt, [shape], [dtype], kernel_name='sqrt', attrs=attrs)
output = utils.mod_launch(mod, (input, output), expect=expect)
return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
# Generate data for testing the op
input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
input = np.abs(input)
expect = np.sqrt(input)
output = np.full(expect.shape, np.nan, dtype)
return expect, input, output
| 36.2 | 110 | 0.697238 |
915c272fc68d10efe800032fef07bdb32cc38cb9 | 2,753 | py | Python | tests/fixtures/message.py | BenTopping/lighthouse | 649b442ca89f7deb7c01411faa883c1894b72986 | [
"MIT"
] | null | null | null | tests/fixtures/message.py | BenTopping/lighthouse | 649b442ca89f7deb7c01411faa883c1894b72986 | [
"MIT"
] | null | null | null | tests/fixtures/message.py | BenTopping/lighthouse | 649b442ca89f7deb7c01411faa883c1894b72986 | [
"MIT"
] | null | null | null | import pytest
from lighthouse.constants.events import PLATE_EVENT_SOURCE_ALL_NEGATIVES, PLATE_EVENT_SOURCE_COMPLETED
from lighthouse.messages.message import Message
@pytest.fixture
def message_unknown():
message_content = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": "no_callbacks",
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def message_source_complete():
message_content = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": PLATE_EVENT_SOURCE_COMPLETED,
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [
{
"role_type": "sample",
"subject_type": "sample",
"friendly_name": "friendly_name",
"uuid": "00000000-1111-2222-3333-555555555555",
},
{
"role_type": "cherrypicking_source_labware",
"subject_type": "plate",
"friendly_name": "plate-barcode",
"uuid": "00000000-1111-2222-3333-555555555556",
},
{
"role_type": "robot",
"subject_type": "robot",
"friendly_name": "robot-serial",
"uuid": "00000000-1111-2222-3333-555555555557",
},
],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def message_source_all_negative():
message_content = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": PLATE_EVENT_SOURCE_ALL_NEGATIVES,
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [
{
"role_type": "cherrypicking_source_labware",
"subject_type": "plate",
"friendly_name": "plate-barcode",
"uuid": "00000000-1111-2222-3333-555555555556",
},
{
"role_type": "robot",
"subject_type": "robot",
"friendly_name": "robot-serial",
"uuid": "00000000-1111-2222-3333-555555555557",
},
],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
| 32.388235 | 102 | 0.486378 |
b355429cfa6fdafd211c700ffc2d19197d8e71c2 | 1,982 | py | Python | tests/test_distance.py | pedrocamargo/fast-trips | a2549936b2707b00d6c21b4e6ae4be8fefd0aa46 | [
"Apache-2.0"
] | 3 | 2017-11-03T00:18:23.000Z | 2020-11-30T18:54:46.000Z | tests/test_distance.py | pedrocamargo/fast-trips | a2549936b2707b00d6c21b4e6ae4be8fefd0aa46 | [
"Apache-2.0"
] | null | null | null | tests/test_distance.py | pedrocamargo/fast-trips | a2549936b2707b00d6c21b4e6ae4be8fefd0aa46 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import numpy as np
import pandas as pd
import partridge as ptg
import pytest
from fasttrips import Trip
from fasttrips import Util
TEST_NETWORKS = {"Seattle_Region": "psrc_1_1",
"Springfield": "vermont"}
EXAMPLES_DIR = os.path.join(os.getcwd(), "fasttrips", "Examples")
@pytest.fixture(scope="module")
def network_results(network):
results = {
        'Springfield':
{
't1': [0.0000, 0.18204, 0.85835, 1.59093, 1.73259],
't55': [0.0000, 1.40889],
't140': [0.0000, 0.39525, 0.91519],
},
'Seattle_Region':
{
'690': [0.00000, 0.24679, 0.52990, 0.58124, 0.68396, 0.82198,
1.10185, 1.30837, 1.63678, 1.68605, 1.88833, 2.01921,
2.14929, 2.27598, 2.39962, 2.52896, 2.65403, 2.77906,
2.90012, 3.40607, 4.02007, 7.30269, 7.77643, 7.93774,
8.13528, 8.29669, 8.43537, 8.60926, 8.77880, 8.99127],
'3942': [0.00000, 2.98571, 10.86012, 11.00405, 11.21411, 11.41179,
11.69441, 11.85530, 12.20669, 12.26657, 12.41157],
'4023': [0.00000, 0.12492, 0.48199, 7.36683, 9.35049, 10.72752,
11.01201, 11.60369, 13.62171, 17.34048, 17.62048, 19.08759],
}
}
yield results[network]
def test_calculate_distance_miles():
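    # Two points in San Diego roughly 3.9 miles apart serve as a known-distance check for Util.calculate_distance_miles.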
orig_lat, orig_lon = 32.707431, -117.157058
dest_lat, dest_lon = 32.740792, -117.211333
cols = ['orig_lat', 'orig_lon', 'dest_lat', 'dest_lon', 'dist']
df = pd.DataFrame([[orig_lat, orig_lon, dest_lat, dest_lon, np.nan]],
columns=cols)
Util.calculate_distance_miles(df, cols[0], cols[1], cols[2], cols[3], cols[4])
distance = df[cols[4]][0]
print('test_calculate_distance_miles: {:.5f} mi'.format(distance))
assert abs(distance - 3.9116) < 0.0001
| 35.392857 | 85 | 0.562059 |
20beb301db17dec0771b5f4fb0ff1fccbeb622be | 1,487 | py | Python | slashtags/__init__.py | P4RZ1V4L-93/Cogs | 8b2b2bf3bcb2c961efd2f70a2cae0ff1f622c019 | [
"MIT"
] | null | null | null | slashtags/__init__.py | P4RZ1V4L-93/Cogs | 8b2b2bf3bcb2c961efd2f70a2cae0ff1f622c019 | [
"MIT"
] | null | null | null | slashtags/__init__.py | P4RZ1V4L-93/Cogs | 8b2b2bf3bcb2c961efd2f70a2cae0ff1f622c019 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
from pathlib import Path
from redbot.core.bot import Red
from .http import Route
from .models import SlashOptionType
from .slashtags import SlashTags
with open(Path(__file__).parent / "info.json") as fp:
__red_end_user_data_statement__ = json.load(fp)["end_user_data_statement"]
async def setup(bot: Red) -> None:
cog = SlashTags(bot)
await cog.pre_load()
bot.add_cog(cog)
| 35.404762 | 78 | 0.785474 |
0c2a9e2f70e7ad6cc9356845f2c3ac85eb2fc35b | 2,117 | py | Python | ex41.py | thinzaroo15/coding-sprint | 3c146fafb09a5e9bf9ef1d257b9a8f3052e8ab96 | [
"MIT"
] | null | null | null | ex41.py | thinzaroo15/coding-sprint | 3c146fafb09a5e9bf9ef1d257b9a8f3052e8ab96 | [
"MIT"
] | null | null | null | ex41.py | thinzaroo15/coding-sprint | 3c146fafb09a5e9bf9ef1d257b9a8f3052e8ab96 | [
"MIT"
] | null | null | null | import random
from urllib.request import urlopen
import sys
WORDS_URL ="https://learncodethehardway.org/words.txt"
WORDS =[]
PHRASES = {
    "class %%%(%%%):":
        "Make a class named %%% that is-a %%%.",
    "class %%%(object):\n\tdef __init__(self,***)":
        "class %%% has-a __init__ that takes self and *** params.",
    "class %%%(object):\n\tdef ***(self,@@@)":
        "class %%% has-a function *** that takes self and @@@ params.",
    "*** = %%%()":
        "Set *** to an instance of class %%%.",
    "***.***(@@@)":
        "From *** get the *** function, call it with params self, @@@.",
    "***.*** = '***'":
        "From *** get the *** attribute and set it to '***'."
}
# "english" mode prints the English phrase and asks for the code snippet
if len(sys.argv) == 2 and sys.argv[1] == "english":
    PHRASE_FIRST = True
else:
    PHRASE_FIRST = False
for word in urlopen(WORDS_URL).readlines():
WORDS.append(str(word.strip(),encoding="utf-8"))
def convert(snippet,phrase):
    class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("%%%"))]
other_names =random.sample(WORDS,snippet.count("***"))
results =[]
param_names =[]
for i in range(0,snippet.count("@@@")):
param_count=random.randint(1,3)
param_names.append(','.join(random.sample(WORDS,param_count)))
for sentence in snippet,phrase:
result =sentence[:]
for word in class_names:
result =result.replace("%%%",word,1)
for word in other_names:
result = result.replace("***",word,1)
for word in param_names:
result=result.replace("@@@",word,1)
results.append(result)
return results
try:
while True:
snippets=list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase=PHRASES[snippet]
            question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question,answer =answer,question
print(question)
input(">")
print(f"ANSWER:{answer}\n\n")
except EOFError:
print("\nBye")
| 34.145161 | 85 | 0.549835 |
992ac9aa11be639f406390041e0d18d34d30684d | 254 | py | Python | Python/age1.py | NewGuonx/DST_algorithm | 8c4b2f0fe1f43044a5c37a7f993d339862d9182c | [
"Unlicense"
] | 1 | 2022-01-23T05:29:17.000Z | 2022-01-23T05:29:17.000Z | Python/age1.py | sonaspy/dst_algorithms | 8c4b2f0fe1f43044a5c37a7f993d339862d9182c | [
"Unlicense"
] | null | null | null | Python/age1.py | sonaspy/dst_algorithms | 8c4b2f0fe1f43044a5c37a7f993d339862d9182c | [
"Unlicense"
] | null | null | null |
age = -1 # an initially invalid choice
while age <= 0:
try:
age = int(input('Enter your age in years: '))
if age <= 0:
print('Your age must be positive')
except (ValueError, EOFError):
print('Invalid response')
| 23.090909 | 57 | 0.574803 |
ad1aa3066f37a04c5aefe63c7e575f2b4f4116a9 | 925 | py | Python | backend/ipproject/core/users/models.py | FedotenkoM/ipsproject | f02ce8acd560b3e10e5357f0605e923396aaafa0 | [
"MIT"
] | null | null | null | backend/ipproject/core/users/models.py | FedotenkoM/ipsproject | f02ce8acd560b3e10e5357f0605e923396aaafa0 | [
"MIT"
] | 11 | 2021-05-14T12:34:18.000Z | 2021-08-22T14:52:01.000Z | backend/ipproject/core/users/models.py | FedotenkoM/ipsproject | f02ce8acd560b3e10e5357f0605e923396aaafa0 | [
"MIT"
] | null | null | null | from ipproject.core.database import db
class UserModel(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), unique=True, nullable=False)
password = db.Column(db.String(120), nullable=False)
email = db.Column(db.String(50), unique=True, nullable=False)
session = db.Column(db.String(36), nullable=False)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'), nullable=True)
def jsonify(self, for_card=False):
result = {
'id': self.id,
'username': self.username,
'roleId': self.role_id
}
if for_card:
result['email'] = self.email
return result
@classmethod
async def get_by_identifier(cls, identifier):
return await cls.query.where(
(cls.email == identifier) | (cls.username == identifier)
).gino.first()
| 29.83871 | 77 | 0.621622 |
604423621edf03bfa9d63339e71e16c85f4b2dec | 328 | py | Python | util/vaihingen/vaihingen_extra_data.py | marcelampc/aerial_mtl | c6a2033ec89a89ff06836014c619d66bc907d958 | [
"BSD-3-Clause"
] | 58 | 2019-07-23T09:01:58.000Z | 2022-03-31T23:12:37.000Z | util/vaihingen/vaihingen_extra_data.py | marcelampc/mtl_aerial_images | c6a2033ec89a89ff06836014c619d66bc907d958 | [
"BSD-3-Clause"
] | 5 | 2019-12-09T12:20:35.000Z | 2021-06-23T12:41:48.000Z | util/vaihingen/vaihingen_extra_data.py | marcelampc/aerial_mtl | c6a2033ec89a89ff06836014c619d66bc907d958 | [
"BSD-3-Clause"
] | 8 | 2019-08-05T11:13:23.000Z | 2021-04-26T00:57:40.000Z | # Extra information about nyuv2
import numpy as np
# Number of classes:
n_classes = 7
# set 0 to first label
# weights=np.ones(1, n_classes)
weights = [
0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]
colors = [
[0, 0, 0], [255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0]
] | 25.230769 | 103 | 0.527439 |
0b8974ba3768fbcab445f34119c1a90dd89fd88f | 1,096 | py | Python | Python3/0936-Stamping-The-Sequence/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0936-Stamping-The-Sequence/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0936-Stamping-The-Sequence/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def movesToStamp(self, stamp, target):
"""
:type stamp: str
:type target: str
:rtype: List[int]
"""
ns, nt = len(stamp), len(target)
if nt < ns or stamp[0] != target[0] or stamp[-1] != target[-1]:
return []
target = list(target)
res = []
count = 0
visited = [False] * nt
while count < 10 * nt:
count += 1
idx = -1
for i in range(nt - ns + 1):
if visited[i]: continue
num = 0
for j in range(ns):
if '*' != target[i + j] != stamp[j]:
break
elif target[i + j] != '*':
num += 1
else:
if num:
idx = i
break
if idx == -1: break
target[idx: idx + ns] = ['*'] * ns
visited[idx] = True
res.append(idx)
return res[::-1] if all(item == '*' for item in target) else [] | 32.235294 | 71 | 0.367701 |
553d2c8c1391fa21f625507b0573640f2569a28f | 8,126 | py | Python | src/oemof/tabular/tools/postprocessing.py | jnnr/oemof-tabular | ab58d8c3035b6e97d9d45169832745de11e5bb36 | [
"BSD-3-Clause"
] | 2 | 2019-12-09T17:34:31.000Z | 2022-02-04T12:55:15.000Z | src/oemof/tabular/tools/postprocessing.py | jnnr/oemof-tabular | ab58d8c3035b6e97d9d45169832745de11e5bb36 | [
"BSD-3-Clause"
] | 28 | 2018-11-24T16:56:55.000Z | 2022-03-25T12:19:41.000Z | src/oemof/tabular/tools/postprocessing.py | jnnr/oemof-tabular | ab58d8c3035b6e97d9d45169832745de11e5bb36 | [
"BSD-3-Clause"
] | 7 | 2018-12-19T13:42:52.000Z | 2021-11-21T18:43:45.000Z | # -*- coding: utf-8 -*-
"""
"""
import os
import numpy as np
import pandas as pd
from oemof.network import Bus, Sink
from oemof.outputlib import views
from oemof.solph.components import GenericStorage
from oemof.tabular import facades
def component_results(es, results, select="sequences"):
""" Aggregated by component type
"""
c = {}
if not hasattr(es, "typemap"):
setattr(es, "typemap", facades.TYPEMAP)
for k, v in es.typemap.items():
if type(k) == str:
if select == "sequences":
_seq_by_type = [
views.node(results, n, multiindex=True).get("sequences")
for n in es.nodes
if isinstance(n, v) and not isinstance(n, Bus)
]
# check if dataframes / series have been returned
if any(
[
isinstance(i, (pd.DataFrame, pd.Series))
for i in _seq_by_type
]
):
seq_by_type = pd.concat(_seq_by_type, axis=1)
c[str(k)] = seq_by_type
if select == "scalars":
_sca_by_type = [
views.node(results, n, multiindex=True).get("scalars")
for n in es.nodes
if isinstance(n, v) and not isinstance(n, Bus)
]
if [x for x in _sca_by_type if x is not None]:
_sca_by_type = pd.concat(_sca_by_type)
c[str(k)] = _sca_by_type
return c
def bus_results(es, results, select="sequences", concat=False):
""" Aggregated for every bus of the energy system
"""
br = {}
buses = [b for b in es.nodes if isinstance(b, Bus)]
for b in buses:
if select == "sequences":
bus_sequences = pd.concat(
[
views.node(results, b, multiindex=True).get(
"sequences", pd.DataFrame()
)
],
axis=1,
)
br[str(b)] = bus_sequences
if select == "scalars":
br[str(b)] = views.node(results, b, multiindex=True).get("scalars")
if concat:
if select == "sequences":
axis = 1
else:
axis = 0
br = pd.concat([b for b in br.values()], axis=axis)
return br
def supply_results(
types=[
"dispatchable",
"volatile",
"conversion",
"backpressure",
"extraction",
"storage",
"reservoir",
],
bus=None,
results=None,
es=None,
):
"""
"""
if not hasattr(es, "typemap"):
setattr(es, "typemap", facades.TYPEMAP)
selection = pd.DataFrame()
for t in types:
if (
issubclass(es.typemap[t], GenericStorage)
and es.typemap[t] is not facades.Reservoir
):
df = views.net_storage_flow(results, node_type=es.typemap[t])
if df is not None:
selection = pd.concat([selection, df], axis=1)
else:
df = views.node_output_by_type(results, node_type=es.typemap[t])
if df is not None:
selection = pd.concat([selection, df], axis=1)
selection = selection.loc[
:, (slice(None), [es.groups[b] for b in bus], ["flow", "net_flow"])
]
return selection
def demand_results(types=["load"], bus=None, results=None, es=None):
"""
"""
if not hasattr(es, "typemap"):
setattr(es, "typemap", facades.TYPEMAP)
selection = pd.DataFrame()
for t in types:
selection = pd.concat(
[
selection,
views.node_input_by_type(results, node_type=es.typemap[t]),
],
axis=1,
)
selection = selection.loc[
:, ([es.groups[b] for b in bus], slice(None), ["flow"])
]
return selection
def write_results(
m, output_path, raw=False, summary=True, scalars=True, **kwargs
):
"""
"""
def save(df, name, path=output_path):
""" Helper for writing csv files
"""
df.to_csv(os.path.join(path, name + ".csv"))
buses = [b.label for b in m.es.nodes if isinstance(b, Bus)]
link_results = component_results(m.es, m.results).get("link")
if link_results is not None and raw:
save(link_results, "links-oemof")
imports = pd.DataFrame()
for b in buses:
supply = supply_results(results=m.results, es=m.es, bus=[b], **kwargs)
supply.columns = supply.columns.droplevel([1, 2])
demand = demand_results(results=m.results, es=m.es, bus=[b])
excess = component_results(m.es, m.results, select="sequences").get(
"excess"
)
if link_results is not None and m.es.groups[b] in list(
link_results.columns.levels[0]
):
ex = link_results.loc[
:, (m.es.groups[b], slice(None), "flow")
].sum(axis=1)
im = link_results.loc[
:, (slice(None), m.es.groups[b], "flow")
].sum(axis=1)
net_import = im - ex
net_import.name = m.es.groups[b]
imports = pd.concat([imports, net_import], axis=1)
supply["import"] = net_import
if m.es.groups[b] in demand.columns:
_demand = demand.loc[:, (m.es.groups[b], slice(None), "flow")]
_demand.columns = _demand.columns.droplevel([0, 2])
supply = pd.concat([supply, _demand], axis=1)
if excess is not None:
if m.es.groups[b] in excess.columns:
_excess = excess.loc[:, (m.es.groups[b], slice(None), "flow")]
_excess.columns = _excess.columns.droplevel([0, 2])
supply = pd.concat([supply, _excess], axis=1)
save(supply, b)
save(imports, "import")
try:
all = bus_results(m.es, m.results, select="scalars", concat=True)
all.name = "value"
endogenous = all.reset_index()
endogenous["tech"] = [
getattr(t, "tech", np.nan) for t in all.index.get_level_values(0)
]
endogenous["carrier"] = [
getattr(t, "carrier", np.nan)
for t in all.index.get_level_values(0)
]
endogenous.set_index(
["from", "to", "type", "tech", "carrier"], inplace=True
)
except ValueError:
endogenous = pd.DataFrame()
d = dict()
for node in m.es.nodes:
if not isinstance(node, (Bus, Sink, facades.Shortage)):
if getattr(node, "capacity", None) is not None:
if isinstance(node, facades.TYPEMAP["link"]):
pass
else:
key = (
node,
[n for n in node.outputs.keys()][0],
"capacity",
node.tech, # tech & carrier are oemof-tabular specific
node.carrier,
) # for oemof logic
d[key] = {"value": node.capacity}
exogenous = pd.DataFrame.from_dict(d).T # .dropna()
if not exogenous.empty:
exogenous.index = exogenous.index.set_names(
["from", "to", "type", "tech", "carrier"]
)
capacities = pd.concat([endogenous, exogenous])
save(capacities, "capacities")
bresults = bus_results(m.es, m.results, concat=True)
if "duals" in bresults.columns.levels[2]:
duals = bresults.xs("duals", level=2, axis=1)
duals.columns = duals.columns.droplevel(1)
duals = (duals.T / m.objective_weighting).T
save(duals, "shadow_prices")
# check if storages exist in energy system nodes
if [n for n in m.es.nodes if isinstance(n, GenericStorage)]:
filling_levels = views.node_weight_by_type(m.results, GenericStorage)
filling_levels.columns = filling_levels.columns.droplevel(1)
save(filling_levels, "filling_levels")
| 30.780303 | 79 | 0.528304 |
a22c8400b87bcbf8d3c7b05ffa5b709ba5ff14ae | 801 | py | Python | manti_by/apps/profiles/models.py | manti-by/manti.by | 233882fc5e5758ff92f0b7940316f15e4d30af07 | [
"BSD-3-Clause"
] | 1 | 2021-12-11T11:34:04.000Z | 2021-12-11T11:34:04.000Z | manti_by/apps/profiles/models.py | manti-by/manti.by | 233882fc5e5758ff92f0b7940316f15e4d30af07 | [
"BSD-3-Clause"
] | 11 | 2021-03-23T13:59:39.000Z | 2022-02-02T10:16:58.000Z | manti_by/apps/profiles/models.py | manti-by/manti.by | 233882fc5e5758ff92f0b7940316f15e4d30af07 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.templatetags.static import static
from django.utils.translation import gettext_lazy as _
from manti_by.apps.core.models import BaseModel
from manti_by.apps.core.utils import profile_image_name
class Profile(BaseModel):
original_image = models.ImageField(
upload_to=profile_image_name,
blank=True,
null=True,
verbose_name=_("Profile Image"),
)
user = models.OneToOneField(
User, on_delete=models.CASCADE, primary_key=True, related_name="profile"
)
def __str__(self):
return self.user.email
@property
def image(self):
if self.original_image:
return self.original_image.url
return static("img/no-image.png")
| 25.83871 | 80 | 0.706617 |
77d82411a4756b5788ec964fe9a5432604dfd495 | 7,122 | py | Python | scripts/pre_ORgenerator_py/odds_c_a_anydiag_v4-17-13.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | 3 | 2018-03-29T23:02:43.000Z | 2020-08-10T12:01:50.000Z | scripts/pre_ORgenerator_py/odds_c_a_anydiag_v4-17-13.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | scripts/pre_ORgenerator_py/odds_c_a_anydiag_v4-17-13.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 4/6/2013
###Function: Draw charts for each season for week number (x) by odds of children ILI to adult ILI normalized by US child and adult visits of any diagnosis for each flu season
###Import data:
###Command Line: python
##############################################
### notes ###
# child = 5-19 yo, adult = 20-59 yo
### were there changes in reporting requirements over the years? both ILI and any diagnosis counts seem to have increased over the years and they seem particularly high for the 2009-10 flu season for both metrics even though that data is only a partial year
### are adults more likely to develop secondary symptoms such that flu-related illnesses are less likely to be categorized as flu for adults than for children? Or perhaps adults are waiting later to go to the doctor when they have the flu so their illness is categorized as something else
### packages ###
import matplotlib
import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
## local packages ##
### data structures ###
adult2a, adult2b, adult2c, adult2d, adult2e, adult2f, adult2g, adult2h, adult2i, adult2j=[],[],[],[],[],[],[],[],[],[]
child2a, child2b, child2c, child2d, child2e, child2f, child2g, child2h, child2i, child2j=[],[],[],[],[],[],[],[],[],[]
y2a, y2b, y2c, y2d, y2e, y2f, y2g, y2h, y2i, y2j=[],[],[],[],[],[],[],[],[],[]
ad_dict = {}
adult1, child1, adult3a, child3a, adult3b, child3b = [],[],[],[],[],[]
y1, y3a, y3b = [],[],[]
### parameters ###
### functions ###
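# importer2: for a single flu season (seasonnum), divide each weekly ILI visit
# count (column ilicol) by that season's total any-diagnosis visits from ad_dict,
# filling separate adult ("A") and child ("C") attack-rate lists.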
def importer2 (csvreadfile, adultlist, childlist, ilicol, seasonnum):
for row in csvreadfile:
if row[1] == "A":
adultlist.append(float(row[ilicol])/float(ad_dict[str(seasonnum)+"A"]))
#print float(row[ilicol]), float(ad_dict[str(seasonnum)+"A"])
elif row[1] == "C":
childlist.append(float(row[ilicol])/float(ad_dict[str(seasonnum)+"C"]))
#print float(row[ilicol]), float(ad_dict[str(seasonnum)+"C"])
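# ORgen: turn the paired child/adult attack rates into a weekly odds ratio,
# OR = (c/(1-c)) / (a/(1-a)); values above 1 indicate relatively more ILI
# burden among children than among adults.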
def ORgen (ylist, childlist, adultlist):
for i in range(0,len(childlist)):
ylist.append((childlist[i]/(1-childlist[i]))/(adultlist[i]/(1-adultlist[i])))
print childlist[i], 1-childlist[i], adultlist[i], 1-adultlist[i]
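# importer3: same normalization as importer2, but the season identifier is read
# from each row (row[0]) rather than passed in, so one file can cover all seasons.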
def importer3 (csvreadfile, adultlist, childlist, ilicol):
for row in csvreadfile:
if row[1] == "A":
adultlist.append(float(row[ilicol])/float(ad_dict[str(row[0])+"A"]))
elif row[1] == "C":
childlist.append(float(row[ilicol])/float(ad_dict[str(row[0])+"C"]))
### import data ###
anydiagin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/anydiag.csv','r')
anydiag=csv.reader(anydiagin, delimiter=',')
d2ain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_a.csv','r')
d2a=csv.reader(d2ain, delimiter=',')
d2bin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_b.csv','r')
d2b=csv.reader(d2bin, delimiter=',')
d2cin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_c.csv','r')
d2c=csv.reader(d2cin, delimiter=',')
d2din=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_d.csv','r')
d2d=csv.reader(d2din, delimiter=',')
d2ein=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_e.csv','r')
d2e=csv.reader(d2ein, delimiter=',')
d2fin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_f.csv','r')
d2f=csv.reader(d2fin, delimiter=',')
d2gin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_g.csv','r')
d2g=csv.reader(d2gin, delimiter=',')
d2hin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_h.csv','r')
d2h=csv.reader(d2hin, delimiter=',')
d2iin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_i.csv','r')
d2i=csv.reader(d2iin, delimiter=',')
d2jin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a2_j.csv','r')
d2j=csv.reader(d2jin, delimiter=',')
d1in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a1.csv','r')
d1=csv.reader(d1in, delimiter=',')
d3ain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a3_a.csv','r')
d3a=csv.reader(d3ain, delimiter=',')
d3bin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/odds_c_a3_b.csv','r')
d3b=csv.reader(d3bin, delimiter=',')
### program ###
# import any diagnosis count
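# ad_dict maps season + age-class keys (e.g. "1A", "1C", judging from the lookups
# in importer2/importer3) to total any-diagnosis visit counts.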
for row in anydiag:
ad_smarker = str(row[0])+str(row[1])
ad_visits = float(row[2])
ad_dict[ad_smarker] = ad_visits
# import weekly data normalized by any diagnosis counts for each flu season
importer2(d2a, adult2a, child2a, 3, 1)
importer2(d2b, adult2b, child2b, 3, 2)
importer2(d2c, adult2c, child2c, 3, 3)
importer2(d2d, adult2d, child2d, 3, 4)
importer2(d2e, adult2e, child2e, 3, 5)
importer2(d2f, adult2f, child2f, 3, 6)
importer2(d2g, adult2g, child2g, 3, 7)
importer2(d2h, adult2h, child2h, 3, 8)
importer2(d2i, adult2i, child2i, 3, 9)
importer2(d2j, adult2j, child2j, 3, 10)
# generate odds ratios
ORgen(y2a, child2a, adult2a)
ORgen(y2b, child2b, adult2b)
ORgen(y2c, child2c, adult2c)
ORgen(y2d, child2d, adult2d)
ORgen(y2e, child2e, adult2e)
ORgen(y2f, child2f, adult2f)
ORgen(y2g, child2g, adult2g)
ORgen(y2h, child2h, adult2h)
ORgen(y2i, child2i, adult2i)
ORgen(y2j, child2j, adult2j)
x2 = range(0,33)
x2lab=range(40,53)+range(1,21)
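# drop index 13 from the 04-05 and 09-10 series, presumably the extra 53rd epi
# week those seasons contain, so every series matches the 33-week x-axis.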
y2e.pop(13)
y2j.pop(13)
# plot
#plt.plot(x2, y2a, marker='o', color = 'grey', label= "season 1")
plt.plot(x2, y2b, marker='o', color = 'black', label= "01-02")
plt.plot(x2, y2c, marker='o', color = 'red', label= "02-03")
plt.plot(x2, y2d, marker='o', color = 'orange', label= "03-04")
plt.plot(x2, y2e, marker='o', color = 'gold', label= "04-05")
plt.plot(x2, y2f, marker='o', color = 'green', label= "05-06")
plt.plot(x2, y2g, marker='o', color = 'blue', label= "06-07")
plt.plot(x2, y2h, marker='o', color = 'cyan', label= "07-08")
plt.plot(x2, y2i, marker='o', color = 'darkviolet', label= "08-09")
plt.plot(x2, y2j, marker='o', color = 'hotpink', label= "09-10")
plt.xlabel('Week number')
plt.ylabel('Odds ratio of attack rate, child:adult (any visit ct normalized)') #5-16-13 changed label
ylim([0,15])
plt.legend(loc="lower right")
plt.xticks(x2, x2lab, rotation = 90)
plt.show()
# adults and children have similar numbers of ILI visits, but adults visit the doctor magnitudes more often than do children
############# all seasons chart ###############
# import total data
importer3(d1, adult1, child1, 2)
importer3(d3a, adult3a, child3a, 2)
importer3(d3b, adult3b, child3b, 2)
x1 = range(1,11,1)
# generate child:adult attack rate odds ratio for each season
ORgen(y1, child1, adult1)
ORgen(y3a, child3a, adult3a)
ORgen(y3b, child3b, adult3b)
# plot
plt.plot(x1, y1, marker='o', color = 'black', label= "total")
plt.plot(x1, y3a, marker='o', color = 'red', label= "severe cases")
plt.plot(x1, y3b, marker='o', color = 'green', label = "milder cases")
plt.xlabel('Season number')
plt.ylabel('Odds ratio of attack rate, child:adult (any visit ct normalized)')
ylim([0,12])
plt.legend(loc="upper left")
plt.show()
| 40.465909 | 288 | 0.701067 |