Dataset schema (one record per source file; ⌀ marks nullable columns):

| column | type |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 3–1.03M |
| ext | stringclasses (10 values) |
| lang | stringclasses (1 value) |
| max_stars_repo_path | stringlengths 3–972 |
| max_stars_repo_name | stringlengths 6–130 |
| max_stars_repo_head_hexsha | stringlengths 40–78 |
| max_stars_repo_licenses | sequencelengths 1–10 |
| max_stars_count | int64 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 3–972 |
| max_issues_repo_name | stringlengths 6–130 |
| max_issues_repo_head_hexsha | stringlengths 40–78 |
| max_issues_repo_licenses | sequencelengths 1–10 |
| max_issues_count | int64 1–116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 3–972 |
| max_forks_repo_name | stringlengths 6–130 |
| max_forks_repo_head_hexsha | stringlengths 40–78 |
| max_forks_repo_licenses | sequencelengths 1–10 |
| max_forks_count | int64 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 3–1.03M |
| avg_line_length | float64 1.13–941k |
| max_line_length | int64 2–941k |
| alphanum_fraction | float64 0–1 |
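The schema above is only a column list. As a minimal, hypothetical sketch of how rows with these columns could be filtered once loaded into a pandas DataFrame (the DataFrame `df` and the loading step are assumptions; only the column names come from the schema):

```python
import pandas as pd

# Hypothetical: assume the records below have already been loaded into a
# DataFrame `df` whose columns match the schema above; neither the dataset
# name nor the loading step is given in this dump.
def popular_python_files(df: pd.DataFrame, min_stars: int = 10) -> pd.DataFrame:
    # max_stars_count is nullable (marked ⌀ above), so treat missing as 0.
    stars = df["max_stars_count"].fillna(0)
    mask = (df["lang"] == "Python") & (stars >= min_stars)
    cols = ["max_stars_repo_name", "max_stars_repo_path", "max_stars_count",
            "avg_line_length", "alphanum_fraction"]
    return df.loc[mask, cols]
```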
Each record below lists the metadata fields in the column order above, then the file content, then the three trailing statistics.

ae34666b723061be80352705629ca5b56c7bdea1 | 202 | py | Python | nomadgram/users/tests/test_models.py | tonky0110/nomadgram | 2a203a91598156c91b546ff1c4c67e14ca54df6d | ["MIT"] | null | null | null | nomadgram/users/tests/test_models.py | tonky0110/nomadgram | 2a203a91598156c91b546ff1c4c67e14ca54df6d | ["MIT"] | 9 | 2021-03-10T10:00:53.000Z | 2022-02-18T22:00:51.000Z | nomadgram/users/tests/test_models.py | tonky0110/nomadgram | 2a203a91598156c91b546ff1c4c67e14ca54df6d | ["MIT"] | null | null | null |

import pytest
from nomadgram.users.models import User

pytestmark = pytest.mark.django_db

def test_user_get_absolute_url(user: User):
    assert user.get_absolute_url() == f"/users/{user.username}/"

avg_line_length: 20.2 | max_line_length: 64 | alphanum_fraction: 0.772277
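The trailing numbers on each record (avg_line_length, max_line_length, alphanum_fraction) are derived from the `content` column. The dump does not spell out the exact formulas, so the following is only a plausible reconstruction (the function name is ours):

```python
def text_stats(content: str) -> tuple[float, int, float]:
    # Assumed definitions: mean and maximum line length, plus the share of
    # alphanumeric characters; the dataset's exact formulas are not stated here.
    lines = content.splitlines()
    avg_line_length = len(content) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction
```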
2912c10e05e938bf94ca6acec458e0e104495cce | 299 | py | Python | app/errors/handlers.py | MicroprocessorX069/Todo-web-app | c68f2e26eec1c89ab2257a22071133216c743ca0 | ["Apache-2.0"] | null | null | null | app/errors/handlers.py | MicroprocessorX069/Todo-web-app | c68f2e26eec1c89ab2257a22071133216c743ca0 | ["Apache-2.0"] | 1 | 2021-06-02T00:55:48.000Z | 2021-06-02T00:55:48.000Z | app/errors/handlers.py | MicroprocessorX069/Todo-web-app | c68f2e26eec1c89ab2257a22071133216c743ca0 | ["Apache-2.0"] | null | null | null |

from flask import render_template
from app import db
from app.errors import bp

@bp.errorhandler(404)
def not_found_error(error):
    return render_template('errors/404.html'), 404

@bp.errorhandler(500)
def internal_error(error):
    db.session.rollback()
    return render_template('errors/500.html'), 500

avg_line_length: 23 | max_line_length: 47 | alphanum_fraction: 0.789298
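The handlers above attach to a Flask blueprint (`bp`). As a hedged sketch of how such an errors blueprint is typically wired into an application — the factory below is an assumption for illustration, not part of this repository:

```python
from flask import Flask

from app.errors import bp as errors_bp  # the blueprint the handlers above attach to


def create_app() -> Flask:
    # Minimal application factory; configuration, database setup, and the
    # project's other blueprints are omitted.
    app = Flask(__name__)
    app.register_blueprint(errors_bp)
    return app
```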
caae1b7c4f85356f1c31fa560be5a95a2357b1ff | 51,489 | py | Python | Lib/test/test_fstring.py | pxeger/cpython | 959580bd9ff8824590e8b24895bc2276f3f10b35 | ["0BSD"] | 12 | 2021-04-22T14:52:17.000Z | 2021-12-21T12:51:31.000Z | Lib/test/test_fstring.py | pxeger/cpython | 959580bd9ff8824590e8b24895bc2276f3f10b35 | ["0BSD"] | 31 | 2017-09-04T16:47:24.000Z | 2022-03-01T10:01:34.000Z | Lib/test/test_fstring.py | pxeger/cpython | 959580bd9ff8824590e8b24895bc2276f3f10b35 | ["0BSD"] | 5 | 2021-04-25T22:26:29.000Z | 2022-01-25T22:22:30.000Z |

# -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import os
import re
import types
import decimal
import unittest
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
# it).
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
# This is how __format__ is actually called.
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
# Inspired by http://bugs.python.org/issue24975
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
# Make sure x was not called.
self.assertFalse(x.called)
# Actually run the code.
exec(c)
# Make sure x was called.
self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `f'no formatted value'`
self.assertEqual(type(t.body[0]), ast.Expr)
self.assertEqual(type(t.body[0].value), ast.JoinedStr)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 4)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
# check the first binop location
binop1 = t.body[1].value.values[1].value
self.assertEqual(type(binop1), ast.BinOp)
self.assertEqual(type(binop1.left), ast.Name)
self.assertEqual(type(binop1.op), ast.Mult)
self.assertEqual(type(binop1.right), ast.Call)
self.assertEqual(binop1.lineno, 3)
self.assertEqual(binop1.left.lineno, 3)
self.assertEqual(binop1.right.lineno, 3)
self.assertEqual(binop1.col_offset, 8)
self.assertEqual(binop1.left.col_offset, 8)
self.assertEqual(binop1.right.col_offset, 12)
# check the second binop location
binop2 = t.body[1].value.values[3].value
self.assertEqual(type(binop2), ast.BinOp)
self.assertEqual(type(binop2.left), ast.Name)
self.assertEqual(type(binop2.op), ast.Add)
self.assertEqual(type(binop2.right), ast.Call)
self.assertEqual(binop2.lineno, 3)
self.assertEqual(binop2.left.lineno, 3)
self.assertEqual(binop2.right.lineno, 3)
self.assertEqual(binop2.col_offset, 23)
self.assertEqual(binop2.left.col_offset, 23)
self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
expr = """
a = 10
f'{a * f"-{x()}-"}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.JoinedStr)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the nested call location
self.assertEqual(len(binop.right.values), 3)
self.assertEqual(type(binop.right.values[0]), ast.Constant)
self.assertEqual(type(binop.right.values[0].value), str)
self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
self.assertEqual(type(binop.right.values[2]), ast.Constant)
self.assertEqual(type(binop.right.values[2].value), str)
self.assertEqual(binop.right.values[0].lineno, 3)
self.assertEqual(binop.right.values[1].lineno, 3)
self.assertEqual(binop.right.values[2].lineno, 3)
call = binop.right.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
"""Duplicate expression
NOTE: this is currently broken, always sets location of the first
expression.
"""
expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 5)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[1].value), str)
self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[3].value), str)
self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].value.values[3].lineno, 3)
self.assertEqual(t.body[1].value.values[4].lineno, 3)
# check the first binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
# check the second binop location
binop = t.body[1].value.values[2].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
# check the third binop location
binop = t.body[1].value.values[4].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.left.col_offset, 3) # FIXME: this is wrong
self.assertEqual(binop.right.col_offset, 7) # FIXME: this is wrong
def test_ast_line_numbers_multiline_fstring(self):
# See bpo-30465 for details.
expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 3)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].col_offset, 0)
self.assertEqual(t.body[1].value.col_offset, 0)
self.assertEqual(t.body[1].value.values[0].col_offset, 0)
self.assertEqual(t.body[1].value.values[1].col_offset, 0)
self.assertEqual(t.body[1].value.values[2].col_offset, 0)
# NOTE: the following lineno information and col_offset is correct for
# expressions within FormattedValues.
binop = t.body[1].value.values[1].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 4)
self.assertEqual(binop.left.lineno, 4)
self.assertEqual(binop.right.lineno, 6)
self.assertEqual(binop.col_offset, 4)
self.assertEqual(binop.left.col_offset, 4)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_with_parentheses(self):
expr = """
x = (
f" {test(t)}"
)"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 1)
# check the test(t) location
call = t.body[0].value.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.end_lineno, 3)
self.assertEqual(call.col_offset, 8)
self.assertEqual(call.end_col_offset, 15)
expr = """
x = (
'PERL_MM_OPT', (
f'wat'
f'some_string={f(x)} '
f'wat'
),
)
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 1)
# check the fstring
fstring = t.body[0].value.elts[1]
self.assertEqual(type(fstring), ast.JoinedStr)
self.assertEqual(len(fstring.values), 3)
wat1, middle, wat2 = fstring.values
# check the first wat
self.assertEqual(type(wat1), ast.Constant)
self.assertEqual(wat1.lineno, 4)
self.assertEqual(wat1.end_lineno, 6)
self.assertEqual(wat1.col_offset, 12)
self.assertEqual(wat1.end_col_offset, 18)
# check the call
call = middle.value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 5)
self.assertEqual(call.end_lineno, 5)
self.assertEqual(call.col_offset, 27)
self.assertEqual(call.end_col_offset, 31)
# check the second wat
self.assertEqual(type(wat2), ast.Constant)
self.assertEqual(wat2.lineno, 4)
self.assertEqual(wat2.end_lineno, 6)
self.assertEqual(wat2.col_offset, 12)
self.assertEqual(wat2.end_col_offset, 18)
def test_docstring(self):
def f():
f'''Not a docstring'''
self.assertIsNone(f.__doc__)
def g():
'''Not a docstring''' \
f''
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals',
[r"""f'' b''""",
r"""b'' f''""",
])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{((}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
r"does not match opening parenthesis '\['",
["f'{a[4)}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
r"does not match opening parenthesis '\('",
["f'{a(4]}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\['",
["f'{a[4}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{a(4}'",
])
self.assertRaises(SyntaxError, eval, "f'{" + "("*500 + "}'")
def test_double_braces(self):
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
# Inside of strings, don't interpret doubled brackets.
self.assertEqual(f'{"{{}}"}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
# These aren't comments, since they're in strings.
d = {'#': 'hash'}
self.assertEqual(f'{"#"}', '#')
self.assertEqual(f'{d["#"]}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'", # error because the expression becomes "(1#)"
"f'{3(#)}'",
"f'{#}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{)#}'", # When wrapped in parens, this becomes
# '()#)'. Make sure that doesn't compile.
])
def test_many_expressions(self):
# Create a string with many expressions in it. Note that
# because we have a space in here as a literal, we're actually
# going to use twice as many ast nodes: one for each literal
# plus one for each expression.
def build_fstr(n, extra=''):
return "f'" + ('{x} ' * n) + extra + "'"
x = 'X'
width = 1
# Test around 256.
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
# Test concatenating 2 largs fstrings.
self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x+' ')*254)
# Test lots of expressions and constants, concatenated.
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result: 12.35')
self.assertEqual(f'{10:#{1}0x}', ' 0xa')
self.assertEqual(f'{10:{"#"}1{0}{"x"}}', ' 0xa')
self.assertEqual(f'{-10:-{"#"}1{0}x}', ' -0xa')
self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', ' -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{"s"!r{":10"}}'""",
# This looks like a nested format spec.
])
self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{"s"!{"r"}}'""",
])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
["f'{}'",
"f'{ }'"
"f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
# The Python parser ignores also the following
# whitespace characters in additional to a space.
"f'''{\t\f\r\n}'''",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
# Different error message is raised for other whitespace characters.
self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
["f'''{\xa0}'''",
"\xa0",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'unterminated string literal',
["f'{\n}'",
])
def test_newlines_before_syntax_error(self):
self.assertAllRaise(SyntaxError, "invalid syntax",
["f'{.}'", "\nf'{.}'", "\n\nf'{.}'"])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
with self.assertWarns(DeprecationWarning): # invalid escape sequence
value = eval(r"f'\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{6*7}', '\\42')
self.assertEqual(fr'\{6*7}', '\\42')
AMPERSAND = 'spam'
# Get the right unicode character (&), or pick up local variable
# depending on the number of backslashes.
self.assertEqual(f'\N{AMPERSAND}', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
# These test are needed because unicode names are parsed
# differently inside f-strings.
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
r"f'{\n}'",
])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
# \x7b is '{'.
self.assertEqual(f'\x7b1+1}}', '{1+1}')
self.assertEqual(f'\x7b1+1', '{1+1')
self.assertEqual(f'\u007b1+1', '{1+1')
self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888 ")
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
f'{yield}'
g = fn(4)
self.assertEqual(next(g), 8)
self.assertEqual(next(g), None)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{'''x'''}", 'x')
self.assertEqual(f"{'''eric's'''}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f'{f"{0}"*3}', '000')
self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
single_quote_cases = ["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",]
double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
single_quote_cases + double_quote_cases)
def test_leading_trailing_spaces(self):
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
def test_not_equal(self):
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
self.assertEqual(f'{3!=4}', 'True')
self.assertEqual(f'{3!=4:}', 'True')
self.assertEqual(f'{3!=4!s}', 'True')
self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_equal_equal(self):
# Because an expression ending in = has special meaning,
# there's a special test for ==. Make sure it works.
self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
self.assertEqual(f'{"a"!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, r'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
r"f'\u007b}'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{"{"}', '{')
self.assertEqual(f'{"}"}', '}')
self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
# There's special logic in compile.c to test if the
# conditional for an if (and while) are constants. Exercise
# that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d["a"]}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_errors(self):
# see issue 26287
self.assertAllRaise(TypeError, 'unsupported',
[r"f'{(lambda: 0):x}'",
r"f'{(0,):x}'",
])
self.assertAllRaise(ValueError, 'Unknown format code',
[r"f'{1000:j}'",
r"f'{1000:j}'",
])
def test_filename_in_syntaxerror(self):
# see issue 38964
with temp_cwd() as cwd:
file_path = os.path.join(cwd, 't.py')
with open(file_path, 'w', encoding="utf-8") as f:
f.write('f"{a b}"') # This generates a SyntaxError
_, _, stderr = assert_python_failure(file_path,
PYTHONIOENCODING='ascii')
self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d["'"]}''', 'squote')
self.assertEqual(f"""{d['"']}""", 'dquote')
self.assertEqual(f'{d["foo"]}', 'bar')
self.assertEqual(f"{d['foo']}", 'bar')
def test_backslash_char(self):
# Check eval of a backslash followed by a control char.
# See bpo-30682: this used to raise an assert in pydebug mode.
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
x = 'A string'
self.assertEqual(f'{x=}', 'x=' + repr(x))
self.assertEqual(f'{x =}', 'x =' + repr(x))
self.assertEqual(f'{x=!s}', 'x=' + str(x))
self.assertEqual(f'{x=!r}', 'x=' + repr(x))
self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')
# Make sure nested fstrings still work.
self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')
# Make sure text before and after an expression with = works
# correctly.
pi = 'π'
self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
# Check multi-line expressions.
self.assertEqual(f'''{
3
=}''', '\n3\n=3')
# Since = is handled specially, make sure all existing uses of
# it still work.
self.assertEqual(f'{0==1}', 'False')
self.assertEqual(f'{0!=1}', 'True')
self.assertEqual(f'{0<=1}', 'True')
self.assertEqual(f'{0>=1}', 'False')
self.assertEqual(f'{(x:="5")}', '5')
self.assertEqual(x, '5')
self.assertEqual(f'{(x:=5)}', '5')
self.assertEqual(x, 5)
self.assertEqual(f'{"="}', '=')
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'. See test_walrus: you need to use parens.
self.assertEqual(f'{x:=10}', ' 20')
# Test named function parameters, to make sure '=' parsing works
# there.
def f(a):
nonlocal x
oldx = x
x = a
return oldx
x = 0
self.assertEqual(f'{f(a="3=")}', '0')
self.assertEqual(x, '3=')
self.assertEqual(f'{f(a=4)}', '3=')
self.assertEqual(x, 4)
# Make sure __format__ is being called.
class C:
def __format__(self, s):
return f'FORMAT-{s}'
def __repr__(self):
return 'REPR'
self.assertEqual(f'{C()=}', 'C()=REPR')
self.assertEqual(f'{C()=!r}', 'C()=REPR')
self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
self.assertRaises(SyntaxError, eval, "f'{C=]'")
# Make sure leading and following text works.
x = 'foo'
self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
# Make sure whitespace around the = works.
self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
# These next lines contains tabs. Backslash escapes don't
# work in f-strings.
# patchcheck doesn't like these tabs. So the only way to test
# this will be to dynamically created and exec the f-strings. But
# that's such a hassle I'll save it for another day. For now, convert
# the tabs to spaces just to shut up patchcheck.
#self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
#self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
def test_walrus(self):
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'.
self.assertEqual(f'{x:=10}', ' 20')
# This is an assignment expression, which requires parens.
self.assertEqual(f'{(x:=10)}', '10')
self.assertEqual(x, 10)
def test_invalid_syntax_error_message(self):
with self.assertRaisesRegex(SyntaxError, "f-string: invalid syntax"):
compile("f'{a $ b}'", "?", "exec")
def test_with_two_commas_in_format_specifier(self):
error_msg = re.escape("Cannot specify ',' with ','.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,,}'
def test_with_two_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify '_' with '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:__}'
def test_with_a_commas_and_an_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,_}'
def test_with_an_underscore_and_a_comma_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:_,}'
def test_syntax_error_for_starred_expressions(self):
error_msg = re.escape("cannot use starred expression here")
with self.assertRaisesRegex(SyntaxError, error_msg):
compile("f'{*a}'", "?", "exec")
error_msg = re.escape("cannot use double starred expression here")
with self.assertRaisesRegex(SyntaxError, error_msg):
compile("f'{**a}'", "?", "exec")
if __name__ == '__main__':
unittest.main()
avg_line_length: 39.944919 | max_line_length: 150 | alphanum_fraction: 0.51209
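The file above exercises CPython's f-string behaviour. As a compact, hand-checked illustration of a few behaviours the tests cover (Python 3.8+ assumed for the `=` specifier):

```python
x = 9
assert f'{3*x+15=}' == '3*x+15=42'   # '=' debug specifier (cf. test_debug_conversion)
assert f'{3!=4}' == 'True'           # '!=' is a comparison, not a conversion (cf. test_not_equal)
assert f'{"a"!r}' == "'a'"           # explicit !r conversion (cf. test_conversions)
assert f'{0==1}' == 'False'          # '==' still works despite '=' handling (cf. test_equal_equal)
```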
99480bb5ac64179bb0ba039a6d07cc47d73af0d8 | 3,090 | py | Python | application/migrations/0006_auto_20141218_1524.py | dhosterman/hebrew_order_david | c86a83c9e3e1e22dd0427c7c03525f2503fff574 | ["MIT"] | null | null | null | application/migrations/0006_auto_20141218_1524.py | dhosterman/hebrew_order_david | c86a83c9e3e1e22dd0427c7c03525f2503fff574 | ["MIT"] | null | null | null | application/migrations/0006_auto_20141218_1524.py | dhosterman/hebrew_order_david | c86a83c9e3e1e22dd0427c7c03525f2503fff574 | ["MIT"] | null | null | null |

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('application', '0005_remove_contactdetails_email'),
]
operations = [
migrations.AddField(
model_name='contactdetails',
name='postal_same_as_home',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='business_address',
field=models.CharField(max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='business_city',
field=models.CharField(max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='business_name',
field=models.CharField(max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='business_state',
field=models.CharField(max_length=2),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='business_zip',
field=models.PositiveIntegerField(),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='home_address',
field=models.CharField(max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='home_city',
field=models.CharField(max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='home_phone',
field=models.CharField(max_length=20),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='home_state',
field=models.CharField(max_length=2),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='home_zip',
field=models.PositiveIntegerField(),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='mobile_phone',
field=models.CharField(max_length=20),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='occupation',
field=models.CharField(max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='contactdetails',
name='work_phone',
field=models.CharField(max_length=20),
preserve_default=True,
),
]
avg_line_length: 31.212121 | max_line_length: 60 | alphanum_fraction: 0.565696
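Migrations like the one above are normally generated and applied through Django's management commands; a hedged sketch of that workflow, using the app label implied by the path above and assuming a configured Django project:

```python
from django.core.management import call_command

# Assumes a configured Django project containing the 'application' app shown above.
call_command('makemigrations', 'application')  # regenerate migration files from model changes
call_command('migrate', 'application')         # apply pending migrations such as 0006_auto_20141218_1524
```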
c859272fe3606142e41fef19e4bdb41c4de60aa5 | 2,559 | py | Python | datamart_isi/joiners/joiner_base.py | usc-isi-i2/datamart-user-end | b3111d67d3c9a7f69885e44b4645724f9a629c19 | ["MIT"] | 1 | 2020-09-19T14:51:14.000Z | 2020-09-19T14:51:14.000Z | datamart_isi/joiners/joiner_base.py | usc-isi-i2/datamart-user-end | b3111d67d3c9a7f69885e44b4645724f9a629c19 | ["MIT"] | null | null | null | datamart_isi/joiners/joiner_base.py | usc-isi-i2/datamart-user-end | b3111d67d3c9a7f69885e44b4645724f9a629c19 | ["MIT"] | 1 | 2020-11-06T22:52:30.000Z | 2020-11-06T22:52:30.000Z |

from abc import ABC, abstractmethod
import pandas as pd
from enum import Enum
import typing
from datamart_isi.joiners.join_result import JoinResult
class JoinerBase(ABC):
"""Abstract class of Joiner, should be extended for other joiners.
"""
@abstractmethod
def join(self, **kwargs) -> JoinResult:
"""Implement join method which returns a pandas Dataframe
"""
pass
class JoinerType(Enum):
DEFAULT = "default"
RLTK = "rltk"
EXACT_MATCH = "exact_match"
class DefaultJoiner(JoinerBase):
"""
Default join class.
Do exact join by using pandas merge join function
"""
@staticmethod
def join(left_df: pd.DataFrame,
right_df: pd.DataFrame,
left_columns: typing.List[typing.List[int]],
right_columns: typing.List[typing.List[int]],
**kwargs
) -> JoinResult:
left_columns = [x[0] for x in left_columns]
right_columns = [x[0] for x in right_columns]
if len(left_columns) != len(right_columns):
raise ValueError("Default join only perform on 1-1 mapping")
right_df = right_df.rename(columns={
right_df.columns[right_columns[idx]]: left_df.columns[left_columns[idx]] for idx in range(len(left_columns))
})
df = pd.merge(left=left_df,
right=right_df,
left_on=[left_df.columns[idx] for idx in left_columns],
right_on=[right_df.columns[idx] for idx in right_columns],
how='left')
return JoinResult(df=df)
class JoinerPrepare(object):
@staticmethod
def prepare_joiner(joiner: JoinerType = JoinerType.DEFAULT) -> typing.Optional[JoinerBase]:
"""Prepare joiner, lazy evaluation for joiners,
should be useful because joiner like RLTK may need many dependency packages.
Args:
joiner: string of joiner type
Returns:
joiner instance
"""
try:
JoinerType(joiner)
except ValueError:
return None
if JoinerType(joiner) == JoinerType.RLTK:
from datamart_isi.joiners.rltk_joiner import RLTKJoiner
return RLTKJoiner()
if JoinerType(joiner) == JoinerType.DEFAULT:
return DefaultJoiner()
if JoinerType(joiner) == JoinerType.EXACT_MATCH:
from datamart_isi.joiners.exact_match_joiner import ExactMatchJoiner
return ExactMatchJoiner()
return None
avg_line_length: 27.516129 | max_line_length: 120 | alphanum_fraction: 0.619383
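A small usage sketch of the module above: the input DataFrames are invented, and the `.df` attribute access on JoinResult is an assumption (only the constructor call `JoinResult(df=df)` is visible here; the class itself lives in datamart_isi.joiners.join_result).

```python
import pandas as pd

from datamart_isi.joiners.joiner_base import JoinerPrepare, JoinerType

# Hypothetical input tables; each inner list holds one column index, matching
# the 1-1 mapping that DefaultJoiner.join expects.
left_df = pd.DataFrame({'city': ['LA', 'NYC'], 'population': [4_000_000, 8_400_000]})
right_df = pd.DataFrame({'name': ['LA', 'NYC'], 'state': ['CA', 'NY']})

joiner = JoinerPrepare.prepare_joiner(JoinerType.DEFAULT)
result = joiner.join(left_df=left_df, right_df=right_df,
                     left_columns=[[0]], right_columns=[[0]])
# Assumption: JoinResult exposes the merged frame as `.df`.
joined = result.df
```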
e287a6f55ade8a93f03aa520d54d685012a69f55 | 69,970 | py | Python | django/db/models/sql/compiler.py | smjreynolds/django | b9db423d3c525697ad59b14c0dcaaccf2770d062 | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2020-02-07T07:16:02.000Z | 2020-02-07T07:16:02.000Z | django/db/models/sql/compiler.py | smjreynolds/django | b9db423d3c525697ad59b14c0dcaaccf2770d062 | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | django/db/models/sql/compiler.py | smjreynolds/django | b9db423d3c525697ad59b14c0dcaaccf2770d062 | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2020-10-26T09:40:10.000Z | 2020-10-26T09:40:10.000Z |

import collections
import re
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Value
from django.db.models.functions import Cast
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, NotSupportedError
from django.utils.hashable import make_hashable
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
# Multiline ordering SQL clause may appear from RawSQL.
self.ordering_parts = re.compile(r'^(.*)\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL)
self._meta_ordering = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
# Skip References to the select clause, as all expressions in the
# select clause are already part of the group by.
if not expr.contains_aggregate and not is_ref:
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or (
getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if (
hasattr(expr, 'target') and
expr.target.primary_key and
self.connection.features.allows_group_by_selected_pks_on_model(expr.target.model)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
else:
sql, params = col.select_format(self, sql, params)
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
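    # Illustrative sketch (not part of Django): the rough shape of the three
    # values returned by get_select() above, for a hypothetical queryset like
    # Book.objects.select_related('author').annotate(total=Count('chapter')):
    #
    #   select      -> [(Col(book.id), ('"book"."id"', []), None),
    #                   ...,
    #                   (Count('chapter'), ('COUNT("chapter"."id")', []), 'total')]
    #   klass_info  -> {'model': Book, 'select_fields': [0, 1, ...],
    #                   'related_klass_infos': [{'model': Author, ...}]}
    #   annotations -> {'total': <index of the COUNT column in select>}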
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif self.query.get_meta().ordering:
ordering = self.query.get_meta().ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
order_by.append((OrderBy(expr, descending=descending), False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query.extra or col not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator:
src = resolved.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
# Add column used in ORDER BY clause without an alias to
# the selected columns.
self.query.add_select_col(src)
resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values((
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
))
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = 'SELECT * FROM ({})'.format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif not features.supports_slicing_ordering_in_compound:
part_sql = '({})'.format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [', '.join(out_cols), 'FROM', *from_]
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
order_by = order_by or self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if self.query.explain_query:
result.insert(0, self.connection.ops.explain_query_prefix(
self.query.explain_format,
**self.query.explain_options
))
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limit_offset:
result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = 'col%d' % index
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), tuple(sub_params + params)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Return a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, return a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append((item, False))
continue
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_ordering() and get_distinct() must produce the same target columns on
        the same input, as the prefixes of get_ordering() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters that
        need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(obj, from_obj):
setattr(from_obj, name, obj)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': remote_setter,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
parts = [] if name == 'self' else name.split(LOOKUP_SEP)
klass_info = self.klass_info
for part in parts:
for related_klass_info in klass_info.get('related_klass_infos', []):
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
select_index = klass_info['select_fields'][0]
col = self.select[select_index][0]
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
for row in result[0]:
if not isinstance(row, str):
yield ' '.join(str(c) for c in row)
else:
yield row
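# Illustrative sketch (not part of Django): how a SQLCompiler is typically
# obtained and driven from outside this module. `queryset` is a hypothetical
# QuerySet; get_compiler(), as_sql() and execute_sql() are the entry points
# defined above.
def _example_inspect_queryset_sql(queryset):
    """Return the SQL string and parameters the queryset would execute."""
    compiler = queryset.query.get_compiler(using=queryset.db)
    sql, params = compiler.as_sql()
    return sql, params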
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError(
'Aggregate functions are not allowed in this query '
'(%s=%r).' % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
'Window expressions are not allowed in this query (%s=%r).'
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts)
result = ['%s %s' % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.returning_fields and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql(
ignore_conflicts=self.query.ignore_conflicts
)
if self.returning_fields and self.connection.features.can_return_columns_from_insert:
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, r_params = self.connection.ops.return_insert_columns(self.returning_fields)
if r_sql:
result.append(r_sql)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields and len(self.query.objs) != 1 and
not self.connection.features.can_return_rows_from_bulk_insert
)
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_rows(cursor)
if self.connection.features.can_return_columns_from_insert:
if (
len(self.returning_fields) > 1 and
not self.connection.features.can_return_multiple_columns_from_insert
):
raise NotSupportedError(
'Returning multiple columns from INSERT statements is '
'not supported on this database backend.'
)
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_columns(cursor)
return [self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)]
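# Illustrative sketch (not part of Django): the reshaping performed by
# SQLInsertCompiler.assemble_as_sql(), shown with plain (sql, params) pairs
# instead of real fields, so the [n_objs][n_fields] transformation into two
# parallel structures is visible in isolation.
def _demo_assemble_shapes():
    # One (placeholder, params) pair per field, one inner list per object.
    rows_of_fields_as_sql = [
        [('%s', [1]), ('%s', ['a'])],   # object 1
        [('%s', [2]), ('%s', ['b'])],   # object 2
    ]
    # Same steps as assemble_as_sql(): transpose each row, then split into
    # placeholder rows and (flattened) parameter rows.
    sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
    placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
    param_rows = [[p for ps in row for p in ps] for row in param_rows]
    assert list(placeholder_rows) == [('%s', '%s'), ('%s', '%s')]
    assert param_rows == [[1, 'a'], [2, 'b']]
    return placeholder_rows, param_rows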
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.base_table)]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
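# Illustrative sketch (not part of Django): how the write compilers above are
# normally reached through the public QuerySet API. `qs` is a hypothetical
# queryset and `archived`/`draft` are hypothetical model fields.
def _example_write_paths(qs):
    # QuerySet.delete() is compiled by SQLDeleteCompiler.
    deleted_count, _ = qs.filter(archived=True).delete()
    # QuerySet.update() is compiled by SQLUpdateCompiler.
    updated_count = qs.filter(draft=True).update(draft=False)
    return deleted_count, updated_count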
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError(
'Aggregate functions are not allowed in this query '
'(%s=%r).' % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
'Window expressions are not allowed in this query '
'(%s=%r).' % (field.name, val)
)
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
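# Illustrative sketch (not part of Django): cursor_iter() exercised with a
# stand-in cursor. `_FakeCursor` is hypothetical and only provides the
# fetchmany()/close() interface the helper relies on.
class _FakeCursor:
    def __init__(self, rows):
        self._rows = list(rows)
        self.closed = False
    def fetchmany(self, size):
        batch, self._rows = self._rows[:size], self._rows[size:]
        return batch
    def close(self):
        self.closed = True
def _demo_cursor_iter():
    cursor = _FakeCursor([(1, 'a', 'x'), (2, 'b', 'y'), (3, 'c', 'z')])
    batches = list(cursor_iter(cursor, sentinel=[], col_count=2, itersize=2))
    assert batches == [[(1, 'a'), (2, 'b')], [(3, 'c')]]
    assert cursor.closed
    return batches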
| 45.8519 | 118 | 0.572288 |
ddc7aa53f8b28903e8c9c0dc37dd8e09ee1d6061 | 14,378 | py | Python | test/test_geometry.py | robustrobotics/forgg | 97fc0896dfe4522156a42b8eddb641216149c5ff | ["MIT"] | 4 | 2016-12-06T19:23:09.000Z | 2022-02-14T07:06:24.000Z | test/test_geometry.py | robustrobotics/forgg | 97fc0896dfe4522156a42b8eddb641216149c5ff | ["MIT"] | null | null | null | test/test_geometry.py | robustrobotics/forgg | 97fc0896dfe4522156a42b8eddb641216149c5ff | ["MIT"] | null | null | null |
"""Tests for geometry module"""
import numpy
import Box2D.b2 as b2
import shapely.geometry
import shapely.ops
import metis
from metis.debug import graphical_debug, draw_polygon, draw_polygons
def example_shapes():
"""Generate example shapes for testing"""
obstacle_geometry = shapely.geometry.box(0, 0, 10, 10)
obstacle_geometry = obstacle_geometry.difference(obstacle_geometry.buffer(-.2))
obstacle_geometry = obstacle_geometry.union(
shapely.geometry.LineString([(5, 0), (5, 9)]).buffer(.1, cap_style=2))
return {
'square': shapely.geometry.Polygon(
[(1, 1), (-1, 1), (-1, -1), (1, -1)]),
'monotonic': shapely.geometry.Polygon(
[(1, 2), (-1, 2), (-3, 1), (-3, -1), (-1, -2), (1, -2), (0, -1),
(0, 1), (1, 2)]),
'star': shapely.geometry.Polygon(
[(0, 1), (-1, 0.5), (-2, 0.7), (-1.5, 0), (-2, -0.5), (0, -0.3),
(2, -0.5), (1.5, 0), (2, 0.7), (1, 0.5), (0, 1)]),
'holes': shapely.geometry.box(0, 0, 2, 2).difference(
shapely.geometry.box(.5, .5, 1.5, 1.5)),
'split_holes': shapely.geometry.box(0, 0, 2, 2).difference(
shapely.geometry.box(.3, .3, 1.7, 1.7)).union(
shapely.geometry.box(.9, 0, 1.1, 2)),
'almost_split_holes': shapely.geometry.box(0, 0, 2, 2).difference(
shapely.geometry.box(.1, .1, 1.9, 1.9)).union(
shapely.geometry.box(.9, 0, 1.1, 1.7)),
'pathological': shapely.geometry.Polygon(
[(0, 0), (5, 0), (5, 3), (2, 3), (1, 1), (1, 3), (0, 3)],
[[(1.9, 2), (2, 2.5), (2.5, 2), (2, 1.5)],
[(3, 2), (3.5, 2.5), (4, 2), (3.5, 1.5)]]),
'multipolygon': shapely.geometry.box(0, 0, 1, 1).union(
shapely.geometry.box(2, 0, 3, 1)),
'obstacle': obstacle_geometry
}
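# Illustrative sketch (not part of the test suite): the kind of invariants the
# fixtures above encode, checked with plain shapely and no metis involved.
def _demo_holes_fixture():
    holes = example_shapes()['holes']
    assert len(holes.interiors) == 1      # a single hole was punched out
    assert abs(holes.area - 3.0) < 1e-9   # 2x2 outer box minus 1x1 hole
    return holes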
def test_sparsify_point_set():
"""Check if sparsify returns a list of the specified length"""
old_array = numpy.random.random_sample((20, 3))
new_array = metis.geometry.sparsify_point_set(old_array, 10)
assert len(new_array) == 10
def test_triangulate():
"""Generate tests of triangulation routine"""
for name, shape in example_shapes().iteritems():
yield check_triangulate, name, shape
def check_triangulate(name, shape):
"""Check if reconstructing a triangulation yields the original shape"""
triangles = metis.geometry.triangulate(shape)
reconstructed = shapely.ops.cascaded_union(triangles)
difference = reconstructed.symmetric_difference(shape).area
assert difference < 1e-6, graphical_debug(
"triangulation failed for {} (error {})".format(name, difference),
lambda ax: draw_polygon(ax, shape, label='shape'),
lambda ax: draw_polygon(ax, reconstructed, label='reconstructed'))
def test_conversion():
"""Generate tests of shapely/box2d conversion functions"""
for name, shape in example_shapes().iteritems():
yield check_exact_conversion, name, shape
yield check_convex_conversion, name, shape
if name in {'square', 'multipolygon'}:
yield check_direct_conversion, name, shape
def check_exact_conversion(name, shape):
"""Check if exact conversion between shapely and box2d works as expected"""
b2_shapes = metis.geometry.box2d_triangles_from_shapely(shape)
reconstructed = shapely.ops.cascaded_union([
metis.geometry.shapely_from_box2d_shape(b2_shape)
for b2_shape in b2_shapes])
difference = reconstructed.symmetric_difference(shape).area
assert difference < 1e-5, graphical_debug(
"exact conversion failed for {} (error {})".format(name, difference),
lambda ax: draw_polygon(ax, shape, label='shape'),
lambda ax: draw_polygon(ax, reconstructed, label='reconstructed'))
def check_convex_conversion(name, shape):
"""Check conversion of convex hull between shapely and box2d"""
b2_shape = metis.geometry.convex_box2d_shape_from_shapely(shape)
reconstructed = metis.geometry.shapely_from_box2d_shape(b2_shape)
convex_hull = shape.convex_hull
difference = reconstructed.symmetric_difference(convex_hull).area
assert difference < 1e-6, graphical_debug(
"convex conversion failed for {} (error {})".format(name, difference),
lambda ax: draw_polygon(ax, shape, label='shape'),
lambda ax: draw_polygon(ax, reconstructed, label='reconstructed'))
def check_direct_conversion(name, shape):
"""Check direct conversion between shapely and box2d for convex shapes"""
b2_shapes = metis.geometry.box2d_shapes_from_shapely(shape)
reconstructed = shapely.ops.cascaded_union([
metis.geometry.shapely_from_box2d_shape(b2_shape)
for b2_shape in b2_shapes])
difference = reconstructed.symmetric_difference(shape).area
assert difference < 1e-6, graphical_debug(
"reconstruction failed for {} (error {})".format(name, difference),
lambda ax: draw_polygon(ax, shape, label='shape'),
lambda ax: draw_polygon(ax, reconstructed, label='reconstructed'))
def example_world():
"""Generate a simple example world for testing"""
convex_from_shapely = metis.geometry.convex_box2d_shape_from_shapely
obstacle_geometry = shapely.geometry.box(0, 0, 2, 2).difference(
shapely.geometry.box(.1, .1, 1.9, 1.9))
moveable_geometry = shapely.geometry.box(-.1, -.1, .1, .1)
world = b2.world()
obstacles = world.CreateStaticBody()
for triangle in metis.geometry.triangulate(obstacle_geometry):
obstacles.CreateFixture(shape=convex_from_shapely(triangle))
moveable = world.CreateDynamicBody()
moveable.CreateFixture(shape=convex_from_shapely(moveable_geometry))
return world, obstacles, moveable
def test_point_free():
"""Test if point_free performs as expected"""
world, _, _ = example_world()
assert metis.geometry.point_free(world, (.5, .5))
assert not metis.geometry.point_free(world, (.05, .05))
def test_segment_free():
"""Test if segment_free performs as expected"""
world, _, _ = example_world()
assert metis.geometry.segment_free(world, (.5, .5), (1, 1))
assert not metis.geometry.segment_free(world, (.5, .5), (2, 2))
assert not metis.geometry.segment_free(world, (0, 0), (1, 1))
def test_pose_free():
"""Generate the actual tests for pose_free"""
yield check_pose_free, "clear", True, (0.3, 0.3, 0)
yield check_pose_free, "rotated", True, (2.09, 2.09, numpy.pi/4)
yield check_pose_free, "collision", False, (0, 0, 0)
def test_path_free():
"""Generate the actual tests for path_free"""
yield check_path_free, "clear", True, (0.4, 0.4, 0), (.6, .6, 0.5)
yield check_path_free, "start_collision", False, (0, 0, 0), (.5, .5, 0)
yield check_path_free, "end_collision", False, (1, 1, 0), (2, 2, 0)
def check_pose_free(name, expected, pose):
"""Test if pose_free performs as expected"""
pose_free = metis.geometry.pose_free
world, obstacles, moveable = example_world()
shapes = {
'obstacles': metis.geometry.shapely_from_box2d_body(obstacles),
'moveable': metis.geometry.shapely_from_box2d_body(moveable, pose)}
actual = pose_free(world, moveable.fixtures, pose)
assert expected == actual, graphical_debug(
"case {}: pose_free(world, moveable.fixtures, {}) was {};"
"expected {}".format(name, pose, actual, expected),
lambda ax: draw_polygons(ax, shapes))
def check_path_free(name, expected, start, end):
"""Test if path_free performs as expected"""
pose_free = metis.geometry.pose_free
world, obstacles, moveable = example_world()
shapes = {
'obstacles': metis.geometry.shapely_from_box2d_body(obstacles),
'start': metis.geometry.shapely_from_box2d_body(moveable, start),
'end': metis.geometry.shapely_from_box2d_body(moveable, end)}
actual = pose_free(world, moveable.fixtures, start, end)
assert expected == actual, graphical_debug(
"case {}: pose_free(world, moveable.fixtures, {}, {}) was {};"
"expected {}".format(name, start, end, actual, expected),
lambda ax: draw_polygons(ax, shapes))
def test_r2geometry():
"""Test if point geometry performs as expected"""
world, obstacles, moveable = example_world()
geometry = metis.geometry.R2Geometry(world)
for _ in xrange(100):
start = geometry.sample_configuration()
end = geometry.sample_configuration()
assert len(start) == 2
assert geometry.configuration_is_free(start)
assert len(end) == 2
assert geometry.configuration_is_free(end)
shapes = {
'obstacles': metis.geometry.shapely_from_box2d_body(obstacles),
'moveable': metis.geometry.shapely_from_box2d_body(moveable),
'segment': shapely.geometry.LineString([start, end])
}
# path_is_free should be reflexive
forward_free = geometry.path_is_free(start, end)
backward_free = geometry.path_is_free(end, start)
assert forward_free == backward_free
# If both points are in the interior of the world, the path
# should be free
interior = lambda x, y: 0 < x < 2 and 0 < y < 2
if interior(*start) and interior(*end):
assert forward_free, graphical_debug(
"Expected path from {} to {} to be free".format(start, end),
lambda ax: draw_polygons(ax, shapes))
assert 3. < geometry.mu_free < 4.
def test_se2geometry():
"""Test if shape geometry performs as expected"""
world, obstacles, moveable = example_world()
bounds = metis.geometry.world_bounding_box(
world, ignore_fixtures=moveable.fixtures)
geometry = metis.geometry.SE2Geometry(
world, moveable.fixtures, bounds=bounds)
for _ in xrange(100):
start = geometry.sample_configuration()
end = geometry.sample_configuration()
assert len(start) == 3
assert geometry.configuration_is_free(start)
assert len(end) == 3
assert geometry.configuration_is_free(end)
shapes = {
'obstacles': metis.geometry.shapely_from_box2d_body(obstacles),
'start': metis.geometry.shapely_from_box2d_body(moveable, start),
'end': metis.geometry.shapely_from_box2d_body(moveable, end),
}
# path_is_free should be reflexive
forward_free = geometry.path_is_free(start, end)
backward_free = geometry.path_is_free(end, start)
assert forward_free == backward_free
# Because the free space is convex, all configurations should be
# in the interior of the world and hence all paths should be
# free
assert forward_free, graphical_debug(
"Expected path from {} to {} to be free".format(start, end),
lambda ax: draw_polygons(ax, shapes))
def test_multiobjectgeometry():
    """Generate tests for multi-object geometry"""
convex_from_shapely = metis.geometry.convex_box2d_shape_from_shapely
obstacle_geometry = shapely.geometry.box(0, 0, 2, 2).difference(
shapely.geometry.box(.1, .1, 1.9, 1.9))
moveable_geometry = shapely.geometry.box(-.1, -.1, .1, .1)
world = b2.world()
obstacles = world.CreateStaticBody(userData="obstacles")
for triangle in metis.geometry.triangulate(obstacle_geometry):
obstacles.CreateFixture(shape=convex_from_shapely(triangle))
box1 = world.CreateDynamicBody(userData="box1")
box1.CreateFixture(shape=convex_from_shapely(moveable_geometry))
box2 = world.CreateDynamicBody(userData="box2")
box2.CreateFixture(shape=convex_from_shapely(moveable_geometry))
geometry = metis.geometry.ManyShapeGeometry(
world, {"box1": box1, "box2": box2})
yield check_multiobject_configuration, "free", True, geometry, {
'box1': (.5, .5, 0), 'box2': (1.5, 1.5, 0)}
yield check_multiobject_configuration, "rotated", True, geometry, {
'box1': (.5, .5, numpy.pi/4), 'box2': (.65, .65, numpy.pi/4)}
yield check_multiobject_configuration, "obstacle_collision", False, \
geometry, {'box1': (0, 0, 0), 'box2': (1.5, 1.5, 0)}
yield check_multiobject_configuration, "moveable_collision", False, \
geometry, {'box1': (.5, .5, 0), 'box2': (.6, .6, 0)}
yield (check_multiobject_path, "free", True, geometry,
{'box1': (.5, .5, 0), 'box2': (1.5, 1.5, 0)},
{'box1': (.5, 1.5, 0), 'box2': (1.5, .5, 0)})
yield (check_multiobject_path, "collision", False, geometry,
{'box1': (.5, .5, 0), 'box2': (1.5, 1.5, 0)},
{'box1': (-1.5, 1.5, 0), 'box2': (1.5, .5, 0)})
yield (check_multiobject_path, "missed_collision", True, geometry,
{'box1': (.5, .5, 0), 'box2': (1.5, 1.5, 0)},
{'box1': (1.5, 1.5, 0), 'box2': (.5, .5, 0)})
yield (check_multiobject_path, "caught_collision", False, geometry,
{'box1': (.5, .5, 0), 'box2': (1.5, 1.5, 0)},
{'box1': (1.5, 1.5, 0)})
def check_multiobject_configuration(name, expected, geometry, configuration):
"""Check if multiobject geometry computes free configurations correctly"""
actual = geometry.configuration_is_free(configuration)
shapes = shapely.ops.cascaded_union([
metis.geometry.shapely_from_box2d_body(body)
for body in geometry.world.bodies])
assert expected == actual, graphical_debug(
"case {}: geometry.configuration_is_free({}) was {};"
"expected {}".format(name, configuration, actual, expected),
lambda ax: draw_polygon(ax, shapes, label='shapes'))
def check_multiobject_path(name, expected, geometry, parent, child):
"""Check if multiobject geometry computes free paths correctly"""
actual = geometry.path_is_free(parent, child)
shapes = shapely.ops.cascaded_union([
metis.geometry.shapely_from_box2d_body(body)
for body in geometry.world.bodies])
children = shapely.ops.cascaded_union([
metis.geometry.shapely_from_box2d_body(geometry.bodies[name], pose)
for name, pose in child.iteritems()])
assert expected == actual, graphical_debug(
"case {}: geometry.path_is_free({}, {}) was {};"
"expected {}".format(name, parent, child, actual, expected),
lambda ax: draw_polygon(ax, shapes, label='shapes'),
lambda ax: draw_polygon(ax, children, label='children'))
| 45.0721 | 83 | 0.65475 |
a456bf25893a3c8e5aeb35029dae77e5e32ceb42 | 14,205 | py | Python | tests/unit/test_utils.py | stepnem/tmt | 1f2ecdaa6a4b0429e5eea77143057ef1143e17a3 | ["MIT"] | null | null | null | tests/unit/test_utils.py | stepnem/tmt | 1f2ecdaa6a4b0429e5eea77143057ef1143e17a3 | ["MIT"] | null | null | null | tests/unit/test_utils.py | stepnem/tmt | 1f2ecdaa6a4b0429e5eea77143057ef1143e17a3 | ["MIT"] | null | null | null |
# coding: utf-8
import re
import tmt
import unittest
import pytest
from tmt.utils import (StructuredField, StructuredFieldError, public_git_url,
listify, duration_to_seconds)
def test_public_git_url():
""" Verify url conversion """
examples = [
{
'original': 'git@github.com:psss/tmt.git',
'expected': 'https://github.com/psss/tmt.git',
}, {
'original': 'ssh://psplicha@pkgs.devel.redhat.com/tests/bash',
'expected': 'git://pkgs.devel.redhat.com/tests/bash',
}, {
'original': 'git+ssh://psplicha@pkgs.devel.redhat.com/tests/bash',
'expected': 'git://pkgs.devel.redhat.com/tests/bash',
}, {
'original': 'ssh://pkgs.devel.redhat.com/tests/bash',
'expected': 'git://pkgs.devel.redhat.com/tests/bash',
}, {
'original': 'git+ssh://psss@pkgs.fedoraproject.org/tests/shell',
'expected': 'https://pkgs.fedoraproject.org/tests/shell',
}, {
'original': 'ssh://psss@pkgs.fedoraproject.org/tests/shell',
'expected': 'https://pkgs.fedoraproject.org/tests/shell',
}, {
'original': 'ssh://git@pagure.io/fedora-ci/metadata.git',
'expected': 'https://pagure.io/fedora-ci/metadata.git',
},
]
for example in examples:
assert public_git_url(example['original']) == example['expected']
def test_listify():
""" Check listify functionality """
assert listify(['abc']) == ['abc']
assert listify('abc') == ['abc']
assert listify('a b c') == ['a b c']
assert listify('a b c', split=True) == ['a', 'b', 'c']
assert listify(dict(a=1, b=2)) == dict(a=[1], b=[2])
assert listify(dict(a=1, b=2), keys=['a']) == dict(a=[1], b=2)
def test_config():
""" Config smoke test """
run = '/var/tmp/tmt/test'
config1 = tmt.utils.Config()
config1.last_run(run)
config2 = tmt.utils.Config()
assert config2.last_run() == run
def test_duration_to_seconds():
""" Check conversion from sleep time format to seconds """
assert duration_to_seconds(5) == 5
assert duration_to_seconds('5') == 5
assert duration_to_seconds('5s') == 5
assert duration_to_seconds('5m') == 300
assert duration_to_seconds('5h') == 18000
assert duration_to_seconds('5d') == 432000
with pytest.raises(tmt.utils.SpecificationError):
duration_to_seconds('bad')
class test_structured_field(unittest.TestCase):
""" Self Test """
def setUp(self):
self.header = "This is a header.\n"
self.footer = "This is a footer.\n"
self.start = (
"[structured-field-start]\n"
"This is StructuredField version 1. "
"Please, edit with care.\n")
self.end = "[structured-field-end]\n"
self.zeroend = "[end]\n"
self.one = "[one]\n1\n"
self.two = "[two]\n2\n"
self.three = "[three]\n3\n"
self.sections = "\n".join([self.one, self.two, self.three])
def test_everything(self):
""" Everything """
# Version 0
text0 = "\n".join([
self.header,
self.sections, self.zeroend,
self.footer])
inited0 = StructuredField(text0, version=0)
loaded0 = StructuredField()
loaded0.load(text0, version=0)
self.assertEqual(inited0.save(), text0)
self.assertEqual(loaded0.save(), text0)
# Version 1
text1 = "\n".join([
self.header,
self.start, self.sections, self.end,
self.footer])
inited1 = StructuredField(text1)
loaded1 = StructuredField()
loaded1.load(text1)
self.assertEqual(inited1.save(), text1)
self.assertEqual(loaded1.save(), text1)
# Common checks
for field in [inited0, loaded0, inited1, loaded1]:
self.assertEqual(field.header(), self.header)
self.assertEqual(field.footer(), self.footer)
self.assertEqual(field.sections(), ["one", "two", "three"])
self.assertEqual(field.get("one"), "1\n")
self.assertEqual(field.get("two"), "2\n")
self.assertEqual(field.get("three"), "3\n")
def test_no_header(self):
""" No header """
# Version 0
text0 = "\n".join([self.sections, self.zeroend, self.footer])
field0 = StructuredField(text0, version=0)
self.assertEqual(field0.save(), text0)
# Version 1
text1 = "\n".join(
[self.start, self.sections, self.end, self.footer])
field1 = StructuredField(text1)
self.assertEqual(field1.save(), text1)
# Common checks
for field in [field0, field1]:
self.assertEqual(field.header(), "")
self.assertEqual(field.footer(), self.footer)
self.assertEqual(field.get("one"), "1\n")
self.assertEqual(field.get("two"), "2\n")
self.assertEqual(field.get("three"), "3\n")
def test_no_footer(self):
""" No footer """
# Version 0
text0 = "\n".join([self.header, self.sections, self.zeroend])
field0 = StructuredField(text0, version=0)
self.assertEqual(field0.save(), text0)
# Version 1
text1 = "\n".join(
[self.header, self.start, self.sections, self.end])
field1 = StructuredField(text1)
self.assertEqual(field1.save(), text1)
# Common checks
for field in [field0, field1]:
self.assertEqual(field.header(), self.header)
self.assertEqual(field.footer(), "")
self.assertEqual(field.get("one"), "1\n")
self.assertEqual(field.get("two"), "2\n")
self.assertEqual(field.get("three"), "3\n")
def test_just_sections(self):
""" Just sections """
# Version 0
text0 = "\n".join([self.sections, self.zeroend])
field0 = StructuredField(text0, version=0)
self.assertEqual(field0.save(), text0)
# Version 1
text1 = "\n".join([self.start, self.sections, self.end])
field1 = StructuredField(text1)
self.assertEqual(field1.save(), text1)
# Common checks
for field in [field0, field1]:
self.assertEqual(field.header(), "")
self.assertEqual(field.footer(), "")
self.assertEqual(field.get("one"), "1\n")
self.assertEqual(field.get("two"), "2\n")
self.assertEqual(field.get("three"), "3\n")
def test_plain_text(self):
""" Plain text """
text = "Some plain text.\n"
field0 = StructuredField(text, version=0)
field1 = StructuredField(text)
for field in [field0, field1]:
self.assertEqual(field.header(), text)
self.assertEqual(field.footer(), "")
self.assertEqual(field.save(), text)
self.assertEqual(list(field), [])
self.assertEqual(bool(field), False)
def test_missing_end_tag(self):
""" Missing end tag """
text = "\n".join([self.header, self.sections, self.footer])
self.assertRaises(StructuredFieldError, StructuredField, text, 0)
def test_broken_field(self):
""" Broken field"""
text = "[structured-field-start]"
self.assertRaises(StructuredFieldError, StructuredField, text)
def test_set_content(self):
""" Set section content """
field0 = StructuredField(version=0)
field1 = StructuredField()
for field in [field0, field1]:
field.set("one", "1")
self.assertEqual(field.get("one"), "1\n")
field.set("two", "2")
self.assertEqual(field.get("two"), "2\n")
field.set("three", "3")
self.assertEqual(field.get("three"), "3\n")
self.assertEqual(field0.save(), "\n".join(
[self.sections, self.zeroend]))
self.assertEqual(field1.save(), "\n".join(
[self.start, self.sections, self.end]))
def test_remove_section(self):
""" Remove section """
field0 = StructuredField(
"\n".join([self.sections, self.zeroend]), version=0)
field1 = StructuredField(
"\n".join([self.start, self.sections, self.end]))
for field in [field0, field1]:
field.remove("one")
field.remove("two")
self.assertEqual(
field0.save(), "\n".join([self.three, self.zeroend]))
self.assertEqual(
field1.save(), "\n".join([self.start, self.three, self.end]))
def test_section_tag_escaping(self):
""" Section tag escaping """
field = StructuredField()
field.set("section", "\n[content]\n")
reloaded = StructuredField(field.save())
self.assertTrue("section" in reloaded)
self.assertTrue("content" not in reloaded)
self.assertEqual(reloaded.get("section"), "\n[content]\n")
def test_nesting(self):
""" Nesting """
# Prepare structure parent -> child -> grandchild
grandchild = StructuredField()
grandchild.set('name', "Grand Child\n")
child = StructuredField()
child.set('name', "Child Name\n")
child.set("child", grandchild.save())
parent = StructuredField()
parent.set("name", "Parent Name\n")
parent.set("child", child.save())
# Reload back and check the names
parent = StructuredField(parent.save())
child = StructuredField(parent.get("child"))
grandchild = StructuredField(child.get("child"))
self.assertEqual(parent.get("name"), "Parent Name\n")
self.assertEqual(child.get("name"), "Child Name\n")
self.assertEqual(grandchild.get("name"), "Grand Child\n")
def test_section_tags_in_header(self):
""" Section tags in header """
field = StructuredField("\n".join(
["[something]", self.start, self.one, self.end]))
self.assertTrue("something" not in field)
self.assertTrue("one" in field)
self.assertEqual(field.get("one"), "1\n")
def test_empty_section(self):
""" Empty section """
field = StructuredField()
field.set("section", "")
reloaded = StructuredField(field.save())
self.assertEqual(reloaded.get("section"), "")
def test_section_item_get(self):
""" Get section item """
text = "\n".join([self.start, "[section]\nx = 3\n", self.end])
field = StructuredField(text)
self.assertEqual(field.get("section", "x"), "3")
def test_section_item_set(self):
""" Set section item """
text = "\n".join([self.start, "[section]\nx = 3\n", self.end])
field = StructuredField()
field.set("section", "3", "x")
self.assertEqual(field.save(), text)
def test_section_item_remove(self):
""" Remove section item """
text = "\n".join(
[self.start, "[section]\nx = 3\ny = 7\n", self.end])
field = StructuredField(text)
field.remove("section", "x")
self.assertEqual(field.save(), "\n".join(
[self.start, "[section]\ny = 7\n", self.end]))
def test_unicode_header(self):
""" Unicode text in header """
text = u"Už abychom měli unicode jako defaultní kódování!"
field = StructuredField(text)
field.set("section", "content")
self.assertTrue(text in field.save())
def test_unicode_section_content(self):
""" Unicode in section content """
chars = u"ěščřžýáíéů"
text = "\n".join([self.start, "[section]", chars, self.end])
field = StructuredField(text)
self.assertEqual(field.get("section").strip(), chars)
def test_unicode_section_name(self):
""" Unicode in section name """
chars = u"ěščřžýáíéů"
text = "\n".join([self.start, u"[{0}]\nx".format(chars), self.end])
field = StructuredField(text)
self.assertEqual(field.get(chars).strip(), "x")
def test_header_footer_modify(self):
""" Modify header & footer """
original = StructuredField()
original.set("field", "field-content")
original.header("header-content\n")
original.footer("footer-content\n")
copy = StructuredField(original.save())
self.assertEqual(copy.header(), "header-content\n")
self.assertEqual(copy.footer(), "footer-content\n")
def test_trailing_whitespace(self):
""" Trailing whitespace """
original = StructuredField()
original.set("name", "value")
# Test with both space and tab appended after the section tag
for char in [" ", "\t"]:
spaced = re.sub(r"\]\n", "]{0}\n".format(char), original.save())
copy = StructuredField(spaced)
self.assertEqual(original.get("name"), copy.get("name"))
def test_carriage_returns(self):
""" Carriage returns """
text1 = "\n".join([self.start, self.sections, self.end])
text2 = re.sub(r"\n", "\r\n", text1)
field1 = StructuredField(text1)
field2 = StructuredField(text2)
self.assertEqual(field1.save(), field2.save())
def test_multiple_values(self):
""" Multiple values """
# Reading multiple values
section = "[section]\nkey=val1 # comment\nkey = val2\n key = val3 "
text = "\n".join([self.start, section, self.end])
field = StructuredField(text, multi=True)
self.assertEqual(
field.get("section", "key"), ["val1", "val2", "val3"])
# Writing multiple values
values = ['1', '2', '3']
field = StructuredField(multi=True)
field.set("section", values, "key")
self.assertEqual(field.get("section", "key"), values)
self.assertTrue("key = 1\nkey = 2\nkey = 3" in field.save())
# Remove multiple values
field.remove("section", "key")
self.assertTrue("key = 1\nkey = 2\nkey = 3" not in field.save())
self.assertRaises(
StructuredFieldError, field.get, "section", "key")
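# Illustrative usage sketch (not part of the original test module), showing the
# round-trip that the tests above exercise:
#
#   field = StructuredField()
#   field.set("notes", "some text\n")
#   saved = field.save()
#   reloaded = StructuredField(saved)
#   assert reloaded.get("notes") == "some text\n"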
---- host/function/list_clouds.py | zadjii/nebula | MIT | 1,267 bytes ----
from common.BaseCommand import BaseCommand
from common_util import Success
from host import Cloud
__author__ = 'zadjii'
################################################################################
class ListCloudsCommand(BaseCommand):
def add_parser(self, subparsers):
list_clouds = subparsers.add_parser('list-clouds', description='list all current clouds')
return list_clouds
def do_command_with_args(self, instance, args):
# type: (Instance, Namespace) -> ResultAndData
db = instance.get_db()
clouds = db.session.query(Cloud).all()
print 'There are ', len(clouds), 'clouds.'
print '[{}] {:5} {:16} {:24} {:16}'.format('id'
, 'my_id'
, 'name'
, 'root'
, 'address')
for cloud in clouds:
print '[{}] {:5} {}/{}\t\t{:24} {}:{}'\
.format(cloud.id, cloud.my_id_from_remote, cloud.uname(), cloud.cname()
, cloud.root_directory
, cloud.remote.remote_address, cloud.remote.remote_port)
return Success() | 39.59375 | 97 | 0.469613 |
7f5125d14410e5153f3374915806b332bb8d09df | 6,293 | py | Python | uuv_control/uuv_trajectory_control/src/uuv_trajectory_generator/path_generator/cs_interpolator.py | ignaciotb/uuv_simulator | b3c46ef713dbcca615627d9f537c88edc8ad8f95 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-11-10T08:59:19.000Z | 2021-11-10T08:59:19.000Z | uuv_control/uuv_trajectory_control/src/uuv_trajectory_generator/path_generator/cs_interpolator.py | ignaciotb/uuv_simulator | b3c46ef713dbcca615627d9f537c88edc8ad8f95 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-06-13T10:58:38.000Z | 2019-09-24T14:09:05.000Z | uuv_control/uuv_trajectory_control/src/uuv_trajectory_generator/path_generator/cs_interpolator.py | ignaciotb/uuv_simulator | b3c46ef713dbcca615627d9f537c88edc8ad8f95 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2017-10-24T15:02:53.000Z | 2019-04-02T15:14:58.000Z | # Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scipy.interpolate import splrep, splev
import numpy as np
from ..waypoint import Waypoint
from ..waypoint_set import WaypointSet
from ..trajectory_point import TrajectoryPoint
from tf.transformations import quaternion_multiply, quaternion_about_axis
from line_segment import LineSegment
from bezier_curve import BezierCurve
from path_generator import PathGenerator
class CSInterpolator(PathGenerator):
"""
Interpolator that will generate cubic Bezier curve segments for a set of waypoints.
"""
LABEL = 'cubic_interpolator'
def __init__(self):
super(CSInterpolator, self).__init__(self)
# Set of interpolation functions for each degree of freedom
# The heading function interpolates the given heading offset and its
# value is added to the heading computed from the trajectory
self._interp_fcns = dict(pos=None,
heading=None)
self._heading_spline = None
def init_interpolator(self):
if self._waypoints is None:
return False
self._interp_fcns['pos'] = list()
self._segment_to_wp_map = [0]
if self._waypoints.num_waypoints == 2:
self._interp_fcns['pos'].append(
LineSegment(self._waypoints.get_waypoint(0).pos,
self._waypoints.get_waypoint(1).pos))
self._segment_to_wp_map.append(1)
elif self._waypoints.num_waypoints > 2:
tangents = [np.zeros(3) for _ in range(self._waypoints.num_waypoints)]
lengths = [self._waypoints.get_waypoint(i + 1).dist(
self._waypoints.get_waypoint(i).pos) for i in range(self._waypoints.num_waypoints - 1)]
lengths = [0] + lengths
# Initial vector of parametric variables for the curve
u = np.cumsum(lengths) / np.sum(lengths)
delta_u = lambda k: u[k] - u[k - 1]
delta_q = lambda k: self._waypoints.get_waypoint(k).pos - self._waypoints.get_waypoint(k - 1).pos
lamb_k = lambda k: delta_q(k) / delta_u(k)
alpha_k = lambda k: delta_u(k) / (delta_u(k) + delta_u(k + 1))
for i in range(1, len(u) - 1):
tangents[i] = (1 - alpha_k(i)) * lamb_k(i) + alpha_k(i) * lamb_k(i + 1)
if i == 1:
tangents[0] = 2 * lamb_k(i) - tangents[1]
tangents[-1] = 2 * lamb_k(len(u) - 1) - tangents[-2]
# Normalize tangent vectors
for i in range(len(tangents)):
tangents[i] = tangents[i] / np.linalg.norm(tangents[i])
# Generate the cubic Bezier curve segments
for i in range(len(tangents) - 1):
self._interp_fcns['pos'].append(
BezierCurve(
[self._waypoints.get_waypoint(i).pos,
self._waypoints.get_waypoint(i + 1).pos], 3, tangents[i:i + 2]))
self._segment_to_wp_map.append(i + 1)
else:
return False
# Reparametrizing the curves
lengths = [seg.get_length() for seg in self._interp_fcns['pos']]
lengths = [0] + lengths
self._s = np.cumsum(lengths) / np.sum(lengths)
mean_vel = np.mean(
[self._waypoints.get_waypoint(k).max_forward_speed for k in range(self._waypoints.num_waypoints)])
if self._duration is None:
self._duration = np.sum(lengths) / mean_vel
if self._start_time is None:
self._start_time = 0.0
# Set a simple spline to interpolate heading offset, if existent
heading = [self._waypoints.get_waypoint(k).heading_offset for k in range(self._waypoints.num_waypoints)]
self._heading_spline = splrep(self._s, heading, k=3, per=False)
self._interp_fcns['heading'] = lambda x: splev(x, self._heading_spline)
return True
def get_samples(self, max_time, step=0.005):
if self._waypoints is None:
return None
s = np.arange(0, 1 + step, step)
t = s * self._duration + self._start_time
pnts = list()
for i in range(t.size):
pnt = TrajectoryPoint()
pnt.pos = self.generate_pos(s[i]).tolist()
pnt.t = t[i]
pnts.append(pnt)
return pnts
def generate_pos(self, s):
idx = self.get_segment_idx(s)
if idx == 0:
u_k = 0
pos = self._interp_fcns['pos'][idx].interpolate(u_k)
else:
u_k = (s - self._s[idx - 1]) / (self._s[idx] - self._s[idx - 1])
pos = self._interp_fcns['pos'][idx - 1].interpolate(u_k)
return pos
def generate_pnt(self, s, t=0.0):
pnt = TrajectoryPoint()
# Trajectory time stamp
pnt.t = t
# Set position vector
pnt.pos = self.generate_pos(s).tolist()
# Set rotation quaternion
pnt.rotq = self.generate_quat(s)
return pnt
def generate_quat(self, s):
s = max(0, s)
s = min(s, 1)
last_s = s - self._s_step
        if last_s < 0:
            last_s = 0
this_pos = self.generate_pos(s)
last_pos = self.generate_pos(last_s)
dx = this_pos[0] - last_pos[0]
dy = this_pos[1] - last_pos[1]
dz = this_pos[2] - last_pos[2]
rotq = self._compute_rot_quat(dx, dy, dz)
# Calculating the step for the heading offset
q_step = quaternion_about_axis(
self._interp_fcns['heading'](s),
np.array([0, 0, 1]))
# Adding the heading offset to the rotation quaternion
rotq = quaternion_multiply(rotq, q_step)
return rotq
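# Illustrative usage sketch (not part of the original module). Waypoints are
# normally loaded through the PathGenerator base class; that loading call is
# assumed and omitted here.
#
#   interp = CSInterpolator()
#   # ... load a WaypointSet into the generator ...
#   if interp.init_interpolator():
#       point = interp.generate_pnt(s=0.5, t=10.0)   # pose halfway along the path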
---- azure-mgmt-compute/azure/mgmt/compute/models/virtual_machine_image_resource.py | CharaD7/azure-sdk-for-python | MIT | 1,419 bytes ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualMachineImageResource(SubResource):
"""Virtual machine image resource information.
:param id: Resource Id
:type id: str
:param name: the name of the resource.
:type name: str
:param location: the location of the resource.
:type location: str
:param tags: the tags attached to the resource.
:type tags: dict
"""
_validation = {
'name': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, name, location, id=None, tags=None):
super(VirtualMachineImageResource, self).__init__(id=id)
self.name = name
self.location = location
self.tags = tags
---- dan_tools.py | alicechi2/LargeScaleCoverSongId | MIT | 7,844 bytes ----
"""
Translation of Dan Ellis' MATLAB tools
----
Author:
Thierry Bertin-Mahieux (tb2332@columbia.edu)
----
License:
This code is distributed under the GNU LESSER PUBLIC LICENSE
(LGPL, see www.gnu.org).
Copyright (c) 2012-2013 MARL@NYU.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of MARL, NYU nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import copy
import numpy as np
import scipy.fftpack
import scipy.signal
import hdf5_getters as GETTERS
def L1norm(F):
"""divide over the sum of the absolute values."""
return F/np.sum(np.abs(F))
def chromnorm(F, P=2.):
"""
N = chromnorm(F,P)
Normalize each column of a chroma ftrvec to unit norm
so cross-correlation will give cosine distance
S returns the per-column original norms, for reconstruction
P is optional exponent for the norm, default 2.
2006-07-14 dpwe@ee.columbia.edu
-> python: TBM, 2011-11-05, TESTED
"""
nchr, nbts = F.shape
if not np.isinf(P):
S = np.power(np.sum(np.power(F,P), axis=0),(1./P));
else:
S = F.max();
return F/S
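# Illustrative usage sketch (not part of the original module): give every
# column of a chroma matrix unit L2 norm before cross-correlation. The data
# below is made up.
#
#   F = np.random.rand(12, 100)     # 12 chroma bins x 100 beats
#   Fn = chromnorm(F, P=2.)
#   # each column of Fn now has unit 2-norm, so dot products act as cosine similarities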
def chrompwr(X, P=.5):
"""
Y = chrompwr(X,P) raise chroma columns to a power, preserving norm
2006-07-12 dpwe@ee.columbia.edu
-> python: TBM, 2011-11-05, TESTED
"""
nchr, nbts = X.shape
# norms of each input col
CMn = np.tile(np.sqrt(np.sum(X * X, axis=0)), (nchr, 1))
CMn[np.where(CMn==0)] = 1
# normalize each input col, raise to power
CMp = np.power(X/CMn, P)
# norms of each resulant column
CMpn = np.tile(np.sqrt(np.sum(CMp * CMp, axis=0)), (nchr, 1))
CMpn[np.where(CMpn==0)] = 1.
# rescale cols so norm of output cols match norms of input cols
return CMn * (CMp / CMpn)
def chromhpf(F, alpha=.9):
"""
G = chromhpf(F,alpha) high-pass filter a chroma matrix
F is a chroma matrix (12 rows x N time steps)
Apply a one-pole, one-zero high pass filter to each
row, with a pole at alpha (0..1, default 0.99)
2007-06-17 Dan Ellis dpwe@ee.columbia.edu
-> python: TBM, 2011-11-05, TESTED
"""
nr, nc = F.shape
G = np.zeros((nr, nc))
for i in range(nr):
G[i,:] = scipy.signal.lfilter([1,-1],[1,-alpha], F[i,:])
return G
def bttonnetz_to_fftmat(bttonnetz, win=75):
"""
Stack the flattened result of fft2 on patches 12 x win
Translation of my own matlab function
-> python: TBM, 2011-11-05, TESTED
"""
# 12 semitones
nchrm, nbeats = bttonnetz.shape
assert nchrm == 6, 'beat-aligned matrix transposed?'
if nbeats < win:
return None
# output
fftmat = np.zeros((nchrm * win, nbeats - win + 1))
for i in range(nbeats-win+1):
patch = fftshift(magnitude(fft2(bttonnetz[:,i:i+win])))
# 'F' to copy Matlab, otherwise 'C'
fftmat[:, i] = patch.flatten('F')
return fftmat
def btchroma_to_fftmat(btchroma, win=75):
"""
Stack the flattened result of fft2 on patches 12 x win
Translation of my own matlab function
-> python: TBM, 2011-11-05, TESTED
"""
# 12 semitones
nchrm, nbeats = btchroma.shape
assert nchrm == 12, 'beat-aligned matrix transposed?'
if nbeats < win:
return None
# output
fftmat = np.zeros((nchrm * win, nbeats - win + 1))
for i in range(nbeats-win+1):
patch = fftshift(magnitude(fft2(btchroma[:,i:i+win])))
# 'F' to copy Matlab, otherwise 'C'
fftmat[:, i] = patch.flatten('F')
return fftmat
def fft2(X):
"""
Same as fft2 in Matlab
-> python: TBM, 2011-11-05, TESTED
ok, useless, but needed to be sure it was doing the same thing
"""
return scipy.fftpack.fft2(X)
def fftshift(X):
"""
Same as fftshift in Matlab
-> python: TBM, 2011-11-05, TESTED
ok, useless, but needed to be sure it was doing the same thing
"""
return scipy.fftpack.fftshift(X)
def magnitude(X):
"""
Magnitude of a complex matrix
"""
r = np.real(X)
i = np.imag(X)
return np.sqrt(r * r + i * i);
def msd_beatchroma(filename):
"""
Get the same beatchroma as Dan
Our filename is the full path
TESTED
"""
nchr=12
# get segments, pitches, beats, loudness
h5 = GETTERS.open_h5_file_read(filename)
pitches = GETTERS.get_segments_pitches(h5).T
loudness = GETTERS.get_segments_loudness_start(h5)
Tsegs = GETTERS.get_segments_start(h5)
Tbeats = GETTERS.get_beats_start(h5)
h5.close()
# sanity checks
if len(Tsegs) < 3 or len(Tbeats) < 2:
return None
# get chroma and apply per segments loudness
Segs = pitches * np.tile(np.power(10., loudness/20.), (nchr, 1))
if Segs.shape[0] < 12 or Segs.shape[1] < 3:
return None
# properly figure time overlaps and weights
C = resample_mx(Segs, Tsegs, Tbeats)
# renormalize columns
n = C.max(axis=0)
return C * np.tile(1./n, (nchr, 1))
def resample_mx(X, incolpos, outcolpos):
"""
Y = resample_mx(X, incolpos, outcolpos)
X is taken as a set of columns, each starting at 'time'
colpos, and continuing until the start of the next column.
Y is a similar matrix, with time boundaries defined by
outcolpos. Each column of Y is a duration-weighted average of
the overlapping columns of X.
2010-04-14 Dan Ellis dpwe@ee.columbia.edu based on samplemx/beatavg
-> python: TBM, 2011-11-05, TESTED
"""
noutcols = len(outcolpos)
Y = np.zeros((X.shape[0], noutcols))
# assign 'end times' to final columns
if outcolpos.max() > incolpos.max():
incolpos = np.concatenate([incolpos,[outcolpos.max()]])
X = np.concatenate([X, X[:,-1].reshape(X.shape[0],1)], axis=1)
outcolpos = np.concatenate([outcolpos, [outcolpos[-1]]])
# durations (default weights) of input columns)
incoldurs = np.concatenate([np.diff(incolpos), [1]])
for c in range(noutcols):
firstincol = np.where(incolpos <= outcolpos[c])[0][-1]
firstincolnext = np.where(incolpos < outcolpos[c+1])[0][-1]
lastincol = max(firstincol,firstincolnext)
# default weights
wts = copy.deepcopy(incoldurs[firstincol:lastincol+1])
# now fix up by partial overlap at ends
if len(wts) > 1:
wts[0] = wts[0] - (outcolpos[c] - incolpos[firstincol])
wts[-1] = wts[-1] - (incolpos[lastincol+1] - outcolpos[c+1])
wts = wts * 1. /sum(wts)
Y[:,c] = np.dot(X[:,firstincol:lastincol+1], wts)
# done
return Y
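# Illustrative usage sketch (not part of the original module): duration-weighted
# resampling of segment-synchronous chroma onto beat times (made-up sizes).
#
#   X = np.random.rand(12, 40)              # chroma, one column per segment
#   Tsegs = np.linspace(0.0, 20.0, 40)      # segment start times in seconds
#   Tbeats = np.arange(0.0, 20.0, 0.5)      # beat start times in seconds
#   C = resample_mx(X, Tsegs, Tbeats)       # 12 x len(Tbeats) beat-aligned chroma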
---- sdk/python/feast/job.py | fossabot/feast | Apache-2.0 | 7,262 bytes ----
from typing import List
from urllib.parse import urlparse
import fastavro
import grpc
import pandas as pd
from feast.constants import CONFIG_TIMEOUT_KEY
from feast.constants import FEAST_DEFAULT_OPTIONS as defaults
from feast.serving.ServingService_pb2 import (
DATA_FORMAT_AVRO,
JOB_STATUS_DONE,
GetJobRequest,
)
from feast.serving.ServingService_pb2 import Job as JobProto
from feast.serving.ServingService_pb2_grpc import ServingServiceStub
from feast.staging.storage_client import get_staging_client
from feast.wait import wait_retry_backoff
from tensorflow_metadata.proto.v0 import statistics_pb2
# Maximum no of seconds to wait until the retrieval jobs status is DONE in Feast
# Currently set to the maximum query execution time limit in BigQuery
DEFAULT_TIMEOUT_SEC: int = 21600
# Maximum no of seconds to wait before reloading the job status in Feast
MAX_WAIT_INTERVAL_SEC: int = 60
class RetrievalJob:
"""
A class representing a job for feature retrieval in Feast.
"""
def __init__(
self,
job_proto: JobProto,
serving_stub: ServingServiceStub,
auth_metadata_plugin: grpc.AuthMetadataPlugin = None,
):
"""
Args:
job_proto: Job proto object (wrapped by this job object)
serving_stub: Stub for Feast serving service
auth_metadata_plugin: plugin to fetch auth metadata
"""
self.job_proto = job_proto
self.serving_stub = serving_stub
self.auth_metadata = auth_metadata_plugin
@property
def id(self):
"""
Getter for the Job Id
"""
return self.job_proto.id
@property
def status(self):
"""
Getter for the Job status from Feast Core
"""
return self.job_proto.status
def reload(self):
"""
Reload the latest job status
Returns: None
"""
self.job_proto = self.serving_stub.GetJob(
GetJobRequest(job=self.job_proto),
metadata=self.auth_metadata.get_signed_meta() if self.auth_metadata else (),
).job
def get_avro_files(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):
"""
        Wait until the job is done, then get the file URIs of the Avro result
        files on Google Cloud Storage.
Args:
timeout_sec (int):
Max no of seconds to wait until job is done. If "timeout_sec"
is exceeded, an exception will be raised.
Returns:
str: Google Cloud Storage file uris of the returned Avro files.
"""
def try_retrieve():
self.reload()
return None, self.status == JOB_STATUS_DONE
wait_retry_backoff(
retry_fn=try_retrieve,
timeout_secs=timeout_sec,
timeout_msg="Timeout exceeded while waiting for result. Please retry "
"this method or use a longer timeout value.",
)
if self.job_proto.error:
raise Exception(self.job_proto.error)
if self.job_proto.data_format != DATA_FORMAT_AVRO:
raise Exception(
"Feast only supports Avro data format for now. Please check "
"your Feast Serving deployment."
)
return [urlparse(uri) for uri in self.job_proto.file_uris]
def result(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):
"""
        Wait until the job is done, then get an iterable of result rows. Each
        row is an Avro record (Feast 0.3 only supports Avro results).
Args:
timeout_sec (int):
Max no of seconds to wait until job is done. If "timeout_sec"
is exceeded, an exception will be raised.
Returns:
Iterable of Avro rows.
"""
uris = self.get_avro_files(timeout_sec)
for file_uri in uris:
file_obj = get_staging_client(file_uri.scheme).download_file(file_uri)
file_obj.seek(0)
avro_reader = fastavro.reader(file_obj)
for record in avro_reader:
yield record
def to_dataframe(
self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])
) -> pd.DataFrame:
"""
        Wait until the job is done, then collect all result rows into a single
        Pandas DataFrame.
        Args:
timeout_sec (int):
Max no of seconds to wait until job is done. If "timeout_sec"
is exceeded, an exception will be raised.
Returns:
pd.DataFrame:
Pandas DataFrame of the feature values.
"""
records = [r for r in self.result(timeout_sec=timeout_sec)]
return pd.DataFrame.from_records(records)
def to_chunked_dataframe(
self,
max_chunk_size: int = -1,
timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY]),
) -> pd.DataFrame:
"""
        Wait until the job is done, then split the result rows into chunked
        DataFrames of at most max_chunk_size rows, yielding them to the caller.
Args:
max_chunk_size (int):
Maximum number of rows that the DataFrame should contain.
timeout_sec (int):
Max no of seconds to wait until job is done. If "timeout_sec"
is exceeded, an exception will be raised.
Returns:
            Iterator[pd.DataFrame]:
                Chunks of the feature values as Pandas DataFrames.
"""
# Object is Avro row type object, refer to self.result function for this type
records: List[dict] = []
# Max chunk size defined by user
for result in self.result(timeout_sec=timeout_sec):
            records.append(result)
if len(records) == max_chunk_size:
df = pd.DataFrame.from_records(records)
records.clear() # Empty records array
yield df
        # Handle the final chunk that is smaller than max_chunk_size
        if records:
            yield pd.DataFrame.from_records(records)
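    # Illustrative usage sketch (not part of the original class): consuming the
    # chunked result of a retrieval job. The client call shown is assumed.
    #
    #   job = client.get_historical_features(...)    # returns a RetrievalJob
    #   for chunk in job.to_chunked_dataframe(max_chunk_size=10000):
    #       process(chunk)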
def __iter__(self):
return iter(self.result())
def statistics(
self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])
) -> statistics_pb2.DatasetFeatureStatisticsList:
"""
Get statistics computed over the retrieved data set. Statistics will only be computed for
columns that are part of Feast, and not the columns that were provided.
Args:
timeout_sec (int):
Max no of seconds to wait until job is done. If "timeout_sec"
is exceeded, an exception will be raised.
Returns:
DatasetFeatureStatisticsList containing statistics of Feast features over the retrieved dataset.
"""
self.get_avro_files(timeout_sec) # wait for job completion
if self.job_proto.error:
raise Exception(self.job_proto.error)
return self.job_proto.dataset_feature_statistics_list
---- adhoc/export_users.py | lund5000/chirpradio | ECL-2.0, Apache-2.0 | 1,209 bytes ----
from sqlite3 import dbapi2 as sqlite
import csv
import optparse
def main():
p = optparse.OptionParser(usage='%prog [options] path/to/chirp.db')
(options, args) = p.parse_args()
if len(args) != 1:
p.error('incorrect args')
chirp_db = args[0]
connection = sqlite.connect(chirp_db)
cursor = connection.cursor()
cursor.execute("select * from auth_user where is_active=1")
users = cursor.fetchall()
users_file = open('users.csv', 'w')
writer = csv.writer(users_file)
fields = [d[0] for d in cursor.description]
for user in users:
data = dict(zip(fields, user))
if not data['email']:
raise ValueError('active DJs cannot have empty emails: %s' % data)
writer.writerow([data['email'], data['first_name'], data['last_name'],
# empty password so that reset is forced after import:
'', data['is_active'], data['is_superuser'],
data['date_joined'].split('.')[0], 'dj'])
cursor.close()
connection.close()
print "Wrote %s" % users_file.name
users_file.close()
if __name__ == '__main__':
main()
---- nltkma/test/childes_fixt.py | aydtmiri/nltk-ma | Apache-2.0 | 362 bytes ----
def setup_module():
import pytest
import nltkma.data
try:
nltkma.data.find("corpora/childes/data-xml/Eng-USA-MOR/")
except LookupError as e:
pytest.skip(
"The CHILDES corpus is not found. "
"It should be manually downloaded and saved/unpacked "
"to [NLTK_Data_Dir]/corpora/childes/"
)
---- Experiments/STMeta/Runner_singleGraph.py | nj-czy/UCTB | MIT | 3,780 bytes ----
import os
#############################################
# BenchMark Bike
#############################################
########### NYC ###########
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_nyc.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_nyc.data.yml '
# '-p graph:Correlation,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_nyc.data.yml '
# '-p graph:Interaction,MergeIndex:12')
# ########### Chicago ###########
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_chicago.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_chicago.data.yml '
# '-p graph:Correlation,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_chicago.data.yml '
# '-p graph:Interaction,MergeIndex:12')
# ############# DC #############
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_dc.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_dc.data.yml '
# '-p graph:Correlation,MergeIndex:12')
os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d bike_dc.data.yml '
'-p graph:Interaction,MergeIndex:12')
###############################################
# BenchMark DiDi
###############################################
############# Xian #############
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d didi_xian.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d didi_xian.data.yml '
# '-p graph:Correlation,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d didi_xian.data.yml '
# '-p graph:Interaction,MergeIndex:12')
# # ############# Chengdu #############
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d didi_chengdu.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d didi_chengdu.data.yml '
# '-p graph:Correlation,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d didi_chengdu.data.yml '
# '-p graph:Interaction,MergeIndex:12')
###############################################
# BenchMark Metro
###############################################
############# Chongqing #############
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d metro_chongqing.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d metro_chongqing.data.yml '
# '-p graph:Correlation,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d metro_chongqing.data.yml '
# '-p graph:Line,MergeIndex:12')
# # ############# Shanghai #############
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d metro_shanghai.data.yml '
# '-p graph:Distance,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d metro_shanghai.data.yml '
# '-p graph:Correlation,MergeIndex:12')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml -d metro_shanghai.data.yml '
# '-p graph:Line,MergeIndex:12')
# ###############################################
# # BenchMark ChargeStation
# ###############################################
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml'
# ' -d chargestation_beijing.data.yml -p graph:Distance,MergeIndex:2')
# os.system('python STMeta_Obj.py -m STMeta_v3.model.yml'
# ' -d chargestation_beijing.data.yml -p graph:Correlation,MergeIndex:2')
---- opfunu/cec/cec2005/F24.py | ElliottP-13/opfunu | MIT | 6,258 bytes ----
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 20:31, 20/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from opfunu.cec.cec2005.root import Root
from numpy import sum, dot, sin, sqrt, abs, array, cos, pi, exp, e, ones, max
from numpy.random import normal
class Model(Root):
def __init__(self, f_name="Rotated Hybrid Composition Function 4", f_shift_data_file="data_hybrid_func4",
f_ext='.txt', f_bias=260, f_matrix=None):
Root.__init__(self, f_name, f_shift_data_file, f_ext, f_bias)
self.f_matrix = f_matrix
def __f1__(self, solution=None, a=0.5, b=3, k_max=20):
result = 0.0
for i in range(len(solution)):
            result += sum([a ** k * cos(2 * pi * b ** k * (solution[i] + 0.5)) for k in range(0, k_max)])
return result - len(solution) * sum([a ** k * cos(2 * pi * b ** k * 0.5) for k in range(0, k_max)])
def __f2__(self, solution=None):
def __xy__(x, y):
return 0.5 + (sin(sqrt(x ** 2 + y ** 2)) ** 2 - 0.5) / (1 + 0.001 * (x ** 2 + y ** 2)) ** 2
result = __xy__(solution[-1], solution[0])
for i in range(0, len(solution) - 1):
result += __xy__(solution[i], solution[i + 1])
return result
def __f3__(self, solution=None):
def __f8__(x):
return x ** 2 / 4000 - cos(x / sqrt(x)) + 1
def __f2__(x, y):
return 100 * (x ** 2 - y) ** 2 + (x - 1) ** 2
result = __f8__(__f2__(solution[-1], solution[0]))
for i in range(0, len(solution) - 1):
result += __f8__(__f2__(solution[i], solution[i + 1]))
return result
def __f4__(self, solution=None):
return -20 * exp(-0.2 * sqrt(sum(solution ** 2) / len(solution))) - exp(sum(cos(2 * pi * solution)) / len(solution)) + 20 + e
def __f5__(self, solution=None):
return sum(solution ** 2 - 10 * cos(2 * pi * solution) + 10)
def __f6__(self, solution=None):
result = sum(solution ** 2) / 4000
temp = 1.0
for i in range(len(solution)):
temp *= cos(solution[i] / sqrt(i + 1))
return result - temp + 1
def __f7__(self, solution=None):
def __fxy__(x, y):
return 0.5 + (sin(sqrt(x**2 + y**2))**2 - 0.5) / (1 + 0.001*(x**2 + y**2))**2
for i in range(0, len(solution)):
if abs(solution[i]) >= 0.5:
solution[i] = round(2 * solution[i]) / 2
result = __fxy__(solution[-1], solution[0])
for i in range(0, len(solution) - 1):
result += __fxy__(solution[i], solution[i+1])
return result
def __f8__(self, solution=None):
for i in range(0, len(solution)):
if abs(solution[i]) >= 0.5:
solution[i] = round(2 * solution[i]) / 2
return sum(solution**2 - 10*cos(2*pi*solution) + 10)
def __f9__(self, solution=None):
result = 0.0
for i in range(0, len(solution)):
result += (10**6)**(i / (len(solution)-1)) * solution[i]**2
return result
def __f10__(self, solution=None):
return sum(solution**2)*(1 + 0.1*abs(normal(0, 1)))
def __fi__(self, solution=None, idx=None):
if idx == 0:
return self.__f1__(solution)
elif idx == 1:
return self.__f2__(solution)
elif idx == 2:
return self.__f3__(solution)
elif idx == 3:
return self.__f4__(solution)
elif idx == 4:
return self.__f5__(solution)
elif idx == 5:
return self.__f6__(solution)
elif idx == 6:
return self.__f7__(solution)
elif idx == 7:
return self.__f8__(solution)
elif idx == 8:
return self.__f9__(solution)
else:
return self.__f10__(solution)
def _main__(self, solution=None):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2005 not support for problem size > 100")
return 1
if problem_size == 10 or problem_size == 30 or problem_size == 50:
self.f_matrix = "hybrid_func4_M_D" + str(problem_size)
else:
print("CEC 2005 F24 function only support problem size 10, 30, 50")
return 1
num_funcs = 10
C = 2000
xichma = 2 * ones(problem_size)
lamda = array([10.0, 5.0 / 20.0, 1.0, 5.0 / 32.0, 1.0, 5.0 / 100.0, 5.0 / 50.0, 1.0, 5.0 / 100.0, 5.0 / 100.0])
bias = array([0, 100, 200, 300, 400, 500, 600, 700, 800, 900])
y = 5 * ones(problem_size)
shift_data = self.load_matrix_data(self.f_shift_data_file)
shift_data = shift_data[:, :problem_size]
matrix = self.load_matrix_data(self.f_matrix)
weights = ones(num_funcs)
fits = ones(num_funcs)
for i in range(0, num_funcs):
w_i = exp(-sum((solution - shift_data[i]) ** 2) / (2 * problem_size * xichma[i] ** 2))
z = dot((solution - shift_data[i]) / lamda[i], matrix[i * problem_size:(i + 1) * problem_size, :])
fit_i = self.__fi__(z, i)
f_maxi = self.__fi__(dot((y / lamda[i]), matrix[i * problem_size:(i + 1) * problem_size, :]), i)
fit_i = C * fit_i / f_maxi
weights[i] = w_i
fits[i] = fit_i
sw = sum(weights)
maxw = max(weights)
for i in range(0, num_funcs):
if weights[i] != maxw:
weights[i] = weights[i] * (1 - maxw ** 10)
weights[i] = weights[i] / sw
fx = sum(dot(weights, (fits + bias)))
return fx + self.f_bias
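# Illustrative usage sketch (not part of the original module): evaluating the
# composition function on a random 30-dimensional point. The shift and rotation
# data files must be available on the package data path.
#
#   import numpy as np
#   model = Model()
#   x = np.random.uniform(-5, 5, 30)
#   value = model._main__(x)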
---- qiskit/circuit/library/standard_gates/swap.py | Elliot-Coupe/qiskit-terra | Apache-2.0 | 7,493 bytes ----
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Swap gate."""
import numpy
from qiskit.circuit.controlledgate import ControlledGate
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class SwapGate(Gate):
r"""The SWAP gate.
This is a symmetric and Clifford gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ─X─
│
q_1: ─X─
**Matrix Representation:**
.. math::
SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
The gate is equivalent to a state swap and is a classical logic gate.
.. math::
|a, b\rangle \rightarrow |b, a\rangle
"""
def __init__(self, label=None):
"""Create new SWAP gate."""
super().__init__("swap", 2, [], label=label)
def _define(self):
"""
gate swap a,b { cx a,b; cx b,a; cx a,b; }
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .x import CXGate
q = QuantumRegister(2, "q")
qc = QuantumCircuit(q, name=self.name)
rules = [
(CXGate(), [q[0], q[1]], []),
(CXGate(), [q[1], q[0]], []),
(CXGate(), [q[0], q[1]], []),
]
for instr, qargs, cargs in rules:
qc._append(instr, qargs, cargs)
self.definition = qc
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
"""Return a (multi-)controlled-SWAP gate.
One control returns a CSWAP (Fredkin) gate.
Args:
num_ctrl_qubits (int): number of control qubits.
label (str or None): An optional label for the gate [Default: None]
ctrl_state (int or str or None): control state expressed as integer,
string (e.g. '110'), or None. If None, use all 1s.
Returns:
ControlledGate: controlled version of this gate.
"""
if num_ctrl_qubits == 1:
gate = CSwapGate(label=label, ctrl_state=ctrl_state)
gate.base_gate.label = self.label
return gate
return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)
def inverse(self):
"""Return inverse Swap gate (itself)."""
return SwapGate() # self-inverse
def __array__(self, dtype=None):
"""Return a numpy.array for the SWAP gate."""
return numpy.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=dtype)
class CSwapGate(ControlledGate):
r"""Controlled-SWAP gate, also known as the Fredkin gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ─X─
│
q_1: ─X─
│
q_2: ─■─
**Matrix representation:**
.. math::
CSWAP\ q_0, q_1, q_2 =
|0 \rangle \langle 0| \otimes I \otimes I +
|1 \rangle \langle 1| \otimes SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
.. note::
In Qiskit's convention, higher qubit indices are more significant
(little endian convention). In many textbooks, controlled gates are
presented with the assumption of more significant qubits as control,
which in our case would be q_2. Thus a textbook matrix for this
gate will be:
.. parsed-literal::
q_0: ─■─
│
q_1: ─X─
│
q_2: ─X─
.. math::
CSWAP\ q_2, q_1, q_0 =
|0 \rangle \langle 0| \otimes I \otimes I +
|1 \rangle \langle 1| \otimes SWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
\end{pmatrix}
In the computational basis, this gate swaps the states of
the two target qubits if the control qubit is in the
:math:`|1\rangle` state.
.. math::
|0, b, c\rangle \rightarrow |0, b, c\rangle
|1, b, c\rangle \rightarrow |1, c, b\rangle
"""
# Define class constants. This saves future allocation time.
_matrix1 = numpy.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
_matrix0 = numpy.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
def __init__(self, label=None, ctrl_state=None):
"""Create new CSWAP gate."""
super().__init__(
"cswap",
3,
[],
num_ctrl_qubits=1,
label=label,
ctrl_state=ctrl_state,
base_gate=SwapGate(),
)
def _define(self):
"""
gate cswap a,b,c
{ cx c,b;
ccx a,b,c;
cx c,b;
}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .x import CXGate, CCXGate
q = QuantumRegister(3, "q")
qc = QuantumCircuit(q, name=self.name)
rules = [
(CXGate(), [q[2], q[1]], []),
(CCXGate(), [q[0], q[1], q[2]], []),
(CXGate(), [q[2], q[1]], []),
]
for instr, qargs, cargs in rules:
qc._append(instr, qargs, cargs)
self.definition = qc
def inverse(self):
"""Return inverse CSwap gate (itself)."""
return CSwapGate(ctrl_state=self.ctrl_state) # self-inverse
def __array__(self, dtype=None):
"""Return a numpy.array for the Fredkin (CSWAP) gate."""
mat = self._matrix1 if self.ctrl_state else self._matrix0
if dtype:
return numpy.asarray(mat, dtype=dtype)
return mat
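# Illustrative usage sketch (not part of the original module): placing both
# gates on a small circuit.
#
#   from qiskit import QuantumCircuit
#   qc = QuantumCircuit(3)
#   qc.append(SwapGate(), [0, 1])
#   qc.append(CSwapGate(), [0, 1, 2])   # qubit 0 is the control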
---- setup.py | Guilouf/AgenDjang | Apache-2.0 | 810 bytes ----
from setuptools import setup
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name='django-agendjang',
version='1.0.3',
packages=['agendjang', 'agendjang.migrations', 'agendjang.static', 'agendjang.templates'],
package_dir={'': 'hostproject'},
url='https://github.com/Guilouf/AgenDjang',
license='Apache 2.0',
author='Guilouf',
description='Django app for task scheduling',
install_requires=['djangorestframework',
'markdown'],
long_description=long_description, # will be included in METADATA file in dist-info folder
long_description_content_type='text/markdown',
include_package_data=True # for non python files, e.g html templates or static css
)
---- tests/kafkatest/tests/core/upgrade_test.py | sknop/kafka | Apache-2.0 | 12,501 bytes ----
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.kafka import config_property
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
from kafkatest.utils.remote_account import java_version
from kafkatest.version import LATEST_0_8_2, LATEST_0_9, LATEST_0_10, LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1, LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, LATEST_2_6, LATEST_2_7, V_0_9_0_0, V_0_11_0_0, DEV_BRANCH, KafkaVersion
from kafkatest.services.kafka.util import new_jdk_not_supported
class TestUpgrade(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(TestUpgrade, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.partitions = 3
self.replication_factor = 3
# Producer and consumer
self.producer_throughput = 1000
self.num_producers = 1
self.num_consumers = 1
def wait_until_rejoin(self):
for partition in range(0, self.partitions):
wait_until(lambda: len(self.kafka.isr_idx_list(self.topic, partition)) == self.replication_factor, timeout_sec=60,
backoff_sec=1, err_msg="Replicas did not rejoin the ISR in a reasonable amount of time")
def perform_upgrade(self, from_kafka_version, to_message_format_version=None):
self.logger.info("Upgrade ZooKeeper from %s to %s" % (str(self.zk.nodes[0].version), str(DEV_BRANCH)))
self.zk.set_version(DEV_BRANCH)
self.zk.restart_cluster()
# Confirm we have a successful ZooKeeper upgrade by describing the topic.
# Not trying to detect a problem here leads to failure in the ensuing Kafka roll, which would be a less
# intuitive failure than seeing a problem here, so detect ZooKeeper upgrade problems before involving Kafka.
self.zk.describe(self.topic)
self.logger.info("First pass bounce - rolling upgrade")
for node in self.kafka.nodes:
self.kafka.stop_node(node)
node.version = DEV_BRANCH
node.config[config_property.INTER_BROKER_PROTOCOL_VERSION] = from_kafka_version
node.config[config_property.MESSAGE_FORMAT_VERSION] = from_kafka_version
self.kafka.start_node(node)
self.wait_until_rejoin()
self.logger.info("Second pass bounce - remove inter.broker.protocol.version config")
for node in self.kafka.nodes:
self.kafka.stop_node(node)
del node.config[config_property.INTER_BROKER_PROTOCOL_VERSION]
if to_message_format_version is None:
del node.config[config_property.MESSAGE_FORMAT_VERSION]
else:
node.config[config_property.MESSAGE_FORMAT_VERSION] = to_message_format_version
self.kafka.start_node(node)
self.wait_until_rejoin()
@cluster(num_nodes=6)
@parametrize(from_kafka_version=str(LATEST_2_7), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_7), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_2_7), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_2_6), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_6), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_2_6), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_2_5), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_5), to_message_format_version=None, compression_types=["zstd"])
@parametrize(from_kafka_version=str(LATEST_2_4), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_4), to_message_format_version=None, compression_types=["zstd"])
@parametrize(from_kafka_version=str(LATEST_2_3), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_3), to_message_format_version=None, compression_types=["zstd"])
@parametrize(from_kafka_version=str(LATEST_2_2), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_2), to_message_format_version=None, compression_types=["zstd"])
@parametrize(from_kafka_version=str(LATEST_2_1), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_1), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_2_0), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_2_0), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_1_1), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_1_1), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_1_0), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_1_0), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_0_11_0), to_message_format_version=None, compression_types=["gzip"])
@parametrize(from_kafka_version=str(LATEST_0_11_0), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_0_10_2), to_message_format_version=str(LATEST_0_9), compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_0_10_2), to_message_format_version=str(LATEST_0_10), compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_0_10_2), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_0_10_2), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_0_10_1), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_0_10_1), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_0_10_0), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_0_10_0), to_message_format_version=None, compression_types=["lz4"])
@cluster(num_nodes=7)
@parametrize(from_kafka_version=str(LATEST_0_9), to_message_format_version=None, compression_types=["none"], security_protocol="SASL_SSL")
@cluster(num_nodes=6)
@parametrize(from_kafka_version=str(LATEST_0_9), to_message_format_version=None, compression_types=["snappy"])
@parametrize(from_kafka_version=str(LATEST_0_9), to_message_format_version=None, compression_types=["lz4"])
@parametrize(from_kafka_version=str(LATEST_0_9), to_message_format_version=str(LATEST_0_9), compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_0_9), to_message_format_version=str(LATEST_0_9), compression_types=["lz4"])
@cluster(num_nodes=7)
@parametrize(from_kafka_version=str(LATEST_0_8_2), to_message_format_version=None, compression_types=["none"])
@parametrize(from_kafka_version=str(LATEST_0_8_2), to_message_format_version=None, compression_types=["snappy"])
def test_upgrade(self, from_kafka_version, to_message_format_version, compression_types,
security_protocol="PLAINTEXT"):
"""Test upgrade of Kafka broker cluster from various versions to the current version
from_kafka_version is a Kafka version to upgrade from
If to_message_format_version is None, it means that we will upgrade to default (latest)
message format version. It is possible to upgrade to 0.10 brokers but still use message
format version 0.9
- Start 3 node broker cluster on version 'from_kafka_version'
- Start producer and consumer in the background
- Perform two-phase rolling upgrade
- First phase: upgrade brokers to 0.10 with inter.broker.protocol.version set to
from_kafka_version and log.message.format.version set to from_kafka_version
- Second phase: remove inter.broker.protocol.version config with rolling bounce; if
to_message_format_version is set to 0.9, set log.message.format.version to
to_message_format_version, otherwise remove log.message.format.version config
- Finally, validate that every message acked by the producer was consumed by the consumer
"""
self.zk = ZookeeperService(self.test_context, num_nodes=1, version=KafkaVersion(from_kafka_version))
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk,
version=KafkaVersion(from_kafka_version),
topics={self.topic: {"partitions": self.partitions,
"replication-factor": self.replication_factor,
'configs': {"min.insync.replicas": 2}}})
self.kafka.security_protocol = security_protocol
self.kafka.interbroker_security_protocol = security_protocol
jdk_version = java_version(self.kafka.nodes[0])
if jdk_version > 9 and from_kafka_version in new_jdk_not_supported:
self.logger.info("Test ignored! Kafka " + from_kafka_version + " not support jdk " + str(jdk_version))
return
self.zk.start()
self.kafka.start()
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int,
compression_types=compression_types,
version=KafkaVersion(from_kafka_version))
if from_kafka_version <= LATEST_0_10_0:
assert self.kafka.cluster_id() is None
# With older message formats before KIP-101, message loss may occur due to truncation
# after leader change. Tolerate limited data loss for this case to avoid transient test failures.
        self.may_truncate_acked_records = from_kafka_version < V_0_11_0_0
new_consumer = from_kafka_version >= V_0_9_0_0
# TODO - reduce the timeout
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, new_consumer=new_consumer, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(from_kafka_version))
self.run_produce_consume_validate(core_test_action=lambda: self.perform_upgrade(from_kafka_version,
to_message_format_version))
cluster_id = self.kafka.cluster_id()
assert cluster_id is not None
assert len(cluster_id) == 22
assert self.kafka.check_protocol_errors(self)
| 67.209677 | 296 | 0.739941 |
30ad931396a135a7f19e43dad5ac72aef215bcc8 | 916 | py | Python | spacy/lang/tl/__init__.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | spacy/lang/tl/__init__.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | 4 | 2021-06-02T00:49:27.000Z | 2022-01-13T01:59:34.000Z | spacy/lang/tl/__init__.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | 2 | 2020-02-15T18:33:35.000Z | 2022-02-13T14:11:41.000Z | # coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
def _return_tl(_):
return "tl"
class TagalogDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters[LANG] = _return_tl
lex_attr_getters[NORM] = add_lookups(
Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
)
lex_attr_getters.update(LEX_ATTRS)
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
stop_words = STOP_WORDS
class Tagalog(Language):
lang = "tl"
Defaults = TagalogDefaults
__all__ = ["Tagalog"]
| 26.171429 | 76 | 0.779476 |
ce4b8b27a51a97c5144780812dd366f29b744a19 | 3,063 | py | Python | homeassistant/components/intent_script/__init__.py | joopert/home-assistant | a3f6fbb3774004b15c397c0556f98c5f7a59cb22 | [
"Apache-2.0"
] | 3 | 2020-10-23T14:39:11.000Z | 2021-02-17T14:40:17.000Z | homeassistant/components/intent_script/__init__.py | joopert/home-assistant | a3f6fbb3774004b15c397c0556f98c5f7a59cb22 | [
"Apache-2.0"
] | 3 | 2021-02-08T20:54:46.000Z | 2021-09-08T02:30:04.000Z | homeassistant/components/intent_script/__init__.py | joopert/home-assistant | a3f6fbb3774004b15c397c0556f98c5f7a59cb22 | [
"Apache-2.0"
] | 4 | 2020-05-30T08:19:47.000Z | 2021-05-14T11:39:19.000Z | """Handle intents with scripts."""
import copy
import logging
import voluptuous as vol
from homeassistant.helpers import intent, template, script, config_validation as cv
DOMAIN = "intent_script"
CONF_INTENTS = "intents"
CONF_SPEECH = "speech"
CONF_ACTION = "action"
CONF_CARD = "card"
CONF_TYPE = "type"
CONF_TITLE = "title"
CONF_CONTENT = "content"
CONF_TEXT = "text"
CONF_ASYNC_ACTION = "async_action"
DEFAULT_CONF_ASYNC_ACTION = False
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: {
cv.string: {
vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(
CONF_ASYNC_ACTION, default=DEFAULT_CONF_ASYNC_ACTION
): cv.boolean,
vol.Optional(CONF_CARD): {
vol.Optional(CONF_TYPE, default="simple"): cv.string,
vol.Required(CONF_TITLE): cv.template,
vol.Required(CONF_CONTENT): cv.template,
},
vol.Optional(CONF_SPEECH): {
vol.Optional(CONF_TYPE, default="plain"): cv.string,
vol.Required(CONF_TEXT): cv.template,
},
}
}
},
extra=vol.ALLOW_EXTRA,
)
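# Hedged example (editor's sketch, not part of the original component): a minimal
# configuration dict that CONFIG_SCHEMA above would accept. The intent name,
# spoken text and service call are hypothetical placeholders.
_EXAMPLE_INTENT_SCRIPT_CONFIG = {
    DOMAIN: {
        "GetTemperature": {
            CONF_SPEECH: {CONF_TYPE: "plain", CONF_TEXT: "It is 21 degrees"},
            CONF_ACTION: [
                {"service": "climate.turn_on", "entity_id": "climate.living_room"}
            ],
        }
    }
}
# CONFIG_SCHEMA(_EXAMPLE_INTENT_SCRIPT_CONFIG) would validate this structure.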
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """Set up the intent_script component."""
intents = copy.deepcopy(config[DOMAIN])
template.attach(hass, intents)
for intent_type, conf in intents.items():
if CONF_ACTION in conf:
conf[CONF_ACTION] = script.Script(
hass, conf[CONF_ACTION], f"Intent Script {intent_type}"
)
intent.async_register(hass, ScriptIntentHandler(intent_type, conf))
return True
class ScriptIntentHandler(intent.IntentHandler):
"""Respond to an intent with a script."""
def __init__(self, intent_type, config):
"""Initialize the script intent handler."""
self.intent_type = intent_type
self.config = config
async def async_handle(self, intent_obj):
"""Handle the intent."""
speech = self.config.get(CONF_SPEECH)
card = self.config.get(CONF_CARD)
action = self.config.get(CONF_ACTION)
is_async_action = self.config.get(CONF_ASYNC_ACTION)
slots = {key: value["value"] for key, value in intent_obj.slots.items()}
if action is not None:
if is_async_action:
intent_obj.hass.async_create_task(
action.async_run(slots, intent_obj.context)
)
else:
await action.async_run(slots)
response = intent_obj.create_response()
if speech is not None:
response.async_set_speech(
speech[CONF_TEXT].async_render(slots), speech[CONF_TYPE]
)
if card is not None:
response.async_set_card(
card[CONF_TITLE].async_render(slots),
card[CONF_CONTENT].async_render(slots),
card[CONF_TYPE],
)
return response
| 29.451923 | 83 | 0.599412 |
3a6b95218d102c4cc185627569f994785d6138f2 | 6,223 | py | Python | pr2_robot/sensor_stick/scripts/object_recognition.py | Shubodh/RoboND_Perception_Project | 1f7a09c62c93c2130d4f06069d317f758800b0a4 | [
"MIT"
] | null | null | null | pr2_robot/sensor_stick/scripts/object_recognition.py | Shubodh/RoboND_Perception_Project | 1f7a09c62c93c2130d4f06069d317f758800b0a4 | [
"MIT"
] | null | null | null | pr2_robot/sensor_stick/scripts/object_recognition.py | Shubodh/RoboND_Perception_Project | 1f7a09c62c93c2130d4f06069d317f758800b0a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# TODO: Convert ROS msg to PCL data:
#This function takes in a ROS message of type PointCloud2 and converts it to PCL PointXYZRGB format.
cloud_ros = ros_to_pcl(pcl_msg)
# TODO: Voxel Grid Downsampling
vox = cloud_ros.make_voxel_grid_filter()
LEAF_SIZE = 0.01
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
cloud_filtered = vox.filter()
# TODO: PassThrough Filter
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
# TODO: RANSAC Plane Segmentation
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.01
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
# TODO: Extract inliers and outliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
# TODO: Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(extracted_outliers) # Apply function to convert XYZRGB to XYZ
tree = white_cloud.make_kdtree()
ec = white_cloud.make_EuclideanClusterExtraction()
    # Cluster tolerance controls how far apart points can be and still be grouped into one cluster;
    # if it is too high, nearby objects may merge into a single cluster, so tune it experimentally.
ec.set_ClusterTolerance(0.03)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(2500)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
# TODO: Convert PCL data to ROS messages
cloud_objects = pcl_to_ros(extracted_outliers)
ros_cloud_table = pcl_to_ros(extracted_inliers)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# TODO: Publish ROS messages
# pcl_objects_pub.publish(cloud_objects)
# pcl_table_pub.publish(ros_cloud_table)
# pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster from the extracted outliers (cloud_objects)
pcl_cluster = extracted_outliers.extract(pts_list)
# TODO: convert the cluster from pcl to ROS using helper function
ros_cluster = pcl_to_ros(pcl_cluster)
# Extract histogram features
# TODO: complete this step just as is covered in capture_features.py
chists = compute_color_histograms(ros_cluster, using_hsv=False)
normals = get_normals(ros_cluster)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
#labeled_features.append([feature, model_name])
# Make the prediction, retrieve the label for the result
# and add it to detected_objects_labels list
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
# This is the output you'll need to complete the upcoming project!
detected_objects_pub.publish(detected_objects)
if __name__ == '__main__':
# TODO: ROS node initialization #Change the name
rospy.init_node('object_recog', anonymous=True)
# TODO: Create Subscribers
pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
# TODO: Create Publishers
# TODO: here you need to create two publishers
# Call them object_markers_pub and detected_objects_pub
# Have them publish to "/object_markers" and "/detected_objects" with
# Message Types "Marker" and "DetectedObjectsArray" , respectively
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# TODO: Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| 34.005464 | 161 | 0.763297 |
a7e0379f37896330cb7f7a897f3546c8354f01a1 | 3,092 | py | Python | modules/dbnd/src/dbnd/_core/decorator/schemed_result.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/dbnd/_core/decorator/schemed_result.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/dbnd/_core/decorator/schemed_result.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | from typing import Any
import attr
from dbnd._core.errors import friendly_error
from dbnd._core.errors.friendly_error.task_execution import (
failed_to_read_value_from_target,
)
from dbnd._core.parameter.parameter_definition import ParameterDefinition, T
from targets.errors import NotSupportedValue
from targets.multi_target import MultiTarget
class ResultProxyTarget(MultiTarget):
target_no_traverse = True
def __init__(self, source, names, properties=None):
super(ResultProxyTarget, self).__init__(
targets=None, properties=properties, source=source
)
self.names = names
@property
def targets(self):
return list(self)
def get_sub_result(self, name):
return getattr(self.source_task, name)
def __iter__(self):
for name in self.names:
yield self.get_sub_result(name)
def __repr__(self):
return "result(%s)" % ",".join(self.names)
def as_dict(self):
return {name: self.get_sub_result(name) for name in self.names}
def as_task(self):
return self.source.task
@attr.s(hash=False, repr=False, str=False)
class FuncResultParameter(ParameterDefinition):
schema = attr.ib(default=None)
@property
def names(self):
return [p.name for p in self.schema]
def build_output(self, task):
return ResultProxyTarget(source=self._target_source(task), names=self.names)
def dump_to_target(self, target, value, **kwargs):
"""
We don't need to dump this value, it's just a map to all other outputs
"""
pass
def _validate_result(self, result):
if not isinstance(result, (tuple, list, dict)):
raise friendly_error.task_execution.wrong_return_value_type(
self.task_cls, self.names, result
)
elif len(result) != len(self.schema):
raise friendly_error.task_execution.wrong_return_value_len(
self.task_cls, self.names, result
)
if isinstance(result, dict):
if set(result.keys()).symmetric_difference(set(self.names)):
raise NotSupportedValue(
"Returned result doesn't match expected schema. Expected {}, got {}".format(
self.names, result.keys()
)
)
def named_results(self, result):
self._validate_result(result)
if isinstance(result, dict):
return [(name, result[name]) for name in self.names]
return zip(self.names, result)
def load_from_target(
self, target, **kwargs
): # type: (FuncResultParameter, ResultProxyTarget, **Any)-> T
results = []
for p in self.schema:
p_target = target.get_sub_result(p.name)
try:
results.append(p.load_from_target(p_target, **kwargs))
except Exception as ex:
raise failed_to_read_value_from_target(
ex, p_target.source.task, p, target
)
return tuple(results)
| 31.232323 | 96 | 0.629043 |
852c459153a83d97c1a83edb25958ea8149e9693 | 9,931 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/dateperiod_extractor_config.py | inloco/Recognizers-Text | 9f4ac7cd4170fe39e48ccf52c028877e7c421e60 | [
"MIT"
] | 1 | 2019-01-03T16:41:29.000Z | 2019-01-03T16:41:29.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/dateperiod_extractor_config.py | inloco/Recognizers-Text | 9f4ac7cd4170fe39e48ccf52c028877e7c421e60 | [
"MIT"
] | 76 | 2018-11-09T18:19:44.000Z | 2019-08-20T20:29:53.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/dateperiod_extractor_config.py | inloco/Recognizers-Text | 9f4ac7cd4170fe39e48ccf52c028877e7c421e60 | [
"MIT"
] | 6 | 2017-05-04T17:24:59.000Z | 2019-07-23T15:48:44.000Z | from typing import List, Pattern
from recognizers_text import Extractor, Parser, RegExpUtility
from recognizers_number import ChineseNumberExtractor, ChineseNumberParserConfiguration, BaseNumberParser, \
ChineseCardinalExtractor, ChineseOrdinalExtractor
from ...resources.base_date_time import BaseDateTime
from ...resources.chinese_date_time import ChineseDateTime
from ..extractors import DateTimeExtractor
from ..base_dateperiod import DatePeriodExtractorConfiguration, MatchedIndex
from .date_extractor import ChineseDateExtractor
class ChineseDatePeriodExtractorConfiguration(DatePeriodExtractorConfiguration):
@property
def previous_prefix_regex(self) -> Pattern:
return self._previous_prefix_regex
@property
def check_both_before_after(self) -> Pattern:
return self._check_both_before_after
@property
def time_unit_regex(self) -> Pattern:
return self._time_unit_regex
@property
def ordinal_extractor(self) -> Extractor:
return self._ordinal_extractor
@property
def cardinal_extractor(self) -> Extractor:
return self._cardinal_extractor
@property
def within_next_prefix_regex(self) -> Pattern:
return self._within_next_prefix_regex
@property
def future_suffix_regex(self) -> Pattern:
return self._future_suffix_regex
@property
def ago_regex(self) -> Pattern:
return self._ago_regex
@property
def later_regex(self) -> Pattern:
return self._later_regex
@property
def less_than_regex(self) -> Pattern:
return self._less_than_regex
@property
def more_than_regex(self) -> Pattern:
return self._more_than_regex
@property
def duration_date_restrictions(self) -> [str]:
return self._duration_date_restrictions
@property
def year_period_regex(self) -> Pattern:
return self._year_period_regex
@property
def century_suffix_regex(self) -> Pattern:
return self._century_suffix_regex
@property
def month_num_regex(self) -> Pattern:
return self._month_num_regex
@property
def simple_cases_regexes(self) -> List[Pattern]:
return self._simple_cases_regexes
@property
def illegal_year_regex(self) -> Pattern:
return self._illegal_year_regex
@property
def year_regex(self) -> Pattern:
return self._year_regex
@property
def till_regex(self) -> Pattern:
return self._till_regex
@property
def followed_unit(self) -> Pattern:
return self._followed_unit
@property
def number_combined_with_unit(self) -> Pattern:
return self._number_combined_with_unit
@property
def past_regex(self) -> Pattern:
return self._past_regex
@property
def future_regex(self) -> Pattern:
return self._future_regex
@property
def date_point_extractor(self) -> DateTimeExtractor:
return self._date_point_extractor
@property
def integer_extractor(self) -> Extractor:
return self._integer_extractor
@property
def number_parser(self) -> Parser:
return self._number_parser
@property
def now_regex(self) -> Pattern:
return self._now_regex
@property
def day_regex(self):
return self._day_regex
@property
def day_regex_in_chinese(self) -> Pattern:
return self._day_regex_in_chinese
@property
def relative_month_regex(self) -> Pattern:
return self._relative_month_regex
@property
def zero_to_nine_integer_regex_chinese(self) -> Pattern:
return self._zero_to_nine_integer_regex_chinese
@property
def month_regex(self) -> Pattern:
return self._month_regex
@property
def this_regex(self) -> Pattern:
return self._this_regex
@property
def last_regex(self) -> Pattern:
return self._last_regex
@property
def next_regex(self) -> Pattern:
return self._next_regex
@property
def strict_year_regex(self) -> Pattern:
return self._strict_year_regex
@property
def year_regex_in_number(self) -> Pattern:
return self._year_regex_in_number
@property
def month_suffix_regex(self) -> Pattern:
return self._month_suffix_regex
@property
def season_regex(self) -> Pattern:
return self._season_regex
@property
def week_of_regex(self) -> Pattern:
return None
@property
def month_of_regex(self) -> Pattern:
return None
@property
def date_unit_regex(self) -> Pattern:
return None
@property
def in_connector_regex(self) -> Pattern:
return None
@property
def range_unit_regex(self) -> Pattern:
return None
@property
def duration_extractor(self) -> DateTimeExtractor:
return None
@property
def range_connector_regex(self) -> Pattern:
return None
def __init__(self):
self._season_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.SeasonRegex
)
self._month_suffix_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.MonthSuffixRegex
)
self._year_regex_in_number = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.YearRegexInNumber
)
self._strict_year_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.StrictYearRegex
)
self._last_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DatePeriodLastRegex
)
self._next_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DatePeriodNextRegex
)
self._this_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DatePeriodThisRegex
)
self._month_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.MonthRegex
)
self._zero_to_nine_integer_regex_chinese = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.ZeroToNineIntegerRegexCJK
)
self._relative_month_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.RelativeMonthRegex
)
self._day_regex_in_chinese = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DatePeriodDayRegexInCJK
)
self._day_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DayRegex
)
self._simple_cases_regexes = [
RegExpUtility.get_safe_reg_exp(ChineseDateTime.SimpleCasesRegex),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.OneWordPeriodRegex),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.StrictYearRegex),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.YearToYear),
RegExpUtility.get_safe_reg_exp(
ChineseDateTime.YearToYearSuffixRequired),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.MonthToMonth),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.MonthToMonthSuffixRequired),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.YearAndMonth),
RegExpUtility.get_safe_reg_exp(
ChineseDateTime.PureNumYearAndMonth),
RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DatePeriodYearInCJKRegex),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.WeekOfMonthRegex),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.SeasonWithYear),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.QuarterRegex),
RegExpUtility.get_safe_reg_exp(ChineseDateTime.DecadeRegex)
]
self._illegal_year_regex = RegExpUtility.get_safe_reg_exp(
BaseDateTime.IllegalYearRegex)
self._year_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.YearRegex)
self._till_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.DatePeriodTillRegex)
self._followed_unit = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.FollowedUnit)
self._number_combined_with_unit = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.NumberCombinedWithUnit)
self._past_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.PastRegex)
self._future_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.FutureRegex)
self._date_point_extractor = ChineseDateExtractor()
self._integer_extractor = ChineseNumberExtractor()
self._number_parser = BaseNumberParser(
ChineseNumberParserConfiguration())
self._now_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.NowRegex)
self._month_num_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.MonthNumRegex)
self._cardinal_extractor = ChineseCardinalExtractor()
self._ordinal_extractor = ChineseOrdinalExtractor()
# TODO When the implementation for these properties is added, change the None values to their respective Regexps
self._previous_prefix_regex = None
self._check_both_before_after = None
self._century_suffix_regex = None
self._year_period_regex = None
self._duration_date_restrictions = None
self._more_than_regex = None
self._less_than_regex = None
self._later_regex = None
self._ago_regex = None
self._future_suffix_regex = None
self._within_next_prefix_regex = None
self._time_unit_regex = None
self._previous_prefix_regex = None
def get_from_token_index(self, source: str) -> MatchedIndex:
if source.endswith('从'):
return MatchedIndex(True, source.rindex('从'))
return MatchedIndex(False, -1)
def get_between_token_index(self, source: str) -> MatchedIndex:
return MatchedIndex(False, -1)
def has_connector_token(self, source: str) -> bool:
return False
| 32.667763 | 120 | 0.699124 |
9ea7029808e7b9dd188df87167c2c08ab649c24e | 1,485 | py | Python | env/lib/python3.7/site-packages/banal/cache.py | keshavm02/cyber_security-CTF-server | 6af715b4c108349aa728fe9630f4b0be6a3c07a7 | [
"Apache-2.0"
] | null | null | null | env/lib/python3.7/site-packages/banal/cache.py | keshavm02/cyber_security-CTF-server | 6af715b4c108349aa728fe9630f4b0be6a3c07a7 | [
"Apache-2.0"
] | null | null | null | env/lib/python3.7/site-packages/banal/cache.py | keshavm02/cyber_security-CTF-server | 6af715b4c108349aa728fe9630f4b0be6a3c07a7 | [
"Apache-2.0"
] | null | null | null | import six
import types
from itertools import chain
from hashlib import sha1
from datetime import date, datetime
from banal.dicts import is_mapping
from banal.lists import is_sequence
def bytes_iter(obj):
"""Turn a complex object into an iterator of byte strings.
The resulting iterator can be used for caching.
"""
if obj is None:
return
elif isinstance(obj, six.binary_type):
yield obj
elif isinstance(obj, six.string_types):
yield obj
elif isinstance(obj, (date, datetime)):
yield obj.isoformat()
elif is_mapping(obj):
for key in sorted(obj.keys()):
for out in chain(bytes_iter(key), bytes_iter(obj[key])):
yield out
elif is_sequence(obj):
if isinstance(obj, (list, set)):
try:
obj = sorted(obj)
except Exception:
pass
for item in obj:
for out in bytes_iter(item):
yield out
elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)):
yield getattr(obj, 'func_name', '')
else:
yield six.text_type(obj)
def hash_data(obj):
"""Generate a SHA1 from a complex object."""
collect = sha1()
for text in bytes_iter(obj):
if isinstance(text, six.text_type):
text = text.encode('utf-8')
collect.update(text)
return collect.hexdigest()
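def _hash_data_usage_sketch():
    # Editor's sketch (not part of the original API): hash_data walks nested
    # structures through bytes_iter, so logically-equal mappings hash to the
    # same 40-character hex digest regardless of key order.
    a = hash_data({"x": [1, 2, 3], "y": datetime(2020, 1, 1)})
    b = hash_data({"y": datetime(2020, 1, 1), "x": [1, 2, 3]})
    assert a == b and len(a) == 40
    return a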
| 29.117647 | 72 | 0.612121 |
bee7ee05d85b0d3f4dabdb86e9c98cffea3a9c0c | 10,056 | py | Python | python_modules/dagster/dagster_tests/daemon_tests/test_dagster_daemon_health.py | keypointt/dagster | 45683a29cbe2429d4e538254fac9498198f53879 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/daemon_tests/test_dagster_daemon_health.py | keypointt/dagster | 45683a29cbe2429d4e538254fac9498198f53879 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/daemon_tests/test_dagster_daemon_health.py | keypointt/dagster | 45683a29cbe2429d4e538254fac9498198f53879 | [
"Apache-2.0"
] | null | null | null | import time
import pendulum
from dagster import DagsterInvariantViolationError
from dagster.core.test_utils import instance_for_test
from dagster.daemon.controller import (
all_daemons_healthy,
all_daemons_live,
daemon_controller_from_instance,
get_daemon_status,
)
from dagster.utils.error import SerializableErrorInfo
def test_healthy():
with instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster.core.run_coordinator.queued_run_coordinator",
"class": "QueuedRunCoordinator",
},
}
) as instance:
init_time = pendulum.now("UTC")
assert not all_daemons_healthy(instance, curr_time_seconds=init_time.float_timestamp)
assert not all_daemons_live(instance, curr_time_seconds=init_time.float_timestamp)
with daemon_controller_from_instance(
instance, wait_for_processes_on_exit=True
) as controller:
while True:
now = pendulum.now("UTC")
if all_daemons_healthy(
instance, curr_time_seconds=now.float_timestamp
) and all_daemons_live(instance, curr_time_seconds=now.float_timestamp):
controller.check_daemons()
beyond_tolerated_time = now.float_timestamp + 100
assert not all_daemons_healthy(
instance, curr_time_seconds=beyond_tolerated_time
)
assert not all_daemons_live(instance, curr_time_seconds=beyond_tolerated_time)
break
if (now - init_time).total_seconds() > 10:
raise Exception("timed out waiting for instance to become healthy")
time.sleep(0.5)
def test_healthy_with_different_daemons():
with instance_for_test() as instance:
with daemon_controller_from_instance(instance, wait_for_processes_on_exit=True):
with instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster.core.run_coordinator.queued_run_coordinator",
"class": "QueuedRunCoordinator",
},
}
) as other_instance:
now = pendulum.now("UTC")
assert not all_daemons_healthy(
other_instance, curr_time_seconds=now.float_timestamp
)
assert not all_daemons_live(other_instance, curr_time_seconds=now.float_timestamp)
def test_thread_die_daemon(monkeypatch):
with instance_for_test(overrides={}) as instance:
from dagster.daemon.daemon import SchedulerDaemon, SensorDaemon
iteration_ran = {"ran": False}
def run_iteration_error(_, _instance, _grpc_server_registry):
iteration_ran["ran"] = True
raise KeyboardInterrupt
yield # pylint: disable=unreachable
monkeypatch.setattr(SensorDaemon, "run_iteration", run_iteration_error)
init_time = pendulum.now("UTC")
with daemon_controller_from_instance(
instance, wait_for_processes_on_exit=True
) as controller:
while True:
now = pendulum.now("UTC")
status = get_daemon_status(
instance, SchedulerDaemon.daemon_type(), now.float_timestamp
)
if iteration_ran["ran"] and status.healthy:
try:
controller.check_daemons() # Should eventually throw since the sensor thread is interrupted
except Exception as e: # pylint: disable=broad-except
assert (
"Stopping dagster-daemon process since the following threads are no longer sending heartbeats: ['SENSOR']"
in str(e)
)
break
if (now - init_time).total_seconds() > 20:
raise Exception("timed out waiting for check_daemons to fail")
time.sleep(0.5)
def test_error_daemon(monkeypatch):
with instance_for_test() as instance:
from dagster.daemon.daemon import SensorDaemon
def run_iteration_error(_, _instance, _grpc_server_registry):
raise DagsterInvariantViolationError("foobar")
yield # pylint: disable=unreachable
monkeypatch.setattr(SensorDaemon, "run_iteration", run_iteration_error)
init_time = pendulum.now("UTC")
with daemon_controller_from_instance(
instance, wait_for_processes_on_exit=True
) as controller:
while True:
now = pendulum.now("UTC")
if all_daemons_live(instance):
# Despite error, daemon should still be running
controller.check_daemons()
status = get_daemon_status(
instance, SensorDaemon.daemon_type(), now.float_timestamp
)
assert status.healthy == False
assert len(status.last_heartbeat.errors) == 1
assert (
status.last_heartbeat.errors[0].message.strip()
== "dagster.core.errors.DagsterInvariantViolationError: foobar"
)
assert not all_daemons_healthy(instance, curr_time_seconds=now.float_timestamp)
assert all_daemons_live(instance, curr_time_seconds=now.float_timestamp)
break
if (now - init_time).total_seconds() > 10:
raise Exception("timed out waiting for heartbeat error")
time.sleep(0.5)
def test_multiple_error_daemon(monkeypatch):
with instance_for_test() as instance:
from dagster.daemon.daemon import SensorDaemon
def run_iteration_error(_, _instance, _grpc_server_registry):
            # SerializableErrorInfo(message, stack, cls_name, cause)
yield SerializableErrorInfo("foobar", None, None, None)
yield SerializableErrorInfo("bizbuz", None, None, None)
monkeypatch.setattr(SensorDaemon, "run_iteration", run_iteration_error)
init_time = pendulum.now("UTC")
with daemon_controller_from_instance(
instance, wait_for_processes_on_exit=True
) as controller:
while True:
now = pendulum.now("UTC")
if all_daemons_live(instance):
# Despite error, daemon should still be running
controller.check_daemons()
status = get_daemon_status(
instance, SensorDaemon.daemon_type(), now.float_timestamp
)
if status.healthy == False:
assert len(status.last_heartbeat.errors) == 2
assert status.last_heartbeat.errors[0].message.strip() == "foobar"
assert status.last_heartbeat.errors[1].message.strip() == "bizbuz"
break
if (now - init_time).total_seconds() > 10:
raise Exception("timed out waiting for heartbeat error")
time.sleep(0.5)
def test_warn_multiple_daemons(capsys):
from dagster.daemon.daemon import SensorDaemon
with instance_for_test() as instance:
init_time = pendulum.now("UTC")
with daemon_controller_from_instance(instance, wait_for_processes_on_exit=True):
while True:
now = pendulum.now("UTC")
if all_daemons_live(instance):
captured = capsys.readouterr()
assert "Taking over from another SENSOR daemon process" not in captured.out
break
if (now - init_time).total_seconds() > 10:
raise Exception("timed out waiting for daemon status")
time.sleep(0.5)
capsys.readouterr()
init_time = pendulum.now("UTC")
status = get_daemon_status(instance, SensorDaemon.daemon_type(), now.float_timestamp)
last_heartbeat_time = status.last_heartbeat.timestamp
# No warning when a second controller starts up again
with daemon_controller_from_instance(instance, wait_for_processes_on_exit=True):
while True:
now = pendulum.now("UTC")
status = get_daemon_status(
instance, SensorDaemon.daemon_type(), now.float_timestamp
)
if status.last_heartbeat and status.last_heartbeat.timestamp != last_heartbeat_time:
captured = capsys.readouterr()
assert "Taking over from another SENSOR daemon process" not in captured.out
break
if (now - init_time).total_seconds() > 10:
raise Exception("timed out waiting for new daemon status")
time.sleep(0.5)
status = get_daemon_status(instance, SensorDaemon.daemon_type(), now.float_timestamp)
last_heartbeat_time = status.last_heartbeat.timestamp
# Starting up a controller while one is running produces the warning though
with daemon_controller_from_instance(instance, wait_for_processes_on_exit=True):
# Wait for heartbeats while two controllers are running at once and there will
# be a warning
init_time = pendulum.now("UTC")
while True:
now = pendulum.now("UTC")
captured = capsys.readouterr()
if "Taking over from another SENSOR daemon process" in captured.out:
break
if (now - init_time).total_seconds() > 60:
raise Exception("timed out waiting for heartbeats")
time.sleep(5)
| 38.235741 | 134 | 0.591289 |
cf4d328b48eb37eb7c18a8890822a5cff7f2395a | 2,019 | py | Python | models/utils.py | teachteamgithub/coursebuilder | 178c0ff3cd28858079488c3c2a0bc22a5baf58e3 | [
"Apache-2.0"
] | 13 | 2015-02-07T12:43:40.000Z | 2020-02-10T18:30:36.000Z | models/utils.py | opsschool/coursebuilder | 178c0ff3cd28858079488c3c2a0bc22a5baf58e3 | [
"Apache-2.0"
] | null | null | null | models/utils.py | opsschool/coursebuilder | 178c0ff3cd28858079488c3c2a0bc22a5baf58e3 | [
"Apache-2.0"
] | 8 | 2015-08-29T03:10:16.000Z | 2019-09-13T22:59:03.000Z | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to work with various models."""
__author__ = 'Sean Lip (sll@google.com)'
import transforms
def set_answer(answers, assessment_name, answer):
"""Stores the answer array for the given student and assessment.
The caller must call answers.put() to commit.
This does not do any type-checking on 'answer'; it just stores whatever
is passed in.
Args:
answers: the StudentAnswers entity in which the answer should be stored.
assessment_name: the name of the assessment.
answer: an array containing the student's answers.
"""
if not answers.data:
score_dict = {}
else:
score_dict = transforms.loads(answers.data)
score_dict[assessment_name] = answer
answers.data = transforms.dumps(score_dict)
def set_score(student, assessment_name, score):
"""Stores the score for the given student and assessment.
The caller must call student.put() to commit.
This does not do any type-checking on 'score'; it just stores whatever
is passed in.
Args:
student: the student whose answer should be stored.
assessment_name: the name of the assessment.
score: the student's score.
"""
if not student.scores:
score_dict = {}
else:
score_dict = transforms.loads(student.scores)
score_dict[assessment_name] = score
student.scores = transforms.dumps(score_dict)
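def _set_score_usage_sketch(student):
    """Editor's sketch, not part of the original module.
    Illustrates how scores accumulate in the JSON-encoded 'scores' field;
    'student' is any entity with a writable 'scores' string attribute that
    starts out empty. The caller still owns the final student.put().
    """
    set_score(student, 'precourse', 80)
    set_score(student, 'midcourse', 95)
    assert transforms.loads(student.scores) == {'precourse': 80, 'midcourse': 95}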
| 33.098361 | 80 | 0.709262 |
75da3202f5ab62541074ed3ea6477a9a02a19d82 | 2,350 | py | Python | python_framework/api/test/apitests/testone/api/src/controller/ActuatorHealthTestController.py | AChillFeeder/python_framework | 4521e1acb90f749ebd9724a87dc121c6f37b3dca | [
"MIT"
] | 5 | 2020-09-02T20:05:44.000Z | 2022-03-04T21:02:13.000Z | python_framework/api/test/apitests/testone/api/src/controller/ActuatorHealthTestController.py | AChillFeeder/python_framework | 4521e1acb90f749ebd9724a87dc121c6f37b3dca | [
"MIT"
] | 1 | 2021-05-23T22:55:58.000Z | 2021-05-24T15:33:50.000Z | python_framework/api/test/apitests/testone/api/src/controller/ActuatorHealthTestController.py | AChillFeeder/python_framework | 4521e1acb90f749ebd9724a87dc121c6f37b3dca | [
"MIT"
] | 3 | 2020-11-01T01:13:09.000Z | 2022-02-22T15:01:19.000Z | from python_helper import EnvironmentHelper
from python_framework.api.src.enumeration.HttpStatus import HttpStatus
from python_framework.api.src.service.flask.FlaskManager import Controller, ControllerMethod
from python_framework.api.src.dto import ActuatorHealthDto
from dto import EnumAsQueryDto
from dto import TestRequestDto
@Controller(url='/test/actuator/health', tag='HealthCheckTest', description='HealthCheck controller test')
class ActuatorHealthTestController:
@ControllerMethod(
requestClass = EnumAsQueryDto.EnumAsQueryRequestDto,
responseClass = [[ActuatorHealthDto.ActuatorHealthResponseDto]],
logResponse = True
)
def post(self, dto):
return self.service.status.findAllByStatus(dto), HttpStatus.OK
@ControllerMethod(
url=f'/{EnvironmentHelper.get("URL_VARIANT")}',
logRequest = True,
requestHeaderClass = [TestRequestDto.TestRequestHeaderDto],
requestParamClass = [TestRequestDto.TestRequestParamDto],
requestClass = ActuatorHealthDto.ActuatorHealthResponseDto,
responseClass = [[ActuatorHealthDto.ActuatorHealthResponseDto]],
logResponse = True
)
def put(self, dto, headers=None, params=None):
# from flask import request
# print(f'{self.__class__.__name__}.put -> request.headers: {request.headers}')
# print(f'headers.firstHeader: {headers.firstHeader}')
# print(f'headers.secondHeader: {headers.secondHeader}')
# print(f'params.first: {params.first}')
# print(f'params.second: {params.second}')
return [dto], HttpStatus.OK
@ControllerMethod(
url = f'/{EnvironmentHelper.get("URL_VARIANT")}/<string:status>/<string:secondStatus>',
requestHeaderClass = [TestRequestDto.TestRequestHeaderDto],
requestParamClass = [TestRequestDto.TestRequestParamDto],
responseClass = [TestRequestDto.TestResponseDto],
logResponse = True
)
def get(self, status=None, secondStatus=None, headers=None, params=None) :
# from flask import request
# print(f'{self.__class__.__name__}.get -> request.headers: {request.headers}')
return self.globals.api.resource.client.some.getOnActuatorHealth(ActuatorHealthDto.ActuatorHealthResponseDto(status+secondStatus), headers=headers, params=params), HttpStatus.OK
| 47 | 185 | 0.728085 |
05ba061bc66c13f53fce48aa23f5e7aa736d51af | 17,950 | py | Python | python/mxnet/gluon/utils.py | liuxiaotiao/mxnet_DGT | 023382a9e73047624d511605fb391eac0d7fdb8e | [
"Apache-2.0",
"MIT"
] | null | null | null | python/mxnet/gluon/utils.py | liuxiaotiao/mxnet_DGT | 023382a9e73047624d511605fb391eac0d7fdb8e | [
"Apache-2.0",
"MIT"
] | null | null | null | python/mxnet/gluon/utils.py | liuxiaotiao/mxnet_DGT | 023382a9e73047624d511605fb391eac0d7fdb8e | [
"Apache-2.0",
"MIT"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Parallelization utility optimizer."""
__all__ = ['split_data', 'split_and_load', 'clip_global_norm',
'check_sha1', 'download', 'replace_file']
import os
import sys
import hashlib
import uuid
import warnings
import collections
import weakref
import requests
import numpy as np
from .. import ndarray
from ..util import is_np_shape, is_np_array
from .. import numpy as _mx_np # pylint: disable=reimported
def split_data(data, num_slice, batch_axis=0, even_split=True):
"""Splits an NDArray into `num_slice` slices along `batch_axis`.
Usually used for data parallelism where each slices is sent
to one device (i.e. GPU).
Parameters
----------
data : NDArray
A batch of data.
num_slice : int
Number of desired slices.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
If `True`, an error will be raised when `num_slice` does not evenly
divide `data.shape[batch_axis]`.
Returns
-------
list of NDArray
Return value is a list even if `num_slice` is 1.
"""
size = data.shape[batch_axis]
if even_split and size % num_slice != 0:
raise ValueError(
"data with shape %s cannot be evenly split into %d slices along axis %d. " \
"Use a batch size that's multiple of %d or set even_split=False to allow " \
"uneven partitioning of data."%(
str(data.shape), num_slice, batch_axis, num_slice))
n_each_section, extras = divmod(size, num_slice)
section_sizes = [0] + (extras * [n_each_section + 1] +
(num_slice - extras) * [n_each_section])
div_points = np.array(section_sizes).cumsum()
if is_np_array():
slices = _mx_np.split(data, indices_or_sections=list(div_points[1: -1]), axis=batch_axis)
else:
slices = []
if batch_axis != 0:
for i in range(num_slice):
st = div_points[i]
end = div_points[i + 1]
slices.append(ndarray.slice_axis(data, axis=batch_axis, begin=st, end=end))
else:
# Fixes issue: https://github.com/apache/incubator-mxnet/issues/19268
slices = [data[div_points[i]:div_points[i + 1]] if i < num_slice - 1 else data[div_points[i]:size]
for i in range(num_slice)]
return slices
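def _split_data_usage_sketch():
    # Editor's sketch (not part of the public API): an 8-sample batch split into
    # 3 uneven slices along the batch axis. The remainder is spread over the
    # leading slices, giving shapes (3, 2), (3, 2) and (2, 2).
    x = ndarray.arange(16).reshape((8, 2))
    slices = split_data(x, num_slice=3, even_split=False)
    assert [s.shape[0] for s in slices] == [3, 3, 2]
    return slices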
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray or ndarray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArrays or ndarrays
Each corresponds to a context in `ctx_list`.
"""
array_fn = _mx_np.array if is_np_array() else ndarray.array
if not isinstance(data, ndarray.NDArray):
data = array_fn(data, ctx=ctx_list[0])
if len(ctx_list) == 1:
return [data.as_in_context(ctx_list[0])]
slices = split_data(data, len(ctx_list), batch_axis, even_split)
return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)]
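def _split_and_load_usage_sketch():
    # Editor's sketch (not part of the public API): one slice of the batch is
    # loaded onto each context in ctx_list. Two CPU contexts are used here so
    # the example runs without GPUs; the relative import location of cpu() is
    # an assumption about this code base.
    from ..context import cpu
    x = ndarray.ones((6, 4))
    parts = split_and_load(x, ctx_list=[cpu(0), cpu(1)])
    assert len(parts) == 2 and parts[0].shape == (3, 4)
    return parts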
def clip_global_norm(arrays, max_norm, check_isfinite=True):
"""Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`.
Parameters
----------
arrays : list of NDArray
max_norm : float
check_isfinite : bool, default True
If True, check that the total_norm is finite (not nan or inf). This
requires a blocking .asscalar() call.
Returns
-------
NDArray or float
Total norm. Return type is NDArray of shape (1,) if check_isfinite is
False. Otherwise a float is returned.
"""
def _norm(array):
if array.stype == 'default':
x = array.reshape((-1,))
return ndarray.dot(x, x)
return array.norm().square()
assert len(arrays) > 0
ctx = arrays[0].context
total_norm = ndarray.add_n(*[_norm(arr).as_in_context(ctx) for arr in arrays])
total_norm = ndarray.sqrt(total_norm)
if check_isfinite:
if not np.isfinite(total_norm.asscalar()):
warnings.warn(
UserWarning('nan or inf is detected. '
'Clipping results will be undefined.'), stacklevel=2)
scale = max_norm / (total_norm + 1e-8)
scale = ndarray.min(ndarray.concat(scale, ndarray.ones(1, ctx=ctx), dim=0))
for arr in arrays:
arr *= scale.as_in_context(arr.context)
if check_isfinite:
return total_norm.asscalar()
else:
return total_norm
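def _clip_global_norm_usage_sketch():
    # Editor's sketch (not part of the public API): rescales two "gradient"
    # arrays in place so that their combined 2-norm does not exceed max_norm;
    # the returned value is the norm measured before clipping.
    grads = [ndarray.ones((2, 2)) * 10, ndarray.ones((3,)) * 10]
    total = clip_global_norm(grads, max_norm=1.0)
    assert total > 1.0
    assert ndarray.norm(grads[0]).asscalar() <= 1.0
    return grads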
def _indent(s_, numSpaces):
"""Indent string
"""
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [first] + [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
return s
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
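def _check_sha1_usage_sketch(tmp_path):
    # Editor's sketch (not part of the public API): writes a small file to the
    # caller-supplied tmp_path, computes its SHA1 out of band and confirms that
    # check_sha1 agrees, while a bogus digest is rejected.
    payload = b'hello gluon'
    with open(tmp_path, 'wb') as f:
        f.write(payload)
    expected = hashlib.sha1(payload).hexdigest()
    assert check_sha1(tmp_path, expected)
    assert not check_sha1(tmp_path, '0' * 40)
    return expected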
if not sys.platform.startswith('win32'):
# refer to https://github.com/untitaker/python-atomicwrites
def replace_file(src, dst):
"""Implement atomic os.replace with linux and OSX.
Parameters
----------
src : source file path
dst : destination file path
"""
try:
os.rename(src, dst)
except OSError:
try:
os.remove(src)
except OSError:
pass
finally:
raise OSError(
'Moving downloaded temp file - {}, to {} failed. \
Please retry the download.'.format(src, dst))
else:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
# Setting this value guarantees that a move performed as a copy
# and delete operation is flushed to disk before the function returns.
# The flush occurs at the end of the copy operation.
_MOVEFILE_WRITE_THROUGH = 0x8
_windows_default_flags = _MOVEFILE_WRITE_THROUGH
def _str_to_unicode(x):
"""Handle text decoding. Internal use only"""
if not isinstance(x, str):
return x.decode(sys.getfilesystemencoding())
return x
def _handle_errors(rv, src):
"""Handle WinError. Internal use only"""
if not rv:
msg = ctypes.FormatError(ctypes.GetLastError())
# if the MoveFileExW fails(e.g. fail to acquire file lock), removes the tempfile
try:
os.remove(src)
except OSError:
pass
finally:
raise OSError(msg)
def replace_file(src, dst):
"""Implement atomic os.replace with windows.
refer to https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-movefileexw
The function fails when one of the process(copy, flush, delete) fails.
Parameters
----------
src : source file path
dst : destination file path
"""
_handle_errors(ctypes.windll.kernel32.MoveFileExW(
_str_to_unicode(src), _str_to_unicode(dst),
_windows_default_flags | _MOVEFILE_REPLACE_EXISTING
), src)
def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
"""Download a given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
        The number of times to attempt the download in case of failure or non-200 return codes.
verify_ssl : bool, default True
Verify SSL certificates.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
# Empty filenames are invalid
assert fname, 'Can\'t construct file-name from this URL. ' \
'Please set the `path` option manually.'
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format(
retries)
if not verify_ssl:
warnings.warn(
'Unverified HTTPS request is being made (verify_ssl=False). '
'Adding certificate verification is strongly advised.')
if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname, exist_ok=True)
while retries + 1 > 0:
            # Disable pylint's broad-except warning
# pylint: disable=W0703
try:
print('Downloading {} from {}...'.format(fname, url))
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError('Failed downloading url {}'.format(url))
# create uuid for temporary files
random_uuid = str(uuid.uuid4())
with open('{}.{}'.format(fname, random_uuid), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
                # If the target file already exists (created by another process)
                # and has the same hash as the downloaded file, delete the
                # temporary file instead of replacing the target.
if not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
                    # atomic operation in the same file system
replace_file('{}.{}'.format(fname, random_uuid), fname)
else:
try:
os.remove('{}.{}'.format(fname, random_uuid))
except OSError:
pass
finally:
warnings.warn(
'File {} exists in file system so the downloaded file is deleted'.format(fname))
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning(
'File {} is downloaded but the content hash does not match.'
' The repo may be outdated or download may be incomplete. '
'If the "repo_url" is overridden, consider switching to '
'the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
print('download failed due to {}, retrying, {} attempt{} left'
.format(repr(e), retries, 's' if retries > 1 else ''))
return fname
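# Illustrative sketch (not part of the original module): a typical download()
# call with retries; the URL and destination below are hypothetical placeholders.
def _example_download_usage():
    fname = download('https://example.com/datasets/train.rec',
                     path='./data/', overwrite=False, retries=3)
    print('saved to {}'.format(fname))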
def _get_repo_url():
"""Return the base URL for Gluon dataset and model repository."""
default_repo = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
repo_url = os.environ.get('MXNET_GLUON_REPO', default_repo)
if repo_url[-1] != '/':
repo_url = repo_url+'/'
return repo_url
def _get_repo_file_url(namespace, filename):
"""Return the URL for hosted file in Gluon repository.
Parameters
----------
namespace : str
Namespace of the file.
filename : str
Name of the file
"""
return '{base_url}{namespace}/{filename}'.format(base_url=_get_repo_url(),
namespace=namespace,
filename=filename)
def _brief_print_list(lst, limit=7):
"""Print at most `limit` elements of list."""
lst = list(lst)
if len(lst) > limit:
return _brief_print_list(lst[:limit//2], limit) + ', ..., ' + \
_brief_print_list(lst[-limit//2:], limit)
return ', '.join(["'%s'"%str(i) for i in lst])
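# Illustrative sketch (not part of the original module): _brief_print_list keeps
# long collections readable by eliding the middle, e.g.
#   _brief_print_list(range(10), limit=4) -> "'0', '1', ..., '8', '9'"
def _example_brief_print_list():
    return _brief_print_list(range(10), limit=4)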
class HookHandle(object):
"""A handle that can attach/detach a hook."""
def __init__(self):
self._hooks_dict_ref = None
self._id = None
def attach(self, hooks_dict, hook):
assert not self._hooks_dict_ref, 'The same handle cannot be attached twice.'
self._id = id(hook)
hooks_dict[self._id] = hook
self._hooks_dict_ref = weakref.ref(hooks_dict)
def detach(self):
hooks_dict = self._hooks_dict_ref()
if hooks_dict is not None and self._id in hooks_dict:
del hooks_dict[self._id]
def __getstate__(self):
return (self._hooks_dict_ref(), self._id)
def __setstate__(self, state):
if state[0] is None:
self._hooks_dict_ref = weakref.ref(collections.OrderedDict())
else:
self._hooks_dict_ref = weakref.ref(state[0])
self._id = state[1]
def __enter__(self):
return self
def __exit__(self, ptype, value, trace):
self.detach()
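# Illustrative sketch (not part of the original module): a HookHandle is what
# hook-registration APIs hand back; detaching removes the hook from the owning
# dict. The lambda below is a stand-in for a real hook callback.
def _example_hook_handle():
    hooks = collections.OrderedDict()
    handle = HookHandle()
    handle.attach(hooks, lambda *args: None)
    assert len(hooks) == 1
    handle.detach()
    assert len(hooks) == 0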
def shape_is_known(shape):
"""Check whether a shape is completely known with or without np semantics.
Please see the doc of is_np_shape for more details.
"""
if shape is None:
return False
unknown_dim_size = -1 if is_np_shape() else 0
if len(shape) == 0:
return unknown_dim_size == -1
for dim_size in shape:
if dim_size == unknown_dim_size:
return False
assert dim_size > unknown_dim_size, "shape dimension size cannot be less than {}, while " \
"received {}".format(unknown_dim_size, dim_size)
return True
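# Illustrative sketch (not part of the original module): under classic (non-np)
# semantics a 0-sized dimension means "unknown", so (2, 3) is fully known while
# None is not.
def _example_shape_is_known():
    assert shape_is_known((2, 3))
    assert not shape_is_known(None)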
def _check_same_symbol_type(symbols):
"""Check whether all the symbols in the list are of the same type.
Raise type error if the types are different. Return the class of
the symbols."""
from ..symbol.numpy import _Symbol as np_symbol
from ..symbol import Symbol as nd_symbol
is_np_sym = isinstance(symbols[0], np_symbol)
for s in symbols[1:]:
if is_np_sym != isinstance(s, np_symbol):
raise TypeError('Found both classic symbol (mx.sym.Symbol) and numpy symbol '
'(mx.sym.np._Symbol) in outputs. This will prevent you from building '
'a computation graph by grouping them since different types of symbols '
'are not allowed to be grouped in Gluon to form a computation graph. '
'You will need to convert them to the same type of symbols, either '
'classic or numpy following this rule: if you want numpy ndarray '
'output(s) from the computation graph, please convert all the classic '
'symbols in the list to numpy symbols by calling `as_np_ndarray()` '
'on each of them; if you want classic ndarray output(s) from the '
'computation graph, please convert all the numpy symbols in the list '
'to classic symbols by calling `as_nd_ndarray()` on each of them.')
return np_symbol if is_np_sym else nd_symbol
def _check_all_np_ndarrays(out):
"""Check if ndarrays/symbols in out are all np.ndarray/np._Symbol."""
from ..numpy import ndarray as np_ndarray
from ..symbol.numpy import _Symbol as np_symbol
from ..symbol import Symbol as nd_symbol
from ..ndarray import NDArray as nd_ndarray
# pylint: disable=no-else-raise
if isinstance(out, (nd_ndarray, nd_symbol)) and not isinstance(out, (np_ndarray, np_symbol)):
raise TypeError("Block's output ndarrays/symbols must be of type `mxnet.numpy.ndarray`"
" or `mxnet.symbol.numpy._Symbol`, while got output type {}"
.format(str(type(out))))
elif isinstance(out, (list, tuple)):
for i in out:
_check_all_np_ndarrays(i)
# pylint: enable=no-else-raise
| 37.086777 | 110 | 0.6039 |
90cf041d5eb01ba382431175556e212e1196d214 | 34,574 | py | Python | tests/unit/test_mock.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | tests/unit/test_mock.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/test_mock.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
'''
Tests for our mock_open helper
'''
# Import Python Libs
from __future__ import absolute_import, unicode_literals, print_function
import errno
import logging
import textwrap
# Import Salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.stringutils
from salt.ext import six
# Import Salt Testing Libs
from tests.support.mock import patch, mock_open
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class MockOpenMixin(object):
def _get_values(self, binary=False, multifile=False, split=False):
if split:
questions = (self.questions_bytes_lines if binary
else self.questions_str_lines)
answers = (self.answers_bytes_lines if binary
else self.answers_str_lines)
else:
questions = self.questions_bytes if binary else self.questions_str
answers = self.answers_bytes if binary else self.answers_str
mode = 'rb' if binary else 'r'
if multifile:
read_data = self.contents_bytes if binary else self.contents
else:
read_data = self.questions_bytes if binary else self.questions
return questions, answers, mode, read_data
def _test_read(self, binary=False, multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
result = self.fh.read()
assert result == questions, result
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
result = self.fh2.read()
assert result == answers, result
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
result = self.fh3.read()
assert result == answers, result
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No patterns should have matched')
except IOError:
# An IOError is expected here
pass
def _test_read_explicit_size(self, binary=False, multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
# Read 10 bytes
result = self.fh.read(10)
assert result == questions[:10], result
# Read another 10 bytes
result = self.fh.read(10)
assert result == questions[10:20], result
# Read the rest
result = self.fh.read()
assert result == questions[20:], result
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
# Read 10 bytes
result = self.fh2.read(10)
assert result == answers[:10], result
# Read another 10 bytes
result = self.fh2.read(10)
assert result == answers[10:20], result
# Read the rest
result = self.fh2.read()
assert result == answers[20:], result
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
# Read 10 bytes
result = self.fh3.read(10)
assert result == answers[:10], result
# Read another 10 bytes
result = self.fh3.read(10)
assert result == answers[10:20], result
# Read the rest
result = self.fh3.read()
assert result == answers[20:], result
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No globs should have matched')
except IOError:
# An IOError is expected here
pass
def _test_read_explicit_size_larger_than_file_size(self,
binary=False,
multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
result = self.fh.read(999999)
assert result == questions, result
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
result = self.fh2.read(999999)
assert result == answers, result
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
result = self.fh3.read(999999)
assert result == answers, result
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No globs should have matched')
except IOError:
# An IOError is expected here
pass
def _test_read_for_loop(self, binary=False, multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile, split=True)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
index = 0
for line in self.fh:
assert line == questions[index], \
'Line {0}: {1}'.format(index, line)
index += 1
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
index = 0
for line in self.fh2:
assert line == answers[index], \
'Line {0}: {1}'.format(index, line)
index += 1
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
index = 0
for line in self.fh3:
assert line == answers[index], \
'Line {0}: {1}'.format(index, line)
index += 1
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No globs should have matched')
except IOError:
# An IOError is expected here
pass
def _test_read_readline(self, binary=False, multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile, split=True)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
size = 8
result = self.fh.read(size)
assert result == questions[0][:size], result
# Use .readline() to read the remainder of the line
result = self.fh.readline()
assert result == questions[0][size:], result
# Read and check the other two lines
result = self.fh.readline()
assert result == questions[1], result
result = self.fh.readline()
assert result == questions[2], result
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
size = 20
result = self.fh2.read(size)
assert result == answers[0][:size], result
# Use .readline() to read the remainder of the line
result = self.fh2.readline()
assert result == answers[0][size:], result
# Read and check the other two lines
result = self.fh2.readline()
assert result == answers[1], result
result = self.fh2.readline()
assert result == answers[2], result
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
size = 20
result = self.fh3.read(size)
assert result == answers[0][:size], result
# Use .readline() to read the remainder of the line
result = self.fh3.readline()
assert result == answers[0][size:], result
# Read and check the other two lines
result = self.fh3.readline()
assert result == answers[1], result
result = self.fh3.readline()
assert result == answers[2], result
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No globs should have matched')
except IOError:
# An IOError is expected here
pass
def _test_readline_readlines(self, binary=False, multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile, split=True)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
# Read the first line
result = self.fh.readline()
assert result == questions[0], result
# Use .readlines() to read the remainder of the file
result = self.fh.readlines()
assert result == questions[1:], result
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
# Read the first line
result = self.fh2.readline()
assert result == answers[0], result
# Use .readlines() to read the remainder of the file
result = self.fh2.readlines()
assert result == answers[1:], result
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
# Read the first line
result = self.fh3.readline()
assert result == answers[0], result
# Use .readlines() to read the remainder of the file
result = self.fh3.readlines()
assert result == answers[1:], result
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No globs should have matched')
except IOError:
# An IOError is expected here
pass
def _test_readlines_multifile(self, binary=False, multifile=False):
questions, answers, mode, read_data = \
self._get_values(binary=binary, multifile=multifile, split=True)
with patch('salt.utils.files.fopen', mock_open(read_data=read_data)):
with salt.utils.files.fopen('foo.txt', mode) as self.fh:
result = self.fh.readlines()
assert result == questions, result
if multifile:
with salt.utils.files.fopen('bar.txt', mode) as self.fh2:
result = self.fh2.readlines()
assert result == answers, result
with salt.utils.files.fopen('baz.txt', mode) as self.fh3:
result = self.fh3.readlines()
assert result == answers, result
try:
with salt.utils.files.fopen('helloworld.txt'):
raise Exception('No globs should have matched')
except IOError:
# An IOError is expected here
pass
class MockOpenTestCase(TestCase, MockOpenMixin):
'''
Tests for our mock_open helper to ensure that it behaves as closely as
possible to a real filehandle.
'''
    # Cyrillic characters used to test unicode handling
questions = textwrap.dedent('''\
Шнат is your name?
Шнат is your quest?
Шнат is the airspeed velocity of an unladen swallow?
''')
answers = textwrap.dedent('''\
It is Аятнця, King of the Britons.
To seek тне Holy Grail.
Шнат do you mean? An African or European swallow?
''')
@classmethod
def setUpClass(cls):
cls.questions_lines = cls.questions.splitlines(True)
cls.answers_lines = cls.answers.splitlines(True)
cls.questions_str = salt.utils.stringutils.to_str(cls.questions)
cls.answers_str = salt.utils.stringutils.to_str(cls.answers)
cls.questions_str_lines = cls.questions_str.splitlines(True)
cls.answers_str_lines = cls.answers_str.splitlines(True)
cls.questions_bytes = salt.utils.stringutils.to_bytes(cls.questions)
cls.answers_bytes = salt.utils.stringutils.to_bytes(cls.answers)
cls.questions_bytes_lines = cls.questions_bytes.splitlines(True)
cls.answers_bytes_lines = cls.answers_bytes.splitlines(True)
# When this is used as the read_data, Python 2 should normalize
# cls.questions and cls.answers to str types.
cls.contents = {'foo.txt': cls.questions,
'b*.txt': cls.answers}
cls.contents_bytes = {'foo.txt': cls.questions_bytes,
'b*.txt': cls.answers_bytes}
cls.read_data_as_list = [
'foo', 'bar', 'спам',
IOError(errno.EACCES, 'Permission denied')
]
cls.normalized_read_data_as_list = salt.utils.data.decode(
cls.read_data_as_list,
to_str=True
)
cls.read_data_as_list_bytes = salt.utils.data.encode(cls.read_data_as_list)
def tearDown(self):
'''
Each test should read the entire contents of the mocked filehandle(s).
This confirms that the other read functions return empty strings/lists,
to simulate being at EOF.
'''
for handle_name in ('fh', 'fh2', 'fh3'):
try:
fh = getattr(self, handle_name)
except AttributeError:
continue
log.debug('Running tearDown tests for self.%s', handle_name)
try:
result = fh.read(5)
assert not result, result
result = fh.read()
assert not result, result
result = fh.readline()
assert not result, result
result = fh.readlines()
assert not result, result
# Last but not least, try to read using a for loop. This should not
# read anything as we should hit EOF immediately, before the generator
# in the mocked filehandle has a chance to yield anything. So the
# exception will only be raised if we aren't at EOF already.
for line in fh:
raise Exception(
'Instead of EOF, read the following from {0}: {1}'.format(
handle_name,
line
)
)
except IOError as exc:
if six.text_type(exc) != 'File not open for reading':
raise
del fh
def test_read(self):
'''
Test reading the entire file
'''
self._test_read(binary=False, multifile=False)
self._test_read(binary=True, multifile=False)
self._test_read(binary=False, multifile=True)
self._test_read(binary=True, multifile=True)
def test_read_explicit_size(self):
'''
Test reading with explicit sizes
'''
self._test_read_explicit_size(binary=False, multifile=False)
self._test_read_explicit_size(binary=True, multifile=False)
self._test_read_explicit_size(binary=False, multifile=True)
self._test_read_explicit_size(binary=True, multifile=True)
def test_read_explicit_size_larger_than_file_size(self):
'''
Test reading with an explicit size larger than the size of read_data.
This ensures that we just return the contents up until EOF and that we
don't raise any errors due to the desired size being larger than the
mocked file's size.
'''
self._test_read_explicit_size_larger_than_file_size(
binary=False, multifile=False)
self._test_read_explicit_size_larger_than_file_size(
binary=True, multifile=False)
self._test_read_explicit_size_larger_than_file_size(
binary=False, multifile=True)
self._test_read_explicit_size_larger_than_file_size(
binary=True, multifile=True)
def test_read_for_loop(self):
'''
Test reading the contents of the file line by line in a for loop
'''
self._test_read_for_loop(binary=False, multifile=False)
self._test_read_for_loop(binary=True, multifile=False)
self._test_read_for_loop(binary=False, multifile=True)
self._test_read_for_loop(binary=True, multifile=True)
def test_read_readline(self):
'''
Test reading part of a line using .read(), then reading the rest of the
line (and subsequent lines) using .readline().
'''
self._test_read_readline(binary=False, multifile=False)
self._test_read_readline(binary=True, multifile=False)
self._test_read_readline(binary=False, multifile=True)
self._test_read_readline(binary=True, multifile=True)
def test_readline_readlines(self):
'''
Test reading the first line using .readline(), then reading the rest of
the file using .readlines().
'''
self._test_readline_readlines(binary=False, multifile=False)
self._test_readline_readlines(binary=True, multifile=False)
self._test_readline_readlines(binary=False, multifile=True)
self._test_readline_readlines(binary=True, multifile=True)
def test_readlines(self):
'''
Test reading the entire file using .readlines
'''
self._test_readlines_multifile(binary=False, multifile=False)
self._test_readlines_multifile(binary=True, multifile=False)
self._test_readlines_multifile(binary=False, multifile=True)
self._test_readlines_multifile(binary=True, multifile=True)
def test_read_data_converted_to_dict(self):
'''
Test that a non-dict value for read_data is converted to a dict mapping
'*' to that value.
'''
contents = 'спам'
normalized = salt.utils.stringutils.to_str(contents)
with patch('salt.utils.files.fopen',
mock_open(read_data=contents)) as m_open:
assert m_open.read_data == {'*': normalized}, m_open.read_data
with patch('salt.utils.files.fopen',
mock_open(read_data=self.read_data_as_list)) as m_open:
assert m_open.read_data == {
'*': self.normalized_read_data_as_list,
}, m_open.read_data
def test_read_data_list(self):
'''
Test read_data when it is a list
'''
with patch('salt.utils.files.fopen',
mock_open(read_data=self.read_data_as_list)):
for value in self.normalized_read_data_as_list:
try:
with salt.utils.files.fopen('foo.txt') as self.fh:
result = self.fh.read()
assert result == value, result
except IOError:
# Only raise the caught exception if it wasn't expected
# (i.e. if value is not an exception)
if not isinstance(value, IOError):
raise
def test_read_data_list_bytes(self):
'''
Test read_data when it is a list and the value is a bytestring
'''
with patch('salt.utils.files.fopen',
mock_open(read_data=self.read_data_as_list_bytes)):
for value in self.read_data_as_list_bytes:
try:
with salt.utils.files.fopen('foo.txt', 'rb') as self.fh:
result = self.fh.read()
assert result == value, result
except IOError:
# Only raise the caught exception if it wasn't expected
# (i.e. if value is not an exception)
if not isinstance(value, IOError):
raise
def test_tell(self):
'''
Test the implementation of tell
'''
with patch('salt.utils.files.fopen',
mock_open(read_data=self.contents)):
# Try with reading explicit sizes and then reading the rest of the
# file.
with salt.utils.files.fopen('foo.txt') as self.fh:
self.fh.read(5)
loc = self.fh.tell()
assert loc == 5, loc
self.fh.read(12)
loc = self.fh.tell()
assert loc == 17, loc
self.fh.read()
loc = self.fh.tell()
assert loc == len(self.questions_str), loc
# Try reading way more content then actually exists in the file,
# tell() should return a value equal to the length of the content
with salt.utils.files.fopen('foo.txt') as self.fh:
self.fh.read(999999)
loc = self.fh.tell()
assert loc == len(self.questions_str), loc
# Try reading a few bytes using .read(), then the rest of the line
# using .readline(), then the rest of the file using .readlines(),
# and check the location after each read.
with salt.utils.files.fopen('foo.txt') as self.fh:
# Read a few bytes
self.fh.read(5)
loc = self.fh.tell()
assert loc == 5, loc
# Read the rest of the line. Location should then be at the end
# of the first line.
self.fh.readline()
loc = self.fh.tell()
assert loc == len(self.questions_str_lines[0]), loc
# Read the rest of the file using .readlines()
self.fh.readlines()
loc = self.fh.tell()
assert loc == len(self.questions_str), loc
# Check location while iterating through the filehandle
with salt.utils.files.fopen('foo.txt') as self.fh:
index = 0
for _ in self.fh:
index += 1
loc = self.fh.tell()
assert loc == sum(
len(x) for x in self.questions_str_lines[:index]
), loc
def test_write(self):
'''
Test writing to a filehandle using .write()
'''
# Test opening for non-binary writing
with patch('salt.utils.files.fopen', mock_open()):
with salt.utils.files.fopen('foo.txt', 'w') as self.fh:
for line in self.questions_str_lines:
self.fh.write(line)
assert self.fh.write_calls == self.questions_str_lines, self.fh.write_calls
# Test opening for binary writing using "wb"
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'wb') as self.fh:
for line in self.questions_bytes_lines:
self.fh.write(line)
assert self.fh.write_calls == self.questions_bytes_lines, self.fh.write_calls
# Test opening for binary writing using "ab"
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'ab') as self.fh:
for line in self.questions_bytes_lines:
self.fh.write(line)
assert self.fh.write_calls == self.questions_bytes_lines, self.fh.write_calls
# Test opening for read-and-write using "r+b"
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'r+b') as self.fh:
for line in self.questions_bytes_lines:
self.fh.write(line)
assert self.fh.write_calls == self.questions_bytes_lines, self.fh.write_calls
# Test trying to write str types to a binary filehandle
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'wb') as self.fh:
try:
self.fh.write('foo\n')
except TypeError:
# This exception is expected on Python 3
if not six.PY3:
raise
else:
# This write should work fine on Python 2
if six.PY3:
raise Exception(
'Should not have been able to write a str to a '
'binary filehandle'
)
if six.PY2:
# Try with non-ascii unicode. Note that the write above
# should work because the mocked filehandle should attempt
# a .encode() to convert it to a str type. But when writing
# a string with non-ascii unicode, it should raise a
# UnicodeEncodeError, which is what we are testing here.
try:
self.fh.write(self.questions)
except UnicodeEncodeError:
pass
else:
raise Exception(
'Should not have been able to write non-ascii '
'unicode to a binary filehandle'
)
# Test trying to write bytestrings to a non-binary filehandle
with patch('salt.utils.files.fopen', mock_open()):
with salt.utils.files.fopen('foo.txt', 'w') as self.fh:
try:
self.fh.write(b'foo\n')
except TypeError:
# This exception is expected on Python 3
if not six.PY3:
raise
else:
# This write should work fine on Python 2
if six.PY3:
raise Exception(
'Should not have been able to write a bytestring '
'to a non-binary filehandle'
)
if six.PY2:
# Try with non-ascii unicode. Note that the write above
# should work because the mocked filehandle should attempt
# a .encode() to convert it to a str type. But when writing
# a string with non-ascii unicode, it should raise a
# UnicodeEncodeError, which is what we are testing here.
try:
self.fh.write(self.questions)
except UnicodeEncodeError:
pass
else:
raise Exception(
'Should not have been able to write non-ascii '
'unicode to a binary filehandle'
)
def test_writelines(self):
'''
Test writing to a filehandle using .writelines()
'''
# Test opening for non-binary writing
with patch('salt.utils.files.fopen', mock_open()):
with salt.utils.files.fopen('foo.txt', 'w') as self.fh:
self.fh.writelines(self.questions_str_lines)
assert self.fh.writelines_calls == [self.questions_str_lines], self.fh.writelines_calls
# Test opening for binary writing using "wb"
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'wb') as self.fh:
self.fh.writelines(self.questions_bytes_lines)
assert self.fh.writelines_calls == [self.questions_bytes_lines], self.fh.writelines_calls
# Test opening for binary writing using "ab"
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'ab') as self.fh:
self.fh.writelines(self.questions_bytes_lines)
assert self.fh.writelines_calls == [self.questions_bytes_lines], self.fh.writelines_calls
# Test opening for read-and-write using "r+b"
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'r+b') as self.fh:
self.fh.writelines(self.questions_bytes_lines)
assert self.fh.writelines_calls == [self.questions_bytes_lines], self.fh.writelines_calls
# Test trying to write str types to a binary filehandle
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
with salt.utils.files.fopen('foo.txt', 'wb') as self.fh:
try:
self.fh.writelines(['foo\n'])
except TypeError:
# This exception is expected on Python 3
if not six.PY3:
raise
else:
# This write should work fine on Python 2
if six.PY3:
raise Exception(
'Should not have been able to write a str to a '
'binary filehandle'
)
if six.PY2:
# Try with non-ascii unicode. Note that the write above
# should work because the mocked filehandle should attempt
# a .encode() to convert it to a str type. But when writing
# a string with non-ascii unicode, it should raise a
# UnicodeEncodeError, which is what we are testing here.
try:
self.fh.writelines(self.questions_lines)
except UnicodeEncodeError:
pass
else:
raise Exception(
'Should not have been able to write non-ascii '
'unicode to a binary filehandle'
)
# Test trying to write bytestrings to a non-binary filehandle
with patch('salt.utils.files.fopen', mock_open()):
with salt.utils.files.fopen('foo.txt', 'w') as self.fh:
try:
self.fh.write([b'foo\n'])
except TypeError:
# This exception is expected on Python 3
if not six.PY3:
raise
else:
# This write should work fine on Python 2
if six.PY3:
raise Exception(
'Should not have been able to write a bytestring '
'to a non-binary filehandle'
)
if six.PY2:
# Try with non-ascii unicode. Note that the write above
# should work because the mocked filehandle should attempt
# a .encode() to convert it to a str type. But when writing
# a string with non-ascii unicode, it should raise a
# UnicodeEncodeError, which is what we are testing here.
try:
self.fh.writelines(self.questions_lines)
except UnicodeEncodeError:
pass
else:
raise Exception(
'Should not have been able to write non-ascii '
'unicode to a binary filehandle'
)
def test_open(self):
'''
Test that opening a file for binary reading with string read_data
fails, and that the same thing happens for non-binary filehandles and
bytestring read_data.
NOTE: This test should always pass on PY2 since MockOpen will normalize
unicode types to str types.
'''
try:
with patch('salt.utils.files.fopen', mock_open()):
try:
with salt.utils.files.fopen('foo.txt', 'rb') as self.fh:
self.fh.read()
except TypeError:
pass
else:
if six.PY3:
raise Exception(
'Should not have been able open for binary read with '
'non-bytestring read_data'
)
with patch('salt.utils.files.fopen', mock_open(read_data=b'')):
try:
with salt.utils.files.fopen('foo.txt', 'r') as self.fh2:
self.fh2.read()
except TypeError:
pass
else:
if six.PY3:
raise Exception(
'Should not have been able open for non-binary read '
'with bytestring read_data'
)
finally:
# Make sure we destroy the filehandles before the teardown, as they
# will also try to read and this will generate another exception
delattr(self, 'fh')
delattr(self, 'fh2')
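# Illustrative sketch (not part of the original test module): the pattern the
# tests above exercise - patch salt.utils.files.fopen with mock_open and map
# filename/glob patterns to canned contents. The names here are hypothetical.
def _example_mock_open_pattern():
    contents = {'numbers.txt': '123\n', '*.log': 'entry\n'}
    with patch('salt.utils.files.fopen', mock_open(read_data=contents)):
        with salt.utils.files.fopen('numbers.txt') as fh:
            return fh.read()  # '123\n'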
| 44.043312 | 105 | 0.532394 |
07095e6e79ac5bc62216bd46213bc175b6f61b92 | 808 | py | Python | server/setup.py | hustrlee/opencv-python-getting-started | b33f155a1905d793b40bbd18aa725bce8ab897f9 | [
"MIT"
] | null | null | null | server/setup.py | hustrlee/opencv-python-getting-started | b33f155a1905d793b40bbd18aa725bce8ab897f9 | [
"MIT"
] | null | null | null | server/setup.py | hustrlee/opencv-python-getting-started | b33f155a1905d793b40bbd18aa725bce8ab897f9 | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
from setuptools import setup, find_packages
NAME = "bill_segmentation"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"connexion>=2.0.2",
"swagger-ui-bundle>=0.0.2",
"python_dateutil>=2.6.0"
]
setup(
name=NAME,
version=VERSION,
    description="Bill segmentation API",
author_email="",
url="",
keywords=["OpenAPI", "票据分割 API"],
install_requires=REQUIRES,
packages=find_packages(),
package_data={'': ['openapi/openapi.yaml']},
include_package_data=True,
entry_points={
'console_scripts': ['bill_segmentation=bill_segmentation.__main__:main']},
long_description="""\
    Normalize and automatically segment medical bills according to bill templates
"""
)
| 20.717949 | 82 | 0.667079 |
f4af4b0a59fd33fc5f44131cd8480c1c56b3561c | 4,635 | py | Python | tools/api/androapi_format.py | tkgwJeff/androguard | bf653d6340a05023eb7d87a70b9c7ae6327eb3cc | [
"Apache-2.0"
] | 12 | 2016-12-26T12:40:42.000Z | 2022-01-04T23:18:12.000Z | tools/api/androapi_format.py | dweinstein/androguard | 7dd2791212e815e869a42541655a1f20cc548665 | [
"Apache-2.0"
] | null | null | null | tools/api/androapi_format.py | dweinstein/androguard | 7dd2791212e815e869a42541655a1f20cc548665 | [
"Apache-2.0"
] | 6 | 2017-03-20T02:03:17.000Z | 2022-01-13T22:36:22.000Z | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2010, Anthony Desnos <desnos at t0t0.org>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, re, string
from dvm_permissions_unformatted import PERMISSIONS
from permissions_by_hand import PERMISSIONS_BY_HAND
BASIC_TYPES = {
"byte": "B",
"char": "C",
"double": "D",
"float": "F",
"int": "I",
"long": "J",
"short": "S",
"boolean": "B",
"void": "V",
}
ADVANCED_TYPES = {
"String": "Ljava/lang/String;",
"List": "Ljava/util/List;",
"AccountManagerFuture": "Landroid/accounts/AccountManagerFuture;",
"CellLocation": "Landroid/telephony/CellLocation;",
"Uri": "Landroid/net/Uri;",
"Cursor": "Landroid/database/Cursor;",
"Set": "Ljava/util/Set;",
"BluetoothServerSocket": "Landroid/bluetooth/BluetoothServerSocket;",
"BluetoothSocket": "Landroid/bluetooth/BluetoothSocket;",
"DownloadManager.Request": "Landroid/app/DownloadManager/Request;",
"PendingIntent": "Landroid/app/PendingIntent;",
"SmsManager": "Landroid/telephony/SmsManager;",
"Bitmap": "Landroid/graphics/Bitmap;",
"IBinder": "Landroid/os/IBinder;",
}
def translateDescParams(desc_params):
desc_params = desc_params.replace(" ", "")
buff = ""
for elem in desc_params.split(","):
if elem != "":
tab = ""
if "[" in elem:
tab = "[" * string.count(elem, "[")
elem = elem[:tab.find("[") - 2]
if elem not in BASIC_TYPES:
if elem in ADVANCED_TYPES:
buff += tab + ADVANCED_TYPES[elem] + " "
else:
buff += tab + "L" + elem.replace(".", "/") + "; "
else:
buff += tab + BASIC_TYPES[elem] + " "
buff = buff[:-1]
return buff
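# Illustrative sketch (not part of the original script): translateDescParams
# turns a Java-style parameter list into Dalvik type descriptors, e.g.
# "int, String" -> "I Ljava/lang/String;".
def _example_translate_params():
    return translateDescParams("int, String")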
def translateDescReturn(desc_return):
buff = ""
for elem in desc_return.split(" "):
tab = ""
if "[" in elem:
tab = "[" * string.count(elem, "[")
elem = elem[:tab.find("[") - 2]
if elem in BASIC_TYPES:
buff += tab + BASIC_TYPES[elem] + " "
else:
if elem in ADVANCED_TYPES:
buff += tab + ADVANCED_TYPES[elem] + " "
else:
if "." in elem:
buff += tab + "L" + elem.replace(".", "/") + "; "
buff = buff[:-1]
return buff
def translateToCLASS(desc_params, desc_return):
print desc_params, desc_return,
buff = "(" + translateDescParams(desc_params[desc_params.find(
"(") + 1:-1]) + ")" + translateDescReturn(desc_return)
print "----->", buff
return [desc_params[:desc_params.find("(")], buff]
def translateToCLASS2(constant_name, desc_return):
return [constant_name, translateDescReturn(desc_return)]
PERMISSIONS.update(PERMISSIONS_BY_HAND)
for perm in PERMISSIONS:
for package in PERMISSIONS[perm]:
for element in PERMISSIONS[perm][package]:
if element[0] == "F":
element.extend(translateToCLASS(element[1], element[2]))
elif element[0] == "C":
element.extend(translateToCLASS2(element[1], element[2]))
with open("./core/bytecodes/api_permissions.py", "w") as fd:
fd.write("DVM_PERMISSIONS_BY_PERMISSION = {\n")
for perm in PERMISSIONS:
fd.write("\"%s\" : {\n" % perm)
for package in PERMISSIONS[perm]:
fd.write("\t\"L%s;\" : [\n" % package.replace(".", "/"))
for element in PERMISSIONS[perm][package]:
fd.write("\t\t(\"%s\", \"%s\", \"%s\"),\n" %
(element[0], element[-2], element[-1]))
fd.write("\t],\n")
fd.write("},\n")
fd.write("}\n\n")
fd.write("DVM_PERMISSIONS_BY_ELEMENT = { \n")
for perm in PERMISSIONS:
for package in PERMISSIONS[perm]:
for element in PERMISSIONS[perm][package]:
fd.write("\t\"L%s;-%s-%s\" : \"%s\",\n" % (package.replace(
".", "/"), element[-2], element[-1], perm))
fd.write("}\n")
| 31.317568 | 75 | 0.575836 |
88ba111b4cbaba38bc64a1ecf4eb7f1374c1f0fc | 867 | py | Python | tests/test_modules/test_builtin/test_choicepart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_builtin/test_choicepart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_builtin/test_choicepart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | import unittest
from malcolm.core import call_with_params
from malcolm.modules.builtin.parts import ChoicePart
class TestChoicePart(unittest.TestCase):
def setUp(self):
self.o = call_with_params(
ChoicePart, name="cp", description="desc", choices=["a", "b"],
initialValue="a", writeable=True)
self.setter = list(self.o.create_attribute_models())[0][2]
def test_init(self):
assert self.o.name == "cp"
assert self.o.attr.value == "a"
assert self.o.attr.meta.description == "desc"
assert self.o.attr.meta.choices == ("a", "b")
assert self.o.attr.meta.tags == ("config",)
def test_setter(self):
assert self.o.attr.value == "a"
self.setter("b")
assert self.o.attr.value == "b"
with self.assertRaises(ValueError):
self.setter("c")
| 30.964286 | 74 | 0.61361 |
6ea24326db0fdae49b10d11cc9d5d7cf067d3192 | 294 | py | Python | pdfkit/__init__.py | Noorquacker/Remarkable | ff30d5b9e2e5f262dc7603860c01faadc86c67c6 | [
"MIT"
] | 2,074 | 2016-06-30T20:38:27.000Z | 2022-03-23T22:11:47.000Z | pdfkit/__init__.py | Noorquacker/Remarkable | ff30d5b9e2e5f262dc7603860c01faadc86c67c6 | [
"MIT"
] | 357 | 2016-07-01T12:24:40.000Z | 2022-03-30T18:58:45.000Z | pdfkit/__init__.py | Noorquacker/Remarkable | ff30d5b9e2e5f262dc7603860c01faadc86c67c6 | [
"MIT"
] | 323 | 2016-07-01T13:26:08.000Z | 2022-03-17T02:16:48.000Z | # -*- coding: utf-8 -*-
"""
Wkhtmltopdf python wrapper to convert html to pdf using the webkit rendering engine and qt
"""
__author__ = 'Golovanov Stanislav'
__version__ = '0.4.1'
__license__ = 'MIT'
from .pdfkit import PDFKit
from .api import from_url, from_file, from_string, configuration
| 24.5 | 90 | 0.741497 |
953461f76b8edbcab57f40b9742c03c4f165252f | 10,235 | py | Python | src/toil/test/sort/sort.py | david4096/toil | 491e3cceafc2462395bb83ce759da7b008bb27f3 | [
"Apache-2.0"
] | null | null | null | src/toil/test/sort/sort.py | david4096/toil | 491e3cceafc2462395bb83ce759da7b008bb27f3 | [
"Apache-2.0"
] | 1 | 2017-10-28T00:39:00.000Z | 2017-10-28T00:39:00.000Z | src/toil/test/sort/sort.py | david4096/toil | 491e3cceafc2462395bb83ce759da7b008bb27f3 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demonstration of toil. Sorts the lines of a file into ascending order by doing a parallel merge sort.
"""
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
from argparse import ArgumentParser
import os
import random
import logging
import shutil
from toil.common import Toil
from toil.job import Job
defaultLines = 1000
defaultLineLen = 50
sortMemory = '1000M'
def setup(job, inputFile, N, downCheckpoints, options):
"""
Sets up the sort.
Returns the FileID of the sorted file
"""
job.fileStore.logToMaster("Starting the merge sort")
return job.addChildJobFn(down,
inputFile, N,
downCheckpoints,
options = options,
memory='1000M').rv()
def down(job, inputFileStoreID, N, downCheckpoints, options, memory=sortMemory):
"""
Input is a file and a range into that file to sort and an output location in which
to write the sorted file.
If the range is larger than a threshold N the range is divided recursively and
a follow on job is then created which merges back the results else
the file is sorted and placed in the output.
"""
# Read the file
inputFile = job.fileStore.readGlobalFile(inputFileStoreID, cache=False)
length = os.path.getsize(inputFile)
if length > N:
# We will subdivide the file
job.fileStore.logToMaster("Splitting file: %s of size: %s"
% (inputFileStoreID, length), level=logging.CRITICAL)
# Split the file into two copies
midPoint = getMidPoint(inputFile, 0, length)
t1 = job.fileStore.getLocalTempFile()
with open(t1, 'w') as fH:
copySubRangeOfFile(inputFile, 0, midPoint+1, fH)
t2 = job.fileStore.getLocalTempFile()
with open(t2, 'w') as fH:
copySubRangeOfFile(inputFile, midPoint+1, length, fH)
# Call down recursively. By giving the rv() of the two jobs as inputs to the follow-on job, up,
# we communicate the dependency without hindering concurrency.
return job.addFollowOnJobFn(up,
job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), N, downCheckpoints,
checkpoint=downCheckpoints, options=options, memory=options.sortMemory).rv(),
job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), N, downCheckpoints,
checkpoint=downCheckpoints, options=options, memory=options.mergeMemory).rv(), options=options, memory=options.sortMemory).rv()
else:
# We can sort this bit of the file
job.fileStore.logToMaster("Sorting file: %s of size: %s"
% (inputFileStoreID, length), level=logging.CRITICAL)
# Sort the copy and write back to the fileStore
shutil.copyfile(inputFile, inputFile + '.sort')
sort(inputFile + '.sort')
return job.fileStore.writeGlobalFile(inputFile + '.sort')
def up(job, inputFileID1, inputFileID2, options, memory=sortMemory):
"""
Merges the two files and places them in the output.
"""
with job.fileStore.writeGlobalFileStream() as (fileHandle, outputFileStoreID):
with job.fileStore.readGlobalFileStream(inputFileID1) as inputFileHandle1:
with job.fileStore.readGlobalFileStream(inputFileID2) as inputFileHandle2:
merge(inputFileHandle1, inputFileHandle2, fileHandle)
job.fileStore.logToMaster("Merging %s and %s to %s"
% (inputFileID1, inputFileID2, outputFileStoreID))
        # Clean up the input files - these deletes will occur after successful completion.
job.fileStore.deleteGlobalFile(inputFileID1)
job.fileStore.deleteGlobalFile(inputFileID2)
return outputFileStoreID
def sort(file):
"""
Sorts the given file.
"""
fileHandle = open(file, 'r')
lines = fileHandle.readlines()
fileHandle.close()
lines.sort()
fileHandle = open(file, 'w')
for line in lines:
fileHandle.write(line)
fileHandle.close()
def merge(fileHandle1, fileHandle2, outputFileHandle):
"""
Merges together two files maintaining sorted order.
"""
line2 = fileHandle2.readline()
for line1 in fileHandle1.readlines():
while line2 != '' and line2 <= line1:
outputFileHandle.write(line2)
line2 = fileHandle2.readline()
outputFileHandle.write(line1)
while line2 != '':
outputFileHandle.write(line2)
line2 = fileHandle2.readline()
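# Illustrative sketch (not part of the original workflow): merge() works on any
# pair of sorted, line-oriented file handles, e.g. in-memory streams (Python 3).
def _example_merge():
    import io
    out = io.StringIO()
    merge(io.StringIO("a\nc\n"), io.StringIO("b\nd\n"), out)
    return out.getvalue()  # "a\nb\nc\nd\n"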
def copySubRangeOfFile(inputFile, fileStart, fileEnd, outputFileHandle):
"""
Copies the range (in bytes) between fileStart and fileEnd to the given
output file handle.
"""
with open(inputFile, 'r') as fileHandle:
fileHandle.seek(fileStart)
data = fileHandle.read(fileEnd - fileStart)
assert len(data) == fileEnd - fileStart
outputFileHandle.write(data)
def getMidPoint(file, fileStart, fileEnd):
"""
Finds the point in the file to split.
Returns an int i such that fileStart <= i < fileEnd
"""
fileHandle = open(file, 'r')
midPoint = old_div((fileStart + fileEnd), 2)
assert midPoint >= fileStart
fileHandle.seek(midPoint)
line = fileHandle.readline()
assert len(line) >= 1
if len(line) + midPoint < fileEnd:
return midPoint + len(line) - 1
fileHandle.seek(fileStart)
line = fileHandle.readline()
assert len(line) >= 1
assert len(line) + fileStart <= fileEnd
return len(line) + fileStart - 1
def makeFileToSort(fileName, lines=defaultLines, lineLen=defaultLineLen):
with open(fileName, 'w') as fileHandle:
for _ in range(lines):
line = "".join(random.choice('actgACTGNXYZ') for _ in range(lineLen - 1)) + '\n'
fileHandle.write(line)
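# Illustrative sketch (not part of the original workflow): the helpers above can
# be exercised without Toil - generate a small file, sort it in place and check
# the ordering. The file name is hypothetical.
def _example_local_sort():
    makeFileToSort('example.txt', lines=10, lineLen=5)
    sort('example.txt')
    with open('example.txt') as fh:
        lines = fh.readlines()
    assert lines == sorted(lines)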
def main(options=None):
if not options:
# deal with command line arguments
parser = ArgumentParser()
Job.Runner.addToilOptions(parser)
parser.add_argument('--numLines', default=defaultLines, help='Number of lines in file to sort.', type=int)
parser.add_argument('--lineLength', default=defaultLineLen, help='Length of lines in file to sort.', type=int)
parser.add_argument("--fileToSort", help="The file you wish to sort")
parser.add_argument("--outputFile", help="Where the sorted output will go")
parser.add_argument("--overwriteOutput", help="Write over the output file if it already exists.", default=True)
parser.add_argument("--N", dest="N",
                            help="The threshold below which a serial sort function is used to sort the file. "
                                 "All lines must be of length less than or equal to N or the program will fail",
default=10000)
parser.add_argument('--downCheckpoints', action='store_true',
                            help='If this option is set, the workflow will make checkpoints on its way through '
'the recursive "down" part of the sort')
parser.add_argument("--sortMemory", dest="sortMemory",
help="Memory for jobs that sort chunks of the file.",
default=None)
parser.add_argument("--mergeMemory", dest="mergeMemory",
help="Memory for jobs that collate results.",
default=None)
options = parser.parse_args()
if not hasattr(options, "sortMemory") or not options.sortMemory:
options.sortMemory = sortMemory
if not hasattr(options, "mergeMemory") or not options.mergeMemory:
options.mergeMemory = sortMemory
# do some input verification
sortedFileName = options.outputFile or "sortedFile.txt"
if not options.overwriteOutput and os.path.exists(sortedFileName):
print("the output file {} already exists. Delete it to run the sort example again or use --overwriteOutput=True".format(sortedFileName))
exit()
fileName = options.fileToSort
if options.fileToSort is None:
# make the file ourselves
fileName = 'fileToSort.txt'
        if os.path.exists(fileName):
            print("Sorting existing file %s" % fileName)
        else:
            print('No sort file specified. Generating one automatically called %s.' % fileName)
makeFileToSort(fileName=fileName, lines=options.numLines, lineLen=options.lineLength)
else:
if not os.path.exists(options.fileToSort):
raise RuntimeError("File to sort does not exist: %s" % options.fileToSort)
if int(options.N) <= 0:
raise RuntimeError("Invalid value of N: %s" % options.N)
# Now we are ready to run
with Toil(options) as workflow:
sortedFileURL = 'file://' + os.path.abspath(sortedFileName)
if not workflow.options.restart:
sortFileURL = 'file://' + os.path.abspath(fileName)
sortFileID = workflow.importFile(sortFileURL)
sortedFileID = workflow.start(Job.wrapJobFn(setup, sortFileID, int(options.N), options.downCheckpoints, options=options,
memory=sortMemory))
else:
sortedFileID = workflow.restart()
workflow.exportFile(sortedFileID, sortedFileURL)
if __name__ == '__main__':
main()
| 42.824268 | 181 | 0.644846 |
6a4890b38fb468ae02ee0f86b54bf68880e9a51b | 957 | py | Python | setup.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | [
"Apache-2.0"
] | null | null | null | setup.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | [
"Apache-2.0"
] | null | null | null | setup.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | [
"Apache-2.0"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as file:
requirements = file.readlines()
install_requires = "".join(
requirements
).splitlines() # remove endline in each element
setuptools.setup(
name="SciDataTool",
version="2.2.2",
author="Helene Toubin",
author_email="helene.toubin@eomys.com",
description="Scientific Data Tool",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Eomys/SciDataTool",
download_url="https://github.com/Eomys/SciDataTool/archive/2.2.2.tar.gz",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires=">=2.7",
install_requires=install_requires,
)
| 30.870968 | 77 | 0.677116 |
1783f7e2075026faffb3c8f6f23c41df171add5f | 1,494 | py | Python | bot/rasa/core/wexin_channel.py | newsettle/ns4_chatbot | 526b97aa31292c28d10518bbfaa7466b8ba109ee | [
"Apache-2.0"
] | 51 | 2019-03-29T11:47:55.000Z | 2021-04-16T02:40:35.000Z | bot/rasa/core/wexin_channel.py | piginzoo/ns4_chatbot | 526b97aa31292c28d10518bbfaa7466b8ba109ee | [
"Apache-2.0"
] | 7 | 2019-04-16T01:46:01.000Z | 2022-03-11T23:44:09.000Z | bot/rasa/core/wexin_channel.py | newsettle/ns4_chatbot | 526b97aa31292c28d10518bbfaa7466b8ba109ee | [
"Apache-2.0"
] | 20 | 2019-04-02T03:37:38.000Z | 2021-12-31T09:25:12.000Z | # -*- coding: UTF-8 -*-
from rasa_core.channels.channel import UserMessage
from rasa_core.channels.channel import InputChannel, OutputChannel
import random, time
from wxpy import *
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Output channel: used when a message needs to be sent out to WeChat
class WeixinOutputChannel(OutputChannel):
def __init__(self, weixin_group = None):
self.weixin_group = weixin_group
    # `message` is the utterance produced by rasa; it is forwarded to the WeChat group
    def send_text_message(self, recipient_id, message):
        print "About to push a message to the WeChat bot: %s" % message
        time.sleep(3*random.random())  # sleep up to 3 seconds to avoid messaging WeChat too often and getting the account banned
self.weixin_group.send(message)
# Input channel: receives messages from WeChat and forwards them to the bot
class WeixinInputChannel(InputChannel):
def __init__(self, bot = None):
self.bot = bot
def _record_messages(self, on_message):
group_name = u'聊天机器人'
groups = self.bot.groups().search(group_name)
        if groups is None or len(groups) == 0:
            print("Chat group not found: %s" % group_name)
exit()
the_group = groups[0]
        print("Found target group: %s" % the_group)
@self.bot.register(the_group, run_async=False)
def print_message(msg):
print(msg.text)
            # This is where WeChat hooks in: forward the WeChat message to the RASA system
on_message(UserMessage(msg.text, WeixinOutputChannel(the_group)))
def start_async_listening(self, message_queue):
self._record_messages(message_queue.enqueue)
def start_sync_listening(self, message_handler):
self._record_messages(message_handler)
| 31.125 | 77 | 0.692102 |
d42cb1fe7dbc3b2a57cd568ca0d40a5fdc0264f2 | 706 | py | Python | var/spack/repos/builtin/packages/globus-toolkit/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2021-03-19T13:12:47.000Z | 2021-03-19T13:12:47.000Z | var/spack/repos/builtin/packages/globus-toolkit/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/globus-toolkit/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class GlobusToolkit(AutotoolsPackage):
"""The Globus Toolkit is an open source software toolkit used for building
grids"""
homepage = "http://toolkit.globus.org"
url = "http://toolkit.globus.org/ftppub/gt6/installers/src/globus_toolkit-6.0.1506371041.tar.gz"
version('6.0.1506371041', 'e17146f68e03b3482aaea3874d4087a5')
version('6.0.1493989444', '9e9298b61d045e65732e12c9727ceaa8')
depends_on('pkgconfig', type='build')
depends_on('openssl')
| 33.619048 | 105 | 0.735127 |
83edb3c165ed90338233f26f92095ae4690dd2ce | 600 | py | Python | miniapp/mini_benchmarks/shared/wait_comp_benchmarks.py | qyz96/tasktorrent | 4418d83da7de657363ac99ee263602794a0b97a5 | [
"MIT"
] | 23 | 2019-09-29T19:33:29.000Z | 2022-03-25T01:48:40.000Z | miniapp/mini_benchmarks/shared/wait_comp_benchmarks.py | qyz96/tasktorrent | 4418d83da7de657363ac99ee263602794a0b97a5 | [
"MIT"
] | 3 | 2020-03-11T18:14:08.000Z | 2020-05-09T22:32:56.000Z | miniapp/mini_benchmarks/shared/wait_comp_benchmarks.py | qyz96/tasktorrent | 4418d83da7de657363ac99ee263602794a0b97a5 | [
"MIT"
] | 7 | 2019-10-22T06:40:03.000Z | 2021-12-01T08:17:39.000Z | import subprocess
import os
repeat = 25
for threads in [1, 2, 4, 8, 16]:
for time in [1e-5, 1e-4]:
tasks = round(threads * 1.0 / time)
subprocess.run(["./ttor_wait", str(threads), str(tasks), str(time), "1", str(repeat), "0"])
os.environ['OMP_NUM_THREADS'] = str(threads)
os.environ['STARPU_NCPU'] = str(threads)
subprocess.run(["./omp_wait", str(tasks), str(time), str(repeat), "0"])
subprocess.run(["./starpu_wait", str(tasks), str(time), str(repeat), "0"])
subprocess.run(["./starpu_wait_stf", str(tasks), str(time), str(repeat), "0"])
| 42.857143 | 99 | 0.596667 |
6fb86e23347856c1399992080c9fc74e11195a6f | 2,796 | py | Python | rlpyt/samplers/serial/collectors.py | tristandeleu/rlpyt | 22eccb4e2b33d3c52947a27b6d300b575e36a3ea | [
"MIT"
] | 1 | 2021-04-24T16:42:18.000Z | 2021-04-24T16:42:18.000Z | rlpyt/samplers/serial/collectors.py | tristandeleu/rlpyt | 22eccb4e2b33d3c52947a27b6d300b575e36a3ea | [
"MIT"
] | null | null | null | rlpyt/samplers/serial/collectors.py | tristandeleu/rlpyt | 22eccb4e2b33d3c52947a27b6d300b575e36a3ea | [
"MIT"
] | null | null | null |
import numpy as np
from rlpyt.samplers.collectors import BaseEvalCollector
from rlpyt.agents.base import AgentInputs
from rlpyt.utils.buffer import buffer_from_example, torchify_buffer, numpify_buffer
from rlpyt.utils.logging import logger
# For sampling, serial sampler can use Cpu collectors.
class SerialEvalCollector(BaseEvalCollector):
"""Does not record intermediate data."""
def __init__(
self,
envs,
agent,
TrajInfoCls,
max_T,
max_trajectories=None,
):
super().__init__(0, # rank
envs,
TrajInfoCls,
None, # traj_infos_queue
max_T,
agent=agent)
self.max_trajectories = max_trajectories
def collect_evaluation(self, itr):
traj_infos = [self.TrajInfoCls() for _ in self.envs]
completed_traj_infos = list()
observations = list()
for env in self.envs:
observations.append(env.reset())
observation = buffer_from_example(observations[0], len(self.envs))
for b, o in enumerate(observations):
observation[b] = o
action = buffer_from_example(self.envs[0].action_space.null_value(),
len(self.envs))
reward = np.zeros(len(self.envs), dtype="float32")
obs_pyt, act_pyt, rew_pyt = torchify_buffer((observation, action, reward))
self.agent.reset()
self.agent.eval_mode(itr)
for t in range(self.max_T):
act_pyt, agent_info = self.agent.step(obs_pyt, act_pyt, rew_pyt)
action = numpify_buffer(act_pyt)
for b, env in enumerate(self.envs):
o, r, d, env_info = env.step(action[b])
traj_infos[b].step(observation[b], action[b], r, d,
agent_info[b], env_info)
if getattr(env_info, "traj_done", d):
completed_traj_infos.append(traj_infos[b].terminate(o))
traj_infos[b] = self.TrajInfoCls()
o = env.reset()
if d:
action[b] = 0 # Prev_action for next step.
r = 0
self.agent.reset_one(idx=b)
observation[b] = o
reward[b] = r
if (self.max_trajectories is not None and
len(completed_traj_infos) >= self.max_trajectories):
logger.log("Evaluation reached max num trajectories "
f"({self.max_trajectories}).")
break
if t == self.max_T - 1:
logger.log("Evaluation reached max num time steps "
f"({self.max_T}).")
return completed_traj_infos
| 38.833333 | 83 | 0.555794 |
a86c4c29b6ca0f4b861017085a7675878a286a35 | 6,782 | py | Python | sdk/lusid/models/i_unit_definition_dto.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | sdk/lusid/models/i_unit_definition_dto.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | sdk/lusid/models/i_unit_definition_dto.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class IUnitDefinitionDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'schema': 'str',
'code': 'str',
'display_name': 'str',
'description': 'str'
}
attribute_map = {
'schema': 'schema',
'code': 'code',
'display_name': 'displayName',
'description': 'description'
}
required_map = {
'schema': 'optional',
'code': 'optional',
'display_name': 'optional',
'description': 'optional'
}
def __init__(self, schema=None, code=None, display_name=None, description=None, local_vars_configuration=None): # noqa: E501
"""IUnitDefinitionDto - a model defined in OpenAPI"
:param schema: The available values are: NoUnits, Basic, Iso4217Currency
:type schema: str
:param code:
:type code: str
:param display_name:
:type display_name: str
:param description:
:type description: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._schema = None
self._code = None
self._display_name = None
self._description = None
self.discriminator = None
if schema is not None:
self.schema = schema
self.code = code
self.display_name = display_name
self.description = description
@property
def schema(self):
"""Gets the schema of this IUnitDefinitionDto. # noqa: E501
The available values are: NoUnits, Basic, Iso4217Currency # noqa: E501
:return: The schema of this IUnitDefinitionDto. # noqa: E501
:rtype: str
"""
return self._schema
@schema.setter
def schema(self, schema):
"""Sets the schema of this IUnitDefinitionDto.
The available values are: NoUnits, Basic, Iso4217Currency # noqa: E501
:param schema: The schema of this IUnitDefinitionDto. # noqa: E501
:type schema: str
"""
allowed_values = ["NoUnits", "Basic", "Iso4217Currency"] # noqa: E501
if self.local_vars_configuration.client_side_validation and schema not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `schema` ({0}), must be one of {1}" # noqa: E501
.format(schema, allowed_values)
)
self._schema = schema
@property
def code(self):
"""Gets the code of this IUnitDefinitionDto. # noqa: E501
:return: The code of this IUnitDefinitionDto. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this IUnitDefinitionDto.
:param code: The code of this IUnitDefinitionDto. # noqa: E501
:type code: str
"""
self._code = code
@property
def display_name(self):
"""Gets the display_name of this IUnitDefinitionDto. # noqa: E501
:return: The display_name of this IUnitDefinitionDto. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this IUnitDefinitionDto.
:param display_name: The display_name of this IUnitDefinitionDto. # noqa: E501
:type display_name: str
"""
self._display_name = display_name
@property
def description(self):
"""Gets the description of this IUnitDefinitionDto. # noqa: E501
:return: The description of this IUnitDefinitionDto. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this IUnitDefinitionDto.
:param description: The description of this IUnitDefinitionDto. # noqa: E501
:type description: str
"""
self._description = description
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IUnitDefinitionDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IUnitDefinitionDto):
return True
return self.to_dict() != other.to_dict()
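# A minimal usage sketch (illustrative, not part of the generated SDK): the
# field values below are made up purely to show the round trip through
# to_dict(); `schema` must be one of NoUnits, Basic or Iso4217Currency.
if __name__ == "__main__":
    example = IUnitDefinitionDto(
        schema="Iso4217Currency",
        code="GBP",
        display_name="Pound Sterling",
        description="Example unit definition",
    )
    print(example.to_dict(serialize=True))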
| 28.737288 | 129 | 0.585668 |
54baebb7dc29664e745e67af28590536da0baef0 | 2,174 | py | Python | Tests/test_SearchIO_fasta_m10_index.py | lukasz-kozlowski/biopython | 6b601cf09234e1e82cfc94ad5030389036cb6343 | ["BSD-3-Clause"] | 2,856 | 2015-01-01T07:10:06.000Z | 2022-03-31T18:17:25.000Z | Tests/test_SearchIO_fasta_m10_index.py | lukasz-kozlowski/biopython | 6b601cf09234e1e82cfc94ad5030389036cb6343 | ["BSD-3-Clause"] | 3,429 | 2015-01-05T11:11:42.000Z | 2022-03-31T13:08:10.000Z | Tests/test_SearchIO_fasta_m10_index.py | lukasz-kozlowski/biopython | 6b601cf09234e1e82cfc94ad5030389036cb6343 | ["BSD-3-Clause"] | 1,619 | 2015-01-05T13:07:11.000Z | 2022-03-31T19:19:52.000Z |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for SearchIO fasta-m10 indexing."""
import os
import unittest
from search_tests_common import CheckIndex
class FastaM10IndexCases(CheckIndex):
fmt = "fasta-m10"
def test_output_002(self):
"""Test fasta-m10 indexing, fasta34, multiple queries."""
filename = os.path.join("Fasta", "output002.m10")
self.check_index(filename, self.fmt)
def test_output_001(self):
"""Test fasta-m10 indexing, fasta35, multiple queries."""
filename = os.path.join("Fasta", "output001.m10")
self.check_index(filename, self.fmt)
def test_output_005(self):
"""Test fasta-m10 indexing, ssearch35, multiple queries."""
filename = os.path.join("Fasta", "output005.m10")
self.check_index(filename, self.fmt)
def test_output_008(self):
"""Test fasta-m10 indexing, tfastx36, multiple queries."""
filename = os.path.join("Fasta", "output008.m10")
self.check_index(filename, self.fmt)
def test_output_009(self):
"""Test fasta-m10 indexing, fasta36, multiple queries."""
filename = os.path.join("Fasta", "output009.m10")
self.check_index(filename, self.fmt)
def test_output_010(self):
"""Test fasta-m10 indexing, fasta36, single query, no hits."""
filename = os.path.join("Fasta", "output010.m10")
self.check_index(filename, self.fmt)
def test_output_011(self):
"""Test fasta-m10 indexing, fasta36, single query, hits with single hsp."""
filename = os.path.join("Fasta", "output011.m10")
self.check_index(filename, self.fmt)
def test_output_012(self):
"""Test fasta-m10 indexing, fasta36, single query with multiple hsps."""
filename = os.path.join("Fasta", "output012.m10")
self.check_index(filename, self.fmt)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 35.064516 | 83 | 0.672493 |
11c4160e2a248a094bb194690570aa60f87f9a2a | 393 | py | Python | kwiktalk/asgi.py | Vicynet/social-login-django | 7384d7725cd8a33dcf9f05c92b11eec8d928e4eb | ["bzip2-1.0.6"] | null | null | null | kwiktalk/asgi.py | Vicynet/social-login-django | 7384d7725cd8a33dcf9f05c92b11eec8d928e4eb | ["bzip2-1.0.6"] | null | null | null | kwiktalk/asgi.py | Vicynet/social-login-django | 7384d7725cd8a33dcf9f05c92b11eec8d928e4eb | ["bzip2-1.0.6"] | null | null | null |
"""
ASGI config for kwiktalk project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kwiktalk.settings')
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 |
ade7105d9886971384cc1183ef43c963e001da36 | 3,307 | py | Python | model_compiler/src/model_compiler/compilers/saved_model_file_to_saved_model.py | yuanliya/Adlik | 602074b44064002fc0bb054e17a989a5bcf22e92 | ["Apache-2.0"] | 548 | 2019-09-27T07:37:47.000Z | 2022-03-31T05:12:38.000Z | model_compiler/src/model_compiler/compilers/saved_model_file_to_saved_model.py | yuanliya/Adlik | 602074b44064002fc0bb054e17a989a5bcf22e92 | ["Apache-2.0"] | 533 | 2019-09-27T06:30:41.000Z | 2022-03-29T07:34:08.000Z | model_compiler/src/model_compiler/compilers/saved_model_file_to_saved_model.py | yuanliya/Adlik | 602074b44064002fc0bb054e17a989a5bcf22e92 | ["Apache-2.0"] | 54 | 2019-10-10T02:19:31.000Z | 2021-12-28T03:37:45.000Z |
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping, NamedTuple, Optional, Sequence
import tensorflow as tf
from . import repository
from ..models.sources.saved_model_file import SavedModelFile
from ..models.targets.saved_model import SavedModel, Input, Output, DataFormat
from .. import utilities
class Config(NamedTuple):
input_names: Optional[Sequence[str]] = None
data_formats: Sequence[Optional[DataFormat]] = []
output_names: Optional[Sequence[str]] = None
signature_keys: Optional[str] = None
@staticmethod
def from_json(value: Mapping[str, Any]) -> 'Config':
return Config(input_names=value.get('input_names'),
data_formats=utilities.get_data_formats(value.get('input_formats')),
output_names=value.get('output_names'),
signature_keys=value.get('signature_keys'))
@staticmethod
def from_env(env: Mapping[str, str]) -> 'Config':
return Config(input_names=utilities.split_by(env.get('INPUT_NAMES'), ','),
                      data_formats=utilities.get_data_formats(utilities.split_by(env.get('INPUT_FORMATS'), ',')),
output_names=utilities.split_by(env.get('OUTPUT_NAMES'), ','),
signature_keys=env.get('SIGNATURE_KEYS'))
def get_model_info(signature):
model_layers = {}
if len(signature) == 1:
model_layers = signature
else:
for input_signature in signature:
if input_signature:
model_layers.update(input_signature)
return model_layers
def _get_inputs(model_inputs, input_names, data_formats):
inputs = []
if input_names:
for i, input_name in enumerate(input_names):
inputs.append(Input(input_name, model_inputs[input_name],
data_formats[i] if i < len(data_formats) else None))
else:
for i, input_name in enumerate(model_inputs.keys()):
inputs.append(Input(input_name, model_inputs[input_name],
data_formats[i] if i < len(data_formats) else None))
return inputs
def _get_outputs(model_outputs, output_names):
outputs = []
if output_names:
for output_name in output_names:
outputs.append(Output(output_name, model_outputs[output_name]))
else:
for key in model_outputs.keys():
outputs.append(Output(key, model_outputs[key]))
return outputs
@repository.REPOSITORY.register(source_type=SavedModelFile, target_type=SavedModel, config_type=Config)
def compile_source(source: SavedModelFile, config: Config) -> SavedModel:
model = tf.saved_model.load(source.model_path, tags=['serve'])
if config.signature_keys:
infer = model.signatures[config.signature_keys]
else:
infer = model.signatures['serving_default']
return SavedModel(inputs=_get_inputs(get_model_info(infer.structured_input_signature),
config.input_names, config.data_formats),
outputs=_get_outputs(get_model_info(infer.structured_outputs), config.output_names),
session=None,
model=model,
signature=infer)
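# A rough usage sketch (illustrative only; assumes SavedModelFile takes the
# path of an exported SavedModel and that './my_saved_model' exists on disk):
#
#   config = Config.from_json({'input_names': ['x'],
#                              'input_formats': ['channels_last'],
#                              'output_names': ['y']})
#   target = compile_source(SavedModelFile(model_path='./my_saved_model'), config)
#   print(target.inputs, target.outputs)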
| 38.453488 | 106 | 0.659208 |
ecb017375c202cdfcbdddc5a4d9b0b37f3c581a5 | 417 | py | Python | tests/test_equal_groups.py | Vareto-Forks/Same-Size-K-Means | 1bc3f6a355d77cf7a06c1130d9493c0c3b59bd09 | ["BSD-3-Clause"] | 68 | 2017-11-20T14:13:23.000Z | 2022-02-27T03:45:56.000Z | tests/test_equal_groups.py | Vareto-Forks/Same-Size-K-Means | 1bc3f6a355d77cf7a06c1130d9493c0c3b59bd09 | ["BSD-3-Clause"] | 12 | 2018-05-01T05:43:20.000Z | 2021-10-19T06:53:53.000Z | miscellaneous/SameSizeKMeans/tests/test_equal_groups.py | Jinqi-Cheng/delivery_pal_server | b38f540919b9b61b6b22935e7801ce7da1f41db3 | ["MIT"] | 49 | 2017-12-05T21:54:27.000Z | 2022-02-04T05:37:57.000Z |
from clustering.equal_groups import EqualGroupsKMeans
import numpy as np
X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
def test_evens():
assert 1 + 1 == 2
def test_evens_imports():
clf = EqualGroupsKMeans(n_clusters=2, random_state=0)
clf.fit(X)
clf.labels_
clf.cluster_centers_
predict = clf.predict([[0, 0], [4, 4]])
print(clf.inertia_)
assert clf.inertia_ != 0
| 23.166667 | 62 | 0.628297 |
ea351b1e338e75f50f3f197e2581e6e49d2a2577 | 1,011 | py | Python | Dragon Curve/1 - basic.py | ThePythonist/AlgorithmicBotanyModule | 7e7b3d1e36a2da702890a0a2345d4c5efe197172 | ["MIT"] | null | null | null | Dragon Curve/1 - basic.py | ThePythonist/AlgorithmicBotanyModule | 7e7b3d1e36a2da702890a0a2345d4c5efe197172 | ["MIT"] | null | null | null | Dragon Curve/1 - basic.py | ThePythonist/AlgorithmicBotanyModule | 7e7b3d1e36a2da702890a0a2345d4c5efe197172 | ["MIT"] | null | null | null |
#Draw a dragon curve
from turtle import *
#Recursively draw sides of the curve
#Since the line segments alternate between bending left and right, we need a parameter to tell us which to do
def draw_side(length, numLayers, leftFirst=False):
if numLayers == 1:
forward(length)
else:
#Ternary operator - very handy
dir1 = left if leftFirst else right
dir2 = right if leftFirst else left
#Split the line segment recursively into two
dir1(45)
draw_side(length / (2 ** 0.5), numLayers-1, False)
dir2(90)
draw_side(length / (2 ** 0.5), numLayers-1, True)
dir1(45)
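#Each extra layer replaces every segment with two shorter ones, so a call with
#numLayers layers draws 2**(numLayers-1) segments, each of length
#length/(2**0.5)**(numLayers-1)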
#Curve will have initial side length 300 and will be 10 layers deep
sideLength = 300
numLayers = 10
#Set move the turtle such that the snowflake is centred
#Moving to the bottom left vertex
penup()
setx(-sideLength/2)
pendown()
#Draw the curve
draw_side(sideLength, numLayers)
#Make sure screen stays open once drawing is finished
getscreen()._root.mainloop()
| 28.083333 | 109 | 0.695351 |
f2d02ab0c27a29f39e7586b6acea3c2f844836ab | 5,583 | py | Python | sanic/static.py | SimonCqk/sanic | b9fd1d1d2e5dc379b22532f76db70072bdb69695 | ["MIT"] | 5 | 2018-05-10T19:50:27.000Z | 2018-05-10T20:07:05.000Z | sanic/static.py | SimonCqk/sanic | b9fd1d1d2e5dc379b22532f76db70072bdb69695 | ["MIT"] | 1 | 2019-02-18T15:34:13.000Z | 2019-02-18T15:34:13.000Z | sanic/static.py | SimonCqk/sanic | b9fd1d1d2e5dc379b22532f76db70072bdb69695 | ["MIT"] | null | null | null |
from mimetypes import guess_type
from os import path
from re import sub
from time import strftime, gmtime
from urllib.parse import unquote
from aiofiles.os import stat
from sanic.exceptions import (
ContentRangeError,
FileNotFound,
HeaderNotFound,
InvalidUsage,
)
from sanic.handlers import ContentRangeHandler
from sanic.response import file, file_stream, HTTPResponse
def register(app, uri, file_or_directory, pattern,
use_modified_since, use_content_range,
stream_large_files, name='static', host=None,
strict_slashes=None):
# TODO: Though sanic is not a file server, I feel like we should at least
# make a good effort here. Modified-since is nice, but we could
# also look into etags, expires, and caching
"""
Register a static directory handler with Sanic by adding a route to the
router and registering a handler.
:param app: Sanic
:param file_or_directory: File or directory path to serve from
:param uri: URL to serve from
:param pattern: regular expression used to match files in the URL
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the
server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
:param stream_large_files: If true, use the file_stream() handler rather
than the file() handler to send the file
If this is an integer, this represents the
threshold size to switch to file_stream()
:param name: user defined name used for url_for
"""
# If we're not trying to match a file directly,
# serve from the folder
if not path.isfile(file_or_directory):
uri += '<file_uri:' + pattern + '>'
async def _handler(request, file_uri=None):
# Using this to determine if the URL is trying to break out of the path
# served. os.path.realpath seems to be very slow
if file_uri and '../' in file_uri:
raise InvalidUsage("Invalid URL")
# Merge served directory and requested file if provided
# Strip all / that in the beginning of the URL to help prevent python
# from herping a derp and treating the uri as an absolute path
root_path = file_path = file_or_directory
if file_uri:
file_path = path.join(
file_or_directory, sub('^[/]*', '', file_uri))
# URL decode the path sent by the browser otherwise we won't be able to
# match filenames which got encoded (filenames with spaces etc)
file_path = path.abspath(unquote(file_path))
if not file_path.startswith(path.abspath(unquote(root_path))):
raise FileNotFound('File not found',
path=file_or_directory,
relative_url=file_uri)
try:
headers = {}
# Check if the client has been sent this file before
# and it has not been modified since
stats = None
if use_modified_since:
stats = await stat(file_path)
modified_since = strftime(
'%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))
if request.headers.get('If-Modified-Since') == modified_since:
return HTTPResponse(status=304)
headers['Last-Modified'] = modified_since
_range = None
if use_content_range:
_range = None
if not stats:
stats = await stat(file_path)
headers['Accept-Ranges'] = 'bytes'
headers['Content-Length'] = str(stats.st_size)
if request.method != 'HEAD':
try:
_range = ContentRangeHandler(request, stats)
except HeaderNotFound:
pass
else:
del headers['Content-Length']
for key, value in _range.headers.items():
headers[key] = value
if request.method == 'HEAD':
return HTTPResponse(
headers=headers,
content_type=guess_type(file_path)[0] or 'text/plain')
else:
if stream_large_files:
if isinstance(stream_large_files, int):
threshold = stream_large_files
else:
threshold = 1024 * 1024
if not stats:
stats = await stat(file_path)
if stats.st_size >= threshold:
return await file_stream(file_path, headers=headers,
_range=_range)
return await file(file_path, headers=headers, _range=_range)
except ContentRangeError:
raise
except Exception:
raise FileNotFound('File not found',
path=file_or_directory,
relative_url=file_uri)
# special prefix for static files
if not name.startswith('_static_'):
name = '_static_{}'.format(name)
app.route(uri, methods=['GET', 'HEAD'], name=name, host=host,
strict_slashes=strict_slashes)(_handler)
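# Rough usage sketch (illustrative): register() is normally reached through
# Sanic's app.static() helper rather than called directly, e.g.
#
#   from sanic import Sanic
#   app = Sanic(__name__)
#   app.static('/static', './static')            # serve a whole directory
#   app.static('/favicon.ico', './favicon.ico')  # serve a single file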
| 43.617188 | 79 | 0.569228 |
399e06a5878e8322e0fc0a052c5a6a7e73449e19 | 4,934 | py | Python | ipython-extension/autoplot/extensions/toast.py | kernelpanek/jupyterlab-autoplot | 023b0b6a1ebc1857b4dab95c04286d45ec70fc42 | ["BSD-3-Clause"] | 48 | 2021-01-27T14:40:00.000Z | 2022-03-31T10:15:35.000Z | ipython-extension/autoplot/extensions/toast.py | kernelpanek/jupyterlab-autoplot | 023b0b6a1ebc1857b4dab95c04286d45ec70fc42 | ["BSD-3-Clause"] | 1 | 2021-03-11T06:31:35.000Z | 2021-07-29T18:47:29.000Z | ipython-extension/autoplot/extensions/toast.py | kernelpanek/jupyterlab-autoplot | 023b0b6a1ebc1857b4dab95c04286d45ec70fc42 | ["BSD-3-Clause"] | 5 | 2021-04-22T17:44:12.000Z | 2022-02-09T22:47:16.000Z |
"""Module containing the class used to display JupyterLab toasts.
Communication is achieved via custom DOM events, using the name 'autoplot-toast'.
Classes
-------
ToastType
Enum class defining different toast types.
Toast
Class to display JupyterLab toasts via DOM events.
"""
from enum import Enum
from IPython.core.display import Javascript, display
from ipywidgets import widgets
class ToastType(Enum):
"""Enum class defining different toast types."""
error = "error"
warning = "warning"
success = "success"
info = "info"
class Toast(object):
"""Class to display JupyterLab toasts via DOM events.
Methods
-------
show(message, toast_type)
Display a generic toast with a given message and type.
downsample_warning(var_name, old_size, new_size)
Display a toast warning the user that a trace has been downsampled.
no_downsample_info(var_name)
Display a toast notifying the user that a trace is no longer downsampled.
invalid_trace_colour(colour)
Display an error toast when an invalid colour is requested.
invalid_max_length(max_length)
Display an error toast when an invalid colour sample length is requested.
unrecognised_variable(var_name)
Display an error toast when an unrecognised variable name is referenced.
"""
def __init__(self, js_output: widgets.Output):
"""Initialise a `Toast` instance and display `js_output`.
Parameters
----------
js_output: widgets.Output
The IPython output widget in which to display the JavaScript. This will be
cleared every time new JavaScript is shown to prevent old toasts being
re-shown on page refresh.
"""
self._js_output = js_output
display(self._js_output)
def show(self, message: str, toast_type: ToastType):
"""Display a generic toast with a given message and type.
This is achieved by dispatching a custom DOM event with the name
'autoplot-toast'.
Parameters
----------
message: str
The toast message to display. Must not contain the backtick (`) character.
toast_type: ToastType
The toast type, used to format the toast. Must be one of the values defined
in `ToastType`.
"""
assert "`" not in message, "Message cannot contain '`'"
js = f"""document.dispatchEvent(new CustomEvent(
'autoplot-toast', {{ detail: {{
message: `{message}`, type: `{toast_type.value}`,
}} }} ))"""
with self._js_output:
display(Javascript(js)) # noqa
self._js_output.clear_output()
def downsample_warning(self, var_name: str, old_size: int, new_size: int):
"""Display a toast warning the user that a trace has been downsampled.
Parameters
----------
var_name: str
The name of the downsampled variable.
old_size: int
The variable's original size.
new_size: int
The variable's size after downsizing.
"""
message = (
f"Time series '{var_name}' has {old_size} data points, thus has been downsampled to {new_size} points."
)
self.show(message, ToastType.warning)
def no_downsample_info(self, var_name: str):
"""Display a toast notifying the user that a trace is no longer downsampled.
Parameters
----------
var_name: str
The name of the variable.
"""
message = f"Time series '{var_name}' is now being displayed in full."
self.show(message, ToastType.info)
def invalid_trace_colour(self, colour: str):
"""Display an error toast when an invalid colour is requested.
Parameters
----------
colour: str
The invalid colour.
"""
message = f"'{colour}' is not a valid matplotlib colour."
self.show(message, ToastType.error)
def invalid_max_length(self, max_length: int):
"""Display an error toast when an invalid colour sample length is requested.
Parameters
----------
max_length: int
The invalid max_length.
"""
message = (
f"Maximum series length before downsampling must be >= 0, not '{max_length}'. Set to 0 for no downsampling."
)
self.show(message, ToastType.error)
def unrecognised_variable(self, var_name: str):
"""Display an error toast when an unrecognised variable name is referenced.
Parameters
----------
var_name: str
The unrecognised variable name.
"""
message = f"Cannot find variable '{var_name}'. Make sure you are using its actual name, not its legend label."
self.show(message, ToastType.error)
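# Minimal usage sketch (illustrative; assumes an IPython/JupyterLab session in
# which the lab extension listens for the 'autoplot-toast' DOM event):
#
#   toast = Toast(widgets.Output())
#   toast.show("Hello from autoplot", ToastType.info)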
| 30.8375 | 120 | 0.621403 |
63d2f050b020d1fe3f613be52922c74dfb64a747 | 32,502 | py | Python | pygocomma/r9.py | p3g4asus/pygocomma | 096ce0455d90acd0f4b825591477fb650e5c1653 | ["MIT"] | null | null | null | pygocomma/r9.py | p3g4asus/pygocomma | 096ce0455d90acd0f4b825591477fb650e5c1653 | ["MIT"] | null | null | null | pygocomma/r9.py | p3g4asus/pygocomma | 096ce0455d90acd0f4b825591477fb650e5c1653 | ["MIT"] | null | null | null |
'''
Created on 28 apr 2019
@author: Matteo
'''
import traceback
import struct
import asyncio
from base64 import b64decode, b64encode
import json
import time
from Crypto.Cipher import AES
import random
import string
import binascii
from hashlib import md5
from . import _LOGGER
from .const import (CD_ADD_AND_CONTINUE_WAITING, CD_RETURN_IMMEDIATELY, CD_CONTINUE_WAITING, CD_ABORT_AND_RETRY)
from .asyncio_udp import open_local_endpoint
DEFAULT_PORT = 6668
class R9:
STUDY_KEY_DICT = {
"devId": '',
"dps": {
"1": "study_key",
"10": 300,
"7": '',
# "8": keyorig
},
"t": 0,
"uid": ''
}
STUDY_KEY_COMMAND = 7
STUDY_KEY_RESP_1_COMMAND = 7
STUDY_EXIT_COMMAND = 7
STUDY_EXIT_RESP_COMMAND = 8
STUDY_COMMAND = 7
STUDY_RESP_COMMAND = 8
STUDY_DICT = {
"devId": '',
"dps": {
"1": "study",
"10": 300
},
"t": 0,
"uid": ''
}
STUDY_EXIT_DICT = {
"devId": '',
"dps": {
"1": "study_exit",
"10": 300
},
"t": 0,
"uid": ''
}
ASK_LAST_DICT = {
"devId": '',
"gwId": ''
}
ASK_LAST_COMMAND = 0x0a
ASK_LAST_RESP_COMMAND = 0x0a
PING_COMMAND = 9
PING_RESP_COMMAND = 9
PING_DICT = {
}
PROTOCOL_VERSION_BYTES = b'3.1'
LEARNED_COMMAND = 8
crc32Table = [
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
]
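    # The table above and crc32() below implement the standard reflected
    # CRC-32 (polynomial 0xEDB88320), processed one byte at a time.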
@staticmethod
def crc32(cbytes):
crc = 0xFFFFFFFF
for b in cbytes:
crc = (crc >> 8) ^ R9.crc32Table[(crc ^ b) & 255]
return crc ^ 0xFFFFFFFF
@staticmethod
def _pad(s):
padnum = 16 - len(s) % 16
return s + padnum * chr(padnum)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
@staticmethod
def check_discovery_packet(retdata, addr):
lenorig = len(retdata)
if lenorig <= 12 + 8 + 8:
_LOGGER.warning("CheckResp small len=%d", lenorig)
return CD_CONTINUE_WAITING
lenconf = struct.unpack('>I', retdata[12:16])[0] + 8 + 8
if lenconf != lenorig:
_LOGGER.warning("CheckResp len %d!=%d", lenorig, lenconf)
return CD_CONTINUE_WAITING
headerconf = struct.unpack('>I', retdata[0:4])[0]
if headerconf != 0x000055AA:
_LOGGER.warning("CheckResp header %d!=%d", 0x000055AA, headerconf)
return CD_CONTINUE_WAITING
footerconf = struct.unpack('>I', retdata[-4:])[0]
if footerconf != 0x0000AA55:
_LOGGER.warning("CheckResp footer %d!=%d", 0x0000AA55, headerconf)
return CD_CONTINUE_WAITING
crcconf = struct.unpack('>I', retdata[-8:-4])[0]
crcorig = R9.crc32(retdata[0:-8])
if crcconf != crcorig:
_LOGGER.warning("CheckResp crc %d!=%d", crcorig, crcconf)
return CD_CONTINUE_WAITING
statusconf = struct.unpack('>I', retdata[16:20])[0]
if statusconf != 0:
_LOGGER.warning("CheckResp status %d!=%d", 0, statusconf)
return CD_CONTINUE_WAITING
payload = retdata[20:-8]
try:
jsonstr = payload.decode('utf-8')
except BaseException as ex:
_LOGGER.warning("CheckResp decode %s %s", ex, binascii.hexlify(payload))
return CD_CONTINUE_WAITING
try:
jsondec = json.loads(jsonstr)
except BaseException as ex:
_LOGGER.warning("CheckResp jsonp %s %s", ex, jsonstr)
return CD_CONTINUE_WAITING
if "gwId" in jsondec:
return CD_ADD_AND_CONTINUE_WAITING, jsondec
else:
return CD_CONTINUE_WAITING
@staticmethod
async def discovery(timeout, retry=3):
"""!
        Discovers Tuya devices by listening for broadcast UDP messages sent to port 6666
        @param timeout: [int] time to wait for broadcast messages
        @param retry: [int] Number of retries to make if no device is found (Optional)
        @return [dict] A dict whose keys are IP addresses of Tuya devices and values are R9 objects. Please note that the found R9 devices
        cannot be used before setting the correct encryption key (it is set to b'0123456789abcdef' by default)
"""
out_data = None
_local = None
addr = ('255.255.255.255', 6666)
for _ in range(retry):
try:
_local = await open_local_endpoint(port=6666, allow_broadcast=True)
if _local:
for _ in range(retry):
out_data = await _local.protocol(None, addr, R9.check_discovery_packet, timeout, 1, True)
if out_data:
break
break
except BaseException as ex:
_LOGGER.error("Protocol[%s:%d] error: %s", *addr, str(ex))
finally:
if _local:
try:
_local.abort()
except Exception:
pass
finally:
_local = None
if _local:
try:
_local.abort()
except Exception:
pass
finally:
_local = None
rv = dict()
if out_data:
for o in out_data:
try:
it = o[0]
if it['ip'] not in rv:
obj = R9((it['ip'], DEFAULT_PORT), it['gwId'], b'0123456789abcdef')
rv[it['ip']] = obj
_LOGGER.info("Discovered %s", obj)
except BaseException as ex:
_LOGGER.error("Error in discovery process %s", ex)
return rv
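    # Usage sketch (illustrative): discovery() is a coroutine, so it must be
    # awaited from an asyncio event loop, e.g.
    #   devices = await R9.discovery(timeout=5)
    #   for ip, device in devices.items():
    #       print(ip, device)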
def __init__(self, hp, idv, key, timeout=5, force_reconnect_s=20):
"""!
        Constructs an R9 remote object
@param hp: [tuple] A tuple with host and port of the R9 remote
@param idv: [string] id of the R9 object
@param key: [string|bytes] key used to encrypt/decrypt messages from/to R9
@param timeout: [int] timeout to be used in TCP communication (optional)
@param force_reconnect_s: [int] seconds after which to force reconnection
"""
self._hp = hp
self._id = idv
if isinstance(key, str):
key = key.encode()
self._key = key
self._timeout = timeout
self._cipher = AES.new(key, mode=AES.MODE_ECB)
self._pktnum = 1
self._uid = ''.join(random.choices(string.ascii_letters + string.digits, k=20))
self._reader = None
self._writer = None
self._contime = 0
self._force_reconnect_s = force_reconnect_s
def __repr__(self):
"""!
Gets string representation of this R9 object
@return [string] string representation of this R9 object
"""
return '(%s:%d) id=%s key=%s' % (*self._hp, self._id, self._key)
async def destroy_connection(self):
"""!
Destroys the connection with the R9 device
"""
try:
if self._writer:
self._writer.close()
await self._writer.wait_closed()
except Exception:
pass
finally:
self._writer = None
self._reader = None
self._pktnum = 1
async def _init_connection(self):
try:
if self._force_reconnect_s > 0 and time.time() - self._contime > self._force_reconnect_s:
await self.destroy_connection()
if not self._writer:
_LOGGER.debug("Connecting to %s:%d (TCP)", *self._hp)
self._reader, self._writer = await asyncio.open_connection(*self._hp)
self._contime = time.time()
return True
except BaseException as ex:
_LOGGER.error("Cannot estabilish connection %s: %s", str(ex), traceback.format_exc())
await self.destroy_connection()
return False
def _generic_check_resp(self, retdata, command, command_in_dict=None, status_ok=[0]):
"""!
        Checks the payload of a TCP packet received from the R9 device. This includes the status value check, CRC32 check, AES decryption (if needed), and MD5 check (if needed)
        @param retdata: [bytes] bytes of the TCP packet payload received from the R9 device
@param command: [int] Command that is expected in the packet header
@param command_in_dict: [string|NoneType] Command that is expected in the packet JSON dps["1"]. If NoneType, no JSON is expected in packet content. If equal to '',
no dps["1"] is expected in packet JSON
@param status_ok: [list] Accepted status codes. Defaults to [0]
        @return [dict|boolean] On a successful check, True is returned if no JSON content is present; otherwise the parsed dict is returned.
        If the check fails, False is returned
"""
lenorig = len(retdata)
if lenorig < 12 + 8 + 8:
_LOGGER.warning("CheckResp small len=%d", lenorig)
return False
lenconf = struct.unpack('>I', retdata[12:16])[0] + 8 + 8
if lenconf != lenorig:
_LOGGER.warning("CheckResp len %d!=%d", lenorig, lenconf)
return False
commandconf = struct.unpack('>I', retdata[8:12])[0]
if commandconf != command:
_LOGGER.warning("CheckResp command[%d] %d!=%d", lenorig, command, commandconf)
return False
headerconf = struct.unpack('>I', retdata[0:4])[0]
if headerconf != 0x000055AA:
_LOGGER.warning("CheckResp header %d!=%d", 0x000055AA, headerconf)
return False
footerconf = struct.unpack('>I', retdata[-4:])[0]
if footerconf != 0x0000AA55:
_LOGGER.warning("CheckResp footer %d!=%d", 0x0000AA55, headerconf)
return False
crcconf = struct.unpack('>I', retdata[-8:-4])[0]
crcorig = R9.crc32(retdata[0:-8])
if crcconf != crcorig:
_LOGGER.warning("CheckResp crc %d!=%d", crcorig, crcconf)
return False
statusconf = struct.unpack('>I', retdata[16:20])[0]
if statusconf not in status_ok:
_LOGGER.warning("CheckResp status %d!=%d", status_ok, statusconf)
return False
if command_in_dict is None:
return True
if lenorig <= 12 + 8 + 8 + 16 + len(R9.PROTOCOL_VERSION_BYTES):
_LOGGER.warning("CheckResp small2 len=%d", lenorig)
return False
protocolconf = retdata[20:23]
if protocolconf != R9.PROTOCOL_VERSION_BYTES:
_LOGGER.warning("CheckResp prot %s!=%s",
binascii.hexlify(R9.PROTOCOL_VERSION_BYTES),
binascii.hexlify(protocolconf))
return False
b64payload = retdata[39:-8]
hashconf = self._get_md5_hash(b64payload)
hashorig = retdata[20:39]
if hashconf != hashorig:
_LOGGER.warning("CheckResp md5 %s!=%s", binascii.hexlify(hashorig), binascii.hexlify(hashconf))
return False
try:
cryptpayload = b64decode(b64payload)
except BaseException as ex:
_LOGGER.warning("CheckResp b64 %s %s", str(ex), binascii.hexlify(b64payload))
return False
try:
payload = self._cipher.decrypt(cryptpayload)
payload = R9._unpad(payload)
except BaseException as ex:
_LOGGER.warning("CheckResp decry %s %s", str(ex), binascii.hexlify(cryptpayload))
return False
try:
jsonstr = payload.decode('utf-8')
except BaseException as ex:
_LOGGER.warning("CheckResp decode %s %s", str(ex), binascii.hexlify(payload))
return False
try:
jsondec = json.loads(jsonstr)
except BaseException as ex:
_LOGGER.warning("CheckResp jsonp %s %s", str(ex), jsonstr)
return False
if not len(command_in_dict):
return jsondec
if "dps" not in jsondec or "1" not in jsondec["dps"]:
_LOGGER.warning("CheckResp struct %s", jsondec)
return False
if jsondec["dps"]["1"] != command_in_dict:
_LOGGER.warning("CheckResp command %s!=%s", command_in_dict, jsondec["dps"]["1"])
return False
return jsondec
def _check_ping_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.PING_RESP_COMMAND)
if dictok:
return CD_RETURN_IMMEDIATELY, retdata
else:
return CD_CONTINUE_WAITING, None
def _check_ask_last_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.ASK_LAST_RESP_COMMAND, status_ok=[0, 1])
if dictok:
payload = retdata[20:-8]
try:
jsonstr = payload.decode('utf-8')
except BaseException as ex:
_LOGGER.warning("CheckResp decode %s %s", str(ex), binascii.hexlify(payload))
return CD_CONTINUE_WAITING, None
if jsonstr.find("json obj") >= 0:
return CD_RETURN_IMMEDIATELY, {"devId": self._id}
try:
jsondec = json.loads(jsonstr)
except BaseException as ex:
_LOGGER.warning("CheckResp jsonp %s %s", str(ex), jsonstr)
return CD_CONTINUE_WAITING, None
if ("devId" in jsondec and jsondec['devId'] == self._id) or\
("gwId" in jsondec and jsondec['gwId'] == self._id):
return CD_RETURN_IMMEDIATELY, jsondec
return CD_CONTINUE_WAITING, None
async def ask_last(self, timeout=-1, retry=2):
"""!
        Sends a ping to the R9 object to get the last command. This command is sent unencrypted
@param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
@param retry: [int] Number of retries to make if no device is found (optional)
@return [dict|NoneType] On successful send, the decoded confirmation dict obtained by R9 device is returned. Otherwise return value is None
"""
pld = self._get_payload_bytes(R9.ASK_LAST_COMMAND, self._get_ask_last_bytes())
return await self._tcp_protocol(pld, self._check_ask_last_resp, timeout, retry)
async def ping(self, timeout=-1, retry=2):
"""!
Sends ping to R9 object to see if it is online
@param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
@param retry: [int] Number of retries to make if no device is found (optional)
@return [bytes|NoneType] On successful send, bytes got from R9 are returned; None otherwise.
"""
pld = self._get_payload_bytes(R9.PING_COMMAND, {})
return await self._tcp_protocol(pld, self._check_ping_resp, timeout, retry)
def _check_study_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.STUDY_RESP_COMMAND, "study")
if dictok:
return CD_RETURN_IMMEDIATELY, dictok
else:
return CD_CONTINUE_WAITING, None
def _check_study_key_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.STUDY_KEY_RESP_1_COMMAND)
if dictok:
return CD_RETURN_IMMEDIATELY, retdata
else:
return CD_CONTINUE_WAITING, None
def _check_study_exit_resp(self, retdata):
dictok = self._generic_check_resp(retdata, R9.STUDY_EXIT_RESP_COMMAND, "study_exit")
if dictok:
return CD_RETURN_IMMEDIATELY, dictok
else:
return CD_CONTINUE_WAITING, None
async def emit_ir(self, keybytes, timeout=-1, retry=3):
"""!
        Sends an IR code to the R9 device
@param keybytes: [bytes] key to be emitted by R9 device. The key should be a byte object that represents lirc/arduino format array of little-endian shorts.
This is the same format obtained with the learning process
@param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
@param retry: [int] Number of retries to make if no device is found (optional)
@return [bytes|NoneType] On successful send, the array of bytes obtained by R9 device is returned. Otherwise return value is None
"""
pld = self._get_payload_bytes(R9.STUDY_KEY_COMMAND, self._get_study_key_dict(keybytes))
return await self._tcp_protocol(pld, self._check_study_key_resp, timeout, retry)
async def enter_learning_mode(self, timeout=-1, retry=3):
"""!
Puts R9 in learning mode
@param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
@param retry: [int] Number of retries to make if no device is found (optional)
@return [dict|NoneType] On successful send, the decoded confirmation dict obtained by R9 device is returned. Otherwise return value is None
"""
pld = self._get_payload_bytes(R9.STUDY_COMMAND, self._get_study_dict())
return await self._tcp_protocol(pld, self._check_study_resp, timeout, retry)
async def exit_learning_mode(self, timeout=-1, retry=3):
"""!
Exits R9 learning mode
@param timeout: [int] timeout to be used in TCP communication (optional). If not specified, the timeout specified when constructing the R9 object will be used
@param retry: [int] Number of retries to make if no device is found (optional)
@return [dict|NoneType] On successful send, the decoded confirmation dict obtained by R9 device is returned. Otherwise return value is None
"""
pld = self._get_payload_bytes(R9.STUDY_EXIT_COMMAND, self._get_study_exit_dict())
return await self._tcp_protocol(pld, self._check_study_exit_resp, timeout, retry)
def _check_learned_key(self, retdata):
dictok = self._generic_check_resp(retdata, R9.LEARNED_COMMAND, "")
if dictok:
_LOGGER.debug("Learned dict %s", dictok)
if "dps" not in dictok or "2" not in dictok["dps"]:
_LOGGER.warning("CheckResp not2 %s", dictok)
return CD_ABORT_AND_RETRY, None
try:
keydec = b64decode(dictok["dps"]["2"].encode())
except BaseException as ex:
_LOGGER.warning("CheckResp invalidkey %s %s", dictok, str(ex))
return CD_ABORT_AND_RETRY, None
return CD_RETURN_IMMEDIATELY, keydec
else:
return CD_CONTINUE_WAITING, None
async def get_learned_key(self, timeout=30):
"""!
        Waits for a key learned by the R9 device (the device must already be in learning mode)
        @param timeout: [int] timeout to be used in TCP communication (optional). Default value is 30 seconds. If awaited, this method will block until a key is received or
        timeout seconds have passed
        @return [bytes|NoneType] On successful key reception, the byte object representing the learned key is returned; this can be used with the emit_ir function for future key sending. It returns
        None on error or on timeout (no key was pressed/detected)
"""
return await self._tcp_protocol(None, self._check_learned_key, timeout, 1)
async def _tcp_protocol(self, data, check_data_fun, timeout=-1, retry=1):
lstdata = []
if timeout < 0:
timeout = self._timeout
for _ in range(retry):
try:
passed = 0
starttime = time.time()
if await asyncio.wait_for(self._init_connection(), timeout):
if data:
self._writer.write(data)
await self._writer.drain()
self._contime = time.time()
self._pktnum += 1
while passed < timeout:
try:
rec_data = await asyncio.wait_for(self._reader.read(4096), timeout-passed)
# _LOGGER.info("Received[%s:%d][%d] %s",*self._hp,len(rec_data),binascii.hexlify(rec_data))
rv, rec_data = check_data_fun(rec_data)
if rv == CD_RETURN_IMMEDIATELY:
return rec_data
elif rv == CD_ABORT_AND_RETRY:
break
elif rv == CD_ADD_AND_CONTINUE_WAITING:
lstdata.append(rec_data)
except asyncio.TimeoutError:
_LOGGER.warning("Protocol[%s:%d] timeout", *self._hp)
break
passed = time.time()-starttime
if lstdata:
return lstdata
elif not data:
break
except asyncio.TimeoutError:
_LOGGER.warning("Protocol[%s:%d] connecting timeout", *self._hp)
await self.destroy_connection()
except BaseException as ex:
_LOGGER.warning("Protocol[%s:%d] error %s", *self._hp, str(ex))
await self.destroy_connection()
await self.destroy_connection()
return None
def _prepare_payload(self, dictjson):
txtjs = json.dumps(dictjson)
_LOGGER.debug("Send Schema (%d) %s", len(txtjs), txtjs)
txtjs = R9._pad(txtjs).encode()
crypted_text = self._cipher.encrypt(txtjs)
_LOGGER.debug("Cipher (%d) %s", len(crypted_text), binascii.hexlify(crypted_text).decode('utf-8'))
cifenc = b64encode(crypted_text)
_LOGGER.debug("B64 cipher (%d) %s", len(cifenc), cifenc.decode('utf-8'))
return cifenc
def _generic_fill_dict(self, filld):
filld["devId"] = self._id
filld['t'] = int(time.time())
filld['uid'] = self._uid
return filld
def _get_payload_bytes(self, command, filled_dict):
if not filled_dict:
pldall = bytes()
elif isinstance(filled_dict, dict):
pld = self._prepare_payload(filled_dict)
md5bytes = self._get_md5_hash(pld)
pldall = md5bytes+pld
else:
pldall = filled_dict
ln = len(pldall)+16-8
docrc = b'\x00\x00\x55\xAA' + struct.pack('>I', self._pktnum) + struct.pack('>I', command) + struct.pack('>I', ln) + pldall
crcbytes = struct.pack('>I', R9.crc32(docrc))
complete = docrc + crcbytes + b'\x00\x00\xAA\x55'
_LOGGER.debug("Comp packet (%d) %s", len(complete), binascii.hexlify(complete).decode('utf-8'))
return complete
def _get_study_key_dict(self, keybytes):
R9.STUDY_KEY_DICT["dps"]["7"] = b64encode(keybytes).decode('utf8')
return self._generic_fill_dict(R9.STUDY_KEY_DICT)
def _get_study_dict(self):
return self._generic_fill_dict(R9.STUDY_DICT)
def _get_ask_last_bytes(self):
R9.ASK_LAST_DICT["devId"] = self._id
R9.ASK_LAST_DICT["gwId"] = self._id
return json.dumps(R9.ASK_LAST_DICT).encode()
def _get_study_exit_dict(self):
return self._generic_fill_dict(R9.STUDY_EXIT_DICT)
def _get_md5_hash(self, payload_bytes):
preMd5String = b'data=' + payload_bytes + b'||lpv=' + R9.PROTOCOL_VERSION_BYTES + b'||' + self._key
m = md5()
m.update(preMd5String)
# print(repr(m.digest()))
hexdigest = m.hexdigest()
s = hexdigest[8:][:16]
_LOGGER.debug("Computed md5 %s", s)
return R9.PROTOCOL_VERSION_BYTES+s.encode()
if __name__ == '__main__': # pragma: no cover
import sys
import logging
async def testFake(n):
for i in range(n):
_LOGGER.debug("Counter is %d", i)
await asyncio.sleep(1)
async def ping_test(*args):
a = R9((args[2], DEFAULT_PORT), args[3], args[4])
rv = await a.ping()
if rv:
_LOGGER.info("Ping OK %s", binascii.hexlify(rv))
else:
_LOGGER.warning("Ping failed")
await a.destroy_connection()
async def ask_last_test(*args):
a = R9((args[2], DEFAULT_PORT), args[3], args[4])
rv = await a.ask_last()
if rv:
_LOGGER.info("Ask last OK %s", rv)
else:
_LOGGER.warning("Ask last failed")
await a.destroy_connection()
async def discovery_test(*args):
rv = await R9.discovery(int(args[2]))
if rv:
_LOGGER.info("Discovery OK %s", rv)
else:
_LOGGER.warning("Discovery failed")
async def emit_test(*args):
import re
mo = re.search('^[a-fA-F0-9]+$', args[5])
if mo:
payload = binascii.unhexlify(args[5])
else:
payload = b64decode(args[5])
a = R9((args[2], DEFAULT_PORT), args[3], args[4])
rv = await a.emit_ir(payload)
if rv:
_LOGGER.info("Emit OK %s", binascii.hexlify(rv).decode('utf-8'))
else:
_LOGGER.warning("Emit failed")
await a.destroy_connection()
async def learn_test(*args):
a = R9((args[2], DEFAULT_PORT), args[3], args[4])
rv = await a.enter_learning_mode()
if rv:
_LOGGER.info("Entered learning mode (%s): please press key", rv)
rv = await a.get_learned_key()
if rv:
_LOGGER.info("Obtained %s", binascii.hexlify(rv).decode('utf-8'))
else:
_LOGGER.warning("No key pressed")
rv = await a.exit_learning_mode()
if rv:
_LOGGER.info("Exit OK %s", rv)
else:
_LOGGER.warning("Exit failed")
else:
_LOGGER.warning("Enter learning failed")
await a.destroy_connection()
_LOGGER.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
loop = asyncio.get_event_loop()
try:
asyncio.ensure_future(testFake(150))
if sys.argv[1] == "learn":
loop.run_until_complete(learn_test(*sys.argv))
elif sys.argv[1] == "discovery":
loop.run_until_complete(discovery_test(*sys.argv))
elif sys.argv[1] == "ping":
loop.run_until_complete(ping_test(*sys.argv))
elif sys.argv[1] == "asklast":
loop.run_until_complete(ask_last_test(*sys.argv))
elif sys.argv[1] == "pingst":
for i in range(int(sys.argv[5])):
loop.run_until_complete(ping_test(*sys.argv))
else:
# loop.run_until_complete(emit_test('00000000a801000000000000000098018e11951127029b0625029906270299062702380227023a0225023802270238022d023202270299062702990627029806270238022702380227023802270238022802370227023802270238022702980627023802240245021c02380227023802270238022702980627029c0623023802270298062702990627029b062502990627029906270220b7a1119d11270299062702990628029b06250238022702380227023802270238022702380227029906270299062702990627023802270238022a0234022702380227023802260238022702380226029a06260238022602380226023802260241021e02380227029b0624029906270238022702980627029b0625029906270299062702990629021db79f11a2112502990627029b0625029906270238022702380227023802270238022a02350227029906270299062702990628023702260238022702380227023802270238022702380226023b02240299062702380226023802270238022602380227023c0223029906270299062702380226029b062402990627029906270299062802980627020000'))
loop.run_until_complete(emit_test(*sys.argv))
# loop.run_until_complete(learn_test())
except BaseException as ex:
_LOGGER.error("Test error %s", str(ex))
traceback.print_exc()
finally:
loop.close()
| 42.430809 | 901 | 0.600025 |
17da98758fef614f2fdca1329c924ab628e8cfa6 | 861 | py | Python | reviews/models.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | ["Apache-2.0"] | null | null | null | reviews/models.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | ["Apache-2.0"] | 9 | 2020-01-10T14:10:02.000Z | 2022-03-12T00:08:19.000Z | reviews/models.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | ["Apache-2.0"] | null | null | null |
from django.core.validators import MaxValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from core import models as core_models
class Review(core_models.TimeStampedModel):
""" レビューのモデルを定義する """
product = models.ForeignKey(
"products.Product",
verbose_name=_("商品"),
related_name="reviews",
on_delete=models.CASCADE,
)
user = models.ForeignKey(
"users.User", verbose_name=_("ユーザー"), on_delete=models.CASCADE
)
title = models.CharField(_("タイトル"), max_length=50)
text = models.TextField(_("本文"))
rate = models.PositiveIntegerField(
_("評点"), default=0, validators=[MaxValueValidator(5)]
)
class Meta:
verbose_name = _("レビュー")
verbose_name_plural = _("レビュー")
def __str__(self):
return self.title
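# Usage sketch (illustrative; assumes existing Product and User instances):
#   Review.objects.create(product=product, user=user,
#                         title="Great shoes", text="Very comfortable", rate=5)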
| 26.90625 | 70 | 0.664344 |
e1c804e0550ff2803e3b004705b5d71b6b94a737 | 3,828 | py | Python | python/pyspark/pandas/tests/test_generic_functions.py | dbolshak/spark | e00d305053c98995efa990ffb2cf82cb281c71d8 | ["Apache-2.0"] | 1 | 2021-10-07T11:25:57.000Z | 2021-10-07T11:25:57.000Z | python/pyspark/pandas/tests/test_generic_functions.py | dbolshak/spark | e00d305053c98995efa990ffb2cf82cb281c71d8 | ["Apache-2.0"] | 1 | 2022-02-11T00:28:06.000Z | 2022-02-11T00:28:06.000Z | python/pyspark/pandas/tests/test_generic_functions.py | dbolshak/spark | e00d305053c98995efa990ffb2cf82cb281c71d8 | ["Apache-2.0"] | 1 | 2016-11-22T03:46:44.000Z | 2016-11-22T03:46:44.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class GenericFunctionsTest(PandasOnSparkTestCase, TestUtils):
def test_interpolate_error(self):
psdf = ps.range(10)
with self.assertRaisesRegex(
NotImplementedError, "interpolate currently works only for method='linear'"
):
psdf.interpolate(method="quadratic")
with self.assertRaisesRegex(ValueError, "limit must be > 0"):
psdf.interpolate(limit=0)
def _test_series_interpolate(self, pser):
psser = ps.from_pandas(pser)
self.assert_eq(psser.interpolate(), pser.interpolate())
for l1 in range(1, 5):
self.assert_eq(psser.interpolate(limit=l1), pser.interpolate(limit=l1))
def _test_dataframe_interpolate(self, pdf):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.interpolate(), pdf.interpolate())
for l2 in range(1, 5):
self.assert_eq(psdf.interpolate(limit=l2), pdf.interpolate(limit=l2))
def test_interpolate(self):
pser = pd.Series(
[
1,
np.nan,
3,
],
name="a",
)
self._test_series_interpolate(pser)
pser = pd.Series(
[
np.nan,
np.nan,
np.nan,
],
name="a",
)
self._test_series_interpolate(pser)
pser = pd.Series(
[
np.nan,
np.nan,
np.nan,
0,
1,
np.nan,
np.nan,
np.nan,
np.nan,
3,
np.nan,
np.nan,
np.nan,
],
name="a",
)
self._test_series_interpolate(pser)
pdf = pd.DataFrame(
[
(1, 0.0, np.nan),
(2, np.nan, 2.0),
(3, 2.0, 3.0),
(4, np.nan, 4.0),
(5, np.nan, 1.0),
],
columns=list("abc"),
)
self._test_dataframe_interpolate(pdf)
pdf = pd.DataFrame(
[
(0.0, np.nan, -1.0, 1.0, np.nan),
(np.nan, 2.0, np.nan, np.nan, np.nan),
(2.0, 3.0, np.nan, 9.0, np.nan),
(np.nan, 4.0, -4.0, 16.0, np.nan),
(np.nan, 1.0, np.nan, 7.0, np.nan),
],
columns=list("abcde"),
)
self._test_dataframe_interpolate(pdf)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_generic_functions import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
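# The guard above lets the file be run directly (python test_generic_functions.py);
# xmlrunner is optional and only used to emit JUnit-style XML reports when installed.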
| 30.624 | 87 | 0.551463 |
cc048a4e9acac4324a3d13531f1fef898c900e42 | 1,607 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_1.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | ["MIT"] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_1.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | ["MIT"] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_1.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | ["MIT"] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Coupon", "instances": 34, "metric_value": 0.99, "depth": 1}
if obj[3]>2:
# {"feature": "Time", "instances": 22, "metric_value": 0.976, "depth": 2}
if obj[2]<=2:
# {"feature": "Gender", "instances": 14, "metric_value": 0.7496, "depth": 3}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
# {"feature": "Age", "instances": 6, "metric_value": 1.0, "depth": 4}
if obj[6]>2:
return 'True'
elif obj[6]<=2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[2]>2:
# {"feature": "Income", "instances": 8, "metric_value": 0.8113, "depth": 3}
if obj[11]<=4:
return 'True'
elif obj[11]>4:
# {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 4}
if obj[6]>2:
return 'False'
elif obj[6]<=2:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[3]<=2:
# {"feature": "Maritalstatus", "instances": 12, "metric_value": 0.65, "depth": 2}
if obj[7]>0:
return 'True'
elif obj[7]<=0:
# {"feature": "Bar", "instances": 5, "metric_value": 0.971, "depth": 3}
if obj[12]>1.0:
return 'True'
elif obj[12]<=1.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
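# Usage sketch (illustrative; the 17 encoded feature values below are made up):
#   features = [1, 0, 3, 2, 1, 0, 2, 1, 0, 2, 5, 3, 1.0, 2.0, 1.0, 0, 1]
#   print(findDecision(features))  # prints 'True' or 'False'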
| 37.372093 | 347 | 0.586808 |
551ad697326bddf77c3cd210312b53d882f0e44b | 11,239 | py | Python | venv/Lib/site-packages/scipy/ndimage/fourier.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | venv/Lib/site-packages/scipy/ndimage/fourier.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | venv/Lib/site-packages/scipy/ndimage/fourier.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z |
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.float64)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.complex128)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
"""
Multidimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_gaussian : ndarray
The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_gaussian(input_, sigma=4)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = normalize_axis_index(axis, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
if not sigmas.flags.contiguous:
sigmas = sigmas.copy()
_nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
return output
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
"""
Multidimensional uniform fourier filter.
The array is multiplied with the Fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_uniform : ndarray
The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_uniform(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = normalize_axis_index(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 1)
return output
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
"""
Multidimensional ellipsoid Fourier filter.
The array is multiplied with the fourier transform of a ellipsoid of
given sizes.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : ndarray
The filtered input.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_ellipsoid(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = normalize_axis_index(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 2)
return output
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
"""
Multidimensional Fourier shift filter.
The array is multiplied with the Fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
The size of the box used for filtering.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of shifting the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_shift : ndarray
The shifted input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> import numpy.fft
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_shift(input_, shift=200)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier_complex(output, input)
axis = normalize_axis_index(axis, input.ndim)
shifts = _ni_support._normalize_sequence(shift, input.ndim)
shifts = numpy.asarray(shifts, dtype=numpy.float64)
if not shifts.flags.contiguous:
shifts = shifts.copy()
_nd_image.fourier_shift(input, shifts, n, axis, output)
return output
| 36.728758 | 78 | 0.659489 |
63f6bc65bff5668505c4c7784831c5288a0ef1b4 | 1,696 | py | Python | extra_apps/social_core/pipeline/partial.py | kaocher82/Vue-Django-Shop-Website | 6273990a5510b72c3a3115d73e149d242049b5bc | [
"MIT"
] | 84 | 2019-02-22T08:19:52.000Z | 2022-02-08T03:36:32.000Z | Backend/extra_apps/social_core/pipeline/partial.py | GinCho-Max/Dailyfresh-B2C | 7c94e9a4428e5116c91bf27cf696e6eee430748a | [
"Apache-2.0"
] | 16 | 2019-09-06T10:25:40.000Z | 2022-02-12T06:37:41.000Z | Backend/extra_apps/social_core/pipeline/partial.py | GinCho-Max/Dailyfresh-B2C | 7c94e9a4428e5116c91bf27cf696e6eee430748a | [
"Apache-2.0"
] | 61 | 2019-03-20T02:29:23.000Z | 2021-07-09T08:14:25.000Z | from functools import wraps
from .utils import partial_prepare
def partial_step(save_to_session):
"""Wraps func to behave like a partial pipeline step, any output
that's not None or {} will be considered a response object and
will be returned to user.
The pipeline function will receive a current_partial object, it
contains the partial pipeline data and a token that is used to
identify it when it's continued, this is useful to build links
with the token.
The default value for this parameter is partial_token, but can be
overridden by SOCIAL_AUTH_PARTIAL_PIPELINE_TOKEN_NAME setting.
The token is also stored in the session under the
partial_pipeline_token key when the save_to_session parameter is True.
"""
def decorator(func):
@wraps(func)
def wrapper(strategy, backend, pipeline_index, *args, **kwargs):
current_partial = partial_prepare(strategy, backend, pipeline_index,
*args, **kwargs)
out = func(strategy=strategy,
backend=backend,
pipeline_index=pipeline_index,
current_partial=current_partial,
*args, **kwargs) or {}
if not isinstance(out, dict):
strategy.storage.partial.store(current_partial)
if save_to_session:
strategy.session_set('partial_pipeline_token', current_partial.token)
return out
return wrapper
return decorator
# Backward compatible partial decorator, that stores the token in the session
partial = partial_step(save_to_session=True)
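# Minimal usage sketch (illustrative only, names are assumptions): a pipeline
# function wrapped with `partial` receives `current_partial` and can pause the
# pipeline by returning a response object; returning a dict lets it continue.
#
#     @partial
#     def require_email(strategy, details=None, current_partial=None, *args, **kwargs):
#         if not details.get('email'):
#             # e.g. redirect the user now and resume later via current_partial.token
#             return strategy.redirect('/collect-email?partial_token=' + current_partial.token)
#         return {'details': details}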
| 35.333333 | 89 | 0.65684 |
b0a7f7d301f5030d331ec5fac440ae621c86e90f | 353 | py | Python | tools/train.py | Kingzerd/siamfc_pytorch | fd1dbeb12dd7e2b9190876a1de7ea4b71a7a1166 | [
"MIT"
] | 1 | 2019-11-17T05:12:58.000Z | 2019-11-17T05:12:58.000Z | tools/train.py | Kingzerd/siamfc_pytorch | fd1dbeb12dd7e2b9190876a1de7ea4b71a7a1166 | [
"MIT"
] | null | null | null | tools/train.py | Kingzerd/siamfc_pytorch | fd1dbeb12dd7e2b9190876a1de7ea4b71a7a1166 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os
from got10k.datasets import *
from siamfc import TrackerSiamFC
if __name__ == '__main__':
# root_dir = os.path.abspath('~/data/GOT-10k')
root_dir = 'H:/datasets/GOT-10k'
seqs = GOT10k(root_dir, subset='train', return_meta=True)
tracker = TrackerSiamFC()
tracker.train_over(seqs)
| 22.0625 | 61 | 0.716714 |
704ff29783736a31c052cf80753f1a68d47141c9 | 3,865 | py | Python | venv/lib/python3.8/site-packages/hypothesis/strategies/_internal/recursive.py | fmfrancisco/chapters | 34acc93e7a41490fe3c856e16927e50fdc370dee | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/hypothesis/strategies/_internal/recursive.py | fmfrancisco/chapters | 34acc93e7a41490fe3c856e16927e50fdc370dee | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/hypothesis/strategies/_internal/recursive.py | fmfrancisco/chapters | 34acc93e7a41490fe3c856e16927e50fdc370dee | [
"MIT"
] | null | null | null | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2021 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
from contextlib import contextmanager
from hypothesis.errors import InvalidArgument
from hypothesis.internal.lazyformat import lazyformat
from hypothesis.internal.reflection import get_pretty_function_description
from hypothesis.strategies._internal.strategies import OneOfStrategy, SearchStrategy
class LimitReached(BaseException):
pass
class LimitedStrategy(SearchStrategy):
def __init__(self, strategy):
super().__init__()
self.base_strategy = strategy
self.marker = 0
self.currently_capped = False
def __repr__(self):
return "LimitedStrategy(%r)" % (self.base_strategy,)
def do_validate(self):
self.base_strategy.validate()
def do_draw(self, data):
assert self.currently_capped
if self.marker <= 0:
raise LimitReached()
self.marker -= 1
return data.draw(self.base_strategy)
@contextmanager
def capped(self, max_templates):
assert not self.currently_capped
try:
self.currently_capped = True
self.marker = max_templates
yield
finally:
self.currently_capped = False
class RecursiveStrategy(SearchStrategy):
def __init__(self, base, extend, max_leaves):
self.max_leaves = max_leaves
self.base = base
self.limited_base = LimitedStrategy(base)
self.extend = extend
strategies = [self.limited_base, self.extend(self.limited_base)]
while 2 ** (len(strategies) - 1) <= max_leaves:
strategies.append(extend(OneOfStrategy(tuple(strategies))))
self.strategy = OneOfStrategy(strategies)
def __repr__(self):
if not hasattr(self, "_cached_repr"):
self._cached_repr = "recursive(%r, %s, max_leaves=%d)" % (
self.base,
get_pretty_function_description(self.extend),
self.max_leaves,
)
return self._cached_repr
def do_validate(self):
if not isinstance(self.base, SearchStrategy):
raise InvalidArgument(
"Expected base to be SearchStrategy but got %r" % (self.base,)
)
extended = self.extend(self.limited_base)
if not isinstance(extended, SearchStrategy):
raise InvalidArgument(
"Expected extend(%r) to be a SearchStrategy but got %r"
% (self.limited_base, extended)
)
self.limited_base.validate()
self.extend(self.limited_base).validate()
def do_draw(self, data):
count = 0
while True:
try:
with self.limited_base.capped(self.max_leaves):
return data.draw(self.strategy)
except LimitReached:
# Workaround for possible coverage bug - this branch is definitely
# covered but for some reason is showing up as not covered.
if count == 0: # pragma: no branch
data.note_event(
lazyformat(
"Draw for %r exceeded max_leaves and had to be retried",
self,
)
)
count += 1
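# Minimal usage sketch (illustrative only): RecursiveStrategy is normally reached
# through the public helper hypothesis.strategies.recursive(), as shown below.
if __name__ == "__main__":
    from hypothesis import strategies as st

    # JSON-like values: scalar leaves, extended by wrapping children in lists/dicts.
    json_like = st.recursive(
        st.none() | st.booleans() | st.floats(allow_nan=False) | st.text(),
        lambda children: st.lists(children) | st.dictionaries(st.text(), children),
        max_leaves=25,
    )
    print(json_like.example())  # draw one sample value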
| 34.81982 | 84 | 0.623027 |
831d9c94befd0221675dfede1756cc4fc32d0cca | 3,387 | py | Python | src/simmer/drivers.py | holdengill/SImMER | 7608a9cb044f2827f43f2d0c177e17faf8ff7720 | [
"MIT"
] | null | null | null | src/simmer/drivers.py | holdengill/SImMER | 7608a9cb044f2827f43f2d0c177e17faf8ff7720 | [
"MIT"
] | 25 | 2021-01-21T06:51:58.000Z | 2022-03-28T21:07:30.000Z | src/simmer/drivers.py | holdengill/SImMER | 7608a9cb044f2827f43f2d0c177e17faf8ff7720 | [
"MIT"
] | null | null | null | """
Module for driving reduction processes. Contains highest-level API.
"""
from glob import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
from . import darks, flats, image
from . import plotting as pl
from . import search_headers as search
from . import sky
def all_driver(
inst, config_file, raw_dir, reddir, plotting_yml=None, searchsize=10
):
"""
Runs all drivers, performing an end-to-end reduction.
Inputs:
:inst: (Instrument object) instrument for which data is being reduced.
:config_file: (string) path of the config file containing plotting
specifications. Optional.
:raw_dir: (string) path of the directory containing the raw data.
    :reddir: (string) path of the directory to contain the reduced data.
:plotting_yml: (string) path to the plotting configuration file.
"""
# obtain file list from config file
config = pd.read_csv(config_file)
config.Object = config.Object.astype(str)
if inst.name == "ShARCS":
search.search_headers(raw_dir)
if plotting_yml:
pl.initialize_plotting(plotting_yml)
darks.dark_driver(raw_dir, reddir, config, inst)
flats.flat_driver(raw_dir, reddir, config, inst)
sky.sky_driver(raw_dir, reddir, config, inst)
methods = image.image_driver(raw_dir, reddir, config, inst)
star_dirlist = glob(reddir + "*/")
# we want to ensure that the code doesn't attempt to reduce folders
# that are in the reduced directory but not in the config
cleaned_star_dirlist = [
star_dir
for star_dir in star_dirlist
for ob in config.Object
if ob in star_dir
]
for i, s_dir in enumerate(
tqdm(
np.unique(cleaned_star_dirlist),
desc="Running registration",
position=0,
leave=True,
)
):
image.create_im(s_dir, searchsize, method=methods[i])
def config_driver(inst, config_file, raw_dir, reddir):
"""
    Runs all_drivers, terminating after running sky_driver.
Inputs:
:inst: (Instrument object) instrument for which data is being reduced.
:config_file: (string) path of the config file.
:raw_dir: (string) path of the directory containing the raw data.
    :reddir: (string) path of the directory to contain the reduced data.
"""
# get file list from config file
config = pd.read_csv(config_file)
config.Object = config.Object.astype(str)
darks.dark_driver(raw_dir, reddir, config, inst)
flats.flat_driver(raw_dir, reddir, config, inst)
sky.sky_driver(raw_dir, reddir, config, inst)
def image_driver(inst, config_file, raw_dir, reddir):
"""
    Runs all_drivers, terminating after running image_driver.
Inputs:
:inst: (Instrument object) instrument for which data is being reduced.
:config_file: (string) path of the config file.
:raw_dir: (string) path of the directory containing the raw data.
    :reddir: (string) path of the directory to contain the reduced data.
"""
# get file list from config file
config = pd.read_csv(config_file)
config.Object = config.Object.astype(str)
image.image_driver(raw_dir, reddir, config, inst)
# Now do registration
star_dirlist = glob(reddir + "*/")
for s_dir in star_dirlist:
image.create_im(s_dir, 10)
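# Minimal usage sketch (illustrative only; file names and the instrument object
# are assumptions, not part of this module):
#
#     inst = ...  # an instrument instance from this package, e.g. for ShARCS
#     all_driver(inst, "config.csv", "raw_data/", "reduced_data/",
#                plotting_yml="plotting.yml")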
| 30.241071 | 78 | 0.675229 |
44114c66c293094c47af36383993bc14f8ceebf5 | 1,210 | py | Python | wikipedia_scraper.py | rodrigogomesrc/CrawlerStats | b0c5d27369127580bbbd3c868907b1431a501564 | [
"MIT"
] | null | null | null | wikipedia_scraper.py | rodrigogomesrc/CrawlerStats | b0c5d27369127580bbbd3c868907b1431a501564 | [
"MIT"
] | null | null | null | wikipedia_scraper.py | rodrigogomesrc/CrawlerStats | b0c5d27369127580bbbd3c868907b1431a501564 | [
"MIT"
] | null | null | null | from config import *
from bs4 import BeautifulSoup
import requests
from FTDHandler import ftdhandler as txt
import time
def extract_page_text(content):
soup = BeautifulSoup(content, 'html.parser')
paragraphs_list = soup.find_all("p")
page_text = " "
for paragraph in paragraphs_list:
page_text += paragraph.get_text()
return page_text
def scrape():
links = txt.ftdhandler.raw_lines(LINKS_FILENAME, linebreaks=False)
links_quantity = len(links)
requested_links = SCRAPING_START
print("Extracting pages from the links...")
for n in range(SCRAPING_START, links_quantity):
time.sleep(DELAY)
current_link = links[n]
try:
response = requests.get(current_link)
if response.status_code == 200:
requested_links += 1
print("Scraping %d of %d : %s" %(requested_links, links_quantity, current_link))
text = extract_page_text(response.content)
if (requested_links >= LINKS_ACCESS_LIMIT) and LINKS_ACCESS_LIMIT != -1:
print("Link access limit reached")
break
with open(TEXT_FILENAME, 'a') as file:
file.write(text)
else:
print("Request Error: %d" %response.status_code)
except:
continue
if __name__ == "__main__":
scrape()
| 18.90625 | 84 | 0.715702 |
814f9bb8b748e2a9ffac0a140d188d94c5e2da68 | 3,520 | py | Python | code.py | shadow09rj/olympic-hero | 652f9f88241e3320cdeb16abee18af75d4aef3e7 | [
"MIT"
] | null | null | null | code.py | shadow09rj/olympic-hero | 652f9f88241e3320cdeb16abee18af75d4aef3e7 | [
"MIT"
] | null | null | null | code.py | shadow09rj/olympic-hero | 652f9f88241e3320cdeb16abee18af75d4aef3e7 | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data= pd.read_csv(path)
data.rename(columns = {'Total':'Total_Medals'},inplace=True)
print(data.head(10))
#Code starts here
# --------------
#Code starts here0
#Code starts here0
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'] ,
'Summer','Winter' )
data['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'] ,
'Both',data['Better_Event'] )
better_event = data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter',
'Total_Medals']]
top_countries.drop(data.index[-1],inplace=True)
def top_ten(top_countries,column):
country_list = []
countries = top_countries.nlargest(10,column)
country_list = countries['Country_Name'].tolist()
return country_list
top_10_summer =top_ten(top_countries,'Total_Summer')
top_10_winter =top_ten(top_countries,'Total_Winter')
top_10 =top_ten(top_countries,'Total_Medals')
common = [i for i in top_10_summer for j in top_10_winter for k in top_10 if i==j==k]
print(common)
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
#fig, (ax_1,ax_2,ax_3) = plt.subplot(1,3, figsize = (20,10))
#plot 1
summer_df.plot('Country_Name','Total_Summer',kind='bar',color='r')
plt.xlabel("Countries")
plt.xticks(rotation=45)
plt.title('Medal counts for summer top 10 teams')
plt.show()
#plot 2
winter_df.plot('Country_Name','Total_Winter',kind='bar',color='b')
plt.xlabel("Countries")
plt.xticks(rotation=45)
plt.title('Medal counts for winter top 10 teams')
plt.show()
#plot 3
top_df.plot('Country_Name','Total_Medals',kind='bar',color='g')
plt.xlabel("Countries")
plt.xticks(rotation=45)
plt.title('Medal counts for all over top 10 teams')
plt.show()
# --------------
#Code starts here
#summer max gold
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df[summer_df['Golden_Ratio'] == summer_max_ratio]['Country_Name'].to_string(index=False)
#winter max gold
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold =winter_df[winter_df['Golden_Ratio'] == winter_max_ratio]['Country_Name'].to_string(index=False)
#top max gold
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df[top_df['Golden_Ratio'] == top_max_ratio]['Country_Name'].to_string(index=False)
# --------------
#Code starts here
data_1 = data.drop(data.index[-1])
data_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + 1*data_1['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1[data_1['Total_Points'] == most_points]['Country_Name'].to_string(index=False)
# --------------
#Code starts here
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot(kind='bar',stacked=True)
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
plt.show()
| 30.344828 | 118 | 0.705398 |
e501b2f73286439b86125483113001b774dab3d4 | 535 | py | Python | examples/sanic/example.py | Jie-Yuan/1_DataMining | f5338388b4f883233f350d4fb9c5903180883430 | [
"Apache-2.0"
] | 14 | 2019-06-25T13:46:32.000Z | 2020-10-27T02:04:59.000Z | examples/sanic/example.py | Jie-Yuan/2_DataMining | f5338388b4f883233f350d4fb9c5903180883430 | [
"Apache-2.0"
] | null | null | null | examples/sanic/example.py | Jie-Yuan/2_DataMining | f5338388b4f883233f350d4fb9c5903180883430 | [
"Apache-2.0"
] | 7 | 2019-06-25T13:26:16.000Z | 2020-10-27T02:05:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : example
# @Time : 2019-09-12 11:37
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
import os
from sanic import Sanic, response
from sanic.response import html, json, redirect, text, raw, file, file_stream
app = Sanic()
@app.route('/get')
async def get_test(request):
title = request.args.get('title')
return response.json([{'model_name': title}])
app.run()
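# Usage note (illustrative only): with Sanic's default host/port (commonly
# 127.0.0.1:8000), the endpoint can be exercised with e.g.
#     curl "http://127.0.0.1:8000/get?title=my-model"
# which should return [{"model_name": "my-model"}].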
| 19.814815 | 77 | 0.616822 |
5f01359889fa890604453dd36663a1e64ba7ed72 | 188 | py | Python | src/OTLMOW/ModelGenerator/OSLODatatypeUnion.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/ModelGenerator/OSLODatatypeUnion.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/ModelGenerator/OSLODatatypeUnion.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | import dataclasses
@dataclasses.dataclass
class OSLODatatypeUnion:
name: str
objectUri: str
definition: str
label: str
usagenote: str
deprecated_version: str
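# Minimal usage sketch (illustrative only; all field values are placeholders):
if __name__ == '__main__':
    union = OSLODatatypeUnion(
        name='DtuExample',
        objectUri='https://example.org/ns#DtuExample',
        definition='Illustrative union datatype.',
        label='Example union',
        usagenote='',
        deprecated_version='',
    )
    print(union)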
| 11.75 | 27 | 0.702128 |
415eaffb93a7bacc3ddd15d91d551ea06562cef3 | 8,349 | py | Python | pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleM58/PHoleM58.py | mxgnsr/pyleecan | 2b0a04e4ae67c073a91362ab42332908fef53bdd | [
"Apache-2.0"
] | null | null | null | pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleM58/PHoleM58.py | mxgnsr/pyleecan | 2b0a04e4ae67c073a91362ab42332908fef53bdd | [
"Apache-2.0"
] | null | null | null | pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleM58/PHoleM58.py | mxgnsr/pyleecan | 2b0a04e4ae67c073a91362ab42332908fef53bdd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from numpy import pi
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget
from ......Classes.HoleM58 import HoleM58
from ......GUI import gui_option
from ......GUI.Dialog.DMachineSetup.SMHoleMag.PHoleM58.Gen_PHoleM58 import Gen_PHoleM58
from ......GUI.Dialog.DMatLib.MatLib import MatLib
from ......Methods.Slot.Slot.check import SlotCheckError
class PHoleM58(Gen_PHoleM58, QWidget):
"""Page to set the Hole Type 58"""
# Signal to DMachineSetup to know that the save popup is needed
saveNeeded = pyqtSignal()
# Information for WHoleMag
hole_name = "Slot Type 58"
hole_type = HoleM58
def __init__(self, hole=None, matlib=MatLib()):
"""Initialize the widget according to hole
Parameters
----------
self : PHoleM58
A PHoleM58 widget
hole : HoleM58
current hole to edit
matlib : MatLib
Material Library
"""
# Build the interface according to the .ui file
QWidget.__init__(self)
self.setupUi(self)
self.matlib = matlib
self.hole = hole
# Set FloatEdit unit
self.lf_W0.unit = "m"
self.lf_W1.unit = "m"
self.lf_W2.unit = "m"
self.lf_W3.unit = "rad"
self.lf_R0.unit = "m"
self.lf_H0.unit = "m"
self.lf_H1.unit = "m"
self.lf_H2.unit = "m"
# Set default materials
self.w_mat_0.setText("mat_void:")
self.w_mat_0.def_mat = "Air"
self.w_mat_1.setText("magnet_0:")
self.w_mat_1.def_mat = "Magnet1"
# Adapt GUI with/without magnet
if hole.magnet_0 is None: # SyRM
self.img_slot.setPixmap(
QPixmap(":/images/images/MachineSetup/WSlot/Slot_58_no_mag.PNG")
)
self.W1 = 0
self.W2 = 0
self.w_mat_0.update(self.hole, "mat_void", self.matlib)
self.w_mat_1.hide()
else:
# Set current material
self.w_mat_0.update(self.hole, "mat_void", self.matlib)
self.w_mat_1.update(self.hole.magnet_0, "mat_type", self.matlib)
# Set unit name (m ou mm)
self.u = gui_option.unit
wid_list = [
self.unit_W0,
self.unit_W1,
self.unit_W2,
self.unit_R0,
self.unit_H0,
self.unit_H1,
self.unit_H2,
]
for wid in wid_list:
wid.setText(self.u.get_m_name())
# Fill the fields with the machine values (if they're filled)
self.lf_W0.setValue(self.hole.W0)
self.lf_W1.setValue(self.hole.W1)
self.lf_W2.setValue(self.hole.W2)
self.lf_W3.setValue(self.hole.W3)
self.lf_R0.setValue(self.hole.R0)
self.lf_H0.setValue(self.hole.H0)
self.lf_H1.setValue(self.hole.H1)
self.lf_H2.setValue(self.hole.H2)
# Display the main output of the hole (surface, height...)
self.comp_output()
# Connect the signal
self.lf_W0.editingFinished.connect(self.set_W0)
self.lf_W1.editingFinished.connect(self.set_W1)
self.lf_W2.editingFinished.connect(self.set_W2)
self.lf_W3.editingFinished.connect(self.set_W3)
self.lf_R0.editingFinished.connect(self.set_R0)
self.lf_H0.editingFinished.connect(self.set_H0)
self.lf_H1.editingFinished.connect(self.set_H1)
self.lf_H2.editingFinished.connect(self.set_H2)
self.w_mat_0.saveNeeded.connect(self.emit_save)
self.w_mat_1.saveNeeded.connect(self.emit_save)
def set_W0(self):
"""Signal to update the value of W0 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.W0 = self.lf_W0.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_W1(self):
"""Signal to update the value of W1 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.W1 = self.lf_W1.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_W2(self):
"""Signal to update the value of W2 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.W2 = self.lf_W2.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_W3(self):
"""Signal to update the value of W3 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.W3 = self.lf_W3.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_R0(self):
"""Signal to update the value of R0 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.R0 = self.lf_R0.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_H0(self):
"""Signal to update the value of H0 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.H0 = self.lf_H0.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_H1(self):
"""Signal to update the value of H1 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.H1 = self.lf_H1.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_H2(self):
"""Signal to update the value of H2 according to the line edit
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
self.hole.H2 = self.lf_H2.value()
self.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def comp_output(self):
"""Compute and display the hole output
Parameters
----------
self : PHoleM58
A PHoleM58 widget
"""
is_set = False
if self.check() is None:
try:
# We compute the output only if the hole is correctly set
# Compute all the needed output as string
s_surf = format(self.u.get_m2(self.hole.comp_surface()), ".4g")
m_surf = format(self.u.get_m2(self.hole.comp_surface_magnets()), ".4g")
# Update the GUI to display the Output
self.out_slot_surface.setText(
"Hole suface: " + s_surf + " " + self.u.get_m2_name()
)
self.out_magnet_surface.setText(
"Magnet surface: " + m_surf + " " + self.u.get_m2_name()
)
is_set = True
except:
pass
if not is_set:
# We can't compute the output => We erase the previous version
            # (that way the user knows that something is wrong)
            self.out_slot_surface.setText("Hole surface: ?")
self.out_magnet_surface.setText("Magnet surface: ?")
def check(self):
"""Check that the current machine have all the needed field set
Parameters
----------
self : PHoleM58
A PHoleM58 widget
Returns
-------
error : str
Error message (return None if no error)
"""
# Constraints and None
try:
self.hole.check()
except SlotCheckError as error:
return str(error)
def emit_save(self):
"""Send a saveNeeded signal to the DMachineSetup"""
self.saveNeeded.emit()
| 30.694853 | 87 | 0.568811 |
1d93ae0de7b18e1f2d775ed3725d8d28dce97946 | 3,114 | py | Python | gnupg_mails/message.py | jandd/django-gnupg-mails | 15e6fceeacc3d7c6d9e3bcbce41db65e0f9f7ce0 | [
"MIT"
] | 4 | 2015-03-16T13:03:11.000Z | 2020-04-27T16:19:09.000Z | gnupg_mails/message.py | jandd/django-gnupg-mails | 15e6fceeacc3d7c6d9e3bcbce41db65e0f9f7ce0 | [
"MIT"
] | 1 | 2019-05-16T17:38:46.000Z | 2020-02-25T20:39:27.000Z | gnupg_mails/message.py | jandd/django-gnupg-mails | 15e6fceeacc3d7c6d9e3bcbce41db65e0f9f7ce0 | [
"MIT"
] | null | null | null | from email.charset import Charset, QP
from email.encoders import encode_noop
from email.mime.application import MIMEApplication
from email.mime.nonmultipart import MIMENonMultipart
from email.utils import formatdate
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.mail import SafeMIMEMultipart
from django.core.mail.message import MIMEMixin
from django.core.mail.message import make_msgid
from gnupg import GPG
class MIMEUTF8QPText(MIMEMixin, MIMENonMultipart):
def __init__(self, payload, charset="utf-8"):
MIMENonMultipart.__init__(self, "text", "plain", charset=charset)
utf8qp = Charset(charset)
utf8qp.body_encoding = QP
self.set_payload(payload, charset=utf8qp)
class GnuPGMessage(EmailMessage):
def __init__(self, *args, **kwargs):
super(GnuPGMessage, self).__init__(*args, **kwargs)
self.gpg = GPG(gnupghome=settings.GNUPG_HOMEDIR)
def _sign(self, original_bytes):
sig = self.gpg.sign(original_bytes, detach=True, clearsign=False)
signature = MIMEApplication(
str(sig), "pgp-signature", encode_noop, name="signature.asc"
)
signature.add_header("Content-Description", "Digital signature")
del signature["MIME-Version"]
return signature
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = MIMEUTF8QPText(self.body, encoding)
msg = self._create_message(msg)
del msg["MIME-Version"]
wrapper = SafeMIMEMultipart(
"signed", protocol="application/pgp-signature", micalg="pgp-sha512"
)
wrapper.preamble = "This is an OpenPGP/MIME signed message (RFC 4880 and 3156)"
# copy headers from original message to PGP/MIME envelope
for header in msg.keys():
if header.lower() not in (
"content-disposition",
"content-type",
"mime-version",
):
for value in msg.get_all(header):
wrapper.add_header(header, value)
del msg[header]
wrapper["Subject"] = self.subject
wrapper["From"] = self.extra_headers.get("From", self.from_email)
wrapper["To"] = self.extra_headers.get("To", ", ".join(self.to))
if self.cc:
wrapper["Cc"] = ", ".join(self.cc)
header_names = [key.lower() for key in self.extra_headers]
if "date" not in header_names:
wrapper["Date"] = formatdate()
if "message-id" not in header_names:
wrapper["Message-ID"] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ("from", "to"):
# From and To are already handled
continue
wrapper[name] = value
for part in msg.walk():
del part["MIME-Version"]
signature = self._sign(msg.as_bytes(linesep="\r\n"))
wrapper["Content-Disposition"] = "inline"
wrapper.attach(msg)
wrapper.attach(signature)
return wrapper
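# Minimal usage sketch (illustrative only): GnuPGMessage itself needs configured
# Django settings (GNUPG_HOMEDIR) and a signing key, so only the standalone
# quoted-printable MIME helper is demonstrated here.
if __name__ == "__main__":
    part = MIMEUTF8QPText("Grüße, a quoted-printable encoded body")
    print(part["Content-Type"])   # e.g. text/plain; charset="utf-8"
    print(part.get_payload())     # the QP-encoded payload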
| 34.988764 | 87 | 0.632627 |
02fb73dad8796f75103022ec87a70f2fc021a301 | 1,388 | py | Python | tests/test_profile.py | Jikol/terracotta | d7e5b615f52cb856d2d12039e95b1de837603be6 | [
"MIT"
] | 1 | 2022-01-23T14:52:48.000Z | 2022-01-23T14:52:48.000Z | tests/test_profile.py | fitriandriastuti/terracotta | 04f1019ee47c565dbf068ee2a6d49f05080da698 | [
"MIT"
] | null | null | null | tests/test_profile.py | fitriandriastuti/terracotta | 04f1019ee47c565dbf068ee2a6d49f05080da698 | [
"MIT"
] | 1 | 2022-01-04T22:14:22.000Z | 2022-01-04T22:14:22.000Z | import pytest
import time
from moto import mock_xray_client, XRaySegment
def test_xray_tracing(caplog):
@mock_xray_client
def run_test():
from terracotta import update_settings
import terracotta.profile
update_settings(XRAY_PROFILE=True)
@terracotta.profile.trace('dummy')
def func_to_trace():
time.sleep(0.1)
with XRaySegment():
func_to_trace()
with XRaySegment():
with terracotta.profile.trace('dummy2'):
time.sleep(0.1)
for record in caplog.records:
assert record.levelname != 'ERROR'
# sanity check, recording without starting a segment should fail
func_to_trace()
assert any('cannot find the current segment' in rec.message for rec in caplog.records)
run_test()
def test_xray_exception(caplog):
@mock_xray_client
def run_test():
from terracotta import update_settings
import terracotta.profile
update_settings(XRAY_PROFILE=True)
with XRaySegment():
with pytest.raises(NotImplementedError):
with terracotta.profile.trace('dummy') as subsegment:
raise NotImplementedError('foo')
assert len(subsegment.cause['exceptions']) == 1
assert subsegment.cause['exceptions'][0].message == 'foo'
run_test()
| 25.703704 | 94 | 0.639049 |
e18166ecbf88e51292aec27fc43c595b6249a633 | 4,836 | py | Python | cloudmersive_convert_api_client/models/get_docx_pages_response.py | Cloudmersive/Cloudmersive.APIClient.Python.Convert | dba2fe7257229ebdacd266531b3724552c651009 | [
"Apache-2.0"
] | 3 | 2018-07-25T23:04:34.000Z | 2021-08-10T16:43:10.000Z | cloudmersive_convert_api_client/models/get_docx_pages_response.py | Cloudmersive/Cloudmersive.APIClient.Python.Convert | dba2fe7257229ebdacd266531b3724552c651009 | [
"Apache-2.0"
] | 3 | 2020-11-23T10:46:48.000Z | 2021-12-30T14:09:34.000Z | cloudmersive_convert_api_client/models/get_docx_pages_response.py | Cloudmersive/Cloudmersive.APIClient.Python.Convert | dba2fe7257229ebdacd266531b3724552c651009 | [
"Apache-2.0"
] | 2 | 2020-01-07T09:48:01.000Z | 2020-11-23T10:47:00.000Z | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetDocxPagesResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'successful': 'bool',
'pages': 'list[DocxPage]',
'page_count': 'int'
}
attribute_map = {
'successful': 'Successful',
'pages': 'Pages',
'page_count': 'PageCount'
}
def __init__(self, successful=None, pages=None, page_count=None): # noqa: E501
"""GetDocxPagesResponse - a model defined in Swagger""" # noqa: E501
self._successful = None
self._pages = None
self._page_count = None
self.discriminator = None
if successful is not None:
self.successful = successful
if pages is not None:
self.pages = pages
if page_count is not None:
self.page_count = page_count
@property
def successful(self):
"""Gets the successful of this GetDocxPagesResponse. # noqa: E501
True if successful, false otherwise # noqa: E501
:return: The successful of this GetDocxPagesResponse. # noqa: E501
:rtype: bool
"""
return self._successful
@successful.setter
def successful(self, successful):
"""Sets the successful of this GetDocxPagesResponse.
True if successful, false otherwise # noqa: E501
:param successful: The successful of this GetDocxPagesResponse. # noqa: E501
:type: bool
"""
self._successful = successful
@property
def pages(self):
"""Gets the pages of this GetDocxPagesResponse. # noqa: E501
Pages in the document # noqa: E501
:return: The pages of this GetDocxPagesResponse. # noqa: E501
:rtype: list[DocxPage]
"""
return self._pages
@pages.setter
def pages(self, pages):
"""Sets the pages of this GetDocxPagesResponse.
Pages in the document # noqa: E501
:param pages: The pages of this GetDocxPagesResponse. # noqa: E501
:type: list[DocxPage]
"""
self._pages = pages
@property
def page_count(self):
"""Gets the page_count of this GetDocxPagesResponse. # noqa: E501
Count of pages # noqa: E501
:return: The page_count of this GetDocxPagesResponse. # noqa: E501
:rtype: int
"""
return self._page_count
@page_count.setter
def page_count(self, page_count):
"""Sets the page_count of this GetDocxPagesResponse.
Count of pages # noqa: E501
:param page_count: The page_count of this GetDocxPagesResponse. # noqa: E501
:type: int
"""
self._page_count = page_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetDocxPagesResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetDocxPagesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
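# Minimal usage sketch (illustrative only; values are placeholders): `pages`
# would normally hold DocxPage objects returned by the API and is left unset.
if __name__ == '__main__':
    resp = GetDocxPagesResponse(successful=True, page_count=3)
    print(resp.to_dict())  # includes 'pages': None because it was not set
    print(resp)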
| 27.793103 | 85 | 0.577957 |
57307fd7d390d9b7b456d025281c9c16a2b3f46b | 3,108 | py | Python | uncertainty_wizard/internal_utils/tf_version_resolver.py | p1ndsvin/uncertainty-wizard | 92a7a9bcb411e512cf6ad54e7ba226a3c66d3583 | [
"MIT"
] | 33 | 2020-12-21T20:21:21.000Z | 2022-03-25T17:51:36.000Z | uncertainty_wizard/internal_utils/tf_version_resolver.py | swb19/uncertainty-wizard | 5ba9bfc6ee967eb5f226abbedb6f9d5452b3cfea | [
"MIT"
] | 83 | 2020-12-18T18:18:28.000Z | 2022-03-28T21:17:29.000Z | uncertainty_wizard/internal_utils/tf_version_resolver.py | swb19/uncertainty-wizard | 5ba9bfc6ee967eb5f226abbedb6f9d5452b3cfea | [
"MIT"
] | 5 | 2021-02-13T13:27:48.000Z | 2021-12-25T16:45:19.000Z | import warnings
from typing import Union
import tensorflow as tf
def _compare_expected_to_current_tf_version(expected_version) -> Union[None, int]:
"""
Compares the 'x.y.z' version parts of a passed expected version and the actual tensorflow version.
The result is negative if the expected version is newer, positive if the expected version is older
and 0 if they are the same.
If one of the versions cannot be parsed, a warning is triggered and 'None' is returned.
:param expected_version:
:return: an int if a comparison was made and None if parsing was impossible
"""
actual_version = tf.version.VERSION
# Note: We're currently only considering versions in the format 'x.y.z'
# (i.e., ignore RCs and multi digit versions - which is probably fine given tfs very fast major release cycles)
# Inspection disabling reason: We really want to catch all exceptions.
# noinspection PyBroadException
try:
expected_v_splits = [int(v) for v in expected_version[:5].split(".")]
actual_v_splits = [int(v) for v in actual_version[:5].split(".")]
except Exception:
warnings.warn(
f"One of the version strings '{expected_version}' (requested) "
f"or '{actual_version}' was not parsed: "
f"We are trying to use a suitable guess about your tf compatibility and thus,"
f"you may not actually note any problems."
f"However, to be safe, please report this issue (with this warning) "
f"to the uncertainty wizard maintainers.",
RuntimeWarning,
)
return None
for act, expected in zip(actual_v_splits, expected_v_splits):
if expected > act:
return 1
elif expected < act:
return -1
# Version equality
return 0
def current_tf_version_is_older_than(version: str, fallback: Union[bool, None] = True):
"""
A method to check whether the loaded tensorflow version is older than a passed version.
:param version: A tensorflow version string, e.g. '2.3.0'
:param fallback: If a problem occurs during parsing, the value of fallback will be returned
:return: True if the used tensorflow version is older than the version specified in the passed string
"""
comp = _compare_expected_to_current_tf_version(version)
if comp is None:
return fallback
elif comp > 0:
return True
else:
return False
def current_tf_version_is_newer_than(version: str, fallback: Union[bool, None] = False):
"""
    A method to check whether the loaded tensorflow version is newer than a passed version.
:param version: A tensorflow version string, e.g. '2.3.0'
:param fallback: If a problem occurs during parsing, the value of fallback will be returned
:return: True if the used tensorflow version is newer than the version specified in the passed string
"""
comp = _compare_expected_to_current_tf_version(version)
if comp is None:
return fallback
elif comp >= 0:
return False
else:
return True
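# Minimal usage sketch (illustrative only): both helpers compare against the
# TensorFlow build that is installed at call time.
if __name__ == "__main__":
    print(tf.version.VERSION)
    print(current_tf_version_is_older_than("2.4.0"))
    print(current_tf_version_is_newer_than("2.4.0"))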
| 40.363636 | 115 | 0.685972 |
d432e415ba500b10d8274254c15bc725d047bf2c | 14,789 | py | Python | smart_device_client/smart_device_client.py | buckley-w-david/smart-device-client | 3ea8ea190466794300a1956557445800b26c74b8 | [
"MIT"
] | null | null | null | smart_device_client/smart_device_client.py | buckley-w-david/smart-device-client | 3ea8ea190466794300a1956557445800b26c74b8 | [
"MIT"
] | null | null | null | smart_device_client/smart_device_client.py | buckley-w-david/smart-device-client | 3ea8ea190466794300a1956557445800b26c74b8 | [
"MIT"
] | null | null | null | from enum import IntEnum
import json
import logging
import re
import socket
from typing import Tuple, Optional, Dict
import zmq
RESP_PATTERN = re.compile(r"calibre wireless device client \(on (.+)\);(\d+),(\d+)")
CALIBRE_MESSAGE = re.compile(r"^(?P<length>\d+)(?P<message>.*)")
MAGIC_PATH_LENGTH = 37
Port = int
Address = str
Host = Tuple[Address, Port]
CalibrePayload = Dict
# mapping copied directly from src/calibre/devices/smart_device_app/driver.py in calibre repo
class SmartDeviceOpcode(IntEnum):
NOOP = 12
OK = 0
BOOK_DONE = 11
CALIBRE_BUSY = 18
SET_LIBRARY_INFO = 19
DELETE_BOOK = 13
DISPLAY_MESSAGE = 17
ERROR = 20
FREE_SPACE = 5
GET_BOOK_FILE_SEGMENT = 14
GET_BOOK_METADATA = 15
GET_BOOK_COUNT = 6
GET_DEVICE_INFORMATION = 3
GET_INITIALIZATION_INFO = 9
SEND_BOOKLISTS = 7
SEND_BOOK = 8
SEND_BOOK_METADATA = 16
SET_CALIBRE_DEVICE_INFO = 1
SET_CALIBRE_DEVICE_NAME = 2
TOTAL_SPACE = 4
ResponsePayload = Tuple[SmartDeviceOpcode, Dict]
CLIENT_NAME = "smart-device-client"
DEVICE = "Kobo"
DEVICE_NAME = f"{CLIENT_NAME} ({DEVICE})"
VERSION = "0.1.0"
VALID_EXTENSIONS = ["epub"]
logger = logging.getLogger(__name__)
# TODO: Configuration
# appName
# deviceKind
# deviceName (handled by the last two)
# version
# extensions
class SmartDeviceClient:
COMPANION_LOCAL_PORT = 8134
def __init__(self, calibre_host: Host, replied_port: Port):
self.calibre_host = calibre_host
self.replied_port = replied_port
self.implementation = {
SmartDeviceOpcode.NOOP: self.on_noop,
SmartDeviceOpcode.OK: self.on_ok,
SmartDeviceOpcode.BOOK_DONE: self.on_book_done,
SmartDeviceOpcode.CALIBRE_BUSY: self.on_calibre_busy,
SmartDeviceOpcode.SET_LIBRARY_INFO: self.on_set_library_info,
SmartDeviceOpcode.DELETE_BOOK: self.on_delete_book,
SmartDeviceOpcode.DISPLAY_MESSAGE: self.on_display_message,
SmartDeviceOpcode.ERROR: self.on_error,
SmartDeviceOpcode.FREE_SPACE: self.on_free_space,
SmartDeviceOpcode.GET_BOOK_FILE_SEGMENT: self.on_get_book_file_segment,
SmartDeviceOpcode.GET_BOOK_METADATA: self.on_get_book_metadata,
SmartDeviceOpcode.GET_BOOK_COUNT: self.on_get_book_count,
SmartDeviceOpcode.GET_DEVICE_INFORMATION: self.on_get_device_information,
SmartDeviceOpcode.GET_INITIALIZATION_INFO: self.on_get_initialization_info,
SmartDeviceOpcode.SEND_BOOKLISTS: self.on_send_booklists,
SmartDeviceOpcode.SEND_BOOK: self.on_send_book,
SmartDeviceOpcode.SEND_BOOK_METADATA: self.on_send_book_metadata,
SmartDeviceOpcode.SET_CALIBRE_DEVICE_INFO: self.on_set_calibre_device_info,
SmartDeviceOpcode.SET_CALIBRE_DEVICE_NAME: self.on_set_calibre_device_name,
SmartDeviceOpcode.TOTAL_SPACE: self.on_total_space,
}
def on_unimplemented(
self, opcode: SmartDeviceOpcode, payload: CalibrePayload
) -> None:
logger.warning("received %s, which is is unimplemented. %s", opcode, payload)
def on_noop(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("NOOP: %s", payload)
# This method shows a problem with the client structure, how does this method communicate intention to the serve method?
# such as needing to disconnect
# We can probably open a second command socket and send commands to it or something but that's janky
# might be our best bet though
# This is what I get for trying to make things nice for consumers and hiding the complexity
# calibre wants to close the socket, time to disconnect
if 'ejecting' in payload:
# self.disconnected_by_server = true
# self:disconnect()
return (SmartDeviceOpcode.OK, {})
# calibre announces the count of books that need more metadata
elif 'count' in payload:
return
# calibre requests more metadata for a book by its index
elif 'priKey' in payload:
# TODO
# local book = CalibreMetadata:getBookMetadata(arg.priKey)
# logger.dbg(string.format("sending book metadata %d/%d", self.current, self.pending))
# self:sendJsonData('OK', book)
# if self.current == self.pending then
# self.current = nil
# self.pending = nil
# return
# end
# self.current = self.current + 1
# return
return (SmartDeviceOpcode.OK, {})
# keep-alive NOOP
return (SmartDeviceOpcode.OK, {})
def on_ok(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("OK: %s", payload)
def on_book_done(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("BOOK_DONE: %s", payload)
def on_calibre_busy(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("CALIBRE_BUSY: %s", payload)
def on_set_library_info(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.warning("SET_LIBRARY_INFO received but not implemented: %s", payload)
return (SmartDeviceOpcode.OK, {})
def on_delete_book(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("DELETE_BOOK: %s", payload)
def on_display_message(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("DISPLAY_MESSAGE: %s", payload)
def on_error(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("ERROR: %s", payload)
def on_free_space(self, payload: CalibrePayload) -> ResponsePayload:
logger.warning("FREE_SPACE received but not implemented: %s", payload)
return (SmartDeviceOpcode.OK, {"free_space_on_device": 1024 * 1024 * 1024})
def on_get_book_file_segment(
self, payload: CalibrePayload
) -> Optional[ResponsePayload]:
logger.debug("GET_BOOK_FILE_SEGMENT: %s", payload)
def on_get_book_metadata(
self, payload: CalibrePayload
) -> Optional[ResponsePayload]:
logger.debug("GET_BOOK_METADATA: %s", payload)
def on_get_book_count(self, payload: CalibrePayload) -> ResponsePayload:
logger.warning("GET_BOOK_COUNT received but not implemented: %s", payload)
return (
SmartDeviceOpcode.OK,
{
"willStream": True,
"willScan": True,
"count": 0,
},
)
def on_get_device_information(self, payload: CalibrePayload) -> ResponsePayload:
logger.warning("GET_DEVICE received but not implemented: %s", payload)
return (
SmartDeviceOpcode.OK,
{
"device_info": {
# 'device_store_uuid' = CalibreMetadata.drive.device_store_uuid,
"device_name": DEVICE_NAME,
},
"version": VERSION,
"device_version": VERSION,
},
)
def on_get_initialization_info(self, payload: CalibrePayload) -> ResponsePayload:
logger.debug("default GET_INITIALIZATION_INFO handler called: %s", payload)
# TODO: I'm not using this for anything at the moment, but I could
# calibre_version = ".".join(map(str, payload["calibre_version"]))
# TODO: handle auth
# Also, kinda weird auth format
        # effectively we set up a shared secret used to hash "challenge" and verify we both can produce the same hash
# It's less of a password and more of a key.
# Is this done because we're just using TCP, so we don't want to transmit the plaintext password to the server?
# local getPasswordHash = function()
# local password = G_reader_settings:readSetting("calibre_wireless_password")
# local challenge = arg.passwordChallenge
# if password and challenge then
# return sha.sha1(password..challenge)
# else
# return ""
# end
#
# TODO: Looks like we're going to have to formalize some configuration spec
init_info = {
"appName": CLIENT_NAME, # TODO: Configurable
"acceptedExtensions": VALID_EXTENSIONS, # TODO: Configurable
"cacheUsesLpaths": True,
"canAcceptLibraryInfo": True,
"canDeleteMultipleBooks": True,
"canReceiveBookBinary": True,
"canSendOkToSendbook": True,
"canStreamBooks": True,
"canStreamMetadata": True,
"canUseCachedMetadata": True,
"ccVersionNumber": VERSION,
"coverHeight": 240,
"deviceKind": DEVICE, # TODO: Configurable
"deviceName": DEVICE_NAME, # TODO Configurable
"extensionPathLengths": [MAGIC_PATH_LENGTH for _ in VALID_EXTENSIONS],
"passwordHash": "", # TODO: getPasswordHash()
"maxBookContentPacketLen": 4096,
"useUuidFileNames": False,
"versionOK": True,
}
return (SmartDeviceOpcode.OK, init_info)
def on_send_booklists(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("SEND_BOOKLISTS: %s", payload)
def on_send_book(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("SEND_BOOK: %s", payload)
def on_send_book_metadata(
self, payload: CalibrePayload
) -> Optional[ResponsePayload]:
logger.debug("SEND_BOOK_METADATA: %s", payload)
def on_set_calibre_device_info(self, payload: CalibrePayload) -> ResponsePayload:
logger.warning("SET_CALIBRE_DEVICE_INFO received but not implemented: %s", payload)
return (SmartDeviceOpcode.OK, {})
    # Can we even receive this opcode? Or is it just for sending
def on_set_calibre_device_name(
self, payload: CalibrePayload
) -> Optional[ResponsePayload]:
logger.debug("SET_CALIBRE_DEVICE_NAME: %s", payload)
def on_total_space(self, payload: CalibrePayload) -> Optional[ResponsePayload]:
logger.debug("TOTAL_SPACE: %s", payload)
def serve(self):
context = zmq.Context()
socket = context.socket(zmq.STREAM)
socket.connect("tcp://%s:%d" % (self.calibre_host[0], self.replied_port))
try:
id = socket.getsockopt(zmq.IDENTITY)
logger.debug("id: \"%s\"", id)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
while True:
socks = dict(poller.poll())
if socks.get(socket) == zmq.POLLIN:
buffer = []
cl = 0
                    # We always receive this weird id value first that we don't care about
# Similar to the message in _send, guided by koreader, doesn't work without it.
_ = socket.recv()
message = socket.recv().decode("utf-8")
logger.debug("message part: %s", message)
# regex is probably overkill for this parsing now that I've realized I can't just consume the whole message in one big recv
if match := CALIBRE_MESSAGE.match(message):
length = int(match.group("length"))
message = match.group("message")
buffer.append(message)
cl += len(message)
# If the payload is too big, we have to do those 2 recv calls in a loop until we've read in everything that was sent
# FIXME: There is a race condition here
# I'm not yet certain why it's happening
                    # but I've received two messages in the same full_message
# which obviously exploded attempting to json.loads it
while cl < length:
_ = socket.recv()
message = socket.recv().decode("utf-8")
logger.debug("message part: %s", message)
buffer.append(message)
cl += len(message)
full_message = "".join(buffer)
logger.debug("recieved \"%s\"", full_message)
op, payload = json.loads(full_message)
opcode = SmartDeviceOpcode(op)
implementation = self.implementation.get(opcode) or (
lambda payload: self.on_unimplemented(opcode, payload)
)
ret = implementation(payload)
if ret:
self._send(socket, id, ret)
# TODO: Narrow down the exception list?
except Exception as e:
logger.exception(e)
raise
finally:
socket.close()
context.term()
def _send(self, socket: zmq.Socket, id: bytes, response: ResponsePayload) -> None:
opcode, payload = response
message = json.dumps([opcode.value, payload])
# calibre exchanges these messages in this format:
# LENGTH[OPCODE, MESSAGE]
# Where LENGTH is for the array holding the opcode and message
encoded = (str(len(message)) + message).encode()
multipart = [id, encoded]
logger.debug('sending: "%s"', multipart)
# I have honestly no idea why I have to send_multipart with that id value
# I'm just following the implementation in koreader, and it didn't work without it ¯\_(ツ)_/¯
socket.send_multipart(multipart)
@staticmethod
def find_calibre_server() -> Optional["SmartDeviceClient"]: # type: ignore
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(5)
# This magic list of ports is from the calibre source
# It's literally just some random ports they chose with the hope that
# nothing else happens to listen on them
broadcast_ports = [54982, 48123, 39001, 44044, 59678]
try:
for port in broadcast_ports:
count = sock.sendto(b"hello", (b"255.255.255.255", port))
if count:
dgram, host = sock.recvfrom(1024)
if match := RESP_PATTERN.match(dgram.decode("utf-8")):
return SmartDeviceClient(host, int(match.group(3)))
finally:
sock.close()
| 42.133903 | 143 | 0.618162 |
8a561548bb11553bac1e047a676ca78db1a57457 | 6,479 | py | Python | historical_event.py | alia0801/Asset-allocation | 2df61ea57d7de83a42b616f839631ee142c468d0 | [
"MIT"
] | null | null | null | historical_event.py | alia0801/Asset-allocation | 2df61ea57d7de83a42b616f839631ee142c468d0 | [
"MIT"
] | null | null | null | historical_event.py | alia0801/Asset-allocation | 2df61ea57d7de83a42b616f839631ee142c468d0 | [
"MIT"
] | null | null | null | import sys
import pandas as pd
import numpy as np
import pymysql
import math
import statistics
import time
import datetime
from itertools import combinations, permutations
from scipy.special import comb, perm
# starttime = datetime.datetime.now()
years = ["1990","1991","1992","1993","1994","1995","1996","1997","1998","1999",
"2000","2001","2002","2003","2004","2005","2006","2007","2008","2009",
"2010","2011","2012","2013","2014","2015","2016","2017","2018","2019","2020"]
month = ["00","01","02","03","04","05","06","07","08","09","10","11","12"]
day = ["00","01","02","03","04","05","06","07","08","09","10",
"11","12","13","14","15","16","17","18","19","20",
"21","22","23","24","25","26","27","28","29","30","31"]
day_of_month = [ 31,28,31, 30,31,30, 31,31,30, 31,30,31]
v1 = ['VTI','VOO','VXUS','SPY','BND','IVV','BNDX','VEA','VO',
'VUG','VB','VWO','VTV','QQQ','BSV','BIV','VTIP','VOE','IEF',
'SHY','TLT','IVE','VT','GOVT']
db = pymysql.connect("localhost", "root", "esfortest", "etf")
cursor = db.cursor()
choose1 = sys.argv[1]
weight1 = sys.argv[2]
input_per_month = float(sys.argv[3])/12
# start_date = '2014-06-20'
# end_date = '2016-02-11'
start_d = datetime.date(2014,6,20)
end_d = datetime.date(2016,2,11)
input_per_month = 10000
if( start_d.month < end_d.month ):  # full year(s) elapsed: no month borrow needed
y = end_d.year - start_d.year
    if(start_d.day < end_d.day):  # full month elapsed: no day borrow needed
end_ddd = datetime.date(end_d.year,end_d.month,start_d.day)
mm = end_d.month - start_d.month
else:
mm = end_d.month - start_d.month -1
if end_d.month!=1:
end_ddd = datetime.date(end_d.year,end_d.month-1,start_d.day)
else:
end_ddd = datetime.date(end_d.year,12,start_d.day)
else:
y = end_d.year - start_d.year-1
    if(start_d.day < end_d.day):  # full month elapsed: no day borrow needed
mm = end_d.month - start_d.month +12
end_ddd = datetime.date(end_d.year,end_d.month,start_d.day)
else:
mm = end_d.month - start_d.month +12-1
if end_d.month!=1:
end_ddd = datetime.date(end_d.year,end_d.month-1,start_d.day)
else:
end_ddd = datetime.date(end_d.year,12,start_d.day)
print(y,mm,end_ddd)
m = y*12 + mm
if(start_d.day!=end_d.day):
    m+=1  # the trailing partial month also counts
print(m)
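# Worked example with the hard-coded dates above (2014-06-20 -> 2016-02-11):
# the else branch gives y=1, mm=7 and end_ddd=2016-01-20, so m = 1*12 + 7 = 19,
# plus 1 for the trailing partial month (day 20 != day 11), i.e. m = 20.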
# today = datetime.date.today()
# yesterday = today - datetime.timedelta(days=5)
choose = choose1.split(',')
choose = ['VOO','VOE','VT','VEA']
weight = weight1.split(',')
weight = ['0.31','0.23','0.23','0.23']
for i in range(len(weight)):
weight[i] = float(weight[i])
# monthly returns: index 0 holds the trailing partial month, 1..m the full months
rewards = np.zeros(m+1)  # m+1 slots, since the loop below writes rewards[b+1]
in_money_arr=[]  # cumulative amount invested at each month
for i in range(m):
in_money_arr.append(i*input_per_month)
in_money_arr.append(m*input_per_month)
# d_now=yesterday
d_now = end_ddd
for b in range(m):
if b==0:
d_now = end_ddd
else:
d_now = d_pre
if d_now.month-2<0:
d_now_premonth=11
else:
d_now_premonth = d_now.month-2
# d_now_premonth=d_now.month
dminus= day_of_month[d_now_premonth]-1
d_pre = d_now - datetime.timedelta(days=dminus)
w = d_now.weekday()
if w==6:
d_now = d_now - datetime.timedelta(days=2)
elif w==5:
d_now = d_now - datetime.timedelta(days=1)
w = d_pre.weekday()
if w==6:
d_pre = d_pre - datetime.timedelta(days=2)
elif w==5:
d_pre = d_pre - datetime.timedelta(days=1)
for c in range(len(choose)):
sql = "select close from etf_close where (name = '"+choose[c]+"' and date = '"+str(d_now) + "')"
# print(sql)
cursor.execute(sql)
result_select3 = cursor.fetchall()
db.commit()
sql = "select close from etf_close where (name = '"+choose[c]+"' and date = '"+str(d_pre) + "')"
# print(sql)
cursor.execute(sql)
result_select4 = cursor.fetchall()
db.commit()
if len(result_select3) >0:
reward_now = result_select3[0][0]
# else:
# print(choose[c]+str(d_now)+'no result')
if len(result_select4) >0:
reward_pre = result_select4[0][0]
# else:
# print(choose[c]+str(d_pre)+'no result')
rewarddd = (reward_now-reward_pre)/reward_pre
rewards[b+1] += rewarddd * weight[c]
# compute the return of the final partial month (end_ddd to end_d)
d_now = end_d
d_pre = end_ddd
w = d_now.weekday()
if w==6:
d_now = d_now - datetime.timedelta(days=2)
elif w==5:
d_now = d_now - datetime.timedelta(days=1)
w = d_pre.weekday()
if w==6:
d_pre = d_pre - datetime.timedelta(days=2)
elif w==5:
d_pre = d_pre - datetime.timedelta(days=1)
for c in range(len(choose)):
sql = "select close from etf_close where (name = '"+choose[c]+"' and date = '"+str(d_now) + "')"
print(sql)
cursor.execute(sql)
result_select3 = cursor.fetchall()
db.commit()
sql = "select close from etf_close where (name = '"+choose[c]+"' and date = '"+str(d_pre) + "')"
print(sql)
cursor.execute(sql)
result_select4 = cursor.fetchall()
db.commit()
if len(result_select3) >0:
reward_now = result_select3[0][0]
# else:
# print(choose[c]+str(d_now)+'no result')
if len(result_select4) >0:
reward_pre = result_select4[0][0]
# else:
# print(choose[c]+str(d_pre)+'no result')
rewarddd = (reward_now-reward_pre)/reward_pre
rewards[0] = rewarddd
# reverse the returns array so it runs oldest to newest
result = []
# rewards2 = []
for x in range(len(rewards)):
result.append(rewards[len(rewards)-1-x])
# rewards2.append(rewards[len(rewards)-1-x])
# result.append(rewarddd)
# print(result)
# reward_arr = result[len(result)-6:]
# print(len(reward_arr))
# print(reward_arr)
# every_reward = []
# final_ans=[]
# final_inmoney=[]
ans = np.zeros(m+1)
for i in range(1,m):
ans[i] = ans[i-1] * (result[i-1]+1) +input_per_month
ans[m] = ans[m-1] * (result[m-1]+1)
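# Deposits happen at months 1..m-1 above, so the total invested is input_per_month*(m-1);
# final_r below is the simple overall return on that amount.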
final_r = (ans[m]-(input_per_month*(m-1)))/(input_per_month*(m-1))
# print(ans[m-1],input_per_month*m)
final_r = format(final_r*100 , '0.3f')
# every_reward[count] = str(final_r)
# count+=1
print(ans)
print(final_r+'%')
print(format(ans[m] , '0.2f'))
print(input_per_month*(m-1))
# every_reward.append(final_r+'%')
# final_ans.append(format(ans[m-1] , '0.2f'))
# # final_ans.append(str(round(ans[m-1])))
# final_inmoney.append(str(input_per_month*(m-1)))
# # db.close()
# result1 = ' '.join(every_reward)
# result2 = ' '.join(final_ans)
# result3 = ' '.join(final_inmoney)
# print(result1)
# print(result2)
# print(result3)
# # print(every_reward)
# # print(choose)
| 29.45 | 104 | 0.609353 |
89d4c0403fd5125e5f287098b30e36b97c79ad21 | 2,539 | py | Python | DS/tree/tree_node.py | kiapanahi/CS101 | e800d00d11eccfc5f1bd8fd8b45615158eea3035 | [
"MIT"
] | null | null | null | DS/tree/tree_node.py | kiapanahi/CS101 | e800d00d11eccfc5f1bd8fd8b45615158eea3035 | [
"MIT"
] | null | null | null | DS/tree/tree_node.py | kiapanahi/CS101 | e800d00d11eccfc5f1bd8fd8b45615158eea3035 | [
"MIT"
] | null | null | null | from .constants import HEAP_TYPE
class AbstractTreeNode(object):
"""
the abstraction of a tree node containing a generic value and
optional left/right children
"""
# type hinting is done in strings due to the 'forward referencing' problem
# https://www.python.org/dev/peps/pep-0484/#id28
def __init__(self, value=None, left: 'AbstractTreeNode' = None, right: 'AbstractTreeNode' = None):
if value is None:
raise TypeError('node value cannot be None')
self.value = value
self.left_child = left
self.right_child = right
def is_max_heap(self) -> bool:
is_max_heap = True
if self.left_child is not None:
is_max_heap = (is_max_heap and (
self.left_child.value <= self.value))
if self.right_child is not None:
is_max_heap = (is_max_heap and (
self.right_child.value <= self.value))
return is_max_heap
def is_min_heap(self) -> bool:
is_min_heap = True
if self.left_child is not None:
is_min_heap = (is_min_heap and (
self.left_child.value >= self.value))
if self.right_child is not None:
is_min_heap = (is_min_heap and (
self.right_child.value >= self.value))
return is_min_heap
def heapify(self, heap_type: HEAP_TYPE = HEAP_TYPE.MAX):
if heap_type is HEAP_TYPE.MAX:
self._max_heapify()
elif heap_type is HEAP_TYPE.MIN:
self._min_heapify()
pass
def _max_heapify(self):
if self.has_left_child():
if self.left_child.value > self.value:
self.value, self.left_child.value = self.left_child.value, self.value
if self.has_right_child():
if self.right_child.value > self.value:
self.value, self.right_child.value = self.right_child.value, self.value
def _min_heapify(self):
if self.has_left_child():
if self.left_child.value < self.value:
self.value, self.left_child.value = self.left_child.value, self.value
if self.has_right_child():
if self.right_child.value < self.value:
self.value, self.right_child.value = self.right_child.value, self.value
def has_left_child(self):
return self.left_child is not None and self.left_child.value is not None
def has_right_child(self):
return self.right_child is not None and self.right_child.value is not None
| 32.974026 | 102 | 0.625837 |
7ff454782556873d24ed1cee878166bac80e183f | 12,890 | py | Python | scripts/json_manip.py | certik/pandas | 758ca05e2eb04532b5d78331ba87c291038e2c61 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 22 | 2015-03-05T17:23:34.000Z | 2021-12-30T02:52:22.000Z | scripts/json_manip.py | certik/pandas | 758ca05e2eb04532b5d78331ba87c291038e2c61 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2016-09-30T11:15:32.000Z | 2016-09-30T11:15:32.000Z | scripts/json_manip.py | certik/pandas | 758ca05e2eb04532b5d78331ba87c291038e2c61 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 22 | 2015-01-02T12:14:20.000Z | 2021-10-13T09:22:30.000Z | """
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}],
u('fxVersion'): u('9.0'),
u('location'): u('zh-CN'),
u('operatingSystem'): u('WINNT Windows NT 5.1'),
u('surveyAnswers'): u(''),
u('task_guid'): u('d69fbd15-2517-45b5-8a17-bb7354122a75'),
u('tpVersion'): u('1.2'),
u('updateChannel'): u('beta')},
u('survey_data'): {
u('extensions'): [{u('appDisabled'): False,
u('id'): u('testpilot?labs.mozilla.com'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('Test Pilot')},
{u('appDisabled'): True,
u('id'): u('dict?www.youdao.com'),
u('isCompatible'): False,
u('isEnabled'): False,
u('isPlatformCompatible'): True,
u('name'): u('Youdao Word Capturer')},
{u('appDisabled'): False,
u('id'): u('jqs?sun.com'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('Java Quick Starter')},
{u('appDisabled'): False,
u('id'): u('?20a82645-c095-46ed-80e3-08825760534b?'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('Microsoft .NET Framework Assistant')},
{u('appDisabled'): False,
u('id'): u('?a0d7ccb3-214d-498b-b4aa-0e8fda9a7bf7?'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('WOT')}],
u('version_number'): 1}}
# class SurveyResult(object):
# def __init__(self, record):
# self.record = record
# self.metadata, self.survey_data = self._flatten_results()
# def _flatten_results(self):
# survey_data = self.record['survey_data']
# extensions = DataFrame(survey_data['extensions'])
def denorm(queries,iterable_of_things,default=None):
"""
'repeat', or 'stutter' to 'tableize' for downstream.
(I have no idea what a good word for this is!)
Think ``kronecker`` products, or:
``SELECT single,multiple FROM table;``
single multiple
------- ---------
id1 val1
id1 val2
Args:
queries: iterable of ``Q`` queries.
iterable_of_things: to be queried.
Returns:
list of 'stuttered' output, where if a query returns
a 'single', it gets repeated appropriately.
"""
def _denorm(queries,thing):
fields = []
results = []
for q in queries:
#print(q)
r = Ql(q,thing)
#print("-- result: ", r)
if not r:
r = [default]
if isinstance(r[0], type({})):
fields.append(sorted(r[0].keys())) # dicty answers
else:
fields.append([q]) # stringy answer
results.append(r)
#print(results)
#print(fields)
flist = list(flatten(*map(iter,fields)))
prod = itertools.product(*results)
for p in prod:
U = dict()
for (ii,thing) in enumerate(p):
#print(ii,thing)
if isinstance(thing, type({})):
U.update(thing)
else:
U[fields[ii][0]] = thing
yield U
return list(flatten(*[_denorm(queries,thing) for thing in iterable_of_things]))
def default_iget(fields,default=None,):
""" itemgetter with 'default' handling, that *always* returns lists
API CHANGES from ``operator.itemgetter``
Note: Sorry to break the iget api... (fields vs *fields)
Note: *always* returns a list... unlike itemgetter,
which can return tuples or 'singles'
"""
myiget = operator.itemgetter(*fields)
L = len(fields)
def f(thing):
try:
            if L < 2:
                # a single field: wrap the value so we always return a list
                ans = [myiget(thing)]
            else:
                ans = list(myiget(thing))
return ans
except KeyError:
# slower!
return [thing.get(x,default) for x in fields]
f.__doc__ = "itemgetter with default %r for fields %r" %(default,fields)
f.__name__ = "default_itemgetter"
return f
def flatten(*stack):
"""
helper function for flattening iterables of generators in a
sensible way.
"""
stack = list(stack)
while stack:
try: x = next(stack[0])
except StopIteration:
stack.pop(0)
continue
        if callable(getattr(x, '__next__', None)) or callable(getattr(x, 'next', None)):  # iterator/generator on py2 or py3
stack.insert(0, x)
#if isinstance(x, (GeneratorType,listerator)):
else: yield x
def _Q(filter_, thing):
""" underlying machinery for Q function recursion """
T = type(thing)
if isinstance({}, T):
for k,v in compat.iteritems(thing):
#print(k,v)
if filter_ == k:
if isinstance(v, type([])):
yield iter(v)
else:
yield v
if type(v) in (type({}),type([])):
yield Q(filter_,v)
elif isinstance([], T):
for k in thing:
#print(k)
yield Q(filter_,k)
else:
# no recursion.
pass
def Q(filter_,thing):
"""
type(filter):
- list: a flattened list of all searches (one list)
- dict: dict with vals each of which is that search
Notes:
[1] 'parent thing', with space, will do a descendent
[2] this will come back 'flattened' jQuery style
[3] returns a generator. Use ``Ql`` if you want a list.
"""
if isinstance(filter_, type([])):
return flatten(*[_Q(x,thing) for x in filter_])
elif isinstance(filter_, type({})):
d = dict.fromkeys(list(filter_.keys()))
#print(d)
for k in d:
#print(flatten(Q(k,thing)))
d[k] = Q(k,thing)
return d
else:
if " " in filter_: # i.e. "antecendent post"
parts = filter_.strip().split()
r = None
for p in parts:
r = Ql(p,thing)
thing = r
return r
else: # simple.
return flatten(_Q(filter_,thing))
def Ql(filter_,thing):
""" same as Q, but returns a list, not a generator """
res = Q(filter_,thing)
if isinstance(filter_, type({})):
for k in res:
res[k] = list(res[k])
return res
else:
return list(res)
def countit(fields,iter_of_iter,default=None):
"""
note: robust to fields not being in i_of_i, using ``default``
"""
C = Counter() # needs hashables
T = namedtuple("Thing",fields)
    get = default_iget(fields, default=default)
return Counter(
(T(*get(thing)) for thing in iter_of_iter)
)
## right now this works for one row...
def printout(queries,things,default=None, f=sys.stdout, **kwargs):
""" will print header and objects
**kwargs go to csv.DictWriter
help(csv.DictWriter) for more.
"""
results = denorm(queries,things,default=None)
fields = set(itertools.chain(*(x.keys() for x in results)))
W = csv.DictWriter(f=f,fieldnames=fields,**kwargs)
#print("---prod---")
#print(list(prod))
W.writeheader()
for r in results:
W.writerow(r)
def test_run():
print("\n>>> print(list(Q('url',ex1)))")
print(list(Q('url',ex1)))
assert list(Q('url',ex1)) == ['url1','url2','url3']
assert Ql('url',ex1) == ['url1','url2','url3']
print("\n>>> print(list(Q(['name','id'],ex1)))")
print(list(Q(['name','id'],ex1)))
assert Ql(['name','id'],ex1) == ['Gregg','hello','gbye']
print("\n>>> print(Ql('more url',ex1))")
print(Ql('more url',ex1))
print("\n>>> list(Q('extensions',ex1))")
print(list(Q('extensions',ex1)))
print("\n>>> print(Ql('extensions',ex1))")
print(Ql('extensions',ex1))
print("\n>>> printout(['name','extensions'],[ex1,], extrasaction='ignore')")
printout(['name','extensions'],[ex1,], extrasaction='ignore')
print("\n\n")
from pprint import pprint as pp
print("-- note that the extension fields are also flattened! (and N/A) -- ")
pp(denorm(['location','fxVersion','notthere','survey_data extensions'],[ex2,], default="N/A")[:2])
if __name__ == "__main__":
pass
| 30.400943 | 120 | 0.595811 |
5f561f2efd9dd256eb39c9a86c7785c7a39b9b6e | 31,666 | py | Python | selfdrive/locationd/test/ublox.py | wolterhv/openpilot | c189d15af9a613d8f109b39298c0ab3e22f39f6d | [
"MIT"
] | 16 | 2018-09-27T06:29:11.000Z | 2022-03-17T22:56:38.000Z | selfdrive/locationd/test/ublox.py | wolterhv/openpilot | c189d15af9a613d8f109b39298c0ab3e22f39f6d | [
"MIT"
] | 3 | 2018-12-29T01:34:02.000Z | 2022-01-07T19:08:20.000Z | selfdrive/locationd/test/ublox.py | wolterhv/openpilot | c189d15af9a613d8f109b39298c0ab3e22f39f6d | [
"MIT"
] | 14 | 2018-08-24T00:34:17.000Z | 2021-03-19T11:57:15.000Z | #!/usr/bin/env python3
# pylint: skip-file
'''
UBlox binary protocol handling
Copyright Andrew Tridgell, October 2012
Released under GNU GPL version 3 or later
WARNING: This code was originally intended for
ublox version 7. It has been adapted to work
with ublox version 8, but not all functions may work.
'''
import struct
import os
import time
# protocol constants
PREAMBLE1 = 0xb5
PREAMBLE2 = 0x62
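# Every UBX frame on the wire has the layout:
#   0xB5 0x62 <class> <id> <length, 2 bytes little-endian> <payload> <CK_A> <CK_B>
# where CK_A/CK_B form the Fletcher checksum computed over class..payload
# (see UBloxMessage.checksum and send_message below).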
# message classes
CLASS_NAV = 0x01
CLASS_RXM = 0x02
CLASS_INF = 0x04
CLASS_ACK = 0x05
CLASS_CFG = 0x06
CLASS_MON = 0x0A
CLASS_AID = 0x0B
CLASS_TIM = 0x0D
CLASS_ESF = 0x10
# ACK messages
MSG_ACK_NACK = 0x00
MSG_ACK_ACK = 0x01
# NAV messages
MSG_NAV_POSECEF = 0x1
MSG_NAV_POSLLH = 0x2
MSG_NAV_STATUS = 0x3
MSG_NAV_DOP = 0x4
MSG_NAV_SOL = 0x6
MSG_NAV_PVT = 0x7
MSG_NAV_POSUTM = 0x8
MSG_NAV_VELNED = 0x12
MSG_NAV_VELECEF = 0x11
MSG_NAV_TIMEGPS = 0x20
MSG_NAV_TIMEUTC = 0x21
MSG_NAV_CLOCK = 0x22
MSG_NAV_SVINFO = 0x30
MSG_NAV_AOPSTATUS = 0x60
MSG_NAV_DGPS = 0x31
MSG_NAV_DOP = 0x04
MSG_NAV_EKFSTATUS = 0x40
MSG_NAV_SBAS = 0x32
MSG_NAV_SOL = 0x06
# RXM messages
MSG_RXM_RAW = 0x15
MSG_RXM_SFRB = 0x11
MSG_RXM_SFRBX = 0x13
MSG_RXM_SVSI = 0x20
MSG_RXM_EPH = 0x31
MSG_RXM_ALM = 0x30
MSG_RXM_PMREQ = 0x41
# AID messages
MSG_AID_ALM = 0x30
MSG_AID_EPH = 0x31
MSG_AID_ALPSRV = 0x32
MSG_AID_AOP = 0x33
MSG_AID_DATA = 0x10
MSG_AID_ALP = 0x50
MSG_AID_DATA = 0x10
MSG_AID_HUI = 0x02
MSG_AID_INI = 0x01
MSG_AID_REQ = 0x00
# CFG messages
MSG_CFG_PRT = 0x00
MSG_CFG_ANT = 0x13
MSG_CFG_DAT = 0x06
MSG_CFG_EKF = 0x12
MSG_CFG_ESFGWT = 0x29
MSG_CFG_CFG = 0x09
MSG_CFG_USB = 0x1b
MSG_CFG_RATE = 0x08
MSG_CFG_SET_RATE = 0x01
MSG_CFG_NAV5 = 0x24
MSG_CFG_FXN = 0x0E
MSG_CFG_INF = 0x02
MSG_CFG_ITFM = 0x39
MSG_CFG_MSG = 0x01
MSG_CFG_NAVX5 = 0x23
MSG_CFG_NMEA = 0x17
MSG_CFG_NVS = 0x22
MSG_CFG_PM2 = 0x3B
MSG_CFG_PM = 0x32
MSG_CFG_RINV = 0x34
MSG_CFG_RST = 0x04
MSG_CFG_RXM = 0x11
MSG_CFG_SBAS = 0x16
MSG_CFG_TMODE2 = 0x3D
MSG_CFG_TMODE = 0x1D
MSG_CFG_TPS = 0x31
MSG_CFG_TP = 0x07
MSG_CFG_GNSS = 0x3E
MSG_CFG_ODO = 0x1E
# ESF messages
MSG_ESF_MEAS = 0x02
MSG_ESF_STATUS = 0x10
# INF messages
MSG_INF_DEBUG = 0x04
MSG_INF_ERROR = 0x00
MSG_INF_NOTICE = 0x02
MSG_INF_TEST = 0x03
MSG_INF_WARNING = 0x01
# MON messages
MSG_MON_SCHD = 0x01
MSG_MON_HW = 0x09
MSG_MON_HW2 = 0x0B
MSG_MON_IO = 0x02
MSG_MON_MSGPP = 0x06
MSG_MON_RXBUF = 0x07
MSG_MON_RXR = 0x21
MSG_MON_TXBUF = 0x08
MSG_MON_VER = 0x04
# TIM messages
MSG_TIM_TP = 0x01
MSG_TIM_TM2 = 0x03
MSG_TIM_SVIN = 0x04
MSG_TIM_VRFY = 0x06
# port IDs
PORT_DDC = 0
PORT_SERIAL1 = 1
PORT_SERIAL2 = 2
PORT_USB = 3
PORT_SPI = 4
# dynamic models
DYNAMIC_MODEL_PORTABLE = 0
DYNAMIC_MODEL_STATIONARY = 2
DYNAMIC_MODEL_PEDESTRIAN = 3
DYNAMIC_MODEL_AUTOMOTIVE = 4
DYNAMIC_MODEL_SEA = 5
DYNAMIC_MODEL_AIRBORNE1G = 6
DYNAMIC_MODEL_AIRBORNE2G = 7
DYNAMIC_MODEL_AIRBORNE4G = 8
#reset items
RESET_HOT = 0
RESET_WARM = 1
RESET_COLD = 0xFFFF
RESET_HW = 0
RESET_SW = 1
RESET_SW_GPS = 2
RESET_HW_GRACEFUL = 4
RESET_GPS_STOP = 8
RESET_GPS_START = 9
class UBloxError(Exception):
'''Ublox error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class UBloxAttrDict(dict):
'''allow dictionary members as attributes'''
def __init__(self):
dict.__init__(self)
def __getattr__(self, name):
try:
return self.__getitem__(name)
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__:
# allow set on normal attributes
dict.__setattr__(self, name, value)
else:
self.__setitem__(name, value)
def ArrayParse(field):
'''parse an array descriptor'''
arridx = field.find('[')
if arridx == -1:
return (field, -1)
alen = int(field[arridx + 1:-1])
fieldname = field[:arridx]
return (fieldname, alen)
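# For example, ArrayParse('VP[17]') returns ('VP', 17), while a plain (non-array)
# field such as ArrayParse('iTOW') returns ('iTOW', -1).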
class UBloxDescriptor:
'''class used to describe the layout of a UBlox message'''
def __init__(self,
name,
msg_format,
fields=None,
count_field=None,
format2=None,
fields2=None):
if fields is None:
fields = []
self.name = name
self.msg_format = msg_format
self.fields = fields
self.count_field = count_field
self.format2 = format2
self.fields2 = fields2
def unpack(self, msg):
'''unpack a UBloxMessage, creating the .fields and ._recs attributes in msg'''
msg._fields = {}
        # unpack main message blocks; a comma in msg_format separates the
        # required part of the format from the optional/repeated part
formats = self.msg_format.split(',')
buf = msg._buf[6:-2]
count = 0
msg._recs = []
fields = self.fields[:]
for fmt in formats:
size1 = struct.calcsize(fmt)
if size1 > len(buf):
raise UBloxError("%s INVALID_SIZE1=%u" % (self.name, len(buf)))
f1 = list(struct.unpack(fmt, buf[:size1]))
i = 0
while i < len(f1):
field = fields.pop(0)
(fieldname, alen) = ArrayParse(field)
if alen == -1:
msg._fields[fieldname] = f1[i]
if self.count_field == fieldname:
count = int(f1[i])
i += 1
else:
msg._fields[fieldname] = [0] * alen
for a in range(alen):
msg._fields[fieldname][a] = f1[i]
i += 1
buf = buf[size1:]
if len(buf) == 0:
break
if self.count_field == '_remaining':
count = len(buf) // struct.calcsize(self.format2)
if count == 0:
msg._unpacked = True
if len(buf) != 0:
raise UBloxError("EXTRA_BYTES=%u" % len(buf))
return
size2 = struct.calcsize(self.format2)
for c in range(count):
r = UBloxAttrDict()
if size2 > len(buf):
raise UBloxError("INVALID_SIZE=%u, " % len(buf))
f2 = list(struct.unpack(self.format2, buf[:size2]))
for i in range(len(self.fields2)):
r[self.fields2[i]] = f2[i]
buf = buf[size2:]
msg._recs.append(r)
if len(buf) != 0:
raise UBloxError("EXTRA_BYTES=%u" % len(buf))
msg._unpacked = True
def pack(self, msg, msg_class=None, msg_id=None):
'''pack a UBloxMessage from the .fields and ._recs attributes in msg'''
f1 = []
if msg_class is None:
msg_class = msg.msg_class()
if msg_id is None:
msg_id = msg.msg_id()
msg._buf = ''
fields = self.fields[:]
for f in fields:
(fieldname, alen) = ArrayParse(f)
if fieldname not in msg._fields:
break
if alen == -1:
f1.append(msg._fields[fieldname])
else:
for a in range(alen):
f1.append(msg._fields[fieldname][a])
try:
# try full length message
fmt = self.msg_format.replace(',', '')
msg._buf = struct.pack(fmt, *tuple(f1))
except Exception:
# try without optional part
fmt = self.msg_format.split(',')[0]
msg._buf = struct.pack(fmt, *tuple(f1))
length = len(msg._buf)
if msg._recs:
length += len(msg._recs) * struct.calcsize(self.format2)
header = struct.pack('<BBBBH', PREAMBLE1, PREAMBLE2, msg_class, msg_id, length)
msg._buf = header + msg._buf
for r in msg._recs:
f2 = []
for f in self.fields2:
f2.append(r[f])
msg._buf += struct.pack(self.format2, *tuple(f2))
msg._buf += struct.pack('<BB', *msg.checksum(data=msg._buf[2:]))
def format(self, msg):
'''return a formatted string for a message'''
if not msg._unpacked:
self.unpack(msg)
ret = self.name + ': '
for f in self.fields:
(fieldname, alen) = ArrayParse(f)
if fieldname not in msg._fields:
continue
v = msg._fields[fieldname]
if isinstance(v, list):
ret += '%s=[' % fieldname
for a in range(alen):
ret += '%s, ' % v[a]
ret = ret[:-2] + '], '
elif isinstance(v, str):
ret += '%s="%s", ' % (f, v.rstrip(' \0'))
else:
ret += '%s=%s, ' % (f, v)
for r in msg._recs:
ret += '[ '
for f in self.fields2:
v = r[f]
ret += '%s=%s, ' % (f, v)
ret = ret[:-2] + ' ], '
return ret[:-2]
# list of supported message types.
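# For example, the ('ACK_ACK', '<BB', ['clsID', 'msgID']) entry below means the ACK_ACK
# payload is two unsigned bytes, exposed as msg.clsID and msg.msgID after unpack().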
msg_types = {
(CLASS_ACK, MSG_ACK_ACK):
UBloxDescriptor('ACK_ACK', '<BB', ['clsID', 'msgID']),
(CLASS_ACK, MSG_ACK_NACK):
UBloxDescriptor('ACK_NACK', '<BB', ['clsID', 'msgID']),
(CLASS_CFG, MSG_CFG_USB):
UBloxDescriptor('CFG_USB', '<HHHHHH32s32s32s', [
'vendorID', 'productID', 'reserved1', 'reserved2', 'powerConsumption', 'flags',
'vendorString', 'productString', 'serialNumber'
]),
(CLASS_CFG, MSG_CFG_PRT):
UBloxDescriptor('CFG_PRT', '<BBHIIHHHH', [
'portID', 'reserved0', 'txReady', 'mode', 'baudRate', 'inProtoMask', 'outProtoMask',
'reserved4', 'reserved5'
]),
(CLASS_CFG, MSG_CFG_CFG):
UBloxDescriptor('CFG_CFG', '<III,B',
['clearMask', 'saveMask', 'loadMask', 'deviceMask']),
(CLASS_CFG, MSG_CFG_RXM):
UBloxDescriptor('CFG_RXM', '<BB',
['reserved1', 'lpMode']),
(CLASS_CFG, MSG_CFG_RST):
UBloxDescriptor('CFG_RST', '<HBB', ['navBbrMask ', 'resetMode', 'reserved1']),
(CLASS_CFG, MSG_CFG_SBAS):
UBloxDescriptor('CFG_SBAS', '<BBBBI',
['mode', 'usage', 'maxSBAS', 'scanmode2', 'scanmode1']),
(CLASS_CFG, MSG_CFG_GNSS):
UBloxDescriptor('CFG_GNSS', '<BBBB',
['msgVer', 'numTrkChHw', 'numTrkChUse',
'numConfigBlocks'], 'numConfigBlocks', '<BBBBI',
['gnssId', 'resTrkCh', 'maxTrkCh', 'reserved1', 'flags']),
(CLASS_CFG, MSG_CFG_RATE):
UBloxDescriptor('CFG_RATE', '<HHH', ['measRate', 'navRate', 'timeRef']),
(CLASS_CFG, MSG_CFG_MSG):
UBloxDescriptor('CFG_MSG', '<BB6B', ['msgClass', 'msgId', 'rates[6]']),
(CLASS_NAV, MSG_NAV_POSLLH):
UBloxDescriptor('NAV_POSLLH', '<IiiiiII',
['iTOW', 'Longitude', 'Latitude', 'height', 'hMSL', 'hAcc', 'vAcc']),
(CLASS_NAV, MSG_NAV_VELNED):
UBloxDescriptor('NAV_VELNED', '<IiiiIIiII', [
'iTOW', 'velN', 'velE', 'velD', 'speed', 'gSpeed', 'heading', 'sAcc', 'cAcc'
]),
(CLASS_NAV, MSG_NAV_DOP):
UBloxDescriptor('NAV_DOP', '<IHHHHHHH',
['iTOW', 'gDOP', 'pDOP', 'tDOP', 'vDOP', 'hDOP', 'nDOP', 'eDOP']),
(CLASS_NAV, MSG_NAV_STATUS):
UBloxDescriptor('NAV_STATUS', '<IBBBBII',
['iTOW', 'gpsFix', 'flags', 'fixStat', 'flags2', 'ttff', 'msss']),
(CLASS_NAV, MSG_NAV_SOL):
UBloxDescriptor('NAV_SOL', '<IihBBiiiIiiiIHBBI', [
'iTOW', 'fTOW', 'week', 'gpsFix', 'flags', 'ecefX', 'ecefY', 'ecefZ', 'pAcc',
'ecefVX', 'ecefVY', 'ecefVZ', 'sAcc', 'pDOP', 'reserved1', 'numSV', 'reserved2'
]),
(CLASS_NAV, MSG_NAV_PVT):
UBloxDescriptor('NAV_PVT', '<IHBBBBBBIiBBBBiiiiIIiiiiiIIH6BihH', [
'iTOW', 'year', 'month', 'day', 'hour', 'min', 'sec', 'valid', 'tAcc', 'nano',
'fixType', 'flags', 'flags2', 'numSV', 'lon', 'lat', 'height', 'hMSL', 'hAcc', 'vAcc',
'velN', 'velE', 'velD', 'gSpeed', 'headMot', 'sAcc', 'headAcc', 'pDOP',
'reserverd1[6]', 'headVeh', 'magDec', 'magAcc'
]),
(CLASS_NAV, MSG_NAV_POSUTM):
UBloxDescriptor('NAV_POSUTM', '<Iiiibb',
['iTOW', 'East', 'North', 'Alt', 'Zone', 'Hem']),
(CLASS_NAV, MSG_NAV_SBAS):
UBloxDescriptor('NAV_SBAS', '<IBBbBBBBB', [
'iTOW', 'geo', 'mode', 'sys', 'service', 'cnt', 'reserved01', 'reserved02',
'reserved03'
], 'cnt', 'BBBBBBhHh', [
'svid', 'flags', 'udre', 'svSys', 'svService', 'reserved1', 'prc', 'reserved2', 'ic'
]),
(CLASS_NAV, MSG_NAV_POSECEF):
UBloxDescriptor('NAV_POSECEF', '<IiiiI', ['iTOW', 'ecefX', 'ecefY', 'ecefZ', 'pAcc']),
(CLASS_NAV, MSG_NAV_VELECEF):
UBloxDescriptor('NAV_VELECEF', '<IiiiI', ['iTOW', 'ecefVX', 'ecefVY', 'ecefVZ',
'sAcc']),
(CLASS_NAV, MSG_NAV_TIMEGPS):
UBloxDescriptor('NAV_TIMEGPS', '<IihbBI',
['iTOW', 'fTOW', 'week', 'leapS', 'valid', 'tAcc']),
(CLASS_NAV, MSG_NAV_TIMEUTC):
UBloxDescriptor('NAV_TIMEUTC', '<IIiHBBBBBB', [
'iTOW', 'tAcc', 'nano', 'year', 'month', 'day', 'hour', 'min', 'sec', 'valid'
]),
(CLASS_NAV, MSG_NAV_CLOCK):
UBloxDescriptor('NAV_CLOCK', '<IiiII', ['iTOW', 'clkB', 'clkD', 'tAcc', 'fAcc']),
(CLASS_NAV, MSG_NAV_DGPS):
UBloxDescriptor('NAV_DGPS', '<IihhBBH',
['iTOW', 'age', 'baseId', 'baseHealth', 'numCh', 'status', 'reserved1'],
'numCh', '<BBHff', ['svid', 'flags', 'ageC', 'prc', 'prrc']),
(CLASS_NAV, MSG_NAV_SVINFO):
UBloxDescriptor('NAV_SVINFO', '<IBBH', ['iTOW', 'numCh', 'globalFlags',
'reserved2'], 'numCh', '<BBBBBbhi',
['chn', 'svid', 'flags', 'quality', 'cno', 'elev', 'azim', 'prRes']),
(CLASS_RXM, MSG_RXM_SVSI):
UBloxDescriptor('RXM_SVSI', '<IhBB', ['iTOW', 'week', 'numVis', 'numSV'], 'numSV',
'<BBhbB', ['svid', 'svFlag', 'azim', 'elev', 'age']),
(CLASS_RXM, MSG_RXM_EPH):
UBloxDescriptor('RXM_EPH', '<II , 8I 8I 8I',
['svid', 'how', 'sf1d[8]', 'sf2d[8]', 'sf3d[8]']),
(CLASS_AID, MSG_AID_EPH):
UBloxDescriptor('AID_EPH', '<II , 8I 8I 8I',
['svid', 'how', 'sf1d[8]', 'sf2d[8]', 'sf3d[8]']),
(CLASS_AID, MSG_AID_HUI):
UBloxDescriptor('AID_HUI', '<Iddi 6h 8f I',
['health', 'utcA0', 'utcA1', 'utcTOW', 'utcWNT', 'utcLS', 'utcWNF',
'utcDN', 'utcLSF', 'utcSpare', 'klobA0', 'klobA1', 'klobA2', 'klobA3',
'klobB0', 'klobB1', 'klobB2', 'klobB3', 'flags']),
(CLASS_AID, MSG_AID_AOP):
UBloxDescriptor('AID_AOP', '<B47B , 48B 48B 48B',
['svid', 'data[47]', 'optional0[48]', 'optional1[48]',
'optional1[48]']),
(CLASS_RXM, MSG_RXM_RAW):
UBloxDescriptor('RXM_RAW', '<dHbBB3B', [
'rcvTow', 'week', 'leapS', 'numMeas', 'recStat', 'reserved1[3]'
], 'numMeas', '<ddfBBBBHBBBBBB', [
'prMes', 'cpMes', 'doMes', 'gnssId', 'svId', 'sigId', 'freqId', 'locktime', 'cno',
'prStdev', 'cpStdev', 'doStdev', 'trkStat', 'reserved3'
]),
(CLASS_RXM, MSG_RXM_SFRB):
UBloxDescriptor('RXM_SFRB', '<BB10I', ['chn', 'svid', 'dwrd[10]']),
(CLASS_RXM, MSG_RXM_SFRBX):
UBloxDescriptor('RXM_SFRBX', '<8B', ['gnssId', 'svid', 'reserved1', 'freqId', 'numWords',
'reserved2', 'version', 'reserved3'], 'numWords', 'I', ['dwrd']),
(CLASS_AID, MSG_AID_ALM):
UBloxDescriptor('AID_ALM', '<II', '_remaining', 'I', ['dwrd']),
(CLASS_RXM, MSG_RXM_ALM):
UBloxDescriptor('RXM_ALM', '<II , 8I', ['svid', 'week', 'dwrd[8]']),
(CLASS_CFG, MSG_CFG_ANT):
UBloxDescriptor('CFG_ANT', '<HH', ['flags', 'pins']),
(CLASS_CFG, MSG_CFG_ODO):
UBloxDescriptor('CFG_ODO', '<B3BBB6BBB2BBB2B', [
'version', 'reserved1[3]', 'flags', 'odoCfg', 'reserverd2[6]', 'cogMaxSpeed',
'cogMaxPosAcc', 'reserved3[2]', 'velLpGain', 'cogLpGain', 'reserved[2]'
]),
(CLASS_CFG, MSG_CFG_NAV5):
UBloxDescriptor('CFG_NAV5', '<HBBiIbBHHHHBBIII', [
'mask', 'dynModel', 'fixMode', 'fixedAlt', 'fixedAltVar', 'minElev', 'drLimit',
'pDop', 'tDop', 'pAcc', 'tAcc', 'staticHoldThresh', 'dgpsTimeOut', 'reserved2',
'reserved3', 'reserved4'
]),
(CLASS_CFG, MSG_CFG_NAVX5):
UBloxDescriptor('CFG_NAVX5', '<HHIBBBBBBBBBBHIBBBBBBHII', [
'version', 'mask1', 'reserved0', 'reserved1', 'reserved2', 'minSVs', 'maxSVs',
'minCNO', 'reserved5', 'iniFix3D', 'reserved6', 'reserved7', 'reserved8',
'wknRollover', 'reserved9', 'reserved10', 'reserved11', 'usePPP', 'useAOP',
'reserved12', 'reserved13', 'aopOrbMaxErr', 'reserved3', 'reserved4'
]),
(CLASS_MON, MSG_MON_HW):
UBloxDescriptor('MON_HW', '<IIIIHHBBBBIB17BHIII', [
'pinSel', 'pinBank', 'pinDir', 'pinVal', 'noisePerMS', 'agcCnt', 'aStatus', 'aPower',
'flags', 'reserved1', 'usedMask', 'VP[17]', 'jamInd', 'reserved3', 'pinInq', 'pullH',
'pullL'
]),
(CLASS_MON, MSG_MON_HW2):
UBloxDescriptor('MON_HW2', '<bBbBB3BI8BI4B', [
'ofsI', 'magI', 'ofsQ', 'magQ', 'cfgSource', 'reserved1[3]', 'lowLevCfg',
'reserved2[8]', 'postStatus', 'reserved3[4]'
]),
(CLASS_MON, MSG_MON_SCHD):
UBloxDescriptor('MON_SCHD', '<IIIIHHHBB', [
'tskRun', 'tskSchd', 'tskOvrr', 'tskReg', 'stack', 'stackSize', 'CPUIdle', 'flySly',
'ptlSly'
]),
(CLASS_MON, MSG_MON_VER):
UBloxDescriptor('MON_VER', '<30s10s,30s', ['swVersion', 'hwVersion', 'romVersion'],
'_remaining', '30s', ['extension']),
(CLASS_TIM, MSG_TIM_TP):
UBloxDescriptor('TIM_TP', '<IIiHBB',
['towMS', 'towSubMS', 'qErr', 'week', 'flags', 'reserved1']),
(CLASS_TIM, MSG_TIM_TM2):
UBloxDescriptor('TIM_TM2', '<BBHHHIIIII', [
'ch', 'flags', 'count', 'wnR', 'wnF', 'towMsR', 'towSubMsR', 'towMsF', 'towSubMsF',
'accEst'
]),
(CLASS_TIM, MSG_TIM_SVIN):
UBloxDescriptor('TIM_SVIN', '<IiiiIIBBH', [
'dur', 'meanX', 'meanY', 'meanZ', 'meanV', 'obs', 'valid', 'active', 'reserved1'
]),
(CLASS_INF, MSG_INF_ERROR):
UBloxDescriptor('INF_ERR', '<18s', ['str']),
(CLASS_INF, MSG_INF_DEBUG):
UBloxDescriptor('INF_DEBUG', '<18s', ['str'])
}
class UBloxMessage:
'''UBlox message class - holds a UBX binary message'''
def __init__(self):
self._buf = b""
self._fields = {}
self._recs = []
self._unpacked = False
self.debug_level = 1
def __str__(self):
'''format a message as a string'''
if not self.valid():
return 'UBloxMessage(INVALID)'
type = self.msg_type()
if type in msg_types:
return msg_types[type].format(self)
return 'UBloxMessage(UNKNOWN %s, %u)' % (str(type), self.msg_length())
def as_dict(self):
'''format a message as a string'''
if not self.valid():
return 'UBloxMessage(INVALID)'
type = self.msg_type()
if type in msg_types:
return msg_types[type].format(self)
return 'UBloxMessage(UNKNOWN %s, %u)' % (str(type), self.msg_length())
def __getattr__(self, name):
'''allow access to message fields'''
try:
return self._fields[name]
except KeyError:
if name == 'recs':
return self._recs
raise AttributeError(name)
def __setattr__(self, name, value):
'''allow access to message fields'''
if name.startswith('_'):
self.__dict__[name] = value
else:
self._fields[name] = value
def have_field(self, name):
'''return True if a message contains the given field'''
return name in self._fields
def debug(self, level, msg):
'''write a debug message'''
if self.debug_level >= level:
print(msg)
def unpack(self):
'''unpack a message'''
if not self.valid():
raise UBloxError('INVALID MESSAGE')
type = self.msg_type()
if type not in msg_types:
raise UBloxError('Unknown message %s length=%u' % (str(type), len(self._buf)))
msg_types[type].unpack(self)
return self._fields, self._recs
def pack(self):
'''pack a message'''
if not self.valid():
raise UBloxError('INVALID MESSAGE')
type = self.msg_type()
if type not in msg_types:
raise UBloxError('Unknown message %s' % str(type))
msg_types[type].pack(self)
def name(self):
'''return the short string name for a message'''
if not self.valid():
raise UBloxError('INVALID MESSAGE')
type = self.msg_type()
if type not in msg_types:
raise UBloxError('Unknown message %s length=%u' % (str(type), len(self._buf)))
return msg_types[type].name
def msg_class(self):
'''return the message class'''
return self._buf[2]
def msg_id(self):
'''return the message id within the class'''
return self._buf[3]
def msg_type(self):
'''return the message type tuple (class, id)'''
return (self.msg_class(), self.msg_id())
def msg_length(self):
'''return the payload length'''
(payload_length, ) = struct.unpack('<H', self._buf[4:6])
return payload_length
def valid_so_far(self):
'''check if the message is valid so far'''
if len(self._buf) > 0 and self._buf[0] != PREAMBLE1:
return False
if len(self._buf) > 1 and self._buf[1] != PREAMBLE2:
self.debug(1, "bad pre2")
return False
if self.needed_bytes() == 0 and not self.valid():
if len(self._buf) > 8:
self.debug(1, "bad checksum len=%u needed=%u" % (len(self._buf),
self.needed_bytes()))
else:
self.debug(1, "bad len len=%u needed=%u" % (len(self._buf), self.needed_bytes()))
return False
return True
def add(self, bytes):
'''add some bytes to a message'''
self._buf += bytes
while not self.valid_so_far() and len(self._buf) > 0:
'''handle corrupted streams'''
self._buf = self._buf[1:]
if self.needed_bytes() < 0:
self._buf = ""
def checksum(self, data=None):
'''return a checksum tuple for a message'''
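        # 8-bit Fletcher checksum over class, id, length and payload (self._buf[2:-2]),
        # as the UBX protocol specifies.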
if data is None:
data = self._buf[2:-2]
#cs = 0
ck_a = 0
ck_b = 0
for i in data:
ck_a = (ck_a + i) & 0xFF
ck_b = (ck_b + ck_a) & 0xFF
return (ck_a, ck_b)
def valid_checksum(self):
'''check if the checksum is OK'''
(ck_a, ck_b) = self.checksum()
#d = self._buf[2:-2]
(ck_a2, ck_b2) = struct.unpack('<BB', self._buf[-2:])
return ck_a == ck_a2 and ck_b == ck_b2
def needed_bytes(self):
'''return number of bytes still needed'''
if len(self._buf) < 6:
return 8 - len(self._buf)
return self.msg_length() + 8 - len(self._buf)
def valid(self):
'''check if a message is valid'''
return len(self._buf) >= 8 and self.needed_bytes() == 0 and self.valid_checksum()
class UBlox:
'''main UBlox control class.
port can be a file (for reading only) or a serial device
'''
def __init__(self, port, baudrate=115200, timeout=0, panda=False, grey=False):
self.serial_device = port
self.baudrate = baudrate
self.use_sendrecv = False
self.read_only = False
self.debug_level = 0
if panda:
from panda import Panda, PandaSerial
self.panda = Panda()
# resetting U-Blox module
self.panda.set_esp_power(0)
time.sleep(0.1)
self.panda.set_esp_power(1)
time.sleep(0.5)
# can't set above 9600 now...
self.baudrate = 9600
self.dev = PandaSerial(self.panda, 1, self.baudrate)
self.baudrate = 460800
print("upping baud:", self.baudrate)
self.send_nmea("$PUBX,41,1,0007,0003,%u,0" % self.baudrate)
time.sleep(0.1)
self.dev = PandaSerial(self.panda, 1, self.baudrate)
elif grey:
import cereal.messaging as messaging
class BoarddSerial():
def __init__(self):
self.ubloxRaw = messaging.sub_sock('ubloxRaw')
self.buf = b""
def read(self, n):
for msg in messaging.drain_sock(self.ubloxRaw, len(self.buf) < n):
self.buf += msg.ubloxRaw
ret = self.buf[:n]
self.buf = self.buf[n:]
return ret
def write(self, dat):
pass
self.dev = BoarddSerial()
else:
if self.serial_device.startswith("tcp:"):
import socket
a = self.serial_device.split(':')
destination_addr = (a[1], int(a[2]))
self.dev = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dev.connect(destination_addr)
self.dev.setblocking(1)
self.dev.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.use_sendrecv = True
elif os.path.isfile(self.serial_device):
self.read_only = True
self.dev = open(self.serial_device, mode='rb')
else:
import serial
self.dev = serial.Serial(
self.serial_device,
baudrate=self.baudrate,
dsrdtr=False,
rtscts=False,
xonxoff=False,
timeout=timeout)
self.logfile = None
self.log = None
self.preferred_dynamic_model = None
self.preferred_usePPP = None
self.preferred_dgps_timeout = None
def close(self):
'''close the device'''
self.dev.close()
self.dev = None
def set_debug(self, debug_level):
'''set debug level'''
self.debug_level = debug_level
def debug(self, level, msg):
'''write a debug message'''
if self.debug_level >= level:
print(msg)
def set_logfile(self, logfile, append=False):
'''setup logging to a file'''
if self.log is not None:
self.log.close()
self.log = None
self.logfile = logfile
if self.logfile is not None:
if append:
mode = 'ab'
else:
mode = 'wb'
self.log = open(self.logfile, mode=mode)
def set_preferred_dynamic_model(self, model):
'''set the preferred dynamic model for receiver'''
self.preferred_dynamic_model = model
if model is not None:
self.configure_poll(CLASS_CFG, MSG_CFG_NAV5)
def set_preferred_dgps_timeout(self, timeout):
'''set the preferred DGPS timeout for receiver'''
self.preferred_dgps_timeout = timeout
if timeout is not None:
self.configure_poll(CLASS_CFG, MSG_CFG_NAV5)
def set_preferred_usePPP(self, usePPP):
'''set the preferred usePPP setting for the receiver'''
if usePPP is None:
self.preferred_usePPP = None
return
self.preferred_usePPP = int(usePPP)
self.configure_poll(CLASS_CFG, MSG_CFG_NAVX5)
def nmea_checksum(self, msg):
d = msg[1:]
cs = 0
for i in d:
cs ^= ord(i)
return cs
def write(self, buf):
'''write some bytes'''
if not self.read_only:
if self.use_sendrecv:
return self.dev.send(buf)
if type(buf) == str:
return self.dev.write(str.encode(buf))
else:
return self.dev.write(buf)
def read(self, n):
'''read some bytes'''
if self.use_sendrecv:
import socket
try:
return self.dev.recv(n)
except socket.error:
return ''
return self.dev.read(n)
def send_nmea(self, msg):
if not self.read_only:
s = msg + "*%02X" % self.nmea_checksum(msg) + "\r\n"
self.write(s)
def set_binary(self):
'''put a UBlox into binary mode using a NMEA string'''
if not self.read_only:
print("try set binary at %u" % self.baudrate)
self.send_nmea("$PUBX,41,0,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,1,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,2,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,3,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,4,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,5,0007,0001,%u,0" % self.baudrate)
def disable_nmea(self):
''' stop sending all types of nmea messages '''
self.send_nmea("$PUBX,40,GSV,1,1,1,1,1,0")
self.send_nmea("$PUBX,40,GGA,0,0,0,0,0,0")
self.send_nmea("$PUBX,40,GSA,0,0,0,0,0,0")
self.send_nmea("$PUBX,40,VTG,0,0,0,0,0,0")
self.send_nmea("$PUBX,40,TXT,0,0,0,0,0,0")
self.send_nmea("$PUBX,40,RMC,0,0,0,0,0,0")
def seek_percent(self, pct):
'''seek to the given percentage of a file'''
self.dev.seek(0, 2)
filesize = self.dev.tell()
        self.dev.seek(int(pct * 0.01 * filesize))  # seek() needs an integer offset
def special_handling(self, msg):
'''handle automatic configuration changes'''
if msg.name() == 'CFG_NAV5':
msg.unpack()
sendit = False
pollit = False
if self.preferred_dynamic_model is not None and msg.dynModel != self.preferred_dynamic_model:
msg.dynModel = self.preferred_dynamic_model
sendit = True
pollit = True
if self.preferred_dgps_timeout is not None and msg.dgpsTimeOut != self.preferred_dgps_timeout:
msg.dgpsTimeOut = self.preferred_dgps_timeout
self.debug(2, "Setting dgpsTimeOut=%u" % msg.dgpsTimeOut)
sendit = True
# we don't re-poll for this one, as some receivers refuse to set it
if sendit:
msg.pack()
self.send(msg)
if pollit:
self.configure_poll(CLASS_CFG, MSG_CFG_NAV5)
if msg.name() == 'CFG_NAVX5' and self.preferred_usePPP is not None:
msg.unpack()
if msg.usePPP != self.preferred_usePPP:
msg.usePPP = self.preferred_usePPP
msg.mask = 1 << 13
msg.pack()
self.send(msg)
self.configure_poll(CLASS_CFG, MSG_CFG_NAVX5)
def receive_message(self, ignore_eof=False):
'''blocking receive of one ublox message'''
msg = UBloxMessage()
while True:
n = msg.needed_bytes()
b = self.read(n)
if not b:
if ignore_eof:
time.sleep(0.01)
continue
if len(msg._buf) > 0:
self.debug(1, "dropping %d bytes" % len(msg._buf))
return None
msg.add(b)
if self.log is not None:
self.log.write(b)
self.log.flush()
if msg.valid():
self.special_handling(msg)
return msg
def receive_message_noerror(self, ignore_eof=False):
'''blocking receive of one ublox message, ignoring errors'''
try:
return self.receive_message(ignore_eof=ignore_eof)
except UBloxError as e:
print(e)
return None
except OSError as e:
# Occasionally we get hit with 'resource temporarily unavailable'
# messages here on the serial device, catch them too.
print(e)
return None
def send(self, msg):
'''send a preformatted ublox message'''
if not msg.valid():
self.debug(1, "invalid send")
return
if not self.read_only:
self.write(msg._buf)
def send_message(self, msg_class, msg_id, payload):
'''send a ublox message with class, id and payload'''
msg = UBloxMessage()
msg._buf = struct.pack('<BBBBH', 0xb5, 0x62, msg_class, msg_id, len(payload))
msg._buf += payload
(ck_a, ck_b) = msg.checksum(msg._buf[2:])
msg._buf += struct.pack('<BB', ck_a, ck_b)
self.send(msg)
def configure_solution_rate(self, rate_ms=200, nav_rate=1, timeref=0):
'''configure the solution rate in milliseconds'''
payload = struct.pack('<HHH', rate_ms, nav_rate, timeref)
self.send_message(CLASS_CFG, MSG_CFG_RATE, payload)
def configure_message_rate(self, msg_class, msg_id, rate):
'''configure the message rate for a given message'''
payload = struct.pack('<BBB', msg_class, msg_id, rate)
self.send_message(CLASS_CFG, MSG_CFG_SET_RATE, payload)
def configure_port(self, port=1, inMask=3, outMask=3, mode=2240, baudrate=None):
'''configure a IO port'''
if baudrate is None:
baudrate = self.baudrate
payload = struct.pack('<BBH8BHHBBBB', port, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, inMask,
outMask, 0, 0, 0, 0)
self.send_message(CLASS_CFG, MSG_CFG_PRT, payload)
def configure_loadsave(self, clearMask=0, saveMask=0, loadMask=0, deviceMask=0):
'''configure configuration load/save'''
payload = struct.pack('<IIIB', clearMask, saveMask, loadMask, deviceMask)
self.send_message(CLASS_CFG, MSG_CFG_CFG, payload)
def configure_poll(self, msg_class, msg_id, payload=b''):
'''poll a configuration message'''
self.send_message(msg_class, msg_id, payload)
def configure_poll_port(self, portID=None):
'''poll a port configuration'''
if portID is None:
self.configure_poll(CLASS_CFG, MSG_CFG_PRT)
else:
self.configure_poll(CLASS_CFG, MSG_CFG_PRT, struct.pack('<B', portID))
def configure_min_max_sats(self, min_sats=4, max_sats=32):
'''Set the minimum/maximum number of satellites for a solution in the NAVX5 message'''
payload = struct.pack('<HHIBBBBBBBBBBHIBBBBBBHII', 0, 4, 0, 0, 0, min_sats, max_sats,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
self.send_message(CLASS_CFG, MSG_CFG_NAVX5, payload)
def module_reset(self, set, mode):
''' Reset the module for hot/warm/cold start'''
payload = struct.pack('<HBB', set, mode, 0)
self.send_message(CLASS_CFG, MSG_CFG_RST, payload)
| 31.571286 | 100 | 0.61975 |
5c6ffc7bca5fd691a011156fe4c8deea600920ed | 30,283 | py | Python | src/python/pants/backend/go/util_rules/third_party_pkg_test.py | wimax-grapl/pants | 0aabd417a772ea4e39999c4415c67db40de679a4 | [
"Apache-2.0"
] | 1,806 | 2015-01-05T07:31:00.000Z | 2022-03-31T11:35:41.000Z | src/python/pants/backend/go/util_rules/third_party_pkg_test.py | wimax-grapl/pants | 0aabd417a772ea4e39999c4415c67db40de679a4 | [
"Apache-2.0"
] | 9,565 | 2015-01-02T19:01:59.000Z | 2022-03-31T23:25:16.000Z | src/python/pants/backend/go/util_rules/third_party_pkg_test.py | riisi/pants | b33327389fab67c47b919710ea32f20ca284b1a6 | [
"Apache-2.0"
] | 443 | 2015-01-06T20:17:57.000Z | 2022-03-31T05:28:17.000Z | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from textwrap import dedent
import pytest
from pants.backend.go.target_types import GoModTarget
from pants.backend.go.util_rules import sdk, third_party_pkg
from pants.backend.go.util_rules.third_party_pkg import (
AllThirdPartyPackages,
AllThirdPartyPackagesRequest,
ThirdPartyPkgInfo,
ThirdPartyPkgInfoRequest,
)
from pants.engine.fs import EMPTY_DIGEST, Digest, Snapshot
from pants.engine.process import ProcessExecutionFailure
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner, engine_error
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*sdk.rules(),
*third_party_pkg.rules(),
QueryRule(AllThirdPartyPackages, [AllThirdPartyPackagesRequest]),
QueryRule(ThirdPartyPkgInfo, [ThirdPartyPkgInfoRequest]),
],
target_types=[GoModTarget],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
GO_MOD = dedent(
"""\
module example.com/third-party-module
go 1.16
require github.com/google/uuid v1.3.0
require (
rsc.io/quote v1.5.2
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
rsc.io/sampler v1.3.0 // indirect
)
"""
)
GO_SUM = dedent(
"""\
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
"""
)
def set_up_go_mod(rule_runner: RuleRunner, go_mod: str, go_sum: str) -> Digest:
return rule_runner.make_snapshot({"go.mod": go_mod, "go.sum": go_sum}).digest
def test_download_and_analyze_all_packages(rule_runner: RuleRunner) -> None:
input_digest = rule_runner.make_snapshot({"go.mod": GO_MOD, "go.sum": GO_SUM}).digest
all_packages = rule_runner.request(
AllThirdPartyPackages, [AllThirdPartyPackagesRequest(input_digest, "go.mod")]
)
assert set(all_packages.import_paths_to_pkg_info.keys()) == {
"golang.org/x/text/encoding/japanese",
"golang.org/x/text/message/catalog",
"golang.org/x/text/internal/testtext",
"golang.org/x/text/encoding/ianaindex",
"golang.org/x/text/cmd/gotext",
"golang.org/x/text/width",
"golang.org/x/text/internal/format",
"rsc.io/sampler",
"golang.org/x/text/internal/tag",
"golang.org/x/text/unicode/norm",
"golang.org/x/text/number",
"golang.org/x/text/transform",
"golang.org/x/text/internal",
"golang.org/x/text/internal/utf8internal",
"golang.org/x/text/language/display",
"golang.org/x/text/internal/stringset",
"golang.org/x/text/encoding/korean",
"golang.org/x/text/internal/triegen",
"golang.org/x/text/secure/bidirule",
"golang.org/x/text/secure/precis",
"golang.org/x/text/language",
"golang.org/x/text/encoding/unicode/utf32",
"golang.org/x/text/internal/colltab",
"golang.org/x/text/unicode/rangetable",
"golang.org/x/text/encoding/htmlindex",
"golang.org/x/text/internal/export/idna",
"golang.org/x/text/encoding/charmap",
"golang.org/x/text/unicode/cldr",
"golang.org/x/text/secure",
"golang.org/x/text/internal/ucd",
"golang.org/x/text/feature/plural",
"golang.org/x/text/unicode",
"golang.org/x/text/encoding/traditionalchinese",
"golang.org/x/text/runes",
"golang.org/x/text/internal/catmsg",
"rsc.io/quote/buggy",
"golang.org/x/text/encoding/simplifiedchinese",
"golang.org/x/text/cases",
"golang.org/x/text/encoding/internal",
"github.com/google/uuid",
"golang.org/x/text/encoding/internal/enctest",
"golang.org/x/text/collate/build",
"golang.org/x/text",
"golang.org/x/text/unicode/bidi",
"golang.org/x/text/search",
"golang.org/x/text/unicode/runenames",
"golang.org/x/text/message",
"golang.org/x/text/encoding",
"golang.org/x/text/encoding/unicode",
"rsc.io/quote",
"golang.org/x/text/currency",
"golang.org/x/text/internal/number",
"golang.org/x/text/collate/tools/colcmp",
"golang.org/x/text/encoding/internal/identifier",
"golang.org/x/text/collate",
"golang.org/x/text/internal/gen",
}
def assert_pkg_info(
import_path: str,
dir_path: str,
imports: tuple[str, ...],
go_files: tuple[str, ...],
extra_files: tuple[str, ...],
minimum_go_version: str | None,
) -> None:
assert import_path in all_packages.import_paths_to_pkg_info
pkg_info = all_packages.import_paths_to_pkg_info[import_path]
assert pkg_info.import_path == import_path
assert pkg_info.dir_path == dir_path
assert pkg_info.imports == imports
assert pkg_info.go_files == go_files
assert not pkg_info.s_files
snapshot = rule_runner.request(Snapshot, [pkg_info.digest])
assert set(snapshot.files) == {
os.path.join(dir_path, file_name) for file_name in (*go_files, *extra_files)
}
assert pkg_info.minimum_go_version == minimum_go_version
assert_pkg_info(
import_path="github.com/google/uuid",
dir_path="github.com/google/uuid@v1.3.0",
imports=(
"bytes",
"crypto/md5",
"crypto/rand",
"crypto/sha1",
"database/sql/driver",
"encoding/binary",
"encoding/hex",
"encoding/json",
"errors",
"fmt",
"hash",
"io",
"net",
"os",
"strings",
"sync",
"time",
),
go_files=(
"dce.go",
"doc.go",
"hash.go",
"marshal.go",
"node.go",
"node_net.go",
"null.go",
"sql.go",
"time.go",
"util.go",
"uuid.go",
"version1.go",
"version4.go",
),
extra_files=(
".travis.yml",
"CONTRIBUTING.md",
"CONTRIBUTORS",
"LICENSE",
"README.md",
"go.mod",
"json_test.go",
"node_js.go",
"null_test.go",
"seq_test.go",
"sql_test.go",
"uuid_test.go",
),
minimum_go_version=None,
)
assert_pkg_info(
import_path="golang.org/x/text/unicode/bidi",
dir_path="golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c/unicode/bidi",
imports=("container/list", "fmt", "log", "sort", "unicode/utf8"),
go_files=("bidi.go", "bracket.go", "core.go", "prop.go", "tables.go", "trieval.go"),
extra_files=(
"core_test.go",
"gen.go",
"gen_ranges.go",
"gen_trieval.go",
"ranges_test.go",
"tables_test.go",
),
minimum_go_version=None,
)
def test_invalid_go_sum(rule_runner: RuleRunner) -> None:
digest = set_up_go_mod(
rule_runner,
dedent(
"""\
module example.com/third-party-module
go 1.17
require github.com/google/uuid v1.3.0
"""
),
dedent(
"""\
github.com/google/uuid v1.3.0 h1:00000gmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:00000e4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
"""
),
)
with engine_error(ProcessExecutionFailure, contains="SECURITY ERROR"):
rule_runner.request(AllThirdPartyPackages, [AllThirdPartyPackagesRequest(digest, "go.mod")])
def test_missing_go_sum(rule_runner: RuleRunner) -> None:
digest = set_up_go_mod(
rule_runner,
dedent(
"""\
module example.com/third-party-module
go 1.17
require github.com/google/uuid v1.3.0
"""
),
# `go.sum` is for a different module.
dedent(
"""\
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
"""
),
)
with engine_error(contains="github.com/google/uuid@v1.3.0: missing go.sum entry"):
rule_runner.request(AllThirdPartyPackages, [AllThirdPartyPackagesRequest(digest, "go.mod")])
def test_stale_go_mod(rule_runner: RuleRunner) -> None:
digest = set_up_go_mod(
rule_runner,
# Go 1.17+ expects indirect dependencies to be included in the `go.mod`, i.e.
# `golang.org/x/xerrors `.
dedent(
"""\
module example.com/third-party-module
go 1.17
require github.com/google/go-cmp v0.5.6
"""
),
dedent(
"""\
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
"""
),
)
with engine_error(ProcessExecutionFailure, contains="updates to go.mod needed"):
rule_runner.request(AllThirdPartyPackages, [AllThirdPartyPackagesRequest(digest, "go.mod")])
def test_pkg_missing(rule_runner: RuleRunner) -> None:
digest = set_up_go_mod(rule_runner, GO_MOD, GO_SUM)
with engine_error(
AssertionError, contains="The package `another_project.org/foo` was not downloaded"
):
rule_runner.request(
ThirdPartyPkgInfo,
[ThirdPartyPkgInfoRequest("another_project.org/foo", digest, "go.mod")],
)
def test_module_with_no_packages(rule_runner) -> None:
digest = set_up_go_mod(
rule_runner,
dedent(
"""\
module example.com/third-party-module
go 1.17
require github.com/Azure/go-autorest v13.3.2+incompatible
"""
),
dedent(
"""\
github.com/Azure/go-autorest v13.3.2+incompatible h1:VxzPyuhtnlBOzc4IWCZHqpyH2d+QMLQEuy3wREyY4oc=
github.com/Azure/go-autorest v13.3.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
"""
),
)
all_packages = rule_runner.request(
AllThirdPartyPackages, [AllThirdPartyPackagesRequest(digest, "go.mod")]
)
assert not all_packages.import_paths_to_pkg_info
def test_unsupported_sources(rule_runner: RuleRunner) -> None:
# `golang.org/x/mobile/bind/objc` uses `.h` files on both Linux and macOS.
digest = set_up_go_mod(
rule_runner,
dedent(
"""\
module example.com/unsupported
go 1.16
require golang.org/x/mobile v0.0.0-20210924032853-1c027f395ef7
"""
),
dedent(
"""\
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56 h1:estk1glOnSVeJ9tdEZZc5mAMDZk5lNJNyJ6DvrBkTEU=
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20210924032853-1c027f395ef7 h1:CyFUjc175y/mbMjxe+WdqI72jguLyjQChKCDe9mfTvg=
golang.org/x/mobile v0.0.0-20210924032853-1c027f395ef7/go.mod h1:c4YKU3ZylDmvbw+H/PSvm42vhdWbuxCzbonauEAP9B8=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
"""
),
)
pkg_info = rule_runner.request(
ThirdPartyPkgInfo,
[ThirdPartyPkgInfoRequest("golang.org/x/mobile/bind/objc", digest, "go.mod")],
)
assert pkg_info.error is not None
def test_determine_pkg_info_module_with_replace_directive(rule_runner: RuleRunner) -> None:
"""Regression test for https://github.com/pantsbuild/pants/issues/13138."""
digest = set_up_go_mod(
rule_runner,
dedent(
"""\
module example.com/third-party-module
go 1.16
require github.com/hashicorp/consul/api v1.3.0
"""
),
dedent(
"""\
github.com/Azure/go-autorest v13.3.2+incompatible h1:VxzPyuhtnlBOzc4IWCZHqpyH2d+QMLQEuy3wREyY4oc=
github.com/Azure/go-autorest v13.3.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/hashicorp/consul/api v1.3.0 h1:HXNYlRkkM/t+Y/Yhxtwcy02dlYwIaoxzvxPnS+cqy78=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.3.0 h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0 h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
"""
),
)
pkg_info = rule_runner.request(
ThirdPartyPkgInfo,
[ThirdPartyPkgInfoRequest("github.com/hashicorp/consul/api", digest, "go.mod")],
)
assert pkg_info.dir_path == "github.com/hashicorp/consul/api@v1.3.0"
assert "raw.go" in pkg_info.go_files
def test_ambiguous_package(rule_runner: RuleRunner) -> None:
digest = set_up_go_mod(
rule_runner,
dedent(
"""\
module example.com/third-party-module
go 1.16
require github.com/ugorji/go v1.1.4
require github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8
"""
),
dedent(
"""\
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
"""
),
)
pkg_info = rule_runner.request(
ThirdPartyPkgInfo,
[ThirdPartyPkgInfoRequest("github.com/ugorji/go/codec", digest, "go.mod")],
)
assert pkg_info.error is not None
# This particular error is tricky because `Dir` will not have been set, which we need to
# determine the dir_path and the digest.
assert pkg_info.dir_path == ""
assert pkg_info.digest == EMPTY_DIGEST
| 54.761302 | 135 | 0.691972 |
c7791218311ce14814ad0bfbb5cc68e390d94d4a | 5,620 | py | Python | Chapter12/videos_sharing_smart_contract/tests/test_videos_sharing.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | f9634259dd3dc509f36a5ccf3a5182c0d2ec79c4 | [
"MIT"
] | 62 | 2019-03-18T04:41:41.000Z | 2022-03-31T05:03:13.000Z | Chapter12/videos_sharing_smart_contract/tests/test_videos_sharing.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | f9634259dd3dc509f36a5ccf3a5182c0d2ec79c4 | [
"MIT"
] | 2 | 2020-06-14T21:56:03.000Z | 2022-01-07T05:32:01.000Z | Chapter12/videos_sharing_smart_contract/tests/test_videos_sharing.py | HowToBeCalculated/Hands-On-Blockchain-for-Python-Developers | f9634259dd3dc509f36a5ccf3a5182c0d2ec79c4 | [
"MIT"
] | 42 | 2019-02-22T03:10:36.000Z | 2022-02-20T04:47:04.000Z | import pytest
import eth_tester
def upload_video(video_sharing, chain, account, video_path, video_title):
txn_hash = video_sharing.functions.upload_video(video_path, video_title).transact({'from': account})
chain.wait.for_receipt(txn_hash)
def transfer_coins(video_sharing, chain, source, destination, amount):
txn_hash = video_sharing.functions.transfer(destination, amount).transact({'from': source})
chain.wait.for_receipt(txn_hash)
def like_video(video_sharing, chain, video_liker, video_uploader, index):
txn_hash = video_sharing.functions.like_video(video_uploader, index).transact({'from': video_liker})
chain.wait.for_receipt(txn_hash)
def test_upload_video(web3, chain):
video_sharing, _ = chain.provider.get_or_deploy_contract('VideosSharing')
t = eth_tester.EthereumTester()
video_uploader = t.get_accounts()[1]
index = video_sharing.functions.latest_videos_index(video_uploader).call()
assert index == 0
upload_video(video_sharing, chain, video_uploader, b'video-ipfs-path', b"video title")
index = video_sharing.functions.latest_videos_index(video_uploader).call()
path = video_sharing.functions.videos_path(video_uploader, 0).call()
title = video_sharing.functions.videos_title(video_uploader, 0).call()
assert index == 1
assert path == b'video-ipfs-path'
assert title == b"video title"
upload_video(video_sharing, chain, video_uploader, b'video-ipfs-path2', b"video title2")
index = video_sharing.functions.latest_videos_index(video_uploader).call()
path = video_sharing.functions.videos_path(video_uploader, 1).call()
title = video_sharing.functions.videos_title(video_uploader, 1).call()
assert index == 2
assert path == b'video-ipfs-path2'
assert title == b"video title2"
events = video_sharing.events.UploadVideo.createFilter(fromBlock=0).get_all_entries()
assert events[0]['args']['_user'] == video_uploader
assert events[0]['args']['_index'] == 0
assert events[1]['args']['_user'] == video_uploader
assert events[1]['args']['_index'] == 1
def test_like_video(web3, chain):
video_sharing, _ = chain.provider.get_or_deploy_contract('VideosSharing')
t = eth_tester.EthereumTester()
manager = t.get_accounts()[0]
video_uploader = t.get_accounts()[1]
video_liker = t.get_accounts()[2]
video_liker2 = t.get_accounts()[3]
transfer_coins(video_sharing, chain, manager, video_liker, 100)
transfer_coins(video_sharing, chain, manager, video_liker2, 100)
transfer_coins(video_sharing, chain, manager, video_uploader, 50)
upload_video(video_sharing, chain, video_uploader, b'video-ipfs-path', b"video title")
liked = video_sharing.functions.video_has_been_liked(video_liker, video_uploader, 0).call()
assert liked == False
liked2 = video_sharing.functions.video_has_been_liked(video_liker2, video_uploader, 0).call()
assert liked2 == False
video_uploader_balance = video_sharing.functions.balanceOf(video_uploader).call()
assert video_uploader_balance == 50
video_liker_balance = video_sharing.functions.balanceOf(video_liker).call()
assert video_liker_balance == 100
video_liker2_balance = video_sharing.functions.balanceOf(video_liker2).call()
assert video_liker2_balance == 100
aggregate_likes = video_sharing.functions.video_aggregate_likes(video_uploader, 0).call()
assert aggregate_likes == 0
like_video(video_sharing, chain, video_liker, video_uploader, 0)
liked = video_sharing.functions.video_has_been_liked(video_liker, video_uploader, 0).call()
assert liked == True
liked2 = video_sharing.functions.video_has_been_liked(video_liker2, video_uploader, 0).call()
assert liked2 == False
video_uploader_balance = video_sharing.functions.balanceOf(video_uploader).call()
assert video_uploader_balance == 51
video_liker_balance = video_sharing.functions.balanceOf(video_liker).call()
assert video_liker_balance == 99
video_liker2_balance = video_sharing.functions.balanceOf(video_liker2).call()
assert video_liker2_balance == 100
aggregate_likes = video_sharing.functions.video_aggregate_likes(video_uploader, 0).call()
assert aggregate_likes == 1
like_video(video_sharing, chain, video_liker2, video_uploader, 0)
liked = video_sharing.functions.video_has_been_liked(video_liker2, video_uploader, 0).call()
assert liked == True
liked2 = video_sharing.functions.video_has_been_liked(video_liker2, video_uploader, 0).call()
assert liked2 == True
video_uploader_balance = video_sharing.functions.balanceOf(video_uploader).call()
assert video_uploader_balance == 52
video_liker_balance = video_sharing.functions.balanceOf(video_liker).call()
assert video_liker_balance == 99
video_liker2_balance = video_sharing.functions.balanceOf(video_liker2).call()
assert video_liker2_balance == 99
aggregate_likes = video_sharing.functions.video_aggregate_likes(video_uploader, 0).call()
assert aggregate_likes == 2
events = video_sharing.events.LikeVideo.createFilter(fromBlock=0).get_all_entries()
assert events[0]['args']['_video_liker'] == video_liker
assert events[0]['args']['_video_uploader'] == video_uploader
assert events[0]['args']['_index'] == 0
assert events[1]['args']['_video_liker'] == video_liker2
assert events[1]['args']['_video_uploader'] == video_uploader
assert events[1]['args']['_index'] == 0
with pytest.raises(eth_tester.exceptions.TransactionFailed):
like_video(video_sharing, chain, video_liker, video_uploader, 0)
| 46.065574 | 104 | 0.754093 |
5545fe74c60b96404f1778318325773ea7e4e356 | 10,905 | py | Python | client/verta/tests/monitoring/alerts/test_entities.py | lsb/modeldb | bf484b4110980268d22714e4ff907c28e0ece89e | [
"Apache-2.0"
] | null | null | null | client/verta/tests/monitoring/alerts/test_entities.py | lsb/modeldb | bf484b4110980268d22714e4ff907c28e0ece89e | [
"Apache-2.0"
] | null | null | null | client/verta/tests/monitoring/alerts/test_entities.py | lsb/modeldb | bf484b4110980268d22714e4ff907c28e0ece89e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from collections import namedtuple
import pytest
from verta._internal_utils import (
_utils,
time_utils,
)
from verta.monitoring import comparison
from verta.monitoring.alert import (
FixedAlerter,
RangeAlerter,
ReferenceAlerter,
)
from verta.monitoring.alert.status import (
Alerting,
Ok,
)
from verta.monitoring.alert.entities import Alert, Alerts
from verta.monitoring.summaries.queries import SummarySampleQuery
from verta.monitoring.notification_channel import (
SlackNotificationChannel,
)
from verta import data_types
class TestIntegration:
"""Alerts + related entities/objects."""
def test_add_notification_channels(
self,
client,
summary,
created_entities,
):
name = _utils.generate_default_name()
alerter = FixedAlerter(comparison.GreaterThan(0.7))
channel1 = client.monitoring.notification_channels.create(
_utils.generate_default_name(),
SlackNotificationChannel(_utils.generate_default_name()),
)
created_entities.append(channel1)
channel2 = client.monitoring.notification_channels.create(
_utils.generate_default_name(),
SlackNotificationChannel(_utils.generate_default_name()),
)
created_entities.append(channel2)
alert = summary.alerts.create(
name,
alerter,
notification_channels=[channel1],
)
retrieved_channel_ids = alert._msg.notification_channels.keys()
assert set(retrieved_channel_ids) == {channel1.id}
alert.add_notification_channels([channel2])
alert._refresh_cache()
retrieved_channel_ids = alert._msg.notification_channels.keys()
assert set(retrieved_channel_ids) == {channel1.id, channel2.id}
def test_set_status(self, summary, summary_sample):
name = _utils.generate_default_name()
alerter = FixedAlerter(comparison.GreaterThan(0.7))
alert = summary.alerts.create(name, alerter)
assert alert.status == Ok()
assert alert.last_evaluated_at is None
alert.set_status(Alerting([summary_sample]))
assert alert.status == Alerting([summary_sample])
assert alert.last_evaluated_at == time_utils.datetime_from_millis(
alert._msg.last_evaluated_at_millis
)
alert.set_status(Ok())
assert alert.status == Ok()
class TestAlert:
"""Tests that aren't specific to an alerter type."""
def test_update_last_evaluated_at(self, summary):
name = _utils.generate_default_name()
alerter = FixedAlerter(comparison.GreaterThan(0.7))
alert = summary.alerts.create(name, alerter)
alert._fetch_with_no_cache()
initial = alert._msg.last_evaluated_at_millis
alert._update_last_evaluated_at()
alert._fetch_with_no_cache()
assert alert._msg.last_evaluated_at_millis > initial
yesterday = time_utils.now() - datetime.timedelta(days=1)
yesterday_millis = time_utils.epoch_millis(yesterday)
# TODO: remove following line when backend stops round to nearest sec
yesterday_millis = round(yesterday_millis, -3)
alert._update_last_evaluated_at(yesterday)
alert._fetch_with_no_cache()
assert alert._msg.last_evaluated_at_millis == yesterday_millis
def test_creation_query_params(self, summary):
"""`labels` and `starting_from`"""
name = _utils.generate_default_name()
alerter = FixedAlerter(comparison.GreaterThan(0.7))
labels = {"datasource": ["census2010", "census2020"]}
starting_from = datetime.datetime(year=2021, month=5, day=10, tzinfo=time_utils.utc)
# none passed
alert = summary.alerts.create(
name,
alerter,
)
expected_sample_query = SummarySampleQuery(
summary_query=summary.alerts._build_summary_query(),
created_after=alert.created_at,
)
assert alert.summary_sample_query == expected_sample_query
assert alert.labels == {}
assert alert.starting_from is None
# just labels
alert = summary.alerts.create(
name,
alerter,
labels=labels,
)
expected_sample_query = SummarySampleQuery(
summary_query=summary.alerts._build_summary_query(),
labels=labels,
created_after=alert.created_at,
)
assert alert.summary_sample_query == expected_sample_query
assert alert.labels == labels
assert alert.starting_from is None
# starting_from
alert = summary.alerts.create(
name,
alerter,
labels=labels,
starting_from=starting_from,
)
expected_sample_query = SummarySampleQuery(
summary_query=summary.alerts._build_summary_query(),
labels=labels,
time_window_start=starting_from,
created_after=alert.created_at,
)
assert alert.summary_sample_query == expected_sample_query
assert alert.labels == labels
assert alert.starting_from == starting_from
def test_creation_override_datetimes(self, summary, strs):
strs = iter(strs)
alerter = FixedAlerter(comparison.GreaterThan(0.7))
created_at = time_utils.now() - datetime.timedelta(weeks=1)
updated_at = time_utils.now() - datetime.timedelta(days=1)
last_evaluated_at = time_utils.now() - datetime.timedelta(hours=1)
created_at_millis = time_utils.epoch_millis(created_at)
updated_at_millis = time_utils.epoch_millis(updated_at)
last_evaluated_at_millis = time_utils.epoch_millis(last_evaluated_at)
# as datetime
alert = summary.alerts.create(
next(strs),
alerter,
_created_at=created_at,
_updated_at=updated_at,
_last_evaluated_at=last_evaluated_at,
)
assert alert._msg.created_at_millis == created_at_millis
assert alert._msg.updated_at_millis == updated_at_millis
assert alert._msg.last_evaluated_at_millis == last_evaluated_at_millis
# as millis
alert = summary.alerts.create(
next(strs),
alerter,
_created_at=created_at_millis,
_updated_at=updated_at_millis,
_last_evaluated_at=last_evaluated_at_millis,
)
assert alert._msg.created_at_millis == created_at_millis
assert alert._msg.updated_at_millis == updated_at_millis
assert alert._msg.last_evaluated_at_millis == last_evaluated_at_millis
def test_alerts_summary(self):
MockSummary = namedtuple("Summary", ["id", "name", "monitored_entity_id"])
monitored_entity_id = 5
summary = MockSummary(123, "my_test_summary", monitored_entity_id)
offline_alerts = Alerts(
None,
None,
monitored_entity_id=monitored_entity_id,
summary=summary,
)
query = offline_alerts._build_summary_query()
assert query
assert summary.id in query._ids
assert summary.name in query._names
assert summary.monitored_entity_id in query._monitored_entity_ids
class TestNonReferenceAlerters:
@pytest.mark.parametrize(
"alerter", [FixedAlerter(comparison.GreaterThan(0.7)), RangeAlerter(-1.0, 1.0)]
)
def test_crud(self, client, summary, alerter):
name = _utils.generate_default_name()
created_alert = summary.alerts.create(name, alerter)
assert isinstance(created_alert, Alert)
assert created_alert._msg.alerter_type == alerter._TYPE
assert created_alert.monitored_entity_id == summary.monitored_entity_id
assert summary.id in created_alert.summary_sample_query.summary_query._ids
retrieved_alert = summary.alerts.get(id=created_alert.id)
client_retrieved_alert = client.monitoring.alerts.get(id=created_alert.id)
assert retrieved_alert.id == client_retrieved_alert.id
assert isinstance(retrieved_alert, Alert)
assert retrieved_alert._msg.alerter_type == alerter._TYPE
assert retrieved_alert.alerter._as_proto() == alerter._as_proto()
listed_alerts = summary.alerts.list()
assert created_alert.id in map(lambda a: a.id, listed_alerts)
client_listed_alerts = client.monitoring.alerts.list()
assert created_alert.id in map(lambda a: a.id, client_listed_alerts)
assert summary.alerts.delete([created_alert])
@pytest.mark.parametrize(
"alerter", [FixedAlerter(comparison.GreaterThan(0.7)), RangeAlerter(-1.0, 1.0)]
)
def test_repr(self, summary, alerter):
"""__repr__() does not raise exceptions"""
name = _utils.generate_default_name()
created_alert = summary.alerts.create(name, alerter)
assert repr(created_alert)
retrieved_alert = summary.alerts.get(id=created_alert.id)
assert repr(retrieved_alert)
class TestReference:
def test_crud(self, client, summary, summary_sample):
name = _utils.generate_default_name()
alerter = ReferenceAlerter(comparison.GreaterThan(0.7), summary_sample)
created_alert = summary.alerts.create(name, alerter)
assert isinstance(created_alert, Alert)
assert created_alert._msg.alerter_type == alerter._TYPE
assert created_alert.monitored_entity_id == summary.monitored_entity_id
assert summary.id in created_alert.summary_sample_query.summary_query._ids
retrieved_alert = summary.alerts.get(id=created_alert.id)
client_retrieved_alert = client.monitoring.alerts.get(id=created_alert.id)
assert retrieved_alert.id == client_retrieved_alert.id
assert isinstance(retrieved_alert, Alert)
assert retrieved_alert._msg.alerter_type == alerter._TYPE
assert retrieved_alert.alerter._as_proto() == alerter._as_proto()
assert retrieved_alert.alerter._reference_sample_id == summary_sample.id
listed_alerts = summary.alerts.list()
assert created_alert.id in map(lambda a: a.id, listed_alerts)
client_listed_alerts = client.monitoring.alerts.list()
assert created_alert.id in map(lambda a: a.id, client_listed_alerts)
assert summary.alerts.delete([created_alert])
def test_repr(self, summary, summary_sample):
"""__repr__() does not raise exceptions"""
name = _utils.generate_default_name()
alerter = ReferenceAlerter(comparison.GreaterThan(0.7), summary_sample)
created_alert = summary.alerts.create(name, alerter)
assert repr(created_alert)
retrieved_alert = summary.alerts.get(id=created_alert.id)
assert repr(retrieved_alert)
| 37.996516 | 92 | 0.682898 |
481ff00188f78034656c93d13e61771de84db6e2 | 1,125 | py | Python | calisma1.py | donmezmerve/calismas | d941dbaac6b022ae517387b2aa8be564e007532a | [
"MIT"
] | null | null | null | calisma1.py | donmezmerve/calismas | d941dbaac6b022ae517387b2aa8be564e007532a | [
"MIT"
] | null | null | null | calisma1.py | donmezmerve/calismas | d941dbaac6b022ae517387b2aa8be564e007532a | [
"MIT"
] | null | null | null | import pandas
import numpy
def read_file(filename):  # define the function
    data = pandas.read_csv(filename, delim_whitespace=True)  # save it into a variable called data
    temp_list=numpy.asarray(data['Temperature'])  # to match the file format
return temp_list
def calculate_costs(temp):  # takes a single data point, hence the name temp
    heating_cost= 0  # there may be no heating cost
if temp<8 :
heating_cost= 8-temp
cooling_cost=0
if temp>16:
cooling_cost= temp-16
#print(temp,heating_cost, cooling_cost)
return (heating_cost, cooling_cost)
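# Worked example of the thresholds above: calculate_costs(5) returns (3, 0)
# because 5 is 3 degrees below the heating threshold of 8, while
# calculate_costs(20) returns (0, 4) because 20 is 4 degrees above the cooling threshold of 16.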
def calculate_avg(temp_list):
    total_heating_cost=0  # accumulated degrees
total_cooling_cost=0
for temp in temp_list:
costs=calculate_costs(temp)
total_heating_cost += costs[0]
total_cooling_cost += costs[1]
nyears= 2018-1951 +1
return (total_heating_cost/nyears, total_cooling_cost/nyears)
#Read File
temp_list=read_file("Schiphol.txt")
#for temp in temp_list:
#    calculate_costs(temp)  # put what you wrote inside the for loop into the parentheses
avg=calculate_avg(temp_list)
print("heating:" , avg[0], "cooling:", avg[1]) | 31.25 | 91 | 0.728889 |
059bbbcd468fb3bd80f720a794049eca9aed284f | 1,616 | py | Python | vsts/vsts/customer_intelligence/v4_0/customer_intelligence_client.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/customer_intelligence/v4_0/customer_intelligence_client.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | vsts/vsts/customer_intelligence/v4_0/customer_intelligence_client.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class CustomerIntelligenceClient(VssClient):
"""CustomerIntelligence
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(CustomerIntelligenceClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def publish_events(self, events):
"""PublishEvents.
[Preview API]
:param [CustomerIntelligenceEvent] events:
"""
content = self._serialize.body(events, '[CustomerIntelligenceEvent]')
self._send(http_method='POST',
location_id='b5cc35c2-ff2b-491d-a085-24b6e9f396fd',
version='4.0-preview.1',
content=content)
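# Hypothetical usage sketch (not part of the generated client): the exact event
# fields live in the generated `models` module; `area`, `feature` and `properties`
# below are assumptions for illustration only.
#
#   client = CustomerIntelligenceClient(base_url=url, creds=creds)
#   event = models.CustomerIntelligenceEvent(area='MyArea', feature='MyFeature',
#                                            properties={'durationMs': 125})
#   client.publish_events([event])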
| 41.435897 | 94 | 0.572401 |
6f935b8da8c0007ddf226b8df8412f5adf85d5bc | 11,190 | py | Python | python/mtap/io/serialization.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 3 | 2020-03-06T21:24:24.000Z | 2021-03-21T06:38:00.000Z | python/mtap/io/serialization.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 40 | 2019-10-14T17:02:54.000Z | 2022-03-09T13:35:54.000Z | python/mtap/io/serialization.py | benknoll-umn/mtap | 67d506aa4ffc960acca1988ec12c5391c15ad736 | [
"Apache-2.0"
] | 2 | 2019-10-14T15:42:46.000Z | 2020-03-05T23:29:01.000Z | # Copyright 2019 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serialization, serializers, and helper methods for going to and from flattened python dictionary
representations of events.
Attributes:
JsonSerializer (Serializer): For serializing to and from json.
"""
import io
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, Dict, Any, Optional
import mtap
from mtap import data, processing
from mtap import descriptions as d
logger = logging.getLogger(__name__)
__all__ = [
'event_to_dict',
'document_to_dict',
'dict_to_event',
'dict_to_document',
'Serializer',
'SerializationProcessor',
'JsonSerializer',
'YamlSerializer',
'PickleSerializer',
'standard_serializers',
'get_serializer'
]
def event_to_dict(event: mtap.Event, *, include_label_text: bool = False) -> Dict:
"""A helper method that turns an event into a python dictionary.
Args:
event (Event): The event object.
Keyword Args:
include_label_text (bool): Whether to include the text labels cover with the labels.
Returns:
dict: A dictionary object suitable for serialization.
"""
d = {
'event_id': event.event_id,
'metadata': {},
'documents': {}
}
for k, v in event.metadata.items():
d['metadata'][k] = v
for doc in event.documents.values():
d['documents'][doc.document_name] = document_to_dict(doc,
include_label_text=include_label_text)
return d
def document_to_dict(document: mtap.Document, *, include_label_text: bool = False) -> Dict:
"""A helper method that turns a document into a python dictionary.
Args:
document (Document): The document object.
Keyword Args:
include_label_text (bool): Whether to include the text labels cover with the labels.
Returns:
dict: A dictionary object suitable for serialization.
"""
d = {
'text': document.text,
'label_indices': {}
}
for index_name, index in document.labels.items():
adapter = index.adapter
if adapter is None:
adapter = data.GENERIC_ADAPTER
d['label_indices'][index_name] = adapter.pack(
index,
include_label_text=include_label_text
)
return d
def dict_to_event(d: Dict, *, client: Optional[mtap.EventsClient] = None) -> mtap.Event:
"""Turns a serialized dictionary into an Event.
Args:
d (dict): The dictionary representation of the event.
client (~typing.Optional[EventsClient]): An events service to create the event on.
Returns:
Event: The deserialized event object.
"""
event = mtap.Event(event_id=d['event_id'], client=client)
for k, v in d['metadata'].items():
event.metadata[k] = v
for k, v in d['documents'].items():
dict_to_document(k, v, event=event)
return event
def dict_to_document(document_name: str,
d: Dict,
*, event: Optional[mtap.Event] = None) -> mtap.Document:
"""Turns a serialized dictionary into a Document.
Args:
document_name (str): The name identifier of the document on the event.
d (dict): The dictionary representation of the document.
event (~typing.Optional[Event]): An event that the document should be added to.
Returns:
Document: The deserialized Document object.
"""
document = mtap.Document(document_name=document_name, text=d['text'])
if event is not None:
event.add_document(document)
for k, v in d['label_indices'].items():
adapter = document.get_default_adapter(k)
index = adapter.unpack(v, k, document=document)
document.add_labels(k, index, distinct=index.distinct)
return document
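# A minimal round-trip sketch using the helpers above (assumes an in-memory event
# that is not attached to an events service):
#
#   event = mtap.Event(event_id='example-event')
#   document = mtap.Document(document_name='plaintext', text='The quick brown fox.')
#   event.add_document(document)
#   flattened = event_to_dict(event)
#   restored = dict_to_event(flattened)
#
# ``flattened`` is a plain dict, so it can be handed to any serializer defined below.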
class Serializer(ABC):
"""Abstract base class for a serializer of MTAP events.
"""
@property
@abstractmethod
def extension(self) -> str:
"""str: Filename extension, including period. Ex: ``'.json'``."""
...
@abstractmethod
def event_to_file(self, event: mtap.Event, f: Union[Path, str, io.IOBase],
*, include_label_text: bool = False):
"""Writes the event to a file.
Args:
event (Event): The event object to serialize.
f (~typing.Union[~pathlib.Path, str, ~io.IOBase]):
A file or a path to a file to write the event to.
include_label_text (bool):
Whether, when serializing, to include the text that each label covers with the rest
of the label.
"""
...
@abstractmethod
def file_to_event(self, f: Union[Path, str, io.IOBase], *,
client: Optional[mtap.EventsClient] = None) -> mtap.Event:
"""Loads an event from a serialized file.
Args:
f (~typing.Union[~pathlib.Path, str, ~io.IOBase]): The file to load from.
client (~typing.Optional[EventsClient]): The events service to load the event into.
Returns:
Event: The loaded event object.
"""
...
@mtap.processor('mtap-serializer',
description='Serializes events to a specific directory',
parameters=[d.parameter('filename', data_type='str',
description='Optional override for the filename to write the '
'document to.')])
class SerializationProcessor(mtap.EventProcessor):
"""An MTAP :obj:`EventProcessor` that serializes events to a specific directory.
Args:
ser (Serializer): The serializer to use.
output_dir (str): The output_directory.
"""
def __init__(self, ser: Serializer, output_dir: str, include_label_text: bool = False):
self.serializer = ser
self.output_dir = output_dir
self.include_label_text = include_label_text
Path(self.output_dir).mkdir(parents=True, exist_ok=True)
def process(self, event: mtap.Event, params: Dict[str, Any]):
name = params.get('filename', event.event_id + self.serializer.extension)
path = Path(self.output_dir, name)
self.serializer.event_to_file(event, path, include_label_text=self.include_label_text)
class _JsonSerializer(Serializer):
"""Serializer implementation that performs serialization to JSON.
"""
@property
def extension(self) -> str:
return '.json'
def event_to_file(self, event: mtap.Event, f: Path, *, include_label_text: bool = False):
import json
with processing.Processor.started_stopwatch('transform'):
d = event_to_dict(event, include_label_text=include_label_text)
with processing.Processor.started_stopwatch('io'):
try:
json.dump(d, f)
except AttributeError:
f = Path(f)
f.parent.mkdir(parents=True, exist_ok=True)
with f.open('w') as f:
json.dump(d, f)
def file_to_event(self, f: Union[Path, str, io.IOBase],
client: Optional[mtap.EventsClient] = None) -> mtap.Event:
import json
with processing.Processor.started_stopwatch('io'):
try:
d = json.load(f)
except AttributeError:
if isinstance(f, str):
f = Path(f)
with f.open('r') as f:
d = json.load(f)
with processing.Processor.started_stopwatch('transform'):
return dict_to_event(d, client=client)
class _YamlSerializer(Serializer):
@property
def extension(self) -> str:
return '.yml'
def event_to_file(self, event: mtap.Event, f: Union[Path, str, io.IOBase], *,
include_label_text: bool = False):
import yaml
try:
from yaml import CDumper as Dumper
except ImportError:
from yaml import Dumper
with processing.Processor.started_stopwatch('transform'):
d = event_to_dict(event, include_label_text=include_label_text)
with processing.Processor.started_stopwatch('io'):
if isinstance(f, io.IOBase):
yaml.dump(d, f, Dumper=Dumper)
else:
f = Path(f)
with f.open('w') as f:
yaml.dump(d, f, Dumper=Dumper)
def file_to_event(self, f: Union[Path, str, io.IOBase], *,
client: Optional[mtap.EventsClient] = None) -> mtap.Event:
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
with processing.Processor.started_stopwatch('io'):
if isinstance(f, io.IOBase):
d = yaml.load(f, Loader=Loader)
else:
with Path(f).open() as f:
d = yaml.load(f, Loader=Loader)
with processing.Processor.started_stopwatch('transform'):
return dict_to_event(d, client=client)
class _PickleSerializer(Serializer):
@property
def extension(self) -> str:
return '.pickle'
def event_to_file(self, event: mtap.Event, f: Union[Path, str, io.IOBase], *,
include_label_text: bool = False):
import pickle
with processing.Processor.started_stopwatch('transform'):
d = event_to_dict(event, include_label_text=include_label_text)
with processing.Processor.started_stopwatch('io'):
try:
pickle.dump(d, f)
except TypeError:
with Path(f).open('wb') as f:
pickle.dump(d, f)
def file_to_event(self, f: Union[Path, str, io.IOBase], *,
client: Optional[mtap.EventsClient] = None) -> mtap.Event:
import pickle
with processing.Processor.started_stopwatch('io'):
try:
d = pickle.load(f)
except TypeError:
with Path(f).open('rb') as f:
d = pickle.load(f)
with processing.Processor.started_stopwatch('transform'):
return dict_to_event(d, client=client)
JsonSerializer = _JsonSerializer()
YamlSerializer = _YamlSerializer()
PickleSerializer = _PickleSerializer()
standard_serializers = {
'json': JsonSerializer,
'yml': YamlSerializer,
'pickle': PickleSerializer
}
def get_serializer(identifier):
return standard_serializers[identifier]
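# Example usage sketch (the output path below is an assumption for illustration):
#
#   serializer = get_serializer('json')          # same instance as JsonSerializer
#   serializer.event_to_file(event, '/tmp/example-event.json')
#   reloaded = serializer.file_to_event('/tmp/example-event.json')
#
# SerializationProcessor(JsonSerializer, output_dir='/tmp/events') exposes the same
# behavior as an event processor that writes one file per processed event.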
| 33.909091 | 102 | 0.615728 |
a6898cc7362dd7c3ba61568492958f7941be0962 | 1,679 | py | Python | setup.py | diegojromerolopez/django-async-include | af447c14ed2571e4f16423c75f5c725cd26c9780 | [
"MIT"
] | 14 | 2017-03-11T22:26:54.000Z | 2022-02-01T12:09:57.000Z | setup.py | diegojromerolopez/django-async-include | af447c14ed2571e4f16423c75f5c725cd26c9780 | [
"MIT"
] | 11 | 2017-03-23T22:40:48.000Z | 2021-08-22T00:22:21.000Z | setup.py | diegojromerolopez/django-async-include | af447c14ed2571e4f16423c75f5c725cd26c9780 | [
"MIT"
] | 4 | 2017-03-22T23:27:15.000Z | 2021-06-26T06:15:17.000Z | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
root_dir_path = os.path.dirname(os.path.abspath(__file__))
long_description = open(os.path.join(root_dir_path, "README.md")).read()
data_files = []
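# Walk the project tree and collect files from non-package, non-hidden directories
# so they are shipped alongside the package as data_files.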
for dirpath, dirnames, filenames in os.walk('.'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'):
del dirnames[i]
if '__init__.py' in filenames:
continue
elif filenames:
data_files.append(
[dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name="django-async-include",
version="0.6.7",
author="Diego J. Romero López",
author_email="diegojromerolopez@gmail.com",
description=(
"A simple application for Django to include "
"(and fetch) asynchronous templates."
),
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
'Programming Language :: Python :: Implementation :: CPython',
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries',
],
install_requires=[
"pycryptodome>=3.10.1",
"jsonpickle>=2.0.0"
],
license="MIT",
keywords="django template asynchronous template_tag",
url='https://github.com/diegojromerolopez/django-async-include',
packages=find_packages('.'),
data_files=data_files,
include_package_data=True,
)
| 31.679245 | 72 | 0.640858 |
98ff1bafed433665948f64272fca27494513e7e1 | 542 | py | Python | CeV - Gustavo Guanabara/exerc034.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | 1 | 2021-12-11T19:53:41.000Z | 2021-12-11T19:53:41.000Z | CeV - Gustavo Guanabara/exerc034.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | CeV - Gustavo Guanabara/exerc034.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | #034: program to read an employee's salary and apply a percentage raise
# if the salary is greater than 1250, give a 10% raise
# for a salary of 1250 or less, give a 15% raise
sal = float(input("Qual o seu salario atual? R$"))
if sal > 1250.00 :
nsal = sal + (sal * 0.10)
print(f'Seu salario antigo era R${sal:.2f}, '
f'com o acréscimo de 10% seu salário atual é R${nsal:.2f}.')
else:
nsal = sal + (sal * 0.15)
print(f'Seu salário antigo era R${sal:.2f}, '
f'com o acréscimo de 15%, seu salário atual é R${nsal:.2f}.')
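# Worked example: a salary of 1000.00 falls in the 15% branch and becomes 1150.00,
# while a salary of 2000.00 falls in the 10% branch and becomes 2200.00.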
| 36.133333 | 77 | 0.623616 |
12622bd8108be1d2928443cf1ad2df3f6bd9a4a6 | 2,131 | py | Python | src/main.py | nkosinathintuli/eee3096s-prac-06-iot-server | 40369e2baf9c2588c0ee38559c520c8e96f79ca0 | [
"MIT"
] | null | null | null | src/main.py | nkosinathintuli/eee3096s-prac-06-iot-server | 40369e2baf9c2588c0ee38559c520c8e96f79ca0 | [
"MIT"
] | null | null | null | src/main.py | nkosinathintuli/eee3096s-prac-06-iot-server | 40369e2baf9c2588c0ee38559c520c8e96f79ca0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from csv import DictReader, writer
import threading
import datetime
import socket
import csv
import os
import time
TCP_IP = ''
P1_IP = '192.168.137.105'
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
global temp_ADC
temp_ADC = -1
global LDR_ADC
LDR_ADC = -1
global counter
counter=time.time()
e=datetime.datetime.now()
global tim
tim = e.strftime("%H:%M:%S")
#Receives data from client
def recieving(cmd):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print('Connection address:', addr)
while 1:
if(cmd=="0"):
break
data = conn.recv(BUFFER_SIZE)
if not data: continue
print("received data:", data)
conn.send(data) # echo
conn, addr = s.accept()
conn.close()
def send_cmd(cmd):
print("Sending...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((P1_IP, TCP_PORT))
s.send(cmd)
data = s.recv(BUFFER_SIZE)
s.close()
print("received data:", data)
pass
#save new data to CSV file
def add(row):
with open('sensorlog.csv', 'a',encoding='UTF8',newline='') as csvfile:
writer1 = writer(csvfile)
writer1.writerow(row)
csvfile.close()
#create a new CSV file
def create():
header = ['Date', 'Time', 'Temparature', 'LDR']
with open('sensorlog.csv', 'w',encoding='UTF8',newline='') as csvfile:
writer1 = writer(csvfile)
writer1.writerow(header)
csvfile.close()
#print the last 10 samples items
def logcheck():
with open('sensorlog.csv', 'r') as read_obj:
csv_dict_reader = DictReader(read_obj)
count=1
print("The last 10 items to be samples are listed")
for row in csv_dict_reader:
print(count,row['Date'], row['Time'],row['Temparature'],row['LDR'],flush=True)
count=count+1
if(count>=11):
break
#checks if the pi samples or not
def status(active1):
if(active1=="Sensor On"):
print("The last data was sampled at:",tim,flush=True)
elif(active1=='Sensor Off'):
print("The Pi is not sampling",flush=True)
| 24.494253 | 90 | 0.651807 |
c69de00cd893bded53257278f8f19478f2f38554 | 21,570 | py | Python | pirates/leveleditor/worldData/interior_shanty_store_clothing.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/leveleditor/worldData/interior_shanty_store_clothing.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/leveleditor/worldData/interior_shanty_store_clothing.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.interior_shanty_store_clothing
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1156268617.43dzlu0z': {'Type': 'Building Interior', 'Name': '', 'Instanced': True, 'Objects': {'1167780096.44kmuller': {'Type': 'Prop_Groups', 'DisableCollision': False, 'Hpr': VBase3(11.301, 0.0, 0.0), 'Pos': Point3(38.194, 25.733, 12.07), 'Scale': VBase3(0.689, 0.689, 0.689), 'Visual': {'Model': 'models/props/prop_group_C'}}, '1167782443.0kmuller': {'Type': 'Prop_Groups', 'DisableCollision': False, 'Hpr': VBase3(-178.549, 0.0, 0.0), 'Pos': Point3(14.354, 29.304, 12.07), 'Scale': VBase3(0.568, 0.568, 0.568), 'Visual': {'Model': 'models/props/prop_group_G'}}, '1167782540.07kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(-13.809, 0.0, 0.0), 'Pos': Point3(24.357, 28.658, 12.07), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.72, 0.7, 0.59, 1.0), 'Model': 'models/props/crate'}}, '1167782751.35kmuller': {'Type': 'Furniture', 'DisableCollision': True, 'Hpr': VBase3(86.779, 0.0, 0.0), 'Pos': Point3(1.51, -6.711, 0.209), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.8, 0.8, 0.8, 1.0), 'Model': 'models/props/cabinet_shanty'}}, '1167784962.05kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(95.414, 0.0, 0.0), 'Pos': Point3(3.37, 5.558, 0.0), 'Scale': VBase3(1.328, 1.328, 1.328), 'Visual': {'Model': 'models/props/Trunk_square'}}, '1167785011.94kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(102.087, 0.0, 0.0), 'Pos': Point3(2.82, 9.429, 0.0), 'Scale': VBase3(1.328, 1.328, 1.328), 'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.8299999833106995, 1.0), 'Model': 'models/props/Trunk_square'}}, '1167785099.21kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(86.244, 0.0, 0.0), 'Pos': Point3(2.848, 6.854, 2.641), 'Scale': VBase3(1.212, 1.212, 1.212), 'Visual': {'Model': 'models/props/Trunk_rounded_2'}}, '1167785202.19kmuller': {'Type': 'Prop_Groups', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-13.186, -25.221, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.35, 0.35, 0.35, 1.0), 'Model': 'models/props/prop_group_G'}}, '1167785325.35kmuller': {'Type': 'Prop_Groups', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-5.251, 8.176, 12.07), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/prop_group_A'}}, '1167785354.14kmuller': {'Type': 'Prop_Groups', 'DisableCollision': False, 'Hpr': VBase3(104.949, 0.0, 0.0), 'Pos': Point3(-6.18, -15.155, 12.07), 'Scale': VBase3(0.724, 0.724, 0.724), 'Visual': {'Color': (0.78, 0.7, 0.59, 1.0), 'Model': 'models/props/prop_group_G'}}, '1167785512.3kmuller': {'Type': 'Interior_furnishings', 'DisableCollision': True, 'Hpr': VBase3(92.178, 0.0, 0.0), 'Pos': Point3(3.098, 0.68, 0.0), 'Scale': VBase3(1.89, 1.89, 1.89), 'Visual': {'Model': 'models/props/stove_potbelly'}}, '1167785607.47kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-4.102, 0.0, 0.0), 'Pos': Point3(15.064, 29.499, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0), 'Model': 'models/props/cabinet_shanty'}}, '1167785669.93kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(24.255, 29.866, 0.005), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.59, 0.59, 0.49, 1.0), 'Model': 'models/props/table_shanty'}}, '1167785733.82kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-73.849, 0.0, 0.0), 'Pos': Point3(24.887, 27.495, -0.038), 
'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_shanty'}}, '1167785799.32kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': VBase3(-33.083, 0.0, 0.0), 'Pos': Point3(-16.959, 26.878, 12.07), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5899999737739563, 0.5899999737739563, 0.49000000953674316, 1.0), 'Model': 'models/props/crates_group_1'}}, '1167785908.85kmuller': {'Type': 'Tools', 'DisableCollision': False, 'Hpr': VBase3(0.748, 6.607, -6.475), 'Pos': Point3(20.373, -30.67, 0.22), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.8299999833106995, 1.0), 'Model': 'models/props/broom'}}, '1167785969.36kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': VBase3(93.5, 0.0, 0.0), 'Pos': Point3(-2.559, -29.914, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0), 'Model': 'models/props/crates_group_1'}}, '1167786054.93kmuller': {'Type': 'Furniture', 'DisableCollision': True, 'Hpr': VBase3(-141.425, 3.579, 4.475), 'Pos': Point3(4.487, -2.466, 0.302), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_shanty'}}, '1167786166.41kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Objects': {'1167786189.04kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(-113.151, 0.0, 0.0), 'Pos': Point3(0.696, 0.25, 6.191), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Trunk_rounded_2'}}}, 'Pos': Point3(38.107, -28.85, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/crates_group_2'}}, '1167786176.07kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Objects': {'1167786198.72kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(-91.163, 0.0, 0.0), 'Pos': Point3(0.905, -0.641, 2.74), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.85, 0.79, 1.0, 1.0), 'Model': 'models/props/Trunk_rounded'}}}, 'Pos': Point3(40.275, -25.249, -0.015), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.800000011920929, 0.7900000214576721, 0.8299999833106995, 1.0), 'Model': 'models/props/crate'}}, '1167786208.66kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(-145.519, 0.0, 0.0), 'Pos': Point3(34.734, -29.301, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Trunk_rounded'}}, '1167786359.93kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-5.052, -8.153, 12.112), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/crates_group_2'}}, '1167786376.6kmuller': {'Type': 'Crate', 'DisableCollision': False, 'Hpr': VBase3(9.654, 0.0, 0.0), 'Objects': {}, 'Pos': Point3(-5.948, 9.523, 12.07), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5899999737739563, 0.5899999737739563, 0.49000000953674316, 1.0), 'Model': 'models/props/crate_04'}}, '1167866790.57kmuller': {'Type': 'Furniture', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(90.126, 0.0, 0.0), 'Pos': Point3(14.134, -4.896, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.8999999761581421, 0.8999999761581421, 0.699999988079071, 1.0), 'Model': 'models/props/counter_shanty'}}, '1167866812.07kmuller': {'Type': 'Furniture', 'DisableCollision': True, 'Hpr': VBase3(89.428, 0.0, 0.0), 'Pos': Point3(14.133, 5.559, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.8999999761581421, 0.8999999761581421, 
0.699999988079071, 1.0), 'Model': 'models/props/counter_shanty'}}, '1167866949.6kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(3.993, -25.609, 0.002), 'Scale': VBase3(0.77, 0.77, 0.77), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/crates_group_2'}}, '1167867134.67kmuller': {'Type': 'Prop_Groups', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-14.119, 22.598, 0.0), 'Scale': VBase3(0.918, 0.918, 0.918), 'Visual': {'Color': (0.37, 0.35, 0.35, 1.0), 'Model': 'models/props/prop_group_G'}}, '1167951025.38kmuller': {'Type': 'Interior_furnishings', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-0.484, -0.234, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/interior_wall_shanty'}}, '1167970781.36kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(13.146, 1.41, 16.96), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chandelier_jail'}}, '1167970845.8kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': VBase3(-90.787, 0.0, 0.0), 'Pos': Point3(-1.74, 5.115, 4.943), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1167970934.47kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': VBase3(173.499, 0.0, 0.0), 'Pos': Point3(19.116, -28.833, 10.855), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1174678243.43dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': VBase3(52.322, -39.411, -26.119), 'Intensity': '0.9697', 'LightType': 'DIRECTIONAL', 'Pos': Point3(38.519, -4.522, 9.634), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1174678336.35dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': VBase3(177.382, -43.236, -9.914), 'Intensity': '0.4545', 'LightType': 'DIRECTIONAL', 'Pos': Point3(28.883, 22.108, 9.972), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1185409859.14kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(50.85, 0.0, 0.0), 'Pos': Point3(-15.275, 25.187, 11.578), 'Scale': VBase3(2.101, 2.101, 2.101), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185409943.56kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(2.564, 1.041, -0.68), 'Scale': VBase3(1.0, 4.213, 2.062), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1185410005.71kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(14.164, 0.218, -0.335), 'Scale': VBase3(0.797, 4.272, 1.0), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1185410087.71kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-136.712, 0.0, 0.0), 'Pos': Point3(36.76, -25.861, -0.235), 'Scale': VBase3(1.671, 1.507, 1.507), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185410136.21kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-72.882, 0.0, 0.0), 'Pos': Point3(8.884, 30.327, 11.762), 'Scale': VBase3(0.377, 1.0, 
1.0), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185410197.79kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-131.292, 0.0, 0.0), 'Pos': Point3(-8.46, -8.085, 11.538), 'Scale': VBase3(0.583, 1.0, 1.079), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185410250.46kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-15.111, 0.0, 0.0), 'Pos': Point3(-7.712, -21.201, 11.826), 'Scale': VBase3(0.416, 1.0, 1.162), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185410321.0kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(162.788, 0.0, 0.0), 'Pos': Point3(-13.021, -17.989, -0.142), 'Scale': VBase3(1.724, 1.442, 1.442), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185410366.92kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(89.735, 0.0, 0.0), 'Pos': Point3(-10.749, 29.389, -0.519), 'Scale': VBase3(0.417, 1.0, 1.543), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1257816816.97caoconno': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(90.0, 0.0, 0.0), 'Pos': Point3(41.986, -7.423, 0.079), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1257816894.69caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(86.69, 0.0, 0.0), 'Pos': Point3(0.939, -11.586, 8.859), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1257816960.16caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(31.115, 21.163, 8.859), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1257817043.88caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(86.69, 0.0, 0.0), 'Pos': Point3(0.99, 11.935, 8.859), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1257817076.3caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(9.327, 0.0, 0.0), 'Pos': Point3(1.849, -5.05, 8.825), 'Scale': VBase3(0.548, 0.548, 0.548), 'VisSize': '', 'Visual': {'Color': (0.9300000071525574, 0.75, 1.0, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}}, '1257817102.41caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-2.396, 0.0, 0.0), 'Pos': Point3(1.767, -5.172, 5.685), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 0.800000011920929, 0.800000011920929, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}}, '1257817129.64caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-42.609, 0.0, 0.0), 'Pos': Point3(1.46, -6.4, 8.825), 'Scale': VBase3(0.848, 0.848, 0.848), 'VisSize': '', 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}}, '1257817179.86caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(1.46, -8.095, 8.863), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}}, '1257817213.33caoconno': {'Type': 'Holiday', 'DisableCollision': False, 
'Holiday': 'WinterFestival', 'Hpr': VBase3(-2.396, 0.0, 0.0), 'Pos': Point3(1.643, -8.147, 5.685), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 0.800000011920929, 0.800000011920929, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}}, '1257817216.0caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-2.396, 0.0, 0.0), 'Pos': Point3(1.703, -6.692, 5.685), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (1.0, 0.800000011920929, 0.800000011920929, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}}, '1257817253.33caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(14.61, -6.849, 3.585), 'Scale': VBase3(1.478, 1.478, 1.478), 'VisSize': '', 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift02_winter08'}}, '1257817324.8caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(174.295, 2.868, 0.0), 'Pos': Point3(19.01, -29.411, 9.2), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}}, 'Visual': {'Model': 'models/buildings/interior_shanty_store'}}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1156268617.43dzlu0z': '["Objects"]["1156268617.43dzlu0z"]', '1167780096.44kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167780096.44kmuller"]', '1167782443.0kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167782443.0kmuller"]', '1167782540.07kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167782540.07kmuller"]', '1167782751.35kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167782751.35kmuller"]', '1167784962.05kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167784962.05kmuller"]', '1167785011.94kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785011.94kmuller"]', '1167785099.21kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785099.21kmuller"]', '1167785202.19kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785202.19kmuller"]', '1167785325.35kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785325.35kmuller"]', '1167785354.14kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785354.14kmuller"]', '1167785512.3kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785512.3kmuller"]', '1167785607.47kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785607.47kmuller"]', '1167785669.93kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785669.93kmuller"]', '1167785733.82kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785733.82kmuller"]', '1167785799.32kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785799.32kmuller"]', '1167785908.85kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785908.85kmuller"]', '1167785969.36kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167785969.36kmuller"]', '1167786054.93kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786054.93kmuller"]', '1167786166.41kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786166.41kmuller"]', '1167786176.07kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786176.07kmuller"]', '1167786189.04kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786166.41kmuller"]["Objects"]["1167786189.04kmuller"]', '1167786198.72kmuller': 
'["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786176.07kmuller"]["Objects"]["1167786198.72kmuller"]', '1167786208.66kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786208.66kmuller"]', '1167786359.93kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786359.93kmuller"]', '1167786376.6kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167786376.6kmuller"]', '1167866790.57kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167866790.57kmuller"]', '1167866812.07kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167866812.07kmuller"]', '1167866949.6kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167866949.6kmuller"]', '1167867134.67kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167867134.67kmuller"]', '1167951025.38kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167951025.38kmuller"]', '1167970781.36kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167970781.36kmuller"]', '1167970845.8kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167970845.8kmuller"]', '1167970934.47kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1167970934.47kmuller"]', '1174678243.43dzlu': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1174678243.43dzlu"]', '1174678336.35dzlu': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1174678336.35dzlu"]', '1185409859.14kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185409859.14kmuller"]', '1185409943.56kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185409943.56kmuller"]', '1185410005.71kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410005.71kmuller"]', '1185410087.71kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410087.71kmuller"]', '1185410136.21kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410136.21kmuller"]', '1185410197.79kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410197.79kmuller"]', '1185410250.46kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410250.46kmuller"]', '1185410321.0kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410321.0kmuller"]', '1185410366.92kmuller': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1185410366.92kmuller"]', '1257816816.97caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257816816.97caoconno"]', '1257816894.69caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257816894.69caoconno"]', '1257816960.16caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257816960.16caoconno"]', '1257817043.88caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817043.88caoconno"]', '1257817076.3caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817076.3caoconno"]', '1257817102.41caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817102.41caoconno"]', '1257817129.64caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817129.64caoconno"]', '1257817179.86caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817179.86caoconno"]', '1257817213.33caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817213.33caoconno"]', '1257817216.0caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817216.0caoconno"]', '1257817253.33caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817253.33caoconno"]', '1257817324.8caoconno': '["Objects"]["1156268617.43dzlu0z"]["Objects"]["1257817324.8caoconno"]'}}
extraInfo = {'camPos': Point3(0, -14, 0), 'camHpr': VBase3(0, 0, 0), 'focalLength': 0.7921423316, 'skyState': -2, 'fog': 0} | 3,081.428571 | 21,142 | 0.667269 |
3b955f5026a253155508bbf078358f9426a60a9a | 650 | py | Python | common/src/stack/command/stack/commands/list/host/storage/controller/__init__.py | anooprajendra/stacki | 5e3f51c928ff5367a7441f07bf28f0121e7abdff | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | common/src/stack/command/stack/commands/list/host/storage/controller/__init__.py | anooprajendra/stacki | 5e3f51c928ff5367a7441f07bf28f0121e7abdff | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | common/src/stack/command/stack/commands/list/host/storage/controller/__init__.py | anooprajendra/stacki | 5e3f51c928ff5367a7441f07bf28f0121e7abdff | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
class Command(stack.commands.list.host.command):
"""
List the storage controller configuration for a given host(s).
<arg optional='1' type='string' name='host' repeat='1'>
Zero, one or more host names. If no host names are supplied,
	the storage controller configuration for all the hosts is listed.
</arg>
"""
def run(self, params, args):
self.addText(self.command('list.storage.controller', self._argv + ['scope=host'], verbose_errors = False))
return self.rc
| 28.26087 | 108 | 0.72 |
3c426ef1e6eeaa67db33e31fff1a014d81f3b0ff | 434 | py | Python | test/5__pika__rec.py | qq453388937/d | 95c282b8cdcb603724c7e5907fc268cfadb51fce | [
"MIT"
] | null | null | null | test/5__pika__rec.py | qq453388937/d | 95c282b8cdcb603724c7e5907fc268cfadb51fce | [
"MIT"
] | 1 | 2020-12-10T06:28:54.000Z | 2020-12-10T06:28:54.000Z | test/5__pika__rec.py | qq453388937/d | 95c282b8cdcb603724c7e5907fc268cfadb51fce | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='helloQueue')
def callback(ch, method, properties, body):
print(" [>>>]Receive %r" % body)
channel.basic_consume(callback, queue='helloQueue', no_ack=True)
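# Note: the line above uses the legacy pika 0.x signature. With pika >= 1.0 the
# equivalent call would be, as an illustrative sketch:
#   channel.basic_consume(queue='helloQueue', on_message_callback=callback, auto_ack=True)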
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming() # fetch messages from the queue | 28.933333 | 81 | 0.737327 |
fcff7d83b5e8e4e771bdae36e9eedc1cba991e2a | 30,748 | py | Python | python/ray/cloudpickle/cloudpickle_fast.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 3 | 2020-12-03T17:48:45.000Z | 2022-01-22T08:09:46.000Z | python/ray/cloudpickle/cloudpickle_fast.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 6 | 2022-03-18T14:06:24.000Z | 2022-03-26T07:13:16.000Z | python/ray/cloudpickle/cloudpickle_fast.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 1 | 2020-12-12T13:59:22.000Z | 2020-12-12T13:59:22.000Z | """
New, fast version of the CloudPickler.
This new CloudPickler class can now extend the fast C Pickler instead of the
previous Python implementation of the Pickler class. Because this functionality
is only available for Python versions 3.8+, a lot of backward-compatibility
code is also removed.
Note that the C Pickler subclassing API is CPython-specific. Therefore, some
guards present in cloudpickle.py that were written to handle PyPy specificities
are not present in cloudpickle_fast.py
"""
import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _is_importable,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items,
)
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None, buffer_callback=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
).dump(obj)
def dumps(obj, protocol=None, buffer_callback=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
with io.BytesIO() as file:
cp = CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
)
cp.dump(obj)
return file.getvalue()
else:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(file, protocol=protocol).dump(obj)
def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
with io.BytesIO() as file:
cp = CloudPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue()
load, loads = pickle.load, pickle.loads
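# Illustrative round-trip, assuming this module is importable as `cloudpickle`
# (in Ray it is vendored as `ray.cloudpickle`): dynamically defined functions
# are serialized by value, so the payload can be restored by the stdlib pickle
# wherever cloudpickle is importable.
#   import cloudpickle, pickle
#   double = lambda x: 2 * x
#   payload = cloudpickle.dumps(double)
#   assert pickle.loads(payload)(21) == 42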
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------
def _class_getnewargs(obj):
type_kwargs = {}
if "__slots__" in obj.__dict__:
type_kwargs["__slots__"] = obj.__slots__
__dict__ = obj.__dict__.get('__dict__', None)
if isinstance(__dict__, property):
type_kwargs['__dict__'] = __dict__
return (type(obj), obj.__name__, _get_bases(obj), type_kwargs,
_get_or_create_tracker_id(obj), None)
def _enum_getnewargs(obj):
members = dict((e.name, e.value) for e in obj)
return (obj.__bases__, obj.__name__, obj.__qualname__, members,
obj.__module__, _get_or_create_tracker_id(obj), None)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
# - Put func's dynamic attributes (stored in func.__dict__) in state. These
# attributes will be restored at unpickling time using
# f.__dict__.update(state)
# - Put func's members into slotstate. Such attributes will be restored at
# unpickling time by iterating over slotstate and calling setattr(func,
# slotname, slotvalue)
slotstate = {
"__name__": func.__name__,
"__qualname__": func.__qualname__,
"__annotations__": func.__annotations__,
"__kwdefaults__": func.__kwdefaults__,
"__defaults__": func.__defaults__,
"__module__": func.__module__,
"__doc__": func.__doc__,
"__closure__": func.__closure__,
}
f_globals_ref = _extract_code_globals(func.__code__)
f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in
func.__globals__}
closure_values = (
list(map(_get_cell_contents, func.__closure__))
if func.__closure__ is not None else ()
)
# Extract currently-imported submodules used by func. Storing these modules
    # in a dummy _cloudpickle_submodules attribute of the object's state will
# trigger the side effect of importing these modules at unpickling time
# (which is necessary for func to work correctly once depickled)
slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
func.__code__, itertools.chain(f_globals.values(), closure_values))
slotstate["__globals__"] = f_globals
state = func.__dict__
return state, slotstate
def _class_getstate(obj):
clsdict = _extract_class_dict(obj)
clsdict.pop('__weakref__', None)
if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, don't pickle the
# cache/negative caches populated during isinstance/issubclass
# checks, but pickle the list of registered subclasses of obj.
clsdict.pop('_abc_cache', None)
clsdict.pop('_abc_negative_cache', None)
clsdict.pop('_abc_negative_cache_version', None)
registry = clsdict.pop('_abc_registry', None)
if registry is None:
# in Python3.7+, the abc caches and registered subclasses of a
# class are bundled into the single _abc_impl attribute
clsdict.pop('_abc_impl', None)
(registry, _, _, _) = abc._get_dump(obj)
clsdict["_abc_impl"] = [subclass_weakref()
for subclass_weakref in registry]
else:
# In the above if clause, registry is a set of weakrefs -- in
# this case, registry is a WeakSet
clsdict["_abc_impl"] = [type_ for type_ in registry]
if "__slots__" in clsdict:
# pickle string length optimization: member descriptors of obj are
# created automatically from obj's __slots__ attribute, no need to
# save them in obj's state
if isinstance(obj.__slots__, str):
clsdict.pop(obj.__slots__)
else:
for k in obj.__slots__:
clsdict.pop(k, None)
clsdict.pop('__dict__', None) # unpicklable property object
return (clsdict, {})
def _enum_getstate(obj):
clsdict, slotstate = _class_getstate(obj)
members = dict((e.name, e.value) for e in obj)
# Cleanup the clsdict that will be passed to _rehydrate_skeleton_class:
# Those attributes are already handled by the metaclass.
for attrname in ["_generate_next_value_", "_member_names_",
"_member_map_", "_member_type_",
"_value2member_map_"]:
clsdict.pop(attrname, None)
for member in members:
clsdict.pop(member)
# Special handling of Enum subclasses
return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exceptions objects, instances of the "object"
# class, etc), are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".
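# For instance (an illustrative sketch, not one of the reducers below), a
# reducer for complex numbers could be written as:
#   def _complex_reduce(obj):
#       return complex, (obj.real, obj.imag)
# i.e. "rebuild obj at unpickling time by calling complex(real, imag)".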
def _code_reduce(obj):
"""codeobject reducer"""
if hasattr(obj, "co_posonlyargcount"): # pragma: no branch
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
return types.CodeType, args
def _cell_reduce(obj):
"""Cell (containing values of a function's free variables) reducer"""
try:
obj.cell_contents
except ValueError: # cell is empty
return _make_empty_cell, ()
else:
return _make_cell, (obj.cell_contents, )
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
"""Save a file"""
import io
if not hasattr(obj, "name") or not hasattr(obj, "mode"):
raise pickle.PicklingError(
"Cannot pickle files that do not map to an actual file"
)
if obj is sys.stdout:
return getattr, (sys, "stdout")
if obj is sys.stderr:
return getattr, (sys, "stderr")
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, "isatty") and obj.isatty():
raise pickle.PicklingError(
"Cannot pickle files that map to tty objects"
)
if "r" not in obj.mode and "+" not in obj.mode:
raise pickle.PicklingError(
"Cannot pickle files that are not opened for reading: %s"
% obj.mode
)
name = obj.name
retval = io.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError as e:
raise pickle.PicklingError(
"Cannot pickle file %s as it cannot be read" % name
) from e
retval.write(contents)
retval.seek(curloc)
retval.name = name
return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
if _is_importable(obj):
return subimport, (obj.__name__,)
else:
obj.__dict__.pop('__builtins__', None)
return dynamic_subimport, (obj.__name__, vars(obj))
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
"""
Save a class that can't be stored as module global.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from global modules.
"""
if Enum is not None and issubclass(obj, Enum):
return (
_make_skeleton_enum, _enum_getnewargs(obj), _enum_getstate(obj),
None, None, _class_setstate
)
else:
return (
_make_skeleton_class, _class_getnewargs(obj), _class_getstate(obj),
None, None, _class_setstate
)
def _class_reduce(obj):
"""Select the reducer depending on the dynamic nature of the class obj"""
if obj is type(None): # noqa
return type, (None,)
elif obj is type(Ellipsis):
return type, (Ellipsis,)
elif obj is type(NotImplemented):
return type, (NotImplemented,)
elif obj in _BUILTIN_TYPE_NAMES:
return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
elif not _is_importable(obj):
return _dynamic_class_reduce(obj)
return NotImplemented
def _dict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj), )
def _dict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj), )
def _dict_items_reduce(obj):
return _make_dict_items, (dict(obj), )
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at pickling time.
def _function_setstate(obj, state):
"""Update the state of a dynaamic function.
As __closure__ and __globals__ are readonly attributes of a function, we
cannot rely on the native setstate routine of pickle.load_build, that calls
setattr on items of the slotstate. Instead, we have to modify them inplace.
"""
state, slotstate = state
obj.__dict__.update(state)
obj_globals = slotstate.pop("__globals__")
obj_closure = slotstate.pop("__closure__")
# _cloudpickle_subimports is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
slotstate.pop("_cloudpickle_submodules")
obj.__globals__.update(obj_globals)
obj.__globals__["__builtins__"] = __builtins__
if obj_closure is not None:
for i, cell in enumerate(obj_closure):
try:
value = cell.cell_contents
except ValueError: # cell is empty
continue
cell_set(obj.__closure__[i], value)
for k, v in slotstate.items():
setattr(obj, k, v)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(obj, attrname, attr)
if registry is not None:
for subclass in registry:
obj.register(subclass)
return obj
class CloudPickler(Pickler):
# set of reducers defined and used by cloudpickle (private)
_dispatch_table = {}
_dispatch_table[classmethod] = _classmethod_reduce
_dispatch_table[io.TextIOWrapper] = _file_reduce
_dispatch_table[logging.Logger] = _logger_reduce
_dispatch_table[logging.RootLogger] = _root_logger_reduce
_dispatch_table[memoryview] = _memoryview_reduce
_dispatch_table[property] = _property_reduce
_dispatch_table[staticmethod] = _classmethod_reduce
_dispatch_table[CellType] = _cell_reduce
_dispatch_table[types.CodeType] = _code_reduce
_dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
_dispatch_table[types.ModuleType] = _module_reduce
_dispatch_table[types.MethodType] = _method_reduce
_dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
_dispatch_table[weakref.WeakSet] = _weakset_reduce
_dispatch_table[typing.TypeVar] = _typevar_reduce
_dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
_dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
_dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
# function reducers are defined as instance methods of CloudPickler
# objects, as they rely on a CloudPickler attribute (globals_ref)
def _dynamic_function_reduce(self, func):
"""Reduce a function that is not pickleable via attribute lookup."""
newargs = self._function_getnewargs(func)
state = _function_getstate(func)
return (types.FunctionType, newargs, state, None, None,
_function_setstate)
def _function_reduce(self, obj):
"""Reducer for function objects.
If obj is a top-level attribute of a file-backed module, this
reducer returns NotImplemented, making the CloudPickler fallback to
traditional _pickle.Pickler routines to save obj. Otherwise, it reduces
obj using a custom cloudpickle reducer designed specifically to handle
dynamic functions.
        As opposed to cloudpickle.py, there is no special handling for builtin
pypy functions because cloudpickle_fast is CPython-specific.
"""
if _is_importable(obj):
return NotImplemented
else:
return self._dynamic_function_reduce(obj)
def _function_getnewargs(self, func):
code = func.__code__
# base_globals represents the future global namespace of func at
# unpickling time. Looking it up and storing it in
        # CloudPickler.globals_ref allows functions sharing the same globals
# at pickling time to also share them once unpickled, at one condition:
# since globals_ref is an attribute of a CloudPickler instance, and
# that a new CloudPickler is created each time pickle.dump or
# pickle.dumps is called, functions also need to be saved within the
# same invocation of cloudpickle.dump/cloudpickle.dumps (for example:
# cloudpickle.dumps([f1, f2])). There is no such limitation when using
# CloudPickler.dump, as long as the multiple invocations are bound to
# the same CloudPickler.
base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
if base_globals == {}:
# Add module attributes used to resolve relative imports
# instructions inside func.
for k in ["__package__", "__name__", "__path__", "__file__"]:
if k in func.__globals__:
base_globals[k] = func.__globals__[k]
# Do not bind the free variables before the function is created to
# avoid infinite recursion.
if func.__closure__ is None:
closure = None
else:
closure = tuple(
_make_empty_cell() for _ in range(len(code.co_freevars)))
return code, base_globals, None, None, closure
def dump(self, obj):
try:
return Pickler.dump(self, obj)
except RuntimeError as e:
if "recursion" in e.args[0]:
msg = (
"Could not pickle object as excessively deep recursion "
"required."
)
raise pickle.PicklingError(msg) from e
else:
raise
if pickle.HIGHEST_PROTOCOL >= 5:
# `CloudPickler.dispatch` is only left for backward compatibility - note
# that when using protocol 5, `CloudPickler.dispatch` is not an
# extension of `Pickler.dispatch` dictionary, because CloudPickler
# subclasses the C-implemented Pickler, which does not expose a
# `dispatch` attribute. Earlier versions of the protocol 5 CloudPickler
# used `CloudPickler.dispatch` as a class-level attribute storing all
# reducers implemented by cloudpickle, but the attribute name was not a
        # great choice given the meaning of `CloudPickler.dispatch` when
# `CloudPickler` extends the pure-python pickler.
dispatch = dispatch_table
# Implementation of the reducer_override callback, in order to
# efficiently serialize dynamic functions and classes by subclassing
# the C-implemented Pickler.
# TODO: decorrelate reducer_override (which is tied to CPython's
# implementation - would it make sense to backport it to pypy? - and
# pickle's protocol 5 which is implementation agnostic. Currently, the
# availability of both notions coincide on CPython's pickle and the
# pickle5 backport, but it may not be the case anymore when pypy
# implements protocol 5
def __init__(self, file, protocol=None, buffer_callback=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(
self, file, protocol=protocol, buffer_callback=buffer_callback
)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
self.proto = int(protocol)
def reducer_override(self, obj):
"""Type-agnostic reducing callback for function and classes.
For performance reasons, subclasses of the C _pickle.Pickler class
cannot register custom reducers for functions and classes in the
            dispatch_table. Reducers for such types must instead be implemented in
            the special reducer_override method.
            Note that this method will be called for any object except a few
builtin-types (int, lists, dicts etc.), which differs from reducers
in the Pickler's dispatch_table, each of them being invoked for
objects of a specific type only.
This property comes in handy for classes: although most classes are
instances of the ``type`` metaclass, some of them can be instances
of other custom metaclasses (such as enum.EnumMeta for example). In
particular, the metaclass will likely not be known in advance, and
thus cannot be special-cased using an entry in the dispatch_table.
reducer_override, among other things, allows us to register a
reducer that will be called for any class, independently of its
type.
Notes:
* reducer_override has the priority over dispatch_table-registered
reducers.
* reducer_override can be used to fix other limitations of
cloudpickle for other types that suffered from type-specific
reducers, such as Exceptions. See
https://github.com/cloudpipe/cloudpickle/issues/248
"""
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
try:
return (
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj)
)
except Exception:
# TODO(suquark): Cloudpickle would misclassify pydantic classes into type hints.
# We temporarily ignores the exceptions here.
pass
t = type(obj)
try:
is_anyclass = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
is_anyclass = False
if is_anyclass:
return _class_reduce(obj)
elif isinstance(obj, types.FunctionType):
return self._function_reduce(obj)
else:
# fallback to save_global, including the Pickler's
            # dispatch_table
return NotImplemented
else:
# When reducer_override is not available, hack the pure-Python
# Pickler's types.FunctionType and type savers. Note: the type saver
# must override Pickler.save_global, because pickle.py contains a
# hard-coded call to save_global when pickling meta-classes.
dispatch = Pickler.dispatch.copy()
def __init__(self, file, protocol=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(self, file, protocol=protocol)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
assert hasattr(self, 'proto')
def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
dictitems=None, state_setter=None, obj=None):
save = self.save
write = self.write
self.save_reduce(
func, args, state=None, listitems=listitems,
dictitems=dictitems, obj=obj
)
# backport of the Python 3.8 state_setter pickle operations
save(state_setter)
save(obj) # simple BINGET opcode as obj is already memoized.
save(state)
write(pickle.TUPLE2)
# Trigger a state_setter(obj, state) function call.
write(pickle.REDUCE)
# The purpose of state_setter is to carry-out an
# inplace modification of obj. We do not care about what the
# method might return, so its output is eventually removed from
# the stack.
write(pickle.POP)
def save_global(self, obj, name=None, pack=struct.pack):
"""
Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here.
"""
if obj is type(None): # noqa
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(Ellipsis):
return self.save_reduce(type, (Ellipsis,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(
_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
# Parametrized typing constructs in Python < 3.7 are not
# compatible with type checks and ``isinstance`` semantics. For
# this reason, it is easier to detect them using a
# duck-typing-based check (``_is_parametrized_type_hint``) than
# to populate the Pickler's dispatch with type-specific savers.
self.save_reduce(
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj),
obj=obj
)
elif name is not None:
Pickler.save_global(self, obj, name=name)
elif not _is_importable(obj, name=name):
self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
else:
Pickler.save_global(self, obj, name=name)
dispatch[type] = save_global
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
if _is_importable(obj, name=name):
return Pickler.save_global(self, obj, name=name)
elif PYPY and isinstance(obj.__code__, builtin_code_type):
return self.save_pypy_builtin_func(obj)
else:
return self._save_reduce_pickle5(
*self._dynamic_function_reduce(obj), obj=obj
)
def save_pypy_builtin_func(self, obj):
"""Save pypy equivalent of builtin functions.
PyPy does not have the concept of builtin-functions. Instead,
builtin-functions are simple function instances, but with a
builtin-code attribute.
Most of the time, builtin functions should be pickled by attribute.
But PyPy has flaky support for __qualname__, so some builtin
functions such as float.__new__ will be classified as dynamic. For
this reason only, we created this special routine. Because
builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared to the one already implemented
in pickle) to protect ourselves from reference cycles. A simple
(reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
also that PyPy improved their support for __qualname__ in v3.6, so
            this routine should be removed when cloudpickle supports only PyPy
3.6 and later.
"""
rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
obj.__defaults__, obj.__closure__),
obj.__dict__)
self.save_reduce(*rv, obj=obj)
dispatch[types.FunctionType] = save_function
| 39.623711 | 110 | 0.654254 |
5a3e0f6ada91e6fa630047a427a61d6cfa78d4ab | 5,423 | py | Python | fhirpy/base/utils.py | motey/fhir-py | b1bf0784f6ede2b12b8c34f7b863e498fd27bf34 | [
"MIT"
] | null | null | null | fhirpy/base/utils.py | motey/fhir-py | b1bf0784f6ede2b12b8c34f7b863e498fd27bf34 | [
"MIT"
] | null | null | null | fhirpy/base/utils.py | motey/fhir-py | b1bf0784f6ede2b12b8c34f7b863e498fd27bf34 | [
"MIT"
] | 1 | 2020-03-19T10:48:59.000Z | 2020-03-19T10:48:59.000Z | import reprlib
from urllib.parse import urlencode, quote, parse_qs, urlparse
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def get_by_path(self, path, default=None):
keys = parse_path(path)
return get_by_path(self, keys, default)
class SearchList(list):
def get_by_path(self, path, default=None):
keys = parse_path(path)
return get_by_path(self, keys, default)
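# Illustrative example (not from the original module): both helpers accept
# dotted paths over nested data, e.g.
#   AttrDict({'name': [{'given': ['John']}]}).get_by_path('name.0.given.0')
# returns 'John', and the provided default is returned when the path is missing.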
def chunks(l, n):
"""
Yield successive n-sized chunks from l
>>> list(chunks([1, 2, 3, 4], 2))
[[1, 2], [3, 4]]
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def unique_everseen(seq):
"""
>>> unique_everseen(['1', '2', '3', '1', '2'])
['1', '2', '3']
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def encode_params(params):
"""
>>> encode_params({'status:not': ['active', 'entered-in-error']})
'status:not=active&status:not=entered-in-error'
>>> encode_params({'status': ['active,waitlist']})
'status=active,waitlist'
>>> encode_params({'status': 'active,waitlist'})
'status=active,waitlist'
>>> encode_params({'_format': ['json', 'json']})
'_format=json'
>>> encode_params(None)
''
"""
params = params or {}
return urlencode(
{
k: unique_everseen(v) if isinstance(v, list) else [v]
for k, v in params.items()
},
doseq=True,
safe=':,',
quote_via=quote
)
def parse_pagination_url(url):
"""
Parses Bundle.link pagination url and returns path and params
>>> parse_pagination_url('/Patient?_count=100&name=ivan&name=petrov')
('/Patient', {'_count': ['100'], 'name': ['ivan', 'petrov']})
"""
parsed = urlparse(url)
params = parse_qs(parsed.query)
path = parsed.path
return path, params
def convert_values(data, fn):
"""
Recursively converts data values with `fn`
which must return tuple of (converted data, stop flag).
Conversion will be stopped for this branch if stop flag is True
>>> convert_values({}, lambda x: (x, False))
{}
>>> convert_values([], lambda x: (x, False))
[]
>>> convert_values('str', lambda x: (x, False))
'str'
>>> convert_values(
... [{'key1': [1, 2]}, {'key2': [3, 4]}],
... lambda x: (x + 1, False) if isinstance(x, int) else (x, False))
[{'key1': [2, 3]}, {'key2': [4, 5]}]
>>> convert_values(
... [{'replaceable': True}, {'replaceable': False}],
... lambda x: ('replaced', False)
... if isinstance(x, dict) and x.get('replaceable', False)
... else (x, False))
['replaced', {'replaceable': False}]
"""
data, stop = fn(data)
if stop:
return data
if isinstance(data, list):
return SearchList(convert_values(x, fn) for x in data)
if isinstance(data, dict):
return AttrDict(
{key: convert_values(value, fn)
for key, value in data.items()}
)
return data
def parse_path(path):
"""
>>> parse_path(['path', 'to', 0, 'element'])
['path', 'to', 0, 'element']
>>> parse_path('path.to.0.element')
['path', 'to', 0, 'element']
"""
if isinstance(path, str):
return [int(key) if key.isdigit() else key for key in path.split('.')]
elif isinstance(path, list):
return path
else: # pragma: no cover
        raise TypeError('Path must be either a dotted string or a list')
def get_by_path(data, path, default=None):
"""
>>> get_by_path({'key': 'value'}, ['key'])
'value'
>>> get_by_path({'key': [{'nkey': 'nvalue'}]}, ['key', 0, 'nkey'])
'nvalue'
>>> get_by_path({
... 'key': [
... {'test': 'test0', 'nkey': 'zero'},
... {'test': 'test1', 'nkey': 'one'}
... ]
... }, ['key', {'test': 'test1'}, 'nkey'])
'one'
>>> get_by_path({'a': 1}, ['b'], 0)
0
>>> get_by_path({'a': {'b': None}}, ['a', 'b'], 0) is None
True
>>> get_by_path({'a': {'b': None}}, ['a', 'b', 'c'], 0)
0
"""
assert isinstance(path, list), 'Path must be a list'
rv = data
try:
for key in path:
if rv is None:
return default
if isinstance(rv, list):
if isinstance(key, int):
rv = rv[key]
elif isinstance(key, dict):
matched_index = -1
for index, item in enumerate(rv):
if all(
[item.get(k, None) == v for k, v in key.items()]
):
matched_index = index
break
if matched_index == -1:
rv = None
else:
rv = rv[matched_index]
else: # pragma: no cover
raise TypeError(
'Can not lookup by {0} in list. '
'Possible lookups are by int or by dict.'.format(
reprlib.repr(key)
)
)
else:
rv = rv[key]
return rv
except (IndexError, KeyError):
return default
| 26.453659 | 78 | 0.501014 |
b8547e44652af13db717e5e07a779937ba2e7188 | 5,520 | py | Python | pyannote/audio/cli/train.py | matakanobu/pyannote-audio | 7699a730f83240d0e8e04d0a626eedd9e3f623fe | [
"MIT"
] | null | null | null | pyannote/audio/cli/train.py | matakanobu/pyannote-audio | 7699a730f83240d0e8e04d0a626eedd9e3f623fe | [
"MIT"
] | null | null | null | pyannote/audio/cli/train.py | matakanobu/pyannote-audio | 7699a730f83240d0e8e04d0a626eedd9e3f623fe | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020-2022 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from types import MethodType
from typing import Optional
import hydra
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.callbacks import (
EarlyStopping,
LearningRateMonitor,
ModelCheckpoint,
RichProgressBar,
)
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.seed import seed_everything
from torch_audiomentations.utils.config import from_dict as get_augmentation
# from pyannote.audio.core.callback import GraduallyUnfreeze
from pyannote.database import FileFinder, get_protocol
@hydra.main(config_path="train_config", config_name="config")
def train(cfg: DictConfig) -> Optional[float]:
# make sure to set the random seed before the instantiation of Trainer
# so that each model initializes with the same weights when using DDP.
seed = int(os.environ.get("PL_GLOBAL_SEED", "0"))
seed_everything(seed=seed)
# instantiate training protocol with optional preprocessors
preprocessors = {"audio": FileFinder()}
if "preprocessor" in cfg:
preprocessor = instantiate(cfg.preprocessor)
preprocessors[preprocessor.preprocessed_key] = preprocessor
protocol = get_protocol(cfg.protocol, preprocessors=preprocessors)
# instantiate data augmentation
augmentation = (
get_augmentation(OmegaConf.to_container(cfg.augmentation))
if "augmentation" in cfg
else None
)
# instantiate task and validation metric
task = instantiate(cfg.task, protocol, augmentation=augmentation)
# validation metric to monitor (and its direction: min or max)
monitor, direction = task.val_monitor
# instantiate model
fine_tuning = cfg.model["_target_"] == "pyannote.audio.cli.pretrained"
model = instantiate(cfg.model)
model.task = task
model.setup(stage="fit")
# number of batches in one epoch
num_batches_per_epoch = model.task.train__len__() // model.task.batch_size
# configure optimizer and scheduler
def configure_optimizers(self):
optimizer = instantiate(cfg.optimizer, self.parameters())
lr_scheduler = instantiate(
cfg.scheduler,
optimizer,
monitor=monitor,
direction=direction,
num_batches_per_epoch=num_batches_per_epoch,
)
return {"optimizer": optimizer, "lr_scheduler": lr_scheduler}
model.configure_optimizers = MethodType(configure_optimizers, model)
callbacks = [RichProgressBar(), LearningRateMonitor()]
if fine_tuning:
# TODO: configure layer freezing
# TODO: for fine-tuning and/or transfer learning, we start by fitting
# TODO: task-dependent layers and gradully unfreeze more layers
# TODO: callbacks.append(GraduallyUnfreeze(epochs_per_stage=1))
pass
checkpoint = ModelCheckpoint(
monitor=monitor,
mode=direction,
save_top_k=None if monitor is None else 5,
every_n_epochs=1,
save_last=True,
save_weights_only=False,
dirpath=".",
filename="{epoch}" if monitor is None else f"{{epoch}}-{{{monitor}:.6f}}",
verbose=False,
)
callbacks.append(checkpoint)
if monitor is not None:
early_stopping = EarlyStopping(
monitor=monitor,
mode=direction,
min_delta=0.0,
patience=cfg.scheduler.patience * 2,
strict=True,
verbose=False,
)
callbacks.append(early_stopping)
# instantiate logger
logger = TensorBoardLogger(".", name="", version="", log_graph=False)
# instantiate trainer
trainer = instantiate(cfg.trainer, callbacks=callbacks, logger=logger)
# in case of fine-tuning, validate the initial model to make sure
# that we actually improve over the initial performance
if fine_tuning:
model.setup(stage="fit")
trainer.validate(model)
# train the model
trainer.fit(model)
# save paths to best models
checkpoint.to_yaml()
# return the best validation score
# this can be used for hyper-parameter optimization with Hydra sweepers
if monitor is not None:
best_monitor = float(checkpoint.best_model_score)
if direction == "min":
return best_monitor
else:
return -best_monitor
if __name__ == "__main__":
train()
| 34.716981 | 82 | 0.70779 |
5eee77e8e3617f89ea8f5b127fd72b7a39522cae | 403 | py | Python | board_project/asgi.py | OkuboAtsushi/board-project | c73beab6ad9525f1fe31d8e9b987476e4b45fd18 | [
"MIT"
] | null | null | null | board_project/asgi.py | OkuboAtsushi/board-project | c73beab6ad9525f1fe31d8e9b987476e4b45fd18 | [
"MIT"
] | null | null | null | board_project/asgi.py | OkuboAtsushi/board-project | c73beab6ad9525f1fe31d8e9b987476e4b45fd18 | [
"MIT"
] | null | null | null | """
ASGI config for board_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'board_project.settings')
application = get_asgi_application()
| 23.705882 | 78 | 0.791563 |
40e057650fc537e4efa8e47c37f7aab9c37aec5c | 1,853 | py | Python | src/probflow/callbacks/learning_rate_scheduler.py | chiragnagpal/probflow | 1ba0619cd4f482a015cd25633d2f113d5d0f3476 | [
"MIT"
] | 134 | 2019-02-18T09:45:35.000Z | 2022-03-26T22:17:34.000Z | src/probflow/callbacks/learning_rate_scheduler.py | chiragnagpal/probflow | 1ba0619cd4f482a015cd25633d2f113d5d0f3476 | [
"MIT"
] | 44 | 2019-04-18T17:41:33.000Z | 2021-09-14T00:40:55.000Z | src/probflow/callbacks/learning_rate_scheduler.py | chiragnagpal/probflow | 1ba0619cd4f482a015cd25633d2f113d5d0f3476 | [
"MIT"
] | 18 | 2019-10-17T05:45:16.000Z | 2022-03-03T11:58:02.000Z | import matplotlib.pyplot as plt
from .callback import Callback
class LearningRateScheduler(Callback):
"""Set the learning rate as a function of the current epoch
Parameters
----------
fn : callable
Function which takes the current epoch as an argument and returns a
learning rate.
verbose : bool
Whether to print the learning rate each epoch (if True) or not (if
False). Default = False
Examples
--------
    See the user guide section on :ref:`user-guide-lr-scheduler`.
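    A minimal usage sketch (``model``, ``x`` and ``y`` are hypothetical
    placeholders for a probflow Model and its training data)::

        lr_scheduler = LearningRateScheduler(lambda epoch: 0.1 * 0.95 ** epoch)
        model.fit(x, y, callbacks=[lr_scheduler])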
"""
def __init__(self, fn, verbose: bool = False):
# Check type
if not callable(fn):
raise TypeError("fn must be a callable")
if not isinstance(fn(1), float):
raise TypeError("fn must return a float given an epoch number")
# Store function
self.fn = fn
self.verbose = verbose
self.current_epoch = 0
self.current_lr = 0
self.epochs = []
self.learning_rate = []
def on_epoch_start(self):
"""Set the learning rate at the start of each epoch."""
self.current_epoch += 1
self.current_lr = self.fn(self.current_epoch)
self.model.set_learning_rate(self.current_lr)
self.epochs += [self.current_epoch]
self.learning_rate += [self.current_lr]
if self.verbose:
print(
f"Epoch {self.current_epoch} - learning rate {self.current_lr}"
)
def plot(self, **kwargs):
"""Plot the learning rate as a function of epoch
Parameters
----------
**kwargs
Additional keyword arguments are passed to matplotlib.pyplot.plot
"""
plt.plot(self.epochs, self.learning_rate, **kwargs)
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
| 28.075758 | 79 | 0.59633 |
172e2d03b7869e0a51dcd9f034b9f056230716de | 21,741 | py | Python | tests/test_services/test_set_2457.py | yangyimincn/ucloud-sdk-python3 | 9732d67f32ec5f46467458ba655c44c193a6bbff | [
"Apache-2.0"
] | 37 | 2019-06-19T09:41:34.000Z | 2022-02-18T08:06:00.000Z | tests/test_services/test_set_2457.py | yangyimincn/ucloud-sdk-python3 | 9732d67f32ec5f46467458ba655c44c193a6bbff | [
"Apache-2.0"
] | 90 | 2019-08-09T09:27:33.000Z | 2022-03-30T15:54:55.000Z | tests/test_services/test_set_2457.py | yangyimincn/ucloud-sdk-python3 | 9732d67f32ec5f46467458ba655c44c193a6bbff | [
"Apache-2.0"
] | 19 | 2019-06-13T02:46:01.000Z | 2021-11-01T07:22:18.000Z | """ Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(2457)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
def test_set_2457(client: utest.Client, variables: dict):
scenario.initial(variables)
scenario.run(client)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateVPC",
)
def create_vpc_00(client: utest.Client, variables: dict):
d = {
"Region": variables.get("Region"),
"Network": ["192.168.0.0/16"],
"Name": "ulb-ssl-vpc",
}
try:
resp = client.vpc().create_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
variables["vpc_id"] = utest.value_at_path(resp, "VPCId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSubnet",
)
def create_subnet_01(client: utest.Client, variables: dict):
d = {
"VPCId": variables.get("vpc_id"),
"SubnetName": "ulb-ssl-subnet",
"Subnet": "192.168.111.0",
"Region": variables.get("Region"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["subnet_id"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateULB",
)
def create_ulb_02(client: utest.Client, variables: dict):
d = {
"VPCId": variables.get("vpc_id"),
"ULBName": "ulb-ssl-test",
"Tag": "Default",
"SubnetId": variables.get("subnet_id"),
"Region": variables.get("Region"),
"InnerMode": "No",
"ChargeType": "Dynamic",
}
try:
resp = client.ulb().create_ulb(d)
except exc.RetCodeException as e:
resp = e.json()
variables["ULBId"] = utest.value_at_path(resp, "ULBId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=30,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateVServer",
)
def create_vserver_03(client: utest.Client, variables: dict):
d = {
"VServerName": "vserver-test",
"ULBId": variables.get("ULBId"),
"Region": variables.get("Region"),
"Protocol": "HTTPS",
"PersistenceType": "UserDefined",
"PersistenceInfo": "huangchao",
"Method": "Roundrobin",
"ListenType": "RequestProxy",
"FrontendPort": 443,
"ClientTimeout": 60,
}
try:
resp = client.ulb().create_vserver(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VServerId"] = utest.value_at_path(resp, "VServerId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSSL",
)
def create_ssl_04(client: utest.Client, variables: dict):
d = {
"UserCert": "-----BEGIN CERTIFICATE-----\nMIIFzTCCBLWgAwIBAgIQQ8IswmAhEIKfNhrKqb0F3DANBgkqhkiG9w0BAQsFADCB\nlzELMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs\nIEluYy4xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxHTAbBgNVBAsT\nFERvbWFpbiBWYWxpZGF0ZWQgU1NMMSEwHwYDVQQDExhUcnVzdEFzaWEgRFYgU1NM\nIENBIC0gRzUwHhcNMTYxMjA2MDAwMDAwWhcNMTcxMjA2MjM1OTU5WjAgMR4wHAYD\nVQQDDBVtLmVjb2xvZ3ktZW1vYmlsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB\nDwAwggEKAoIBAQDxBsuwGdCZdEUs40SQcvUt+9hlmLTgcfkq/h9f1QVPxLq/PC+O\nsG76hOgy6N8f7k7x5XgtPKi9O4ydFl8ViYhEXRjYQcUrTm3lu7s9UT2AIUmK0dI+\nPZgFU5gDwh8fQLoL24T2lPfkD9TngCnDanfo3xbx/e9hsJkf7hKWix8zrxtYYCUT\nt96pTpQeWjr7ggl2bDEfTayJNM+i5xoGBPiQFdxPnKWCjNmXi2dws0d2whi1euRW\ngI5wIXji5WKfUf6EvzG0Uzz6i8vsSLGv8pL7C0AuUI4MrPNDesFeA2LEYclQkpHE\nE49BkpQvCokCW9d8/r5ASUry+7SrJIncU6FxAgMBAAGjggKJMIIChTAgBgNVHREE\nGTAXghVtLmVjb2xvZ3ktZW1vYmlsZS5jb20wCQYDVR0TBAIwADBhBgNVHSAEWjBY\nMFYGBmeBDAECATBMMCMGCCsGAQUFBwIBFhdodHRwczovL2Quc3ltY2IuY29tL2Nw\nczAlBggrBgEFBQcCAjAZDBdodHRwczovL2Quc3ltY2IuY29tL3JwYTAfBgNVHSME\nGDAWgBRtWMd/GufhPy6mjJc1Qrv00zisPzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0l\nBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMIGbBggrBgEFBQcBAQSBjjCBizA8Bggr\nBgEFBQcwAYYwaHR0cDovL3RydXN0YXNpYTItb2NzcC5kaWdpdGFsY2VydHZhbGlk\nYXRpb24uY29tMEsGCCsGAQUFBzAChj9odHRwOi8vdHJ1c3Rhc2lhMi1haWEuZGln\naXRhbGNlcnR2YWxpZGF0aW9uLmNvbS90cnVzdGFzaWFnNS5jcnQwggEDBgorBgEE\nAdZ5AgQCBIH0BIHxAO8AdQDd6x0reg1PpiCLga2BaHB+Lo6dAdVciI09EcTNtuy+\nzAAAAVjT7zdSAAAEAwBGMEQCIDCzWufc1q7hjmrrCetGyoA8EsEqpRSIhmZXStX5\n8b7zAiA6x5aAaDK+yMyeAgw71yi3tRVrWayHN+W0+4BxC8u5UQB2AO5Lvbd1zmC6\n4UJpH6vhnmajD35fsHLYgwDEe4l6qP3LAAABWNPvN4kAAAQDAEcwRQIgZ/LNgg7n\n7AE4O2yZkrXNcqAOmJ3NU2nT6zcnBxPFTTsCIQCjyPbMfWMZTD3kxgxPQ1COw5zJ\nsM0dfNmSr3MiU7EhqDANBgkqhkiG9w0BAQsFAAOCAQEAeyfgUhg9ZWVCaz0f+BQU\n6fMMfmQ1BDzvVFu+ORoAqyJQogxwIdfjrlz/63YFee5qpUsW/aaz4ma3bb4dpE1K\nGsgYe5N3o0xybYlOj+KB61sufYkzQS3HgDevCwjfUlGEbNl4dpO2xh5s5AANXlnz\ns/X0+AJ33/bm+fWIjAbIjluaEoM6GETHTXi4Tlxy0j3nsXsB9tIIUibAdTtButef\nJJRnikGRN+eHjrsLYe0RUmdKOQz1ik6teHt0MQX0aCe8OlXeyGDd9m8u7+y0nAnH\nTVaNuT7vXMWyyXLVUcV898wkBo3Bo3hUiaw0QR0ttgDrf5ZwqPfqpytRW2K5GMZT\nuw==\n-----END CERTIFICATE-----\n\n\n-----BEGIN 
CERTIFICATE-----\nMIIFZTCCBE2gAwIBAgIQOhAOfxCeGsWcxf/2QNXkQjANBgkqhkiG9w0BAQsFADCB\nyjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL\nExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp\nU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW\nZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0\naG9yaXR5IC0gRzUwHhcNMTYwODExMDAwMDAwWhcNMjYwODEwMjM1OTU5WjCBlzEL\nMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dpZXMsIElu\nYy4xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxHTAbBgNVBAsTFERv\nbWFpbiBWYWxpZGF0ZWQgU1NMMSEwHwYDVQQDExhUcnVzdEFzaWEgRFYgU1NMIENB\nIC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39aSJZG/97x3a\n6Qmuc9+MubagegRAVUmFYHTYTs8IKB2pM7wXN7W8mekdZaEgUjDFxvRBK/DhTb7U\n8ONLsKKdT86aOhzbz2noCTn9wPWnGwkg+/4YKg/dPQQdV9tMsSu0cwqInWHxSAkm\nAI1hYFC9D7Sf7Hp/5cRcD+dK454YMRzNOGLQnCVI8JEqrz6o9SOvQNTqTcfqt6DC\n0UlXG+MPD1eNPjlzf1Vwaab+VSTgySoC+Ikbq2VsdykeOiGXW/OIiASH7+2LcR05\nPmQ7GEOlM8yzoVojFpM8sHz+WxI05ZOPri5+vX3HhHHjWr5432G0dVmgohnZvlVZ\noy8XrlbpAgMBAAGjggF2MIIBcjASBgNVHRMBAf8ECDAGAQH/AgEAMC8GA1UdHwQo\nMCYwJKAioCCGHmh0dHA6Ly9zLnN5bWNiLmNvbS9wY2EzLWc1LmNybDAOBgNVHQ8B\nAf8EBAMCAQYwLgYIKwYBBQUHAQEEIjAgMB4GCCsGAQUFBzABhhJodHRwOi8vcy5z\neW1jZC5jb20wYQYDVR0gBFowWDBWBgZngQwBAgEwTDAjBggrBgEFBQcCARYXaHR0\ncHM6Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYBBQUHAgIwGRoXaHR0cHM6Ly9kLnN5\nbWNiLmNvbS9ycGEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMCkGA1Ud\nEQQiMCCkHjAcMRowGAYDVQQDExFTeW1hbnRlY1BLSS0yLTYwMTAdBgNVHQ4EFgQU\nbVjHfxrn4T8upoyXNUK79NM4rD8wHwYDVR0jBBgwFoAUf9Nlp8Ld7LvwMAnzQzn6\nAq8zMTMwDQYJKoZIhvcNAQELBQADggEBABUphhBbeG7scE3EveIN0dOjXPgwgQi8\nI2ZAKYm6DawoGz1lEJVdvFmkyMbP973X80b7mKmn0nNbe1kjA4M0O0hHaMM1ZaEv\n7e9vHEAoGyysMO6HzPWYMkyNxcCV7Nos2Uv4RvLDpQHh7P4Kt6fUU13ipcynrtQD\n1lFUM0yoTzwwFsPu3Pk+94hL58ErqwqJQwxoHMgLIQeMVHeNKcWFy1bddSbIbCWU\nZs6cMxhrra062ZCpDCbxyEaFNGAtYQMqNz55Z/14XgSUONZ/cJTns6QKhpcgTOwB\nfnNzRnk+aWreP7osKhXlz4zs+llP7goBDKFOMMtoEXx3YjJCKgpqmBU=\n-----END CERTIFICATE-----",
"SSLName": "证书-1",
"Region": variables.get("Region"),
"PrivateKey": "abc",
"CaCert": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEA8QbLsBnQmXRFLONEkHL1LfvYZZi04HH5Kv4fX9UFT8S6vzwv\njrBu+oToMujfH+5O8eV4LTyovTuMnRZfFYmIRF0Y2EHFK05t5bu7PVE9gCFJitHS\nPj2YBVOYA8IfH0C6C9uE9pT35A/U54Apw2p36N8W8f3vYbCZH+4SlosfM68bWGAl\nE7feqU6UHlo6+4IJdmwxH02siTTPoucaBgT4kBXcT5ylgozZl4tncLNHdsIYtXrk\nVoCOcCF44uVin1H+hL8xtFM8+ovL7Eixr/KS+wtALlCODKzzQ3rBXgNixGHJUJKR\nxBOPQZKULwqJAlvXfP6+QElK8vu0qySJ3FOhcQIDAQABAoIBAAPvZnfzk/JNcauv\n8jihh9s+V2QhQCLB+Z14FK8N3U5WGe5xXx1nSAiTDu912d69l1BfvLyQVvjv9fXC\nnb7ORglHs9YkDMIOP8EWdZIkt2pWIMtBbbtSah78JGk7TCLIfcEfzmXwPLPehk1Z\nTFVCcb69lbRRvwzLQ1TAIFGQ5+uCEkW02KAl6kx+JnVpsE8/BjqZKG1Ne+sM6dOC\nGRd44hgiNHKUT3Xtbw6jttiUFDLKYMYtb7PpRAkZFM8tgnBV6dWWJ3xTYW9kOjPh\nXnScNARfphUZVibRhA04og5p1q/MUz9Sz9g2DURuSlo/MP3WZMbVRvZiUN1xhz5v\n2WhsddkCgYEA+gWPFo0TbVbZXUrx9J/ptI9NXNx5zjyUrv87MDt1pnmMDgWrsCEI\nRqQR4Lp2G11GA7IudiA/ipcZqgcRIIFvb+gu1kObox3BGGs59x+DqFeAPXt6dFG2\nW10f9k96/tcbdursurqwd3Zv3cqQqRTKgaP4xHFmexlcwGCF5YwewWMCgYEA9sos\n2acNINXwcNRUPnpg82DOrG9Zjr1aiNo9PDJmwGEdC9QMOUWM85dq0M9g388ttiLU\nWr/U4r5yDuqWJPcKtff2BaxSsZpcQ4Id9eddD9L+sxaBGyD23RtOC+IOlkG6WS4g\niUYulQvW69tBHWiwxQu7YMSIE2B3EuySPOQYlBsCgYEAxNwvqB/4lfT2PUDPdj+b\ncnILBf0LY1nL8GZCol2O6z91CW1pm8rGi2iQMxRd/nnYsPxRHO2TWnpS2M+rqp5/\nsettRYQCPdMlwSZcg7oqnhgXf1GEP6Y/IX0Xt4cpXxLcKywarYRlggqdVlMyyA74\nzE7hhzuK5442u7rEctN7O+UCgYAoM78ipafp1XAZsT0YAG+Stg504J7CNe5tpL+c\n8sjyRd+pcZ2cJsxTUjNAWMf7LZDQvtPBBMb1OPjznRtgYi4IfqBBRFUkQXUOOkAP\nMuViEokTO3NErBYK5svL+8NMjuCAbpc2RYyJEyiru0fcNpW1Q7f+h4VzQp+jIY6h\nBLdMSQKBgGauU7OQksZCEY2MVAcD5dShYYvWLxOkj4dVVwISN1M6ImCAHwXZ6Nak\n6YlzCGT+NbRJbB2cPfsrKXtAJVX15I3iDCKAoGkb+9kiHnPj7Q71KVuWQE6BQx7E\nvE88TSsshwtX1s+qU9UWUrMPodK32q5nO3p8N033NvS9wLNfbcdc\n-----END RSA PRIVATE KEY-----",
}
try:
resp = client.ulb().create_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SSLId_01"] = utest.value_at_path(resp, "SSLId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSSL",
)
def create_ssl_05(client: utest.Client, variables: dict):
d = {
"UserCert": "-----BEGIN CERTIFICATE-----\nMIIFzTCCBLWgAwIBAgIQQ8IswmAhEIKfNhrKqb0F3DANBgkqhkiG9w0BAQsFADCB\nlzELMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs\nIEluYy4xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxHTAbBgNVBAsT\nFERvbWFpbiBWYWxpZGF0ZWQgU1NMMSEwHwYDVQQDExhUcnVzdEFzaWEgRFYgU1NM\nIENBIC0gRzUwHhcNMTYxMjA2MDAwMDAwWhcNMTcxMjA2MjM1OTU5WjAgMR4wHAYD\nVQQDDBVtLmVjb2xvZ3ktZW1vYmlsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB\nDwAwggEKAoIBAQDxBsuwGdCZdEUs40SQcvUt+9hlmLTgcfkq/h9f1QVPxLq/PC+O\nsG76hOgy6N8f7k7x5XgtPKi9O4ydFl8ViYhEXRjYQcUrTm3lu7s9UT2AIUmK0dI+\nPZgFU5gDwh8fQLoL24T2lPfkD9TngCnDanfo3xbx/e9hsJkf7hKWix8zrxtYYCUT\nt96pTpQeWjr7ggl2bDEfTayJNM+i5xoGBPiQFdxPnKWCjNmXi2dws0d2whi1euRW\ngI5wIXji5WKfUf6EvzG0Uzz6i8vsSLGv8pL7C0AuUI4MrPNDesFeA2LEYclQkpHE\nE49BkpQvCokCW9d8/r5ASUry+7SrJIncU6FxAgMBAAGjggKJMIIChTAgBgNVHREE\nGTAXghVtLmVjb2xvZ3ktZW1vYmlsZS5jb20wCQYDVR0TBAIwADBhBgNVHSAEWjBY\nMFYGBmeBDAECATBMMCMGCCsGAQUFBwIBFhdodHRwczovL2Quc3ltY2IuY29tL2Nw\nczAlBggrBgEFBQcCAjAZDBdodHRwczovL2Quc3ltY2IuY29tL3JwYTAfBgNVHSME\nGDAWgBRtWMd/GufhPy6mjJc1Qrv00zisPzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0l\nBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMIGbBggrBgEFBQcBAQSBjjCBizA8Bggr\nBgEFBQcwAYYwaHR0cDovL3RydXN0YXNpYTItb2NzcC5kaWdpdGFsY2VydHZhbGlk\nYXRpb24uY29tMEsGCCsGAQUFBzAChj9odHRwOi8vdHJ1c3Rhc2lhMi1haWEuZGln\naXRhbGNlcnR2YWxpZGF0aW9uLmNvbS90cnVzdGFzaWFnNS5jcnQwggEDBgorBgEE\nAdZ5AgQCBIH0BIHxAO8AdQDd6x0reg1PpiCLga2BaHB+Lo6dAdVciI09EcTNtuy+\nzAAAAVjT7zdSAAAEAwBGMEQCIDCzWufc1q7hjmrrCetGyoA8EsEqpRSIhmZXStX5\n8b7zAiA6x5aAaDK+yMyeAgw71yi3tRVrWayHN+W0+4BxC8u5UQB2AO5Lvbd1zmC6\n4UJpH6vhnmajD35fsHLYgwDEe4l6qP3LAAABWNPvN4kAAAQDAEcwRQIgZ/LNgg7n\n7AE4O2yZkrXNcqAOmJ3NU2nT6zcnBxPFTTsCIQCjyPbMfWMZTD3kxgxPQ1COw5zJ\nsM0dfNmSr3MiU7EhqDANBgkqhkiG9w0BAQsFAAOCAQEAeyfgUhg9ZWVCaz0f+BQU\n6fMMfmQ1BDzvVFu+ORoAqyJQogxwIdfjrlz/63YFee5qpUsW/aaz4ma3bb4dpE1K\nGsgYe5N3o0xybYlOj+KB61sufYkzQS3HgDevCwjfUlGEbNl4dpO2xh5s5AANXlnz\ns/X0+AJ33/bm+fWIjAbIjluaEoM6GETHTXi4Tlxy0j3nsXsB9tIIUibAdTtButef\nJJRnikGRN+eHjrsLYe0RUmdKOQz1ik6teHt0MQX0aCe8OlXeyGDd9m8u7+y0nAnH\nTVaNuT7vXMWyyXLVUcV898wkBo3Bo3hUiaw0QR0ttgDrf5ZwqPfqpytRW2K5GMZT\nuw==\n-----END CERTIFICATE-----\n\n\n-----BEGIN 
CERTIFICATE-----\nMIIFZTCCBE2gAwIBAgIQOhAOfxCeGsWcxf/2QNXkQjANBgkqhkiG9w0BAQsFADCB\nyjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL\nExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp\nU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW\nZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0\naG9yaXR5IC0gRzUwHhcNMTYwODExMDAwMDAwWhcNMjYwODEwMjM1OTU5WjCBlzEL\nMAkGA1UEBhMCQ04xJTAjBgNVBAoTHFRydXN0QXNpYSBUZWNobm9sb2dpZXMsIElu\nYy4xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxHTAbBgNVBAsTFERv\nbWFpbiBWYWxpZGF0ZWQgU1NMMSEwHwYDVQQDExhUcnVzdEFzaWEgRFYgU1NMIENB\nIC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39aSJZG/97x3a\n6Qmuc9+MubagegRAVUmFYHTYTs8IKB2pM7wXN7W8mekdZaEgUjDFxvRBK/DhTb7U\n8ONLsKKdT86aOhzbz2noCTn9wPWnGwkg+/4YKg/dPQQdV9tMsSu0cwqInWHxSAkm\nAI1hYFC9D7Sf7Hp/5cRcD+dK454YMRzNOGLQnCVI8JEqrz6o9SOvQNTqTcfqt6DC\n0UlXG+MPD1eNPjlzf1Vwaab+VSTgySoC+Ikbq2VsdykeOiGXW/OIiASH7+2LcR05\nPmQ7GEOlM8yzoVojFpM8sHz+WxI05ZOPri5+vX3HhHHjWr5432G0dVmgohnZvlVZ\noy8XrlbpAgMBAAGjggF2MIIBcjASBgNVHRMBAf8ECDAGAQH/AgEAMC8GA1UdHwQo\nMCYwJKAioCCGHmh0dHA6Ly9zLnN5bWNiLmNvbS9wY2EzLWc1LmNybDAOBgNVHQ8B\nAf8EBAMCAQYwLgYIKwYBBQUHAQEEIjAgMB4GCCsGAQUFBzABhhJodHRwOi8vcy5z\neW1jZC5jb20wYQYDVR0gBFowWDBWBgZngQwBAgEwTDAjBggrBgEFBQcCARYXaHR0\ncHM6Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYBBQUHAgIwGRoXaHR0cHM6Ly9kLnN5\nbWNiLmNvbS9ycGEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMCkGA1Ud\nEQQiMCCkHjAcMRowGAYDVQQDExFTeW1hbnRlY1BLSS0yLTYwMTAdBgNVHQ4EFgQU\nbVjHfxrn4T8upoyXNUK79NM4rD8wHwYDVR0jBBgwFoAUf9Nlp8Ld7LvwMAnzQzn6\nAq8zMTMwDQYJKoZIhvcNAQELBQADggEBABUphhBbeG7scE3EveIN0dOjXPgwgQi8\nI2ZAKYm6DawoGz1lEJVdvFmkyMbP973X80b7mKmn0nNbe1kjA4M0O0hHaMM1ZaEv\n7e9vHEAoGyysMO6HzPWYMkyNxcCV7Nos2Uv4RvLDpQHh7P4Kt6fUU13ipcynrtQD\n1lFUM0yoTzwwFsPu3Pk+94hL58ErqwqJQwxoHMgLIQeMVHeNKcWFy1bddSbIbCWU\nZs6cMxhrra062ZCpDCbxyEaFNGAtYQMqNz55Z/14XgSUONZ/cJTns6QKhpcgTOwB\nfnNzRnk+aWreP7osKhXlz4zs+llP7goBDKFOMMtoEXx3YjJCKgpqmBU=\n-----END CERTIFICATE-----",
"SSLName": "证书-2",
"Region": variables.get("Region"),
"PrivateKey": "abc",
"CaCert": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEA8QbLsBnQmXRFLONEkHL1LfvYZZi04HH5Kv4fX9UFT8S6vzwv\njrBu+oToMujfH+5O8eV4LTyovTuMnRZfFYmIRF0Y2EHFK05t5bu7PVE9gCFJitHS\nPj2YBVOYA8IfH0C6C9uE9pT35A/U54Apw2p36N8W8f3vYbCZH+4SlosfM68bWGAl\nE7feqU6UHlo6+4IJdmwxH02siTTPoucaBgT4kBXcT5ylgozZl4tncLNHdsIYtXrk\nVoCOcCF44uVin1H+hL8xtFM8+ovL7Eixr/KS+wtALlCODKzzQ3rBXgNixGHJUJKR\nxBOPQZKULwqJAlvXfP6+QElK8vu0qySJ3FOhcQIDAQABAoIBAAPvZnfzk/JNcauv\n8jihh9s+V2QhQCLB+Z14FK8N3U5WGe5xXx1nSAiTDu912d69l1BfvLyQVvjv9fXC\nnb7ORglHs9YkDMIOP8EWdZIkt2pWIMtBbbtSah78JGk7TCLIfcEfzmXwPLPehk1Z\nTFVCcb69lbRRvwzLQ1TAIFGQ5+uCEkW02KAl6kx+JnVpsE8/BjqZKG1Ne+sM6dOC\nGRd44hgiNHKUT3Xtbw6jttiUFDLKYMYtb7PpRAkZFM8tgnBV6dWWJ3xTYW9kOjPh\nXnScNARfphUZVibRhA04og5p1q/MUz9Sz9g2DURuSlo/MP3WZMbVRvZiUN1xhz5v\n2WhsddkCgYEA+gWPFo0TbVbZXUrx9J/ptI9NXNx5zjyUrv87MDt1pnmMDgWrsCEI\nRqQR4Lp2G11GA7IudiA/ipcZqgcRIIFvb+gu1kObox3BGGs59x+DqFeAPXt6dFG2\nW10f9k96/tcbdursurqwd3Zv3cqQqRTKgaP4xHFmexlcwGCF5YwewWMCgYEA9sos\n2acNINXwcNRUPnpg82DOrG9Zjr1aiNo9PDJmwGEdC9QMOUWM85dq0M9g388ttiLU\nWr/U4r5yDuqWJPcKtff2BaxSsZpcQ4Id9eddD9L+sxaBGyD23RtOC+IOlkG6WS4g\niUYulQvW69tBHWiwxQu7YMSIE2B3EuySPOQYlBsCgYEAxNwvqB/4lfT2PUDPdj+b\ncnILBf0LY1nL8GZCol2O6z91CW1pm8rGi2iQMxRd/nnYsPxRHO2TWnpS2M+rqp5/\nsettRYQCPdMlwSZcg7oqnhgXf1GEP6Y/IX0Xt4cpXxLcKywarYRlggqdVlMyyA74\nzE7hhzuK5442u7rEctN7O+UCgYAoM78ipafp1XAZsT0YAG+Stg504J7CNe5tpL+c\n8sjyRd+pcZ2cJsxTUjNAWMf7LZDQvtPBBMb1OPjznRtgYi4IfqBBRFUkQXUOOkAP\nMuViEokTO3NErBYK5svL+8NMjuCAbpc2RYyJEyiru0fcNpW1Q7f+h4VzQp+jIY6h\nBLdMSQKBgGauU7OQksZCEY2MVAcD5dShYYvWLxOkj4dVVwISN1M6ImCAHwXZ6Nak\n6YlzCGT+NbRJbB2cPfsrKXtAJVX15I3iDCKAoGkb+9kiHnPj7Q71KVuWQE6BQx7E\nvE88TSsshwtX1s+qU9UWUrMPodK32q5nO3p8N033NvS9wLNfbcdc\n-----END RSA PRIVATE KEY-----",
}
try:
resp = client.ulb().create_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SSLId_02"] = utest.value_at_path(resp, "SSLId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "DataSet.0.SSLId", variables.get("SSLId_01")),
],
action="DescribeSSL",
)
def describe_ssl_06(client: utest.Client, variables: dict):
d = {"SSLId": variables.get("SSLId_01"), "Region": variables.get("Region")}
try:
resp = client.ulb().describe_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "DataSet.0.SSLId", variables.get("SSLId_02")),
],
action="DescribeSSL",
)
def describe_ssl_07(client: utest.Client, variables: dict):
d = {"SSLId": variables.get("SSLId_02"), "Region": variables.get("Region")}
try:
resp = client.ulb().describe_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="BindSSL",
)
def bind_ssl_08(client: utest.Client, variables: dict):
d = {
"VServerId": variables.get("VServerId"),
"ULBId": variables.get("ULBId"),
"SSLId": variables.get("SSLId_01"),
"Region": variables.get("Region"),
}
try:
resp = client.ulb().bind_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=30,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="UpdateSSLBinding",
)
def update_ssl_binding_09(client: utest.Client, variables: dict):
d = {
"VServerId": variables.get("VServerId"),
"ULBId": variables.get("ULBId"),
"Region": variables.get("Region"),
"OldSSLId": variables.get("SSLId_01"),
"NewSSLId": variables.get("SSLId_02"),
}
try:
resp = client.invoke("UpdateSSLBinding", d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=5,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="UnbindSSL",
)
def unbind_ssl_10(client: utest.Client, variables: dict):
d = {
"VServerId": variables.get("VServerId"),
"ULBId": variables.get("ULBId"),
"SSLId": variables.get("SSLId_02"),
"Region": variables.get("Region"),
}
try:
resp = client.ulb().unbind_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=5,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSSL",
)
def delete_ssl_11(client: utest.Client, variables: dict):
d = {
"VServerId": variables.get("VServerId"),
"ULBId": variables.get("ULBId"),
"SSLId": variables.get("SSLId_01"),
"Region": variables.get("Region"),
}
try:
resp = client.ulb().delete_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=5,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSSL",
)
def delete_ssl_12(client: utest.Client, variables: dict):
d = {"SSLId": variables.get("SSLId_02"), "Region": variables.get("Region")}
try:
resp = client.ulb().delete_ssl(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=5,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteVServer",
)
def delete_vserver_13(client: utest.Client, variables: dict):
d = {
"VServerId": variables.get("VServerId"),
"ULBId": variables.get("ULBId"),
"Region": variables.get("Region"),
}
try:
resp = client.ulb().delete_vserver(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteULB",
)
def delete_ulb_14(client: utest.Client, variables: dict):
d = {"ULBId": variables.get("ULBId"), "Region": variables.get("Region")}
try:
resp = client.ulb().delete_ulb(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=5,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_15(client: utest.Client, variables: dict):
d = {
"SubnetId": variables.get("subnet_id"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=10,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteVPC",
)
def delete_vpc_16(client: utest.Client, variables: dict):
d = {"VPCId": variables.get("vpc_id"), "Region": variables.get("Region")}
try:
resp = client.vpc().delete_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
| 51.397163 | 4,095 | 0.79449 |
29f360803bbe0577da71a6ed541a8d1aca0f5315 | 20,723 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
deprecated:
removed_in: "2.12"
why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
alternative: https://github.com/theforeman/foreman-ansible-modules
description:
- Allows the management of Katello resources inside your Foreman server.
author:
- Eric D Helms (@ehelms)
requirements:
- nailgun >= 0.28.0
- python >= 2.6
- datetime
options:
server_url:
description:
- URL of Foreman server.
required: true
username:
description:
- Username on Foreman server.
required: true
password:
description:
- Password for user accessing Foreman server.
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host).
choices:
- repository
- manifest
- repository_set
- sync_plan
- content_view
- lifecycle_environment
- activation_key
- product
required: true
action:
description:
      - Action associated with the entity resource to set or edit in dictionary format.
      - Possible actions in relation to entities.
- "sync (available when entity=product or entity=repository)"
- "publish (available when entity=content_view)"
- "promote (available when entity=content_view)"
choices:
- sync
- publish
- promote
required: false
params:
description:
      - Parameters associated with the entity resource and action, to set or edit in dictionary format.
      - Each choice may only be available with specific entities and actions.
      - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,...],...)."
- The action "None" means no action specified.
- Possible Params in relation to entity and action.
- "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
- "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
- "content ([manifest,None])"
- "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
- "basearch ([repository_set,None])"
      - "releasever ([repository_set,None])"
- "sync_date ([sync_plan,None])"
- "interval ([sync_plan,None])"
- "repositories ([content_view,None])"
- "from_environment ([content_view,promote])"
- "to_environment([content_view,promote])"
- "prior ([lifecycle_environment,None])"
- "content_view ([activation_key,None])"
- "lifecycle_environment ([activation_key,None])"
required: true
task_timeout:
description:
- The timeout in seconds to wait for the started Foreman action to finish.
      - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
default: 1000
required: false
verify_ssl:
description:
- verify the ssl/https connection (e.g for a valid certificate)
default: false
type: bool
required: false
'''
EXAMPLES = '''
---
# Simple Example:
- name: Create Product
katello:
username: admin
password: admin
server_url: https://fakeserver.com
entity: product
params:
name: Centos 7
delegate_to: localhost
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
katello:
username: admin
password: admin
server_url: https://fakeserver.com
entity: "{{ entity }}"
params: "{{ params }}"
delegate_to: localhost
# tasks.yml
---
- include: katello.yml
vars:
name: Create Dev Environment
entity: lifecycle_environment
params:
name: Dev
prior: Library
organization: Default Organization
- include: katello.yml
vars:
name: Create Centos Product
entity: product
params:
name: Centos 7
organization: Default Organization
- include: katello.yml
vars:
name: Create 7.2 Repository
entity: repository
params:
name: Centos 7.2
product: Centos 7
organization: Default Organization
content_type: yum
url: http://mirror.centos.org/centos/7/os/x86_64/
- include: katello.yml
vars:
name: Create Centos 7 View
entity: content_view
params:
name: Centos 7 View
organization: Default Organization
repositories:
- name: Centos 7.2
product: Centos 7
- include: katello.yml
vars:
name: Enable RHEL Product
entity: repository_set
params:
name: Red Hat Enterprise Linux 7 Server (RPMs)
product: Red Hat Enterprise Linux Server
organization: Default Organization
basearch: x86_64
releasever: 7
- include: katello.yml
vars:
name: Promote Contentview Environment with longer timeout
task_timeout: 10800
entity: content_view
action: promote
params:
name: MyContentView
organization: MyOrganisation
from_environment: Testing
to_environment: Production
# Best Practices
# In Foreman, things can be done in parallel.
# When a conflicting action is already running,
# the task will fail instantly instead of waiting for the already running action to complete.
# So you should use an "until success" loop to catch this.
- name: Promote Contentview Environment with increased Timeout
katello:
username: ansibleuser
password: supersecret
task_timeout: 10800
entity: content_view
action: promote
params:
name: MyContentView
organization: MyOrganisation
from_environment: Testing
to_environment: Production
register: task_result
until: task_result is success
retries: 9
delay: 120
'''
RETURN = '''# '''
import datetime
import os
import traceback
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except Exception:
HAS_NAILGUN_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NailGun(object):
def __init__(self, server, entities, module, task_timeout):
self._server = server
self._entities = entities
self._module = module
entity_mixins.TASK_TIMEOUT = task_timeout
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={0}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, organization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
def find_repository(self, name, product, organization):
product = self.find_product(product, organization)
repository = self._entities.Repository(self._server, name=name, product=product)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
def organization(self, params):
name = params['name']
del params['name']
org = self.find_organization(name, **params)
if org:
org = self._entities.Organization(self._server, name=name, id=org.id, **params)
org.update()
else:
org = self._entities.Organization(self._server, name=name, **params)
org.create()
return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
try:
file = open(os.getcwd() + params['content'], 'r')
content = file.read()
finally:
file.close()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception as e:
            if "Import is the same as existing data" in str(e):
return False
else:
self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
exception=traceback.format_exc())
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
def repository(self, params):
product = self.find_product(params['product'], params['organization'])
params['product'] = product.id
del params['organization']
repository = self._entities.Repository(self._server, **params)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
repository.id = response[0].id
repository.update()
else:
repository.create()
return True
def sync_repository(self, params):
org = self.find_organization(params['organization'])
repository = self.find_repository(params['name'], params['product'], org.name)
return repository.sync()
def repository_set(self, params):
product = self.find_product(params['product'], params['organization'])
del params['product']
del params['organization']
if not product:
return False
else:
reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
reposet = reposet.search()[0]
formatted_name = [params['name'].replace('(', '').replace(')', '')]
formatted_name.append(params['basearch'])
if 'releasever' in params:
formatted_name.append(params['releasever'])
formatted_name = ' '.join(formatted_name)
repository = self._entities.Repository(self._server, product=product, name=formatted_name)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
repository = repository.search()
if len(repository) == 0:
if 'releasever' in params:
reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
else:
reposet.enable(data={'basearch': params['basearch']})
return True
def sync_plan(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
products = params['products']
del params['products']
sync_plan = self._entities.SyncPlan(
self._server,
name=params['name'],
organization=org
)
response = sync_plan.search()
sync_plan.sync_date = params['sync_date']
sync_plan.interval = params['interval']
if len(response) == 1:
sync_plan.id = response[0].id
sync_plan.update()
else:
response = sync_plan.create()
sync_plan.id = response[0].id
if products:
ids = []
for name in products:
product = self.find_product(name, org.name)
ids.append(product.id)
sync_plan.add_products(data={'product_ids': ids})
return True
def content_view(self, params):
org = self.find_organization(params['organization'])
content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
response = content_view.search()
if len(response) == 1:
content_view.id = response[0].id
content_view.update()
else:
content_view = content_view.create()
if params['repositories']:
repos = []
for repository in params['repositories']:
repository = self.find_repository(repository['name'], repository['product'], org.name)
repos.append(repository)
content_view.repository = repos
content_view.update(['repository'])
def find_content_view_version(self, name, organization, environment):
env = self.find_lifecycle_environment(environment, organization)
content_view = self.find_content_view(name, organization)
content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
response = content_view_version.search(['content_view'], {'environment_id': env.id})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View version found for %s" % response)
def publish(self, params):
content_view = self.find_content_view(params['name'], params['organization'])
return content_view.publish()
def promote(self, params):
to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
data = {'environment_id': to_environment.id}
return version.promote(data=data)
def lifecycle_environment(self, params):
org = self.find_organization(params['organization'])
prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
response = lifecycle_env.search()
if len(response) == 1:
lifecycle_env.id = response[0].id
lifecycle_env.update()
else:
lifecycle_env.create()
return True
def activation_key(self, params):
org = self.find_organization(params['organization'])
activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
response = activation_key.search()
if len(response) == 1:
activation_key.id = response[0].id
activation_key.update()
else:
activation_key.create()
if params['content_view']:
content_view = self.find_content_view(params['content_view'], params['organization'])
lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
activation_key.content_view = content_view
activation_key.environment = lifecycle_environment
activation_key.update()
return True
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True),
username=dict(type='str', required=True, no_log=True),
password=dict(type='str', required=True, no_log=True),
entity=dict(type='str', required=True,
choices=['repository', 'manifest', 'repository_set', 'sync_plan',
'content_view', 'lifecycle_environment', 'activation_key', 'product']),
action=dict(type='str', choices=['sync', 'publish', 'promote']),
verify_ssl=dict(type='bool', default=False),
task_timeout=dict(type='int', default=1000),
params=dict(type='dict', required=True, no_log=True),
),
supports_check_mode=True,
)
if not HAS_NAILGUN_PACKAGE:
module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
entity = module.params['entity']
action = module.params['action']
params = module.params['params']
verify_ssl = module.params['verify_ssl']
task_timeout = module.params['task_timeout']
server = ServerConfig(
url=server_url,
auth=(username, password),
verify=verify_ssl
)
ng = NailGun(server, entities, module, task_timeout)
    # Let's make a connection to the server with username and password
try:
org = entities.Organization(server)
org.search()
except Exception as e:
module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
result = False
if entity == 'product':
if action == 'sync':
result = ng.sync_product(params)
else:
result = ng.product(params)
elif entity == 'repository':
if action == 'sync':
result = ng.sync_repository(params)
else:
result = ng.repository(params)
elif entity == 'manifest':
result = ng.manifest(params)
elif entity == 'repository_set':
result = ng.repository_set(params)
elif entity == 'sync_plan':
result = ng.sync_plan(params)
elif entity == 'content_view':
if action == 'publish':
result = ng.publish(params)
elif action == 'promote':
result = ng.promote(params)
else:
result = ng.content_view(params)
elif entity == 'lifecycle_environment':
result = ng.lifecycle_environment(params)
elif entity == 'activation_key':
result = ng.activation_key(params)
else:
module.fail_json(changed=False, result="Unsupported entity supplied")
module.exit_json(changed=result, result="%s updated" % entity)
if __name__ == '__main__':
main()
| 33.424194 | 154 | 0.624089 |
fb47021cd797ce1ff920b46b3ba691cd016b2d9c | 1,309 | py | Python | face_detection.py | AnishSangrulkar/AI_Proctoring | 9ca149fdf8400d55afda27a738026318be2ba125 | [
"MIT"
] | null | null | null | face_detection.py | AnishSangrulkar/AI_Proctoring | 9ca149fdf8400d55afda27a738026318be2ba125 | [
"MIT"
] | null | null | null | face_detection.py | AnishSangrulkar/AI_Proctoring | 9ca149fdf8400d55afda27a738026318be2ba125 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import time
modelFile = "res10_300x300_ssd_iter_140000.caffemodel"
configFile = "deploy.prototxt.txt"
net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
#cap = cv2.VideoCapture(0)
#frame=cv2.imread('2020-10-30-201038.jpg')
def main(frame):
frame = cv2.flip(frame, 1)
h, w = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,(300, 300), (104.0, 117.0, 123.0))
net.setInput(blob)
faces = net.forward()
    # to draw faces on image
face_count = 0
face=[]
for i in range(faces.shape[2]):
confidence = faces[0, 0, i, 2]
if confidence > 0.85:
face_count = face_count + 1
box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
(x, y, x1, y1) = box.astype("int")
#cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
face.append([x,y,x1,y1])
# uncomment to show face_count in image
#frame = cv2.putText(frame, f"No. of faces - {face_count}", (10,30), cv2.FONT_HERSHEY_COMPLEX, 1 , (0,0,0))
#cv2.imshow('feed', frame)
    info = {'faces': face, 'face_count': face_count}
return info
#print(main(frame))
# uncomment to print fps
#print("FPS : ", round(1.0/ (time.time() - start), 2))
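# A rough usage sketch with a webcam loop (assumes a local camera at index 0;
# illustrative only, not part of the original script):
#
#   cap = cv2.VideoCapture(0)
#   while True:
#       ret, frame = cap.read()
#       if not ret:
#           break
#       print(main(frame))
#       if cv2.waitKey(1) & 0xFF == ord('q'):
#           break
#   cap.release()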
| 29.75 | 111 | 0.582124 |
c7a141d4d334827bdad874f53c02799b3be75fff | 27,751 | py | Python | server/www/teleport/webroot/app/controller/system.py | hgahhja56465/teleport | 56115234dc7bc61ac40eb6ff19fbbd44de7e50ff | [
"Apache-2.0"
] | 640 | 2018-09-12T03:14:13.000Z | 2022-03-30T04:38:09.000Z | server/www/teleport/webroot/app/controller/system.py | hgahhja56465/teleport | 56115234dc7bc61ac40eb6ff19fbbd44de7e50ff | [
"Apache-2.0"
] | 175 | 2018-09-10T19:52:20.000Z | 2022-03-30T04:37:30.000Z | server/www/teleport/webroot/app/controller/system.py | hgahhja56465/teleport | 56115234dc7bc61ac40eb6ff19fbbd44de7e50ff | [
"Apache-2.0"
] | 230 | 2018-09-13T02:40:49.000Z | 2022-03-29T11:53:58.000Z | # -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import shutil
import time
import app.model.system as system_model
import tornado.gen
from app.app_ver import TP_SERVER_VER
from app.base import mail
from app.base.configs import tp_cfg
from app.base.controller import TPBaseHandler, TPBaseJsonHandler
from app.base.logger import *
from app.const import *
from app.base.db import get_db
from app.model import syslog
from app.model import record
from app.model import ops
from app.model import audit
from app.model import user
from app.base.core_server import core_service_async_post_http
from app.base.session import tp_session
from app.logic.auth.ldap import Ldap
from app.base.utils import tp_timestamp_sec
class DoGetTimeHandler(TPBaseJsonHandler):
def post(self):
# time_now = int(datetime.datetime.timestamp())
self.write_json(TPE_OK, data=tp_timestamp_sec())
class ConfigHandler(TPBaseHandler):
@tornado.gen.coroutine
def get(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_CONFIG)
if ret != TPE_OK:
return
cfg = tp_cfg()
# core_detected = False
req = {'method': 'get_config', 'param': []}
_yr = core_service_async_post_http(req)
code, ret_data = yield _yr
if code != TPE_OK:
cfg.update_core(None)
else:
cfg.update_core(ret_data)
if not tp_cfg().core.detected:
total_size = 0
free_size = 0
else:
total_size, _, free_size = shutil.disk_usage(tp_cfg().core.replay_path)
_db = get_db()
db = {'type': _db.db_type}
if _db.db_type == _db.DB_TYPE_SQLITE:
db['sqlite_file'] = _db.sqlite_file
elif _db.db_type == _db.DB_TYPE_MYSQL:
db['mysql_host'] = _db.mysql_host
db['mysql_port'] = _db.mysql_port
db['mysql_db'] = _db.mysql_db
db['mysql_user'] = _db.mysql_user
param = {
'total_size': total_size,
'free_size': free_size,
'core_cfg': tp_cfg().core,
'sys_cfg': tp_cfg().sys,
'web_cfg': {
'version': TP_SERVER_VER,
'core_server_rpc': tp_cfg().common.core_server_rpc,
'db': db
}
}
self.render('system/config.mako', page_param=json.dumps(param))
class RoleHandler(TPBaseHandler):
def get(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_ROLE)
if ret != TPE_OK:
return
self.render('system/role.mako')
class DoExportDBHandler(TPBaseHandler):
def get(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_CONFIG)
if ret != TPE_OK:
return
sql, err = get_db().export_to_sql()
self.set_header('Content-Type', 'application/sql')
self.set_header('Content-Disposition', 'attachment; filename="teleport-db-export-{}.sql"'.format(time.strftime('%Y%m%d-%H%M%S')))
self.write(sql)
self.finish()
class DoRoleUpdateHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_ROLE)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
role_id = int(args['role_id'])
role_name = args['role_name']
privilege = int(args['privilege'])
except:
log.e('\n')
return self.write_json(TPE_PARAM)
if role_id == 0:
err, role_id = system_model.add_role(self, role_name, privilege)
else:
if role_id == 1:
return self.write_json(TPE_FAILED, '禁止修改系统管理员角色!')
err = system_model.update_role(self, role_id, role_name, privilege)
return self.write_json(err, data=role_id)
class DoRoleRemoveHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_ROLE)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
role_id = int(args['role_id'])
except:
log.e('\n')
return self.write_json(TPE_PARAM)
if role_id == 1:
return self.write_json(TPE_FAILED, '禁止删除系统管理员角色!')
err = system_model.remove_role(self, role_id)
return self.write_json(err)
class SysLogHandler(TPBaseHandler):
def get(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_LOG)
if ret != TPE_OK:
return
self.render('system/syslog.mako')
class DoGetLogsHandler(TPBaseJsonHandler):
def post(self):
# return self.write_json(0, data=[])
filter = dict()
order = dict()
order['name'] = 'log_time'
order['asc'] = False
limit = dict()
limit['page_index'] = 0
limit['per_page'] = 25
args = self.get_argument('args', None)
if args is not None:
args = json.loads(args)
tmp = list()
_filter = args['filter']
if _filter is not None:
for i in _filter:
if i == 'user_name':
_x = _filter[i].strip()
if _x == '全部':
tmp.append(i)
if i == 'search':
_x = _filter[i].strip()
if len(_x) == 0:
tmp.append(i)
continue
for i in tmp:
del _filter[i]
filter.update(_filter)
_limit = args['limit']
if _limit['page_index'] < 0:
_limit['page_index'] = 0
if _limit['per_page'] < 10:
_limit['per_page'] = 10
if _limit['per_page'] > 100:
_limit['per_page'] = 100
limit.update(_limit)
_order = args['order']
if _order is not None:
order['name'] = _order['k']
order['asc'] = _order['v']
        err, total, record_list = syslog.get_logs(filter, order, limit)
if err != TPE_OK:
return self.write_json(err)
ret = dict()
ret['page_index'] = limit['page_index']
ret['total'] = total
ret['data'] = record_list
return self.write_json(0, data=ret)
class DoSaveCfgHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_CONFIG)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
processed = False
if 'smtp' in args:
processed = True
_cfg = args['smtp']
_server = _cfg['server']
_port = _cfg['port']
_ssl = _cfg['ssl']
_sender = _cfg['sender']
_password = _cfg['password']
# TODO: encrypt the password before save by core-service.
# TODO: if not send password, use pre-saved password.
err = system_model.save_config(self, '更新SMTP设置', 'smtp', _cfg)
if err == TPE_OK:
                    # also update the in-memory cache
tp_cfg().sys.smtp.server = _server
tp_cfg().sys.smtp.port = _port
tp_cfg().sys.smtp.ssl = _ssl
tp_cfg().sys.smtp.sender = _sender
                    # special case: never expose the password to the frontend
tp_cfg().sys_smtp_password = _password
else:
return self.write_json(err)
            # add the url-protocol configuration
if 'global' in args:
processed = True
_cfg = args['global']
_url_proto = _cfg['url_proto']
err = system_model.save_config(self, '更新全局设置', 'global', _cfg)
if err == TPE_OK:
tp_cfg().sys.glob.url_proto = _url_proto
else:
return self.write_json(err)
if 'password' in args:
processed = True
_cfg = args['password']
_allow_reset = _cfg['allow_reset']
_force_strong = _cfg['force_strong']
_timeout = _cfg['timeout']
err = system_model.save_config(self, '更新密码策略设置', 'password', _cfg)
if err == TPE_OK:
tp_cfg().sys.password.allow_reset = _allow_reset
tp_cfg().sys.password.force_strong = _force_strong
tp_cfg().sys.password.timeout = _timeout
else:
return self.write_json(err)
if 'login' in args:
processed = True
_cfg = args['login']
_session_timeout = _cfg['session_timeout']
_retry = _cfg['retry']
_lock_timeout = _cfg['lock_timeout']
_auth = _cfg['auth']
err = system_model.save_config(self, '更新登录策略设置', 'login', _cfg)
if err == TPE_OK:
tp_cfg().sys.login.session_timeout = _session_timeout
tp_cfg().sys.login.retry = _retry
tp_cfg().sys.login.lock_timeout = _lock_timeout
tp_cfg().sys.login.auth = _auth
tp_session().update_default_expire()
else:
return self.write_json(err)
if 'session' in args:
processed = True
_cfg = args['session']
_noop_timeout = _cfg['noop_timeout']
_flag_record = _cfg['flag_record']
_flag_rdp = _cfg['flag_rdp']
_flag_ssh = _cfg['flag_ssh']
err = system_model.save_config(self, '更新连接控制设置', 'session', _cfg)
if err == TPE_OK:
try:
req = {'method': 'set_config', 'param': {'noop_timeout': _noop_timeout}}
_yr = core_service_async_post_http(req)
code, ret_data = yield _yr
if code != TPE_OK:
log.e('can not set runtime-config to core-server.\n')
return self.write_json(code)
except:
pass
tp_cfg().sys.session.noop_timeout = _noop_timeout
tp_cfg().sys.session.flag_record = _flag_record
tp_cfg().sys.session.flag_rdp = _flag_rdp
tp_cfg().sys.session.flag_ssh = _flag_ssh
else:
return self.write_json(err)
if 'storage' in args:
processed = True
_cfg = args['storage']
_keep_log = _cfg['keep_log']
_keep_record = _cfg['keep_record']
_cleanup_hour = _cfg['cleanup_hour']
_cleanup_minute = _cfg['cleanup_minute']
if not ((30 <= _keep_log <= 365) or _keep_log == 0):
return self.write_json(TPE_PARAM, '系统日志保留时间超出范围!')
if not ((30 <= _keep_record <= 365) or _keep_record == 0):
return self.write_json(TPE_PARAM, '会话录像保留时间超出范围!')
err = system_model.save_config(self, '更新存储策略设置', 'storage', _cfg)
if err == TPE_OK:
tp_cfg().sys.storage.keep_log = _keep_log
tp_cfg().sys.storage.keep_record = _keep_record
tp_cfg().sys.storage.cleanup_hour = _cleanup_hour
tp_cfg().sys.storage.cleanup_minute = _cleanup_minute
else:
return self.write_json(err)
if 'ldap' in args:
processed = True
_cfg = args['ldap']
# _password = _cfg['password']
_server = _cfg['server']
_port = _cfg['port']
_use_ssl = _cfg['use_ssl']
_domain = _cfg['domain']
_admin = _cfg['admin']
_base_dn = _cfg['base_dn']
_filter = _cfg['filter']
_attr_username = _cfg['attr_username']
_attr_surname = _cfg['attr_surname']
_attr_email = _cfg['attr_email']
if len(_cfg['password']) == 0:
_cfg['password'] = tp_cfg().sys_ldap_password
if len(_cfg['password']) == 0:
return self.write_json(TPE_PARAM, '请设置LDAP管理员密码')
# TODO: encrypt the password before save by core-service.
err = system_model.save_config(self, '更新LDAP设置', 'ldap', _cfg)
if err == TPE_OK:
tp_cfg().sys.ldap.server = _server
tp_cfg().sys.ldap.port = _port
tp_cfg().sys.ldap.use_ssl = _use_ssl
tp_cfg().sys.ldap.domain = _domain
tp_cfg().sys.ldap.admin = _admin
tp_cfg().sys.ldap.base_dn = _base_dn
tp_cfg().sys.ldap.filter = _filter
tp_cfg().sys.ldap.attr_username = _attr_username
tp_cfg().sys.ldap.attr_surname = _attr_surname
tp_cfg().sys.ldap.attr_email = _attr_email
                    # special case: never expose the password to the frontend
tp_cfg().sys_ldap_password = _cfg['password']
else:
return self.write_json(err)
if not processed:
return self.write_json(TPE_PARAM)
return self.write_json(TPE_OK)
except:
log.e('\n')
self.write_json(TPE_FAILED)
class DoSendTestMailHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_CONFIG)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
_server = args['server']
_port = int(args['port'])
_ssl = args['ssl']
_sender = args['sender']
_password = args['password']
_recipient = args['recipient']
except:
return self.write_json(TPE_PARAM)
code, msg = yield mail.tp_send_mail(
_recipient,
'您好!\n\n这是一封测试邮件,仅用于验证系统的邮件发送模块工作是否正常。\n\n请忽略本邮件。',
subject='测试邮件',
sender='Teleport Server <{}>'.format(_sender),
server=_server,
port=_port,
use_ssl=_ssl,
username=_sender,
password=_password
)
self.write_json(code, message=msg)
class DoLdapListUserAttrHandler(TPBaseJsonHandler):
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_USER_CREATE)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
cfg = args['ldap']
cfg['port'] = int(cfg['port'])
if len(cfg['password']) == 0:
if len(tp_cfg().sys_ldap_password) == 0:
return self.write_json(TPE_PARAM, message='需要设置LDAP管理员密码')
else:
cfg['password'] = tp_cfg().sys_ldap_password
except:
return self.write_json(TPE_PARAM)
try:
ldap = Ldap(cfg['server'], cfg['port'], cfg['base_dn'], cfg['use_ssl'])
ret, data, err_msg = ldap.get_all_attr(cfg['admin'], cfg['password'], cfg['filter'])
if ret != TPE_OK:
return self.write_json(ret, message=err_msg)
else:
return self.write_json(ret, data=data)
except:
log.e('')
return self.write_json(TPE_PARAM)
class DoLdapConfigTestHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_USER_CREATE)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
cfg = args['ldap']
cfg['port'] = int(cfg['port'])
if len(cfg['password']) == 0:
if len(tp_cfg().sys_ldap_password) == 0:
return self.write_json(TPE_PARAM, message='需要设置LDAP管理员密码')
else:
cfg['password'] = tp_cfg().sys_ldap_password
except:
return self.write_json(TPE_PARAM)
try:
ldap = Ldap(cfg['server'], cfg['port'], cfg['base_dn'], cfg['use_ssl'])
ret, data, err_msg = ldap.list_users(
cfg['admin'], cfg['password'], cfg['filter'],
cfg['attr_username'], cfg['attr_surname'], cfg['attr_email'],
size_limit=10
)
if ret != TPE_OK:
return self.write_json(ret, message=err_msg)
else:
return self.write_json(ret, data=data)
except:
log.e('')
return self.write_json(TPE_PARAM)
class DoLdapGetUsersHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_USER_CREATE)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
if len(tp_cfg().sys_ldap_password) == 0:
return self.write_json(TPE_PARAM, message='LDAP未能正确配置,需要管理员密码')
else:
_password = tp_cfg().sys_ldap_password
_server = tp_cfg().sys.ldap.server
_port = tp_cfg().sys.ldap.port
_use_ssl = tp_cfg().sys.ldap.use_ssl
_admin = tp_cfg().sys.ldap.admin
_base_dn = tp_cfg().sys.ldap.base_dn
_filter = tp_cfg().sys.ldap.filter
_attr_username = tp_cfg().sys.ldap.attr_username
_attr_surname = tp_cfg().sys.ldap.attr_surname
_attr_email = tp_cfg().sys.ldap.attr_email
except:
return self.write_json(TPE_PARAM)
try:
ldap = Ldap(_server, _port, _base_dn, _use_ssl)
ret, data, err_msg = ldap.list_users(_admin, _password, _filter, _attr_username, _attr_surname, _attr_email)
if ret != TPE_OK:
return self.write_json(ret, message=err_msg)
exists_users = user.get_users_by_type(TP_USER_TYPE_LDAP)
bound_users = []
if exists_users is not None:
for u in exists_users:
h = hashlib.sha1()
h.update(u['ldap_dn'].encode())
bound_users.append(h.hexdigest())
ret_data = []
for u in data:
h = hashlib.sha1()
h.update(u.encode())
_id = h.hexdigest()
if _id in bound_users:
continue
_user = data[u]
_user['id'] = h.hexdigest()
ret_data.append(_user)
return self.write_json(ret, data=ret_data)
except:
log.e('')
return self.write_json(TPE_PARAM)
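# DoLdapImportHandler: import the selected LDAP users (identified by the SHA-1 hash of their DN) as teleport accounts, then e-mail login instructions when SMTP is configured.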
class DoLdapImportHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_USER_CREATE)
if ret != TPE_OK:
return
args = self.get_argument('args', None)
if args is None:
return self.write_json(TPE_PARAM)
try:
args = json.loads(args)
except:
return self.write_json(TPE_JSON_FORMAT)
try:
dn_hash_list = args['ldap_users']
if len(tp_cfg().sys_ldap_password) == 0:
return self.write_json(TPE_PARAM, message='LDAP未能正确配置,需要管理员密码')
else:
_password = tp_cfg().sys_ldap_password
_server = tp_cfg().sys.ldap.server
_port = tp_cfg().sys.ldap.port
_use_ssl = tp_cfg().sys.ldap.use_ssl
_admin = tp_cfg().sys.ldap.admin
_base_dn = tp_cfg().sys.ldap.base_dn
_filter = tp_cfg().sys.ldap.filter
_attr_username = tp_cfg().sys.ldap.attr_username
_attr_surname = tp_cfg().sys.ldap.attr_surname
_attr_email = tp_cfg().sys.ldap.attr_email
except:
return self.write_json(TPE_PARAM)
try:
ldap = Ldap(_server, _port, _base_dn, _use_ssl)
ret, data, err_msg = ldap.list_users(_admin, _password, _filter, _attr_username, _attr_surname, _attr_email)
if ret != TPE_OK:
return self.write_json(ret, message=err_msg)
need_import = []
for u in data:
h = hashlib.sha1()
h.update(u.encode())
dn_hash = h.hexdigest()
for x in dn_hash_list:
if x == dn_hash:
_user = data[u]
_user['dn'] = u
need_import.append(_user)
break
if len(need_import) == 0:
return self.write_json(ret, message='没有可以导入的LDAP用户')
return self._do_import(need_import)
except:
log.e('')
return self.write_json(TPE_PARAM)
def _do_import(self, users):
success = list()
failed = list()
try:
user_list = []
for _u in users:
if 'surname' not in _u:
_u['surname'] = _u['username']
if 'email' not in _u:
_u['email'] = ''
u = dict()
u['_line'] = 0
u['_id'] = 0
u['type'] = TP_USER_TYPE_LDAP
u['ldap_dn'] = _u['dn']
u['username'] = '{}@{}'.format(_u['username'], tp_cfg().sys.ldap.domain)
u['surname'] = _u['surname']
u['email'] = _u['email']
u['mobile'] = ''
u['qq'] = ''
u['wechat'] = ''
u['desc'] = ''
u['password'] = ''
                # fix: fall back to the username when no surname was provided, and normalize the username to lower case
if len(u['surname']) == 0:
u['surname'] = u['username']
u['username'] = u['username'].lower()
user_list.append(u)
print(user_list)
user.create_users(self, user_list, success, failed)
            # For users that were created successfully, send a password notification email
sys_smtp_password = tp_cfg().sys_smtp_password
if len(sys_smtp_password) > 0:
web_url = '{}://{}'.format(self.request.protocol, self.request.host)
for u in user_list:
if u['_id'] == 0 or len(u['email']) == 0:
continue
mail_body = '{surname} 您好!\n\n已为您创建teleport系统用户账号,现在可以使用以下信息登录teleport系统:\n\n' \
'登录用户名:{username}\n' \
'密码:您正在使用的域登录密码\n' \
'地址:{web_url}\n\n\n\n' \
'[本邮件由teleport系统自动发出,请勿回复]' \
'\n\n' \
''.format(surname=u['surname'], username=u['username'], web_url=web_url)
err, msg = yield mail.tp_send_mail(u['email'], mail_body, subject='用户密码函')
if err != TPE_OK:
failed.append({'line': u['_line'], 'error': '无法发送密码函到邮箱 {},错误:{}。'.format(u['email'], msg)})
            # Tally the results
total_success = 0
total_failed = 0
for u in user_list:
if u['_id'] == 0:
total_failed += 1
else:
total_success += 1
            # Build the final result message
if len(failed) == 0:
# ret['code'] = TPE_OK
# ret['message'] = '共导入 {} 个用户账号!'.format(total_success)
return self.write_json(TPE_OK, message='共导入 {} 个用户账号!'.format(total_success))
else:
# ret['code'] = TPE_FAILED
msg = ''
if total_success > 0:
msg = '{} 个用户账号导入成功,'.format(total_success)
if total_failed > 0:
msg += '{} 个用户账号未能导入!'.format(total_failed)
# ret['data'] = failed
return self.write_json(TPE_FAILED, data=failed, message=msg)
except:
log.e('got exception when import LDAP user.\n')
# ret['code'] = TPE_FAILED
msg = ''
if len(success) > 0:
msg += '{} 个用户账号导入后发生异常!'.format(len(success))
else:
msg = '发生异常!'
# ret['data'] = failed
return self.write_json(TPE_FAILED, data=failed, message=msg)
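# DoCleanupStorageHandler: trigger cleanup of session-recording storage; requires the system-configuration privilege.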
class DoCleanupStorageHandler(TPBaseJsonHandler):
@tornado.gen.coroutine
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_SYS_CONFIG)
if ret != TPE_OK:
return
code, msg = yield record.cleanup_storage(self)
self.write_json(code, data=msg)
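# The two handlers below rebuild the cached authorization maps used by the ops and audit modules.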
class DoRebuildOpsAuzMapHandler(TPBaseJsonHandler):
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_OPS_AUZ)
if ret != TPE_OK:
return
err = audit.build_auz_map()
self.write_json(err)
class DoRebuildAuditAuzMapHandler(TPBaseJsonHandler):
def post(self):
ret = self.check_privilege(TP_PRIVILEGE_AUDIT_AUZ)
if ret != TPE_OK:
return
err = ops.build_auz_map()
self.write_json(err)
| 35.396684 | 138 | 0.501099 |
9e201473bcb6a7dbd0590f53e51e82495cf80c27 | 4,447 | py | Python | Chapter17/05_how_to_optimize_a_NN_architecture.py | kksonge/Hands-On-Machine-Learning-for-Algorithmic-Trading | 01516a70f777c78673feaac16a477b457d0381b4 | [
"MIT"
] | 944 | 2019-01-06T22:44:31.000Z | 2022-03-30T14:47:33.000Z | Chapter17/05_how_to_optimize_a_NN_architecture.py | kksonge/Hands-On-Machine-Learning-for-Algorithmic-Trading | 01516a70f777c78673feaac16a477b457d0381b4 | [
"MIT"
] | 14 | 2019-01-05T08:34:25.000Z | 2021-11-15T01:11:54.000Z | Chapter17/05_how_to_optimize_a_NN_architecture.py | Nnamdi-sys/Machine-Learning-for-Algo-Trading | 9082adece2bbdc6375eb58e2253addf0cbbdfd07 | [
"MIT"
] | 598 | 2018-12-09T00:09:50.000Z | 2022-03-18T10:21:13.000Z | # coding: utf-8
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from joblib import dump
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.metrics import roc_auc_score
import tensorflow as tf
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers import Dense, Dropout, Activation
from keras.callbacks import EarlyStopping, TensorBoard
np.random.seed(42)
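# Load the engineered features/labels from data.h5: observations through 2016 form the training set, 2017 onward is held out for testing.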
data = pd.read_hdf('data.h5', 'returns')
test_data = data['2017':]
X_train = data[:'2016'].drop('label', axis=1)
y_train = data[:'2016'].label
del data
input_dim = X_train.shape[1]
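# auc_roc wraps TensorFlow's streaming AUC metric so it can be tracked as a Keras metric during training.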
def auc_roc(y_true, y_pred):
# any tensorflow metric
value, update_op = tf.metrics.auc(y_true, y_pred)
# find all variables created for this metric
metric_vars = [i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]]
# Add metric variables to GLOBAL_VARIABLES collection.
    # They will be initialized for a new session.
for v in metric_vars:
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
# force to update metric values
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
def make_model(dense_layers, activation, dropout):
'''Creates a multi-layer perceptron model
dense_layers: List of layer sizes; one number per layer
'''
model = Sequential()
for i, layer_size in enumerate(dense_layers, 1):
if i == 1:
model.add(Dense(layer_size, input_dim=input_dim))
model.add(Activation(activation))
else:
model.add(Dense(layer_size))
model.add(Activation(activation))
model.add(Dropout(dropout))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='Adam',
metrics=['binary_accuracy', auc_roc])
return model
clf = KerasClassifier(make_model, epochs=10, batch_size=32)
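# OneStepTimeSeriesSplit implements walk-forward validation: each of the n_splits test folds holds the most recent test_period_length dates, and training is restricted to strictly earlier dates.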
class OneStepTimeSeriesSplit:
"""Generates tuples of train_idx, test_idx pairs
Assumes the index contains a level labeled 'date'"""
def __init__(self, n_splits=3, test_period_length=1, shuffle=False):
self.n_splits = n_splits
self.test_period_length = test_period_length
self.shuffle = shuffle
self.test_end = n_splits * test_period_length
@staticmethod
def chunks(l, chunk_size):
for i in range(0, len(l), chunk_size):
yield l[i:i + chunk_size]
def split(self, X, y=None, groups=None):
unique_dates = (X.index
.get_level_values('date')
.unique()
.sort_values(ascending=False)[:self.test_end])
dates = X.reset_index()[['date']]
for test_date in self.chunks(unique_dates, self.test_period_length):
train_idx = dates[dates.date < min(test_date)].index
test_idx = dates[dates.date.isin(test_date)].index
if self.shuffle:
np.random.shuffle(list(train_idx))
yield train_idx, test_idx
def get_n_splits(self, X, y, groups=None):
return self.n_splits
cv = OneStepTimeSeriesSplit(n_splits=12)
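# Search space: 7 layer layouts x 2 activations x 3 dropout rates = 42 candidate architectures, each scored by ROC-AUC across the 12 time-series folds.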
param_grid = {'dense_layers': [[32], [32, 32], [64], [64, 64], [64, 64, 32], [64, 32], [128]],
'activation' : ['relu', 'tanh'],
'dropout' : [.25, .5, .75],
}
gs = GridSearchCV(estimator=clf,
param_grid=param_grid,
scoring='roc_auc',
cv=cv,
refit=True,
return_train_score=True,
n_jobs=-1,
verbose=1,
error_score=np.nan
)
fit_params = dict(callbacks=[EarlyStopping(monitor='auc_roc', patience=300, verbose=1, mode='max')],
verbose=2,
epochs=50)
gs.fit(X=X_train.astype(float), y=y_train, **fit_params)
print('\nBest Score: {:.2%}'.format(gs.best_score_))
print('Best Params:\n', pd.Series(gs.best_params_))
dump(gs, 'gs.joblib')
gs.best_estimator_.model.save('best_model.h5')
pd.DataFrame(gs.cv_results_).to_csv('cv_results.csv', index=False)
y_pred = gs.best_estimator_.model.predict(test_data.drop('label', axis=1))
print(roc_auc_score(y_true=test_data.label, y_score=y_pred))
| 31.316901 | 100 | 0.638408 |
e372b1cbb27f9c8cd4d0cef757a1712f7fcf501c | 148 | py | Python | mall_spider/utils/__init__.py | 524243642/taobao_spider | 9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e | [
"Unlicense"
] | 12 | 2019-06-06T12:23:08.000Z | 2021-06-15T17:50:07.000Z | mall_spider/utils/__init__.py | 524243642/mall_spider | 9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e | [
"Unlicense"
] | 3 | 2021-03-31T19:02:47.000Z | 2022-02-11T03:43:15.000Z | mall_spider/utils/__init__.py | 524243642/taobao_spider | 9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e | [
"Unlicense"
] | 5 | 2019-09-17T03:55:56.000Z | 2020-12-18T03:34:03.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
default_connect_timeout = 5.0
default_timeout = 10.0
default_retry = 3
default_retry_interval = 0.1
| 18.5 | 29 | 0.722973 |
bf6997fd55c0c8984972d5635f501dec8cd184ed | 9,526 | py | Python | integration-testing/test/test_bonding.py | srinivasreddy/CasperLabs | bb299b3257d8291ccf713e4a27d62d7d486582f5 | [
"Apache-2.0"
] | null | null | null | integration-testing/test/test_bonding.py | srinivasreddy/CasperLabs | bb299b3257d8291ccf713e4a27d62d7d486582f5 | [
"Apache-2.0"
] | null | null | null | integration-testing/test/test_bonding.py | srinivasreddy/CasperLabs | bb299b3257d8291ccf713e4a27d62d7d486582f5 | [
"Apache-2.0"
] | null | null | null | from test.cl_node.common import BONDING_CONTRACT, UNBONDING_CONTRACT
from test.cl_node.client_parser import parse_show_block
from test.cl_node.client_parser import parse_show_blocks
from test.cl_node.casperlabs_network import OneNodeNetwork
from typing import List
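# Helpers: bond_to_the_network() spins up a second node and submits a bonding deploy; assert_pre_state_of_network() checks that the genesis block does not already contain the stakes under test.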
def bond_to_the_network(network: OneNodeNetwork, contract: str, bond_amount: int):
network.add_new_node_to_network()
assert len(network.docker_nodes) == 2, "Total number of nodes should be 2."
node0, node1 = network.docker_nodes
block_hash = node1.bond(
session_contract=contract, payment_contract=contract, amount=bond_amount
)
return block_hash
def assert_pre_state_of_network(network: OneNodeNetwork, stakes: List[int]):
node0 = network.docker_nodes[0]
blocks = parse_show_blocks(node0.client.show_blocks(1000))
assert len(blocks) == 1
genesis_block = blocks[0]
item = list(
filter(
lambda x: x.stake in stakes
and x.validator_public_key == node0.from_address,
genesis_block.summary.header.state.bonds,
)
)
assert len(item) == 0
def test_bonding(one_node_network_fn):
"""
Feature file: consensus.feature
Scenario: Bonding a validator node to an existing network.
"""
bonding_amount = 1
assert_pre_state_of_network(one_node_network_fn, [bonding_amount])
block_hash = bond_to_the_network(
one_node_network_fn, BONDING_CONTRACT, bonding_amount
)
node0, node1 = one_node_network_fn.docker_nodes
assert block_hash is not None
r = node1.client.show_deploys(block_hash)[0]
assert r.is_error is False
assert r.error_message == ""
block1 = node1.client.show_block(block_hash)
block_ds = parse_show_block(block1)
public_key = node1.genesis_account.public_key_hex
item = list(
filter(
lambda x: x.stake == bonding_amount
and x.validator_public_key == public_key,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 1
def test_double_bonding(one_node_network_fn):
"""
Feature file: consensus.feature
Scenario: Bonding a validator node twice to an existing network.
"""
bonding_amount = 1
stakes = [1, 2]
assert_pre_state_of_network(one_node_network_fn, stakes)
block_hash = bond_to_the_network(
one_node_network_fn, BONDING_CONTRACT, bonding_amount
)
assert block_hash is not None
node1 = one_node_network_fn.docker_nodes[1]
block_hash = node1.bond(
session_contract=BONDING_CONTRACT,
payment_contract=BONDING_CONTRACT,
amount=bonding_amount,
)
assert block_hash is not None
r = node1.client.show_deploys(block_hash)[0]
assert r.is_error is False
assert r.error_message == ""
node1 = one_node_network_fn.docker_nodes[1]
block1 = node1.client.show_block(block_hash)
block_ds = parse_show_block(block1)
public_key = node1.genesis_account.public_key_hex
item = list(
filter(
lambda x: x.stake == bonding_amount + bonding_amount
and x.validator_public_key == public_key,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 1
def test_invalid_bonding(one_node_network_fn):
"""
Feature file: consensus.feature
    Scenario: Bonding a validator node with an amount that exceeds what the network allows.
"""
# 190 is current total staked amount.
bonding_amount = (190 * 1000) + 1
block_hash = bond_to_the_network(
one_node_network_fn, BONDING_CONTRACT, bonding_amount
)
assert block_hash is not None
node1 = one_node_network_fn.docker_nodes[1]
block1 = node1.client.show_block(block_hash)
r = node1.client.show_deploys(block_hash)[0]
assert r.is_error is True
assert r.error_message == "Exit code: 5"
block_ds = parse_show_block(block1)
public_key = node1.genesis_account.public_key_hex
item = list(
filter(
lambda x: x.stake == bonding_amount
and x.validator_public_key == public_key,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 0
def test_unbonding(one_node_network_fn):
"""
Feature file: consensus.feature
Scenario: unbonding a bonded validator node from an existing network.
"""
bonding_amount = 1
assert_pre_state_of_network(one_node_network_fn, [bonding_amount])
block_hash = bond_to_the_network(
one_node_network_fn, BONDING_CONTRACT, bonding_amount
)
assert block_hash is not None
node1 = one_node_network_fn.docker_nodes[1]
public_key = node1.genesis_account.public_key_hex
block_hash2 = node1.unbond(
session_contract=UNBONDING_CONTRACT,
payment_contract=UNBONDING_CONTRACT,
maybe_amount=None,
)
assert block_hash2 is not None
r = node1.client.show_deploys(block_hash2)[0]
assert r.is_error is False
assert r.error_message == ""
block2 = node1.client.show_block(block_hash2)
block_ds = parse_show_block(block2)
item = list(
filter(
lambda x: x.stake == bonding_amount
and x.validator_public_key == public_key,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 0
def test_partial_amount_unbonding(one_node_network_fn):
"""
Feature file: consensus.feature
Scenario: unbonding a bonded validator node with partial bonding amount from an existing network.
"""
bonding_amount = 11
unbond_amount = 4
assert_pre_state_of_network(
one_node_network_fn,
[bonding_amount, unbond_amount, bonding_amount - unbond_amount],
)
block_hash = bond_to_the_network(
one_node_network_fn, BONDING_CONTRACT, bonding_amount
)
assert block_hash is not None
node1 = one_node_network_fn.docker_nodes[1]
public_key = node1.genesis_account.public_key_hex
block_hash2 = node1.unbond(
session_contract=UNBONDING_CONTRACT,
payment_contract=UNBONDING_CONTRACT,
maybe_amount=unbond_amount,
)
r = node1.client.show_deploys(block_hash2)[0]
assert r.is_error is False
assert r.error_message == ""
assert block_hash2 is not None
block2 = node1.client.show_block(block_hash2)
block_ds = parse_show_block(block2)
item = list(
filter(
lambda x: x.stake == bonding_amount - unbond_amount
and x.validator_public_key == public_key,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 1
def test_invalid_unbonding(one_node_network_fn):
"""
Feature file: consensus.feature
    Scenario: Attempting to unbond more than the network allows from a bonded validator node.
"""
bonding_amount = 2000
assert_pre_state_of_network(one_node_network_fn, [bonding_amount])
block_hash = bond_to_the_network(
one_node_network_fn, BONDING_CONTRACT, bonding_amount
)
assert block_hash is not None
node1 = one_node_network_fn.docker_nodes[1]
block_hash2 = node1.unbond(
session_contract=UNBONDING_CONTRACT,
payment_contract=UNBONDING_CONTRACT,
maybe_amount=1985, # 1985 > (2000+190) * 0.9
)
assert block_hash2 is not None
r = node1.client.show_deploys(block_hash2)[0]
assert r.is_error is True
assert r.error_message == "Exit code: 6"
block2 = node1.client.show_block(block_hash2)
block_ds = parse_show_block(block2)
item = list(
filter(
lambda x: x.stake == bonding_amount
and x.validator_public_key == node1.genesis_account.public_key_hex,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 1
block_hash2 = node1.unbond(
session_contract=UNBONDING_CONTRACT,
payment_contract=UNBONDING_CONTRACT,
maybe_amount=None,
)
assert block_hash2 is not None
r = node1.client.show_deploys(block_hash2)[0]
assert r.is_error is True
assert r.error_message == "Exit code: 6"
block2 = node1.client.show_block(block_hash2)
block_ds = parse_show_block(block2)
item = list(
filter(
lambda x: x.stake == bonding_amount
and x.validator_public_key == node1.genesis_account.public_key_hex,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 1
def test_unbonding_without_bonding(one_node_network_fn):
"""
Feature file: consensus.feature
Scenario: unbonding a validator node which was not bonded to an existing network.
"""
bonding_amount = 1
assert_pre_state_of_network(one_node_network_fn, [bonding_amount])
one_node_network_fn.add_new_node_to_network()
assert (
len(one_node_network_fn.docker_nodes) == 2
), "Total number of nodes should be 2."
node0, node1 = one_node_network_fn.docker_nodes[:2]
public_key = node1.genesis_account.public_key_hex
block_hash = node1.unbond(
session_contract=UNBONDING_CONTRACT,
payment_contract=UNBONDING_CONTRACT,
maybe_amount=None,
)
assert block_hash is not None
r = node1.client.show_deploys(block_hash)[0]
assert r.is_error is True
assert r.error_message == "Exit code: 0"
block2 = node1.client.show_block(block_hash)
block_ds = parse_show_block(block2)
item = list(
filter(
lambda x: x.validator_public_key == public_key,
block_ds.summary.header.state.bonds,
)
)
assert len(item) == 0
| 32.623288 | 101 | 0.691686 |
4e638bb76013276f2e3f5a1aaad71bcca78e2bf1 | 1,396 | py | Python | py_cron_expression/util.py | vubon/PyCronExpression | 3d8dae321b3f243cae05101c85a1f367dd118e31 | [
"MIT"
] | 3 | 2020-05-03T06:49:18.000Z | 2020-05-28T12:09:53.000Z | py_cron_expression/util.py | vubon/PyCronExpression | 3d8dae321b3f243cae05101c85a1f367dd118e31 | [
"MIT"
] | null | null | null | py_cron_expression/util.py | vubon/PyCronExpression | 3d8dae321b3f243cae05101c85a1f367dd118e31 | [
"MIT"
] | null | null | null | """
@since: 26 may, 2020
# To define the time you can provide concrete values for
# minute (m), hour (h), day of month (dom), month (mon),
# and day of week (dow) or use '*' in these fields (for 'any').#
# Notice that tasks will be started based on the cron's system
# daemon's notion of time and timezones.
Linux platform cron job definition
# .---------------- minute (0 - 59)
# | .------------- hour (0 - 23)
# | | .---------- day of month (1 - 31)
# | | | .------- month (1 - 12) OR jan,feb,mar,apr ...
# | | | | .---- day of week (0 - 6) (Sunday=0 or 7)
# | | | | |
# * * * * *
AWS platform cron job definition
# .---------------------- minute (0 - 59)
# | .------------------- hour (0 - 23)
# | | .---------------- day of month (1 - 31)
# | | | .------------- month (1 - 12) OR jan,feb,mar,apr ...
# | | | | .---------- day of week (0 - 6) (Sunday=0 or 7)
# | | | | | .-------- year
# | | | | | |
# * * * * ? *
aws docs link: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html
"""
PLATFORMS = {
"linux": "* * * * *",
"aws": "* * * * ? *"
}
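# remove_zero normalizes a zero-padded cron field value: "00" -> "0", "05" -> "5"; values without a leading zero are returned unchanged.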
def remove_zero(number: str) -> str:
"""
:param number:
:return:
"""
if number.startswith("0") and number.endswith("0"):
return "0"
elif number.startswith("0"):
return number.replace("0", "")
else:
return number
| 29.702128 | 94 | 0.47063 |
b75af80e8bd7586d1657caf2b4802aafad2cd822 | 1,537 | py | Python | lab-server/controller/setup.py | CoderDojoTC/python-minecraft | d78ff6d30e47b887f5db12f23de81dd716576c0b | [
"MIT"
] | 31 | 2015-01-30T22:30:55.000Z | 2022-02-12T17:10:26.000Z | lab-server/controller/setup.py | mikemccllstr/mikemccllstr-python-minecraft | b1765ad7bb39dfad00944a7d8fa914484c88f95a | [
"MIT"
] | 16 | 2015-01-20T23:56:43.000Z | 2016-01-03T04:14:04.000Z | lab-server/controller/setup.py | mikemccllstr/mikemccllstr-python-minecraft | b1765ad7bb39dfad00944a7d8fa914484c88f95a | [
"MIT"
] | 24 | 2015-02-21T22:52:43.000Z | 2021-12-14T00:18:34.000Z | #!/usr/bin/env python
import lsc
from setuptools import setup, find_packages
try:
long_description = open('README.rst', 'rt').read()
except IOError:
long_description = ''
setup(
name='lsc',
version=lsc.__version__,
packages=find_packages(),
description='CoderDojo Twin Cities, Python-Minecraft, Lab Server Controller',
long_description=long_description,
author='Mike McCallister',
author_email='mike@mccllstr.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
platforms=['Any'],
install_requires=[
'cliff',
'cliff-tablib',
'docker-py',
'gspread',
'csvkit',
],
entry_points={
'console_scripts': [
'lsc = lsc.shell:main',
],
'lab_server_controller': [
'test = lsc.commands.environment:Test',
'show = lsc.commands.lab:Show',
'process-commands = lsc.commands.lab:ProcessCommands',
],
'cliff.formatter.list': [
'unicsv = cliffuni.formatters:UniCsvFormatter',
],
'cliff.formatter.show': [
'unicsv = cliffuni.formatters:UniCsvFormatter',
],
},
)
| 22.940299 | 81 | 0.579701 |
848d5d6804e7a4e9e8a8cb6560f112bfabbb8669 | 2,173 | py | Python | server/helper-code/visualizer-tensorboard/w2v_visualizer.py | malakhovks/word2cluster | 5d191a6dff1bea9d1e4ad024a004f7a2a4f1f130 | [
"MIT"
] | 4 | 2021-01-10T13:42:20.000Z | 2021-08-01T12:14:48.000Z | server/helper-code/visualizer-tensorboard/w2v_visualizer.py | malakhovks/word2cluster | 5d191a6dff1bea9d1e4ad024a004f7a2a4f1f130 | [
"MIT"
] | null | null | null | server/helper-code/visualizer-tensorboard/w2v_visualizer.py | malakhovks/word2cluster | 5d191a6dff1bea9d1e4ad024a004f7a2a4f1f130 | [
"MIT"
] | null | null | null | import sys
import os
import pathlib
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
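# visualize() prepares a TensorBoard projector view of the word vectors: it writes the vocabulary to a metadata TSV, stores the embedding matrix in a checkpointed tf.Variable, and points the projector config at both.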
def visualize(model, output_path):
meta_file = "w2x_metadata.tsv"
placeholder = np.zeros((len(model.wv.index2word), model.vector_size))
with open(os.path.join(output_path, meta_file), 'wb') as file_metadata:
for i, word in enumerate(model.wv.index2word):
placeholder[i] = model[word]
# temporary solution for https://github.com/tensorflow/tensorflow/issues/9094
if word == '':
print("Emply Line, should replecaed by any thing else, or will cause a bug of tensorboard")
file_metadata.write("{0}".format('<Empty Line>').encode('utf-8') + b'\n')
else:
file_metadata.write("{0}".format(word).encode('utf-8') + b'\n')
# define the model without training
sess = tf.InteractiveSession()
embedding = tf.Variable(placeholder, trainable=False, name='w2x_metadata')
tf.global_variables_initializer().run()
saver = tf.train.Saver()
writer = tf.summary.FileWriter(output_path, sess.graph)
# adding into projector
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = 'w2x_metadata'
embed.metadata_path = meta_file
    # Attach the embedding/metadata config to the log directory so the projector can find the checkpointed variable.
projector.visualize_embeddings(writer, config)
saver.save(sess, os.path.join(output_path, 'w2x_metadata.ckpt'))
print('Run `tensorboard --logdir={0}` to run visualize result on tensorboard'.format(output_path))
if __name__ == "__main__":
"""
    Use model.save_word2vec_format to save w2v_model in word2vec format
Then just run `python w2v_visualizer.py word2vec.text visualize_result`
"""
try:
model_path = sys.argv[1]
output_path = sys.argv[2]
except:
print("Please provide model path and output path")
model = KeyedVectors.load_word2vec_format(model_path)
pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
visualize(model, output_path) | 38.122807 | 107 | 0.693051 |
ece3e1e6d527304ffc1d83612a8c6c8466c63b64 | 1,673 | py | Python | tests/cluster_tests/InternalParallel/InternalParallelExtModel/lorentzAttractor.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | tests/cluster_tests/InternalParallel/InternalParallelExtModel/lorentzAttractor.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | tests/cluster_tests/InternalParallel/InternalParallelExtModel/lorentzAttractor.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' from wikipedia: dx/dt = sigma*(y-x) ; dy/dt = x*(rho-z)-y ; dz/dt = x*y-beta*z '''
import time
import numpy as np
#import pylab as pyl
#import random
#import mpl_toolkits.mplot3d.axes3d as p3
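# RAVEN external-model interface: initialize() sets the Lorenz system parameters, run() integrates the ODEs with a fixed-step explicit Euler scheme and records the x/y/z trajectories.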
def initialize(self,runInfoDict,inputFiles):
self.sigma = 10.0
self.rho = 28.0
self.beta = 8.0/3.0
return
def run(self,Input):
max_time = 0.03
t_step = 0.01
time.sleep(0.0005)
numberTimeSteps = int(max_time/t_step)
self.x = np.zeros(numberTimeSteps)
self.y = np.zeros(numberTimeSteps)
self.z = np.zeros(numberTimeSteps)
self.time = np.zeros(numberTimeSteps)
self.x0 = Input['x0']
self.y0 = Input['y0']
self.z0 = Input['z0']
self.x[0] = Input['x0']
self.y[0] = Input['y0']
self.z[0] = Input['z0']
self.time[0]= 0
for t in range (numberTimeSteps-1):
self.time[t+1] = self.time[t] + t_step
self.x[t+1] = self.x[t] + self.sigma*(self.y[t]-self.x[t]) * t_step
self.y[t+1] = self.y[t] + (self.x[t]*(self.rho-self.z[t])-self.y[t]) * t_step
self.z[t+1] = self.z[t] + (self.x[t]*self.y[t]-self.beta*self.z[t]) * t_step
| 31.566038 | 89 | 0.673042 |