repository_name (string, 7–55 chars) | func_path_in_repository (string, 4–223 chars) | func_name (string, 1–134 chars) | whole_func_string (string, 75–104k chars) | language (1 class: python) | func_documentation_string (string, 1–46.9k chars) | split_name (1 class: train) | func_code_url (string, 87–315 chars)
---|---|---|---|---|---|---|---
Clinical-Genomics/trailblazer | trailblazer/server/api.py | analyses | def analyses():
"""Display analyses."""
per_page = int(request.args.get('per_page', 50))
page = int(request.args.get('page', 1))
query = store.analyses(status=request.args.get('status'),
query=request.args.get('query'),
is_visible=request.args.get('is_visible') == 'true' or None)
query_page = query.paginate(page, per_page=per_page)
data = []
for analysis_obj in query_page.items:
analysis_data = analysis_obj.to_dict()
analysis_data['user'] = analysis_obj.user.to_dict() if analysis_obj.user else None
analysis_data['failed_jobs'] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
data.append(analysis_data)
    return jsonify(analyses=data) | python | Display analyses. | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/server/api.py#L27-L43 |
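For readers unfamiliar with the pagination pattern in `analyses()` above, here is a minimal, self-contained sketch of the same request-parsing and slicing logic behind a Flask test client. The route name and the in-memory stub standing in for `store.analyses()` are illustrative assumptions, not part of trailblazer.

```python
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/analyses")
def analyses_demo():
    # Same query-parameter parsing as the dataset function above.
    per_page = int(request.args.get("per_page", 50))
    page = int(request.args.get("page", 1))
    rows = [{"id": i} for i in range(200)]  # stub for store.analyses(...)
    start = (page - 1) * per_page
    return jsonify(analyses=rows[start:start + per_page])

with app.test_client() as client:
    resp = client.get("/analyses?page=2&per_page=10")
    print(len(resp.get_json()["analyses"]))  # -> 10
```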
Clinical-Genomics/trailblazer | trailblazer/server/api.py | analysis | def analysis(analysis_id):
"""Display a single analysis."""
analysis_obj = store.analysis(analysis_id)
if analysis_obj is None:
return abort(404)
if request.method == 'PUT':
analysis_obj.update(request.json)
store.commit()
data = analysis_obj.to_dict()
data['failed_jobs'] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
data['user'] = analysis_obj.user.to_dict() if analysis_obj.user else None
    return jsonify(**data) | python | Display a single analysis. | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/server/api.py#L47-L60 |
ozgurgunes/django-manifest | manifest/accounts/utils.py | get_gravatar | def get_gravatar(email, size=80, default='identicon'):
""" Get's a Gravatar for a email address.
:param size:
The size in pixels of one side of the Gravatar's square image.
Optional, if not supplied will default to ``80``.
:param default:
Defines what should be displayed if no image is found for this user.
Optional argument which defaults to ``identicon``. The argument can be
a URI to an image or one of the following options:
``404``
Do not load any image if none is associated with the email
hash, instead return an HTTP 404 (File Not Found) response.
``mm``
Mystery-man, a simple, cartoon-style silhouetted outline of a
person (does not vary by email hash).
``identicon``
A geometric pattern based on an email hash.
``monsterid``
A generated 'monster' with different colors, faces, etc.
``wavatar``
Generated faces with differing features and backgrounds
:return: The URI pointing to the Gravatar.
"""
if defaults.ACCOUNTS_GRAVATAR_SECURE:
base_url = 'https://secure.gravatar.com/avatar/'
else: base_url = 'http://www.gravatar.com/avatar/'
gravatar_url = '%(base_url)s%(gravatar_id)s?' % \
{'base_url': base_url,
'gravatar_id': hashlib.md5(email.lower().encode('utf-8')).hexdigest()}
gravatar_url += urllib.parse.urlencode({'s': str(size),
'd': default})
    return gravatar_url | python | Gets a Gravatar for an email address. | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/utils.py#L10-L52 |
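A standalone sketch of the same URL construction, with a plain `secure` flag replacing `defaults.ACCOUNTS_GRAVATAR_SECURE` (an assumption for illustration):

```python
import hashlib
import urllib.parse

def gravatar_url(email, size=80, default="identicon", secure=True):
    base_url = ("https://secure.gravatar.com/avatar/" if secure
                else "http://www.gravatar.com/avatar/")
    gravatar_id = hashlib.md5(email.lower().encode("utf-8")).hexdigest()
    return base_url + gravatar_id + "?" + urllib.parse.urlencode(
        {"s": str(size), "d": default})

print(gravatar_url("user@example.com", size=120))
```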
ozgurgunes/django-manifest | manifest/accounts/utils.py | login_redirect | def login_redirect(redirect=None, user=None):
"""
Redirect user after successful sign in.
First looks for a ``requested_redirect``. If not supplied will fall-back
to the user specific account page. If all fails, will fall-back to the
standard Django ``LOGIN_REDIRECT_URL`` setting. Returns a string defining
the URI to go next.
:param redirect:
A value normally supplied by ``next`` form field. Gets preference
before the default view which requires the user.
:param user:
A ``User`` object specifying the user who has just logged in.
:return: String containing the URI to redirect to.
"""
if redirect: return redirect
elif user is not None:
return defaults.ACCOUNTS_LOGIN_REDIRECT_URL % \
{'username': user.username}
    else: return settings.LOGIN_REDIRECT_URL | python | Redirect user after successful sign in. | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/utils.py#L54-L77 |
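The precedence implemented above (explicit `next` value, then per-user URL, then global fallback) can be exercised in isolation; the two URL constants below stand in for the Django settings and are assumptions:

```python
LOGIN_REDIRECT_URL = "/"                                  # settings stand-in
ACCOUNTS_LOGIN_REDIRECT_URL = "/accounts/%(username)s/"   # defaults stand-in

class User:
    def __init__(self, username):
        self.username = username

def login_redirect(redirect=None, user=None):
    if redirect:
        return redirect
    if user is not None:
        return ACCOUNTS_LOGIN_REDIRECT_URL % {"username": user.username}
    return LOGIN_REDIRECT_URL

assert login_redirect("/next/") == "/next/"
assert login_redirect(user=User("alice")) == "/accounts/alice/"
assert login_redirect() == "/"
```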
ozgurgunes/django-manifest | manifest/accounts/utils.py | generate_sha1 | def generate_sha1(string, salt=None):
"""
    Generates a sha1 hash for the supplied string. It doesn't need to be very secure
because it's not used for password checking. We got Django for that.
:param string:
The string that needs to be encrypted.
:param salt:
Optionally define your own salt. If none is supplied, will use a
random string of 5 characters.
:return: Tuple containing the salt and hash.
"""
if not salt:
salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
hash = hashlib.sha1((salt+str(string)).encode('utf-8')).hexdigest()
    return (salt, hash) | python | Generates a sha1 hash for the supplied string. | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/utils.py#L79-L98 |
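Usage sketch for the helper above: reusing the returned salt reproduces the digest, while omitting it picks a fresh random salt.

```python
import hashlib
import random

def generate_sha1(string, salt=None):
    if not salt:
        salt = hashlib.sha1(str(random.random()).encode("utf-8")).hexdigest()[:5]
    digest = hashlib.sha1((salt + str(string)).encode("utf-8")).hexdigest()
    return salt, digest

salt, digest = generate_sha1("activation-key-material")
_, digest_again = generate_sha1("activation-key-material", salt=salt)
assert digest == digest_again
```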
ozgurgunes/django-manifest | manifest/accounts/utils.py | get_datetime_now | def get_datetime_now():
"""
    Returns a datetime object with the current point in time.
In Django 1.4+ it uses Django's django.utils.timezone.now() which returns
an aware or naive datetime that represents the current point in time
when ``USE_TZ`` in project's settings is True or False respectively.
In older versions of Django it uses datetime.datetime.now().
"""
try:
from django.utils import timezone
return timezone.now()
except ImportError:
        return datetime.datetime.now() | python | Returns a datetime object with the current point in time. | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/utils.py#L113-L127 |
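The fallback pattern above can be reproduced without Django on the path; when the import fails, a naive local `datetime` is returned instead of an aware one:

```python
import datetime

def get_now():
    try:
        from django.utils import timezone  # aware datetime when USE_TZ=True
        return timezone.now()
    except ImportError:
        return datetime.datetime.now()     # naive local-time fallback

print(get_now())
```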
dusktreader/py-buzz | examples/handle_errors.py | complex_handle_errors | def complex_handle_errors():
"""
This function demonstrates a more complex usage of the handle_errors
ctx_mgr. The following features are demonstrated::
* Handling a specific exception type with ``exception_class``
* Absorbing exceptions by setting ``re_raise`` to ``False``
* Branching with ``do_except``, ``do_else``, and ``do_finally``
"""
print("Demonstrating complex handle_errors example")
def _handler_function(err, final_message, trace):
"""
This function is a helper function for handling an exception from
the handle_errors ctx_mgr.
"""
print("Handling exception")
print("==================")
print("Final Message")
print("-------------")
print(final_message)
print("Traceback")
print("---------")
print(''.join(format_tb(trace)))
with HandleError.handle_errors(
"something went wrong ({example_type})",
example_type='complex example',
re_raise=False,
do_except=_handler_function,
do_else=lambda: print('No exception occurred'),
do_finally=lambda: print('Finished handling exceptions'),
):
print("we are fine")
raise ValueError("here we die")
print("we should not get here") | python | def complex_handle_errors():
"""
This function demonstrates a more complex usage of the handle_errors
ctx_mgr. The following features are demonstrated::
* Handling a specific exception type with ``exception_class``
* Absorbing exceptions by setting ``re_raise`` to ``False``
* Branching with ``do_except``, ``do_else``, and ``do_finally``
"""
print("Demonstrating complex handle_errors example")
def _handler_function(err, final_message, trace):
"""
This function is a helper function for handling an exception from
the handle_errors ctx_mgr.
"""
print("Handling exception")
print("==================")
print("Final Message")
print("-------------")
print(final_message)
print("Traceback")
print("---------")
print(''.join(format_tb(trace)))
with HandleError.handle_errors(
"something went wrong ({example_type})",
example_type='complex example',
re_raise=False,
do_except=_handler_function,
do_else=lambda: print('No exception occurred'),
do_finally=lambda: print('Finished handling exceptions'),
):
print("we are fine")
raise ValueError("here we die")
print("we should not get here") | [
"def",
"complex_handle_errors",
"(",
")",
":",
"print",
"(",
"\"Demonstrating complex handle_errors example\"",
")",
"def",
"_handler_function",
"(",
"err",
",",
"final_message",
",",
"trace",
")",
":",
"\"\"\"\n This function is a helper function for handling an exception from\n the handle_errors ctx_mgr.\n \"\"\"",
"print",
"(",
"\"Handling exception\"",
")",
"print",
"(",
"\"==================\"",
")",
"print",
"(",
"\"Final Message\"",
")",
"print",
"(",
"\"-------------\"",
")",
"print",
"(",
"final_message",
")",
"print",
"(",
"\"Traceback\"",
")",
"print",
"(",
"\"---------\"",
")",
"print",
"(",
"''",
".",
"join",
"(",
"format_tb",
"(",
"trace",
")",
")",
")",
"with",
"HandleError",
".",
"handle_errors",
"(",
"\"something went wrong ({example_type})\"",
",",
"example_type",
"=",
"'complex example'",
",",
"re_raise",
"=",
"False",
",",
"do_except",
"=",
"_handler_function",
",",
"do_else",
"=",
"lambda",
":",
"print",
"(",
"'No exception occurred'",
")",
",",
"do_finally",
"=",
"lambda",
":",
"print",
"(",
"'Finished handling exceptions'",
")",
",",
")",
":",
"print",
"(",
"\"we are fine\"",
")",
"raise",
"ValueError",
"(",
"\"here we die\"",
")",
"print",
"(",
"\"we should not get here\"",
")"
] | This function demonstrates a more complex usage of the handle_errors
ctx_mgr. The following features are demonstrated::
* Handling a specific exception type with ``exception_class``
* Absorbing exceptions by setting ``re_raise`` to ``False``
* Branching with ``do_except``, ``do_else``, and ``do_finally`` | [
"This",
"function",
"demonstrates",
"a",
"more",
"complex",
"usage",
"of",
"the",
"handle_errors",
"ctx_mgr",
".",
"The",
"following",
"features",
"are",
"demonstrated",
"::"
] | train | https://github.com/dusktreader/py-buzz/blob/f2fd97abe158a1688188647992a5be6531058ec3/examples/handle_errors.py#L32-L67 |
pytroll/posttroll | posttroll/message.py | is_valid_data | def is_valid_data(obj):
"""Check if data is JSON serializable.
"""
if obj:
try:
tmp = json.dumps(obj, default=datetime_encoder)
del tmp
except (TypeError, UnicodeDecodeError):
return False
    return True | python | Check if data is JSON serializable. | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L85-L94 |
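A self-contained sketch of the check above; the `datetime_encoder` here is an assumption modelled on the ISO-format encoding used elsewhere in the message module:

```python
import datetime
import json

def datetime_encoder(obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError(repr(obj) + " is not JSON serializable")

def is_valid_data(obj):
    if obj:
        try:
            json.dumps(obj, default=datetime_encoder)
        except (TypeError, UnicodeDecodeError):
            return False
    return True

print(is_valid_data({"start_time": datetime.datetime.utcnow()}))  # True
print(is_valid_data({"handle": object()}))                        # False
```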
pytroll/posttroll | posttroll/message.py | datetime_decoder | def datetime_decoder(dct):
"""Decode datetimes to python objects.
"""
if isinstance(dct, list):
pairs = enumerate(dct)
elif isinstance(dct, dict):
pairs = dct.items()
result = []
for key, val in pairs:
if isinstance(val, six.string_types):
try:
val = strp_isoformat(val)
except ValueError:
pass
elif isinstance(val, (dict, list)):
val = datetime_decoder(val)
result.append((key, val))
if isinstance(dct, list):
return [x[1] for x in result]
elif isinstance(dct, dict):
        return dict(result) | python | Decode datetimes to python objects. | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L224-L244 |
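Round-trip sketch for the decoder above: ISO-formatted strings nested in JSON come back as `datetime` objects. `strp_isoformat` is approximated here with `datetime.datetime.fromisoformat`, which is an assumption about its behaviour:

```python
import datetime
import json

def decode_hook(dct):
    out = {}
    for key, val in dct.items():
        if isinstance(val, str):
            try:
                val = datetime.datetime.fromisoformat(val)
            except ValueError:
                pass
        out[key] = val
    return out

raw = '{"platform": "noaa19", "start_time": "2024-05-01T12:30:00"}'
decoded = json.loads(raw, object_hook=decode_hook)
print(type(decoded["start_time"]))  # <class 'datetime.datetime'>
```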
pytroll/posttroll | posttroll/message.py | _decode | def _decode(rawstr):
"""Convert a raw string to a Message.
"""
# Check for the magick word.
try:
rawstr = rawstr.decode('utf-8')
except (AttributeError, UnicodeEncodeError):
pass
except (UnicodeDecodeError):
try:
rawstr = rawstr.decode('iso-8859-1')
except (UnicodeDecodeError):
rawstr = rawstr.decode('utf-8', 'ignore')
if not rawstr.startswith(_MAGICK):
raise MessageError("This is not a '%s' message (wrong magick word)"
% _MAGICK)
rawstr = rawstr[len(_MAGICK):]
# Check for element count and version
raw = re.split(r"\s+", rawstr, maxsplit=6)
if len(raw) < 5:
raise MessageError("Could node decode raw string: '%s ...'"
% str(rawstr[:36]))
version = raw[4][:len(_VERSION)]
if not _is_valid_version(version):
raise MessageError("Invalid Message version: '%s'" % str(version))
# Start to build message
msg = dict((('subject', raw[0].strip()),
('type', raw[1].strip()),
('sender', raw[2].strip()),
('time', strp_isoformat(raw[3].strip())),
('version', version)))
# Data part
try:
mimetype = raw[5].lower()
data = raw[6]
except IndexError:
mimetype = None
if mimetype is None:
msg['data'] = ''
msg['binary'] = False
elif mimetype == 'application/json':
try:
msg['data'] = json.loads(raw[6], object_hook=datetime_decoder)
msg['binary'] = False
except ValueError:
raise MessageError("JSON decode failed on '%s ...'" % raw[6][:36])
elif mimetype == 'text/ascii':
msg['data'] = str(data)
msg['binary'] = False
elif mimetype == 'binary/octet-stream':
msg['data'] = data
msg['binary'] = True
else:
raise MessageError("Unknown mime-type '%s'" % mimetype)
    return msg | python | Convert a raw string to a Message. | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L247-L306 |
pytroll/posttroll | posttroll/message.py | _encode | def _encode(msg, head=False, binary=False):
"""Convert a Message to a raw string.
"""
rawstr = str(_MAGICK) + u"{0:s} {1:s} {2:s} {3:s} {4:s}".format(
msg.subject, msg.type, msg.sender, msg.time.isoformat(), msg.version)
if not head and msg.data:
if not binary and isinstance(msg.data, six.string_types):
return (rawstr + ' ' +
'text/ascii' + ' ' + msg.data)
elif not binary:
return (rawstr + ' ' +
'application/json' + ' ' +
json.dumps(msg.data, default=datetime_encoder))
else:
return (rawstr + ' ' +
'binary/octet-stream' + ' ' + msg.data)
    return rawstr | python | Convert a Message to a raw string. | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L318-L335 |
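A minimal sketch of the header layout `_encode` produces for the JSON branch; the magick prefix value below is an illustrative assumption, not posttroll's actual `_MAGICK` constant:

```python
import datetime
import json

MAGICK = "pytroll://"  # illustrative stand-in for _MAGICK

def encode(subject, type_, sender, data, version="v1.01"):
    # Header fields in the same order _decode splits them back out:
    # subject, type, sender, ISO time, version.
    head = "{0}{1} {2} {3} {4} {5}".format(
        MAGICK, subject, type_, sender,
        datetime.datetime.utcnow().isoformat(), version)
    return head + " application/json " + json.dumps(data)

print(encode("/some/topic", "info", "user@host", {"files": 3}))
```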
pytroll/posttroll | posttroll/message.py | _getsender | def _getsender():
"""Return local sender.
Don't use the getpass module, it looks at various environment variables
and is unreliable.
"""
import os
import pwd
import socket
host = socket.gethostname()
user = pwd.getpwuid(os.getuid())[0]
return "%s@%s" % (user, host) | python | def _getsender():
"""Return local sender.
Don't use the getpass module, it looks at various environment variables
and is unreliable.
"""
import os
import pwd
import socket
host = socket.gethostname()
user = pwd.getpwuid(os.getuid())[0]
return "%s@%s" % (user, host) | [
"def",
"_getsender",
"(",
")",
":",
"import",
"os",
"import",
"pwd",
"import",
"socket",
"host",
"=",
"socket",
".",
"gethostname",
"(",
")",
"user",
"=",
"pwd",
".",
"getpwuid",
"(",
"os",
".",
"getuid",
"(",
")",
")",
"[",
"0",
"]",
"return",
"\"%s@%s\"",
"%",
"(",
"user",
",",
"host",
")"
] | Return local sender.
Don't use the getpass module, it looks at various environment variables
and is unreliable. | [
"Return",
"local",
"sender",
".",
"Don",
"t",
"use",
"the",
"getpass",
"module",
"it",
"looks",
"at",
"various",
"environment",
"variables",
"and",
"is",
"unreliable",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L344-L354 |
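The same sender id can be produced directly with the modules used above (POSIX-only, since `pwd` is unavailable on Windows):

```python
import os
import pwd
import socket

# user@host, without consulting getpass or environment variables.
print("%s@%s" % (pwd.getpwuid(os.getuid())[0], socket.gethostname()))
```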
pytroll/posttroll | posttroll/message.py | Message._validate | def _validate(self):
"""Validate a messages attributes.
"""
if not is_valid_subject(self.subject):
raise MessageError("Invalid subject: '%s'" % self.subject)
if not is_valid_type(self.type):
raise MessageError("Invalid type: '%s'" % self.type)
if not is_valid_sender(self.sender):
raise MessageError("Invalid sender: '%s'" % self.sender)
if not self.binary and not is_valid_data(self.data):
raise MessageError("Invalid data: data is not JSON serializable: %s"
                               % str(self.data)) | python | Validate a message's attributes. | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L188-L199 |
Syndace/python-x3dh | x3dh/state.py | State.__generateSPK | def __generateSPK(self):
"""
        Generate a new SPK and sign its public key using the IK; add the timestamp as well
to allow for periodic rotations.
"""
key = self.__KeyPair.generate()
key_serialized = self.__PublicKeyEncoder.encodePublicKey(
key.pub,
self.__curve
)
signature = self.__XEdDSA(mont_priv = self.__ik.priv).sign(key_serialized)
self.__spk = {
"key": key,
"signature": signature,
"timestamp": time.time()
        } | python | Generate a new SPK and sign its public key using the IK; add the timestamp as well to allow for periodic rotations. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L152-L171 |
Syndace/python-x3dh | x3dh/state.py | State.__generateOTPKs | def __generateOTPKs(self, num_otpks = None):
"""
Generate the given amount of OTPKs.
:param num_otpks: Either an integer or None.
If the value of num_otpks is None, set it to the max_num_otpks value of the
configuration.
"""
if num_otpks == None:
num_otpks = self.__max_num_otpks
otpks = []
for _ in range(num_otpks):
otpks.append(self.__KeyPair.generate())
try:
self.__otpks.extend(otpks)
except AttributeError:
            self.__otpks = otpks | python | Generate the given amount of OTPKs. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L174-L195 |
Syndace/python-x3dh | x3dh/state.py | State.__kdf | def __kdf(self, secret_key_material):
"""
:param secret_key_material: A bytes-like object encoding the secret key material.
:returns: A bytes-like object encoding the shared secret key.
"""
salt = b"\x00" * self.__hash_function().digest_size
if self.__curve == "25519":
input_key_material = b"\xFF" * 32
if self.__curve == "448":
input_key_material = b"\xFF" * 57
input_key_material += secret_key_material
hkdf = HKDF(
algorithm=self.__hash_function(),
length=32,
salt=salt,
info=self.__info_string,
backend=self.__class__.CRYPTOGRAPHY_BACKEND
)
        return hkdf.derive(input_key_material) | python | Derive the shared secret key from the given secret key material. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L201-L224 |
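A standalone HKDF derivation mirroring the method above for the X25519 case: a zero-filled salt of digest size, 32 bytes of 0xFF prepended to the DH output, and an application-specific info string (the info value here is an assumption):

```python
import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

dh_concat = os.urandom(3 * 32)  # stand-in for DH1 || DH2 || DH3
hkdf = HKDF(
    algorithm=hashes.SHA256(),
    length=32,
    salt=b"\x00" * hashes.SHA256().digest_size,
    info=b"example_x3dh_info_string",  # assumption: app-specific info
    backend=default_backend(),
)
sk = hkdf.derive(b"\xff" * 32 + dh_concat)
print(len(sk))  # 32
```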
Syndace/python-x3dh | x3dh/state.py | State.__checkSPKTimestamp | def __checkSPKTimestamp(self):
"""
Check whether the SPK is too old and generate a new one in that case.
"""
if time.time() - self.__spk["timestamp"] > self.__spk_timeout:
            self.__generateSPK() | python | Check whether the SPK is too old and generate a new one in that case. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L230-L236 |
Syndace/python-x3dh | x3dh/state.py | State.__refillOTPKs | def __refillOTPKs(self):
"""
If the amount of available OTPKs fell under the minimum, refills the OTPKs up to
the maximum limit again.
"""
remainingOTPKs = len(self.__otpks)
if remainingOTPKs < self.__min_num_otpks:
            self.__generateOTPKs(self.__max_num_otpks - remainingOTPKs) | python | If the amount of available OTPKs fell under the minimum, refill the OTPKs up to the maximum limit. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L238-L247 |
Syndace/python-x3dh | x3dh/state.py | State.hideFromPublicBundle | def hideFromPublicBundle(self, otpk_pub):
"""
Hide a one-time pre key from the public bundle.
:param otpk_pub: The public key of the one-time pre key to hide, encoded as a
bytes-like object.
"""
self.__checkSPKTimestamp()
for otpk in self.__otpks:
if otpk.pub == otpk_pub:
self.__otpks.remove(otpk)
self.__hidden_otpks.append(otpk)
self.__refillOTPKs() | python | def hideFromPublicBundle(self, otpk_pub):
"""
Hide a one-time pre key from the public bundle.
:param otpk_pub: The public key of the one-time pre key to hide, encoded as a
bytes-like object.
"""
self.__checkSPKTimestamp()
for otpk in self.__otpks:
if otpk.pub == otpk_pub:
self.__otpks.remove(otpk)
self.__hidden_otpks.append(otpk)
self.__refillOTPKs() | [
"def",
"hideFromPublicBundle",
"(",
"self",
",",
"otpk_pub",
")",
":",
"self",
".",
"__checkSPKTimestamp",
"(",
")",
"for",
"otpk",
"in",
"self",
".",
"__otpks",
":",
"if",
"otpk",
".",
"pub",
"==",
"otpk_pub",
":",
"self",
".",
"__otpks",
".",
"remove",
"(",
"otpk",
")",
"self",
".",
"__hidden_otpks",
".",
"append",
"(",
"otpk",
")",
"self",
".",
"__refillOTPKs",
"(",
")"
] | Hide a one-time pre key from the public bundle.
:param otpk_pub: The public key of the one-time pre key to hide, encoded as a
bytes-like object. | [
"Hide",
"a",
"one",
"-",
"time",
"pre",
"key",
"from",
"the",
"public",
"bundle",
"."
] | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L250-L264 |
Syndace/python-x3dh | x3dh/state.py | State.deleteOTPK | def deleteOTPK(self, otpk_pub):
"""
Delete a one-time pre key, either publicly visible or hidden.
:param otpk_pub: The public key of the one-time pre key to delete, encoded as a
bytes-like object.
"""
self.__checkSPKTimestamp()
for otpk in self.__otpks:
if otpk.pub == otpk_pub:
self.__otpks.remove(otpk)
for otpk in self.__hidden_otpks:
if otpk.pub == otpk_pub:
self.__hidden_otpks.remove(otpk)
        self.__refillOTPKs() | python | Delete a one-time pre key, either publicly visible or hidden. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L267-L285 |
Syndace/python-x3dh | x3dh/state.py | State.getPublicBundle | def getPublicBundle(self):
"""
Fill a PublicBundle object with the public bundle data of this State.
:returns: An instance of PublicBundle, filled with the public data of this State.
"""
self.__checkSPKTimestamp()
ik_pub = self.__ik.pub
spk_pub = self.__spk["key"].pub
spk_sig = self.__spk["signature"]
otpk_pubs = [ otpk.pub for otpk in self.__otpks ]
        return PublicBundle(ik_pub, spk_pub, spk_sig, otpk_pubs) | python | Fill a PublicBundle object with the public bundle data of this State. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L291-L305 |
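`PublicBundle` itself is not shown in this extract; judging by the constructor call above, a minimal stand-in (an assumption) carrying the same four fields could look like:

```python
from collections import namedtuple

# ik/spk/otpks are public keys as bytes; spk_signature is an XEdDSA signature.
PublicBundle = namedtuple("PublicBundle", ["ik", "spk", "spk_signature", "otpks"])

bundle = PublicBundle(ik=b"ik", spk=b"spk", spk_signature=b"sig", otpks=[b"otpk0"])
print(bundle.spk_signature)
```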
Syndace/python-x3dh | x3dh/state.py | State.changed | def changed(self):
"""
        Read whether this State has changed since it was loaded or since this flag was last
        cleared.
        :returns: A boolean indicating whether the public bundle data has changed since
last reading this flag.
Clears the flag when reading.
"""
self.__checkSPKTimestamp()
changed = self._changed
self._changed = False
        return changed | python | Read whether this State has changed since it was loaded or since this flag was last cleared. Clears the flag when reading. | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L308-L323 |
Syndace/python-x3dh | x3dh/state.py | State.getSharedSecretActive | def getSharedSecretActive(
self,
other_public_bundle,
allow_zero_otpks = False
):
"""
Do the key exchange, as the active party. This involves selecting keys from the
        passive party's public bundle.
:param other_public_bundle: An instance of PublicBundle, filled with the public
data of the passive party.
:param allow_zero_otpks: A flag indicating whether bundles with no one-time pre
keys are allowed or throw an error. False is the recommended default.
:returns: A dictionary containing the shared secret, the shared associated data
and the data the passive party needs to finalize the key exchange.
The returned structure looks like this::
{
"to_other": {
                # The public key of the active party's identity key pair
                "ik": bytes,
                # The public key of the active party's ephemeral key pair
                "ek": bytes,
                # The public key of the used passive party's one-time pre key or None
                "otpk": bytes or None,
                # The public key of the passive party's signed pre key pair
"spk": bytes
},
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details.
"""
self.__checkSPKTimestamp()
other_ik = self.__KeyPair(pub = other_public_bundle.ik)
other_spk = {
"key": self.__KeyPair(pub = other_public_bundle.spk),
"signature": other_public_bundle.spk_signature
}
other_otpks = [
self.__KeyPair(pub = otpk) for otpk in other_public_bundle.otpks
]
if len(other_otpks) == 0 and not allow_zero_otpks:
raise KeyExchangeException(
"The other public bundle does not contain any OTPKs, which is not " +
"allowed."
)
other_spk_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_spk["key"].pub,
self.__curve
)
if not self.__XEdDSA(mont_pub = other_ik.pub).verify(
other_spk_serialized,
other_spk["signature"]
):
raise KeyExchangeException(
"The signature of this public bundle's spk could not be verifified."
)
ek = self.__KeyPair.generate()
dh1 = self.__ik.getSharedSecret(other_spk["key"])
dh2 = ek.getSharedSecret(other_ik)
dh3 = ek.getSharedSecret(other_spk["key"])
dh4 = b""
otpk = None
if len(other_otpks) > 0:
otpk_index = ord(os.urandom(1)) % len(other_otpks)
otpk = other_otpks[otpk_index]
dh4 = ek.getSharedSecret(otpk)
sk = self.__kdf(dh1 + dh2 + dh3 + dh4)
ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
self.__ik.pub,
self.__curve
)
other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_ik.pub,
self.__curve
)
ad = ik_pub_serialized + other_ik_pub_serialized
return {
"to_other": {
"ik": self.__ik.pub,
"ek": ek.pub,
"otpk": otpk.pub if otpk else None,
"spk": other_spk["key"].pub
},
"ad": ad,
"sk": sk
} | python | def getSharedSecretActive(
self,
other_public_bundle,
allow_zero_otpks = False
):
"""
Do the key exchange, as the active party. This involves selecting keys from the
passive parties' public bundle.
:param other_public_bundle: An instance of PublicBundle, filled with the public
data of the passive party.
:param allow_zero_otpks: A flag indicating whether bundles with no one-time pre
keys are allowed or throw an error. False is the recommended default.
:returns: A dictionary containing the shared secret, the shared associated data
and the data the passive party needs to finalize the key exchange.
The returned structure looks like this::
{
"to_other": {
# The public key of the active parties' identity key pair
"ik": bytes,
# The public key of the active parties' ephemeral key pair
"ek": bytes,
# The public key of the used passive parties' one-time pre key or None
"otpk": bytes or None,
# The public key of the passive parties' signed pre key pair
"spk": bytes
},
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details.
"""
self.__checkSPKTimestamp()
other_ik = self.__KeyPair(pub = other_public_bundle.ik)
other_spk = {
"key": self.__KeyPair(pub = other_public_bundle.spk),
"signature": other_public_bundle.spk_signature
}
other_otpks = [
self.__KeyPair(pub = otpk) for otpk in other_public_bundle.otpks
]
if len(other_otpks) == 0 and not allow_zero_otpks:
raise KeyExchangeException(
"The other public bundle does not contain any OTPKs, which is not " +
"allowed."
)
other_spk_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_spk["key"].pub,
self.__curve
)
if not self.__XEdDSA(mont_pub = other_ik.pub).verify(
other_spk_serialized,
other_spk["signature"]
):
raise KeyExchangeException(
"The signature of this public bundle's spk could not be verifified."
)
ek = self.__KeyPair.generate()
dh1 = self.__ik.getSharedSecret(other_spk["key"])
dh2 = ek.getSharedSecret(other_ik)
dh3 = ek.getSharedSecret(other_spk["key"])
dh4 = b""
otpk = None
if len(other_otpks) > 0:
otpk_index = ord(os.urandom(1)) % len(other_otpks)
otpk = other_otpks[otpk_index]
dh4 = ek.getSharedSecret(otpk)
sk = self.__kdf(dh1 + dh2 + dh3 + dh4)
ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
self.__ik.pub,
self.__curve
)
other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_ik.pub,
self.__curve
)
ad = ik_pub_serialized + other_ik_pub_serialized
return {
"to_other": {
"ik": self.__ik.pub,
"ek": ek.pub,
"otpk": otpk.pub if otpk else None,
"spk": other_spk["key"].pub
},
"ad": ad,
"sk": sk
} | [
"def",
"getSharedSecretActive",
"(",
"self",
",",
"other_public_bundle",
",",
"allow_zero_otpks",
"=",
"False",
")",
":",
"self",
".",
"__checkSPKTimestamp",
"(",
")",
"other_ik",
"=",
"self",
".",
"__KeyPair",
"(",
"pub",
"=",
"other_public_bundle",
".",
"ik",
")",
"other_spk",
"=",
"{",
"\"key\"",
":",
"self",
".",
"__KeyPair",
"(",
"pub",
"=",
"other_public_bundle",
".",
"spk",
")",
",",
"\"signature\"",
":",
"other_public_bundle",
".",
"spk_signature",
"}",
"other_otpks",
"=",
"[",
"self",
".",
"__KeyPair",
"(",
"pub",
"=",
"otpk",
")",
"for",
"otpk",
"in",
"other_public_bundle",
".",
"otpks",
"]",
"if",
"len",
"(",
"other_otpks",
")",
"==",
"0",
"and",
"not",
"allow_zero_otpks",
":",
"raise",
"KeyExchangeException",
"(",
"\"The other public bundle does not contain any OTPKs, which is not \"",
"+",
"\"allowed.\"",
")",
"other_spk_serialized",
"=",
"self",
".",
"__PublicKeyEncoder",
".",
"encodePublicKey",
"(",
"other_spk",
"[",
"\"key\"",
"]",
".",
"pub",
",",
"self",
".",
"__curve",
")",
"if",
"not",
"self",
".",
"__XEdDSA",
"(",
"mont_pub",
"=",
"other_ik",
".",
"pub",
")",
".",
"verify",
"(",
"other_spk_serialized",
",",
"other_spk",
"[",
"\"signature\"",
"]",
")",
":",
"raise",
"KeyExchangeException",
"(",
"\"The signature of this public bundle's spk could not be verifified.\"",
")",
"ek",
"=",
"self",
".",
"__KeyPair",
".",
"generate",
"(",
")",
"dh1",
"=",
"self",
".",
"__ik",
".",
"getSharedSecret",
"(",
"other_spk",
"[",
"\"key\"",
"]",
")",
"dh2",
"=",
"ek",
".",
"getSharedSecret",
"(",
"other_ik",
")",
"dh3",
"=",
"ek",
".",
"getSharedSecret",
"(",
"other_spk",
"[",
"\"key\"",
"]",
")",
"dh4",
"=",
"b\"\"",
"otpk",
"=",
"None",
"if",
"len",
"(",
"other_otpks",
")",
">",
"0",
":",
"otpk_index",
"=",
"ord",
"(",
"os",
".",
"urandom",
"(",
"1",
")",
")",
"%",
"len",
"(",
"other_otpks",
")",
"otpk",
"=",
"other_otpks",
"[",
"otpk_index",
"]",
"dh4",
"=",
"ek",
".",
"getSharedSecret",
"(",
"otpk",
")",
"sk",
"=",
"self",
".",
"__kdf",
"(",
"dh1",
"+",
"dh2",
"+",
"dh3",
"+",
"dh4",
")",
"ik_pub_serialized",
"=",
"self",
".",
"__PublicKeyEncoder",
".",
"encodePublicKey",
"(",
"self",
".",
"__ik",
".",
"pub",
",",
"self",
".",
"__curve",
")",
"other_ik_pub_serialized",
"=",
"self",
".",
"__PublicKeyEncoder",
".",
"encodePublicKey",
"(",
"other_ik",
".",
"pub",
",",
"self",
".",
"__curve",
")",
"ad",
"=",
"ik_pub_serialized",
"+",
"other_ik_pub_serialized",
"return",
"{",
"\"to_other\"",
":",
"{",
"\"ik\"",
":",
"self",
".",
"__ik",
".",
"pub",
",",
"\"ek\"",
":",
"ek",
".",
"pub",
",",
"\"otpk\"",
":",
"otpk",
".",
"pub",
"if",
"otpk",
"else",
"None",
",",
"\"spk\"",
":",
"other_spk",
"[",
"\"key\"",
"]",
".",
"pub",
"}",
",",
"\"ad\"",
":",
"ad",
",",
"\"sk\"",
":",
"sk",
"}"
] | Do the key exchange, as the active party. This involves selecting keys from the
passive parties' public bundle.
:param other_public_bundle: An instance of PublicBundle, filled with the public
data of the passive party.
:param allow_zero_otpks: A flag indicating whether bundles with no one-time pre
keys are allowed or throw an error. False is the recommended default.
:returns: A dictionary containing the shared secret, the shared associated data
and the data the passive party needs to finalize the key exchange.
The returned structure looks like this::
{
"to_other": {
# The public key of the active parties' identity key pair
"ik": bytes,
# The public key of the active parties' ephemeral key pair
"ek": bytes,
# The public key of the used passive parties' one-time pre key or None
"otpk": bytes or None,
# The public key of the passive parties' signed pre key pair
"spk": bytes
},
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details. | [
"Do",
"the",
"key",
"exchange",
"as",
"the",
"active",
"party",
".",
"This",
"involves",
"selecting",
"keys",
"from",
"the",
"passive",
"parties",
"public",
"bundle",
"."
] | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L329-L438 |
Syndace/python-x3dh | x3dh/state.py | State.getSharedSecretPassive | def getSharedSecretPassive(
self,
passive_exchange_data,
allow_no_otpk = False,
keep_otpk = False
):
"""
Do the key exchange, as the passive party. This involves retrieving data about the
key exchange from the active party.
:param passive_exchange_data: A structure generated by the active party, which
contains data required to complete the key exchange. See the "to_other" part
of the structure returned by "getSharedSecretActive".
:param allow_no_otpk: A boolean indicating whether to allow key exchange, even if
the active party did not use a one-time pre key. The recommended default is
False.
:param keep_otpk: Keep the one-time pre key after using it, instead of deleting
it. See the notes below.
:returns: A dictionary containing the shared secret and the shared associated
data.
The returned structure looks like this::
{
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
The specification of X3DH dictates to delete one-time pre keys as soon as they are
used.
This behaviour provides security but may lead to considerable usability downsides
in some environments.
For that reason the keep_otpk flag exists.
If set to True, the one-time pre key is not automatically deleted.
USE WITH CARE, THIS MAY INTRODUCE SECURITY LEAKS IF USED INCORRECTLY.
If you decide to set the flag and to keep the otpks, you have to manage deleting
them yourself, e.g. by subclassing this class and overriding this method.
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details.
"""
self.__checkSPKTimestamp()
other_ik = self.__KeyPair(pub = passive_exchange_data["ik"])
other_ek = self.__KeyPair(pub = passive_exchange_data["ek"])
if self.__spk["key"].pub != passive_exchange_data["spk"]:
raise KeyExchangeException(
"The SPK used for this key exchange has been rotated, the key exchange " +
"can not be completed."
)
my_otpk = None
if "otpk" in passive_exchange_data:
for otpk in self.__otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
for otpk in self.__hidden_otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
if not my_otpk:
raise KeyExchangeException(
"The OTPK used for this key exchange has been deleted, the key " +
"exchange can not be completed."
)
elif not allow_no_otpk:
raise KeyExchangeException(
"This key exchange data does not contain an OTPK, which is not allowed."
)
dh1 = self.__spk["key"].getSharedSecret(other_ik)
dh2 = self.__ik.getSharedSecret(other_ek)
dh3 = self.__spk["key"].getSharedSecret(other_ek)
dh4 = b""
if my_otpk:
dh4 = my_otpk.getSharedSecret(other_ek)
sk = self.__kdf(dh1 + dh2 + dh3 + dh4)
other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_ik.pub,
self.__curve
)
ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
self.__ik.pub,
self.__curve
)
ad = other_ik_pub_serialized + ik_pub_serialized
if my_otpk and not keep_otpk:
self.deleteOTPK(my_otpk.pub)
return {
"ad": ad,
"sk": sk
} | python | def getSharedSecretPassive(
self,
passive_exchange_data,
allow_no_otpk = False,
keep_otpk = False
):
"""
Do the key exchange, as the passive party. This involves retrieving data about the
key exchange from the active party.
:param passive_exchange_data: A structure generated by the active party, which
contains data required to complete the key exchange. See the "to_other" part
of the structure returned by "getSharedSecretActive".
:param allow_no_otpk: A boolean indicating whether to allow key exchange, even if
the active party did not use a one-time pre key. The recommended default is
False.
:param keep_otpk: Keep the one-time pre key after using it, instead of deleting
it. See the notes below.
:returns: A dictionary containing the shared secret and the shared associated
data.
The returned structure looks like this::
{
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
The specification of X3DH dictates to delete one-time pre keys as soon as they are
used.
This behaviour provides security but may lead to considerable usability downsides
in some environments.
For that reason the keep_otpk flag exists.
If set to True, the one-time pre key is not automatically deleted.
USE WITH CARE, THIS MAY INTRODUCE SECURITY LEAKS IF USED INCORRECTLY.
If you decide to set the flag and to keep the otpks, you have to manage deleting
them yourself, e.g. by subclassing this class and overriding this method.
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details.
"""
self.__checkSPKTimestamp()
other_ik = self.__KeyPair(pub = passive_exchange_data["ik"])
other_ek = self.__KeyPair(pub = passive_exchange_data["ek"])
if self.__spk["key"].pub != passive_exchange_data["spk"]:
raise KeyExchangeException(
"The SPK used for this key exchange has been rotated, the key exchange " +
"can not be completed."
)
my_otpk = None
if "otpk" in passive_exchange_data:
for otpk in self.__otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
for otpk in self.__hidden_otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
if not my_otpk:
raise KeyExchangeException(
"The OTPK used for this key exchange has been deleted, the key " +
"exchange can not be completed."
)
elif not allow_no_otpk:
raise KeyExchangeException(
"This key exchange data does not contain an OTPK, which is not allowed."
)
dh1 = self.__spk["key"].getSharedSecret(other_ik)
dh2 = self.__ik.getSharedSecret(other_ek)
dh3 = self.__spk["key"].getSharedSecret(other_ek)
dh4 = b""
if my_otpk:
dh4 = my_otpk.getSharedSecret(other_ek)
sk = self.__kdf(dh1 + dh2 + dh3 + dh4)
other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_ik.pub,
self.__curve
)
ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
self.__ik.pub,
self.__curve
)
ad = other_ik_pub_serialized + ik_pub_serialized
if my_otpk and not keep_otpk:
self.deleteOTPK(my_otpk.pub)
return {
"ad": ad,
"sk": sk
} | [
"def",
"getSharedSecretPassive",
"(",
"self",
",",
"passive_exchange_data",
",",
"allow_no_otpk",
"=",
"False",
",",
"keep_otpk",
"=",
"False",
")",
":",
"self",
".",
"__checkSPKTimestamp",
"(",
")",
"other_ik",
"=",
"self",
".",
"__KeyPair",
"(",
"pub",
"=",
"passive_exchange_data",
"[",
"\"ik\"",
"]",
")",
"other_ek",
"=",
"self",
".",
"__KeyPair",
"(",
"pub",
"=",
"passive_exchange_data",
"[",
"\"ek\"",
"]",
")",
"if",
"self",
".",
"__spk",
"[",
"\"key\"",
"]",
".",
"pub",
"!=",
"passive_exchange_data",
"[",
"\"spk\"",
"]",
":",
"raise",
"KeyExchangeException",
"(",
"\"The SPK used for this key exchange has been rotated, the key exchange \"",
"+",
"\"can not be completed.\"",
")",
"my_otpk",
"=",
"None",
"if",
"\"otpk\"",
"in",
"passive_exchange_data",
":",
"for",
"otpk",
"in",
"self",
".",
"__otpks",
":",
"if",
"otpk",
".",
"pub",
"==",
"passive_exchange_data",
"[",
"\"otpk\"",
"]",
":",
"my_otpk",
"=",
"otpk",
"break",
"for",
"otpk",
"in",
"self",
".",
"__hidden_otpks",
":",
"if",
"otpk",
".",
"pub",
"==",
"passive_exchange_data",
"[",
"\"otpk\"",
"]",
":",
"my_otpk",
"=",
"otpk",
"break",
"if",
"not",
"my_otpk",
":",
"raise",
"KeyExchangeException",
"(",
"\"The OTPK used for this key exchange has been deleted, the key \"",
"+",
"\"exchange can not be completed.\"",
")",
"elif",
"not",
"allow_no_otpk",
":",
"raise",
"KeyExchangeException",
"(",
"\"This key exchange data does not contain an OTPK, which is not allowed.\"",
")",
"dh1",
"=",
"self",
".",
"__spk",
"[",
"\"key\"",
"]",
".",
"getSharedSecret",
"(",
"other_ik",
")",
"dh2",
"=",
"self",
".",
"__ik",
".",
"getSharedSecret",
"(",
"other_ek",
")",
"dh3",
"=",
"self",
".",
"__spk",
"[",
"\"key\"",
"]",
".",
"getSharedSecret",
"(",
"other_ek",
")",
"dh4",
"=",
"b\"\"",
"if",
"my_otpk",
":",
"dh4",
"=",
"my_otpk",
".",
"getSharedSecret",
"(",
"other_ek",
")",
"sk",
"=",
"self",
".",
"__kdf",
"(",
"dh1",
"+",
"dh2",
"+",
"dh3",
"+",
"dh4",
")",
"other_ik_pub_serialized",
"=",
"self",
".",
"__PublicKeyEncoder",
".",
"encodePublicKey",
"(",
"other_ik",
".",
"pub",
",",
"self",
".",
"__curve",
")",
"ik_pub_serialized",
"=",
"self",
".",
"__PublicKeyEncoder",
".",
"encodePublicKey",
"(",
"self",
".",
"__ik",
".",
"pub",
",",
"self",
".",
"__curve",
")",
"ad",
"=",
"other_ik_pub_serialized",
"+",
"ik_pub_serialized",
"if",
"my_otpk",
"and",
"not",
"keep_otpk",
":",
"self",
".",
"deleteOTPK",
"(",
"my_otpk",
".",
"pub",
")",
"return",
"{",
"\"ad\"",
":",
"ad",
",",
"\"sk\"",
":",
"sk",
"}"
] | Do the key exchange, as the passive party. This involves retrieving data about the
key exchange from the active party.
:param passive_exchange_data: A structure generated by the active party, which
contains data required to complete the key exchange. See the "to_other" part
of the structure returned by "getSharedSecretActive".
:param allow_no_otpk: A boolean indicating whether to allow key exchange, even if
the active party did not use a one-time pre key. The recommended default is
False.
:param keep_otpk: Keep the one-time pre key after using it, instead of deleting
it. See the notes below.
:returns: A dictionary containing the shared secret and the shared associated
data.
The returned structure looks like this::
{
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
The specification of X3DH dictates to delete one-time pre keys as soon as they are
used.
This behaviour provides security but may lead to considerable usability downsides
in some environments.
For that reason the keep_otpk flag exists.
If set to True, the one-time pre key is not automatically deleted.
USE WITH CARE, THIS MAY INTRODUCE SECURITY LEAKS IF USED INCORRECTLY.
If you decide to set the flag and to keep the otpks, you have to manage deleting
them yourself, e.g. by subclassing this class and overriding this method.
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details. | [
"Do",
"the",
"key",
"exchange",
"as",
"the",
"passive",
"party",
".",
"This",
"involves",
"retrieving",
"data",
"about",
"the",
"key",
"exchange",
"from",
"the",
"active",
"party",
"."
] | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L440-L545 |
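The passive counterpart, under the same assumptions (MyState and the transport helper are hypothetical). Both sides end up with identical "sk" and "ad", because the active party also concatenates the serialized identity keys in active-then-passive order:

bob = MyState()
exchange_data = receive_from_alice()   # the "to_other" dict produced above

result = bob.getSharedSecretPassive(exchange_data)
# result["sk"] and result["ad"] match the active party's session["sk"]/["ad"];
# the one-time pre key used here has now been deleted (keep_otpk defaults to False).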
kislyuk/tweak | tweak/__init__.py | Config.save | def save(self, mode=0o600):
"""
Serialize the config data to the user home directory.
:param mode: The octal Unix mode (permissions) for the config file.
"""
if self._parent is not None:
self._parent.save(mode=mode)
else:
config_dir = os.path.dirname(os.path.abspath(self.config_files[-1]))
try:
os.makedirs(config_dir)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(config_dir)):
raise
with open(self.config_files[-1], "wb" if sys.version_info < (3, 0) else "w") as fh:
self._dump(fh)
os.chmod(self.config_files[-1], mode)
self._logger.debug("Saved config to %s", self.config_files[-1]) | python | def save(self, mode=0o600):
"""
Serialize the config data to the user home directory.
:param mode: The octal Unix mode (permissions) for the config file.
"""
if self._parent is not None:
self._parent.save(mode=mode)
else:
config_dir = os.path.dirname(os.path.abspath(self.config_files[-1]))
try:
os.makedirs(config_dir)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(config_dir)):
raise
with open(self.config_files[-1], "wb" if sys.version_info < (3, 0) else "w") as fh:
self._dump(fh)
os.chmod(self.config_files[-1], mode)
self._logger.debug("Saved config to %s", self.config_files[-1]) | [
"def",
"save",
"(",
"self",
",",
"mode",
"=",
"0o600",
")",
":",
"if",
"self",
".",
"_parent",
"is",
"not",
"None",
":",
"self",
".",
"_parent",
".",
"save",
"(",
"mode",
"=",
"mode",
")",
"else",
":",
"config_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"config_files",
"[",
"-",
"1",
"]",
")",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"config_dir",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"not",
"(",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"config_dir",
")",
")",
":",
"raise",
"with",
"open",
"(",
"self",
".",
"config_files",
"[",
"-",
"1",
"]",
",",
"\"wb\"",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
"else",
"\"w\"",
")",
"as",
"fh",
":",
"self",
".",
"_dump",
"(",
"fh",
")",
"os",
".",
"chmod",
"(",
"self",
".",
"config_files",
"[",
"-",
"1",
"]",
",",
"mode",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Saved config to %s\"",
",",
"self",
".",
"config_files",
"[",
"-",
"1",
"]",
")"
] | Serialize the config data to the user home directory.
:param mode: The octal Unix mode (permissions) for the config file. | [
"Serialize",
"the",
"config",
"data",
"to",
"the",
"user",
"home",
"directory",
"."
] | train | https://github.com/kislyuk/tweak/blob/0fec5fa8a21e46713c565a996f41c8b7c6793166/tweak/__init__.py#L148-L166 |
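The try/except around os.makedirs is the pre-exist_ok idiom for race-free directory creation that save() relies on; as a standalone sketch:

import errno
import os

def ensure_dir(path):
    # Equivalent to os.makedirs(path, exist_ok=True) on Python >= 3.2,
    # written the Python-2-compatible way used in save().
    try:
        os.makedirs(path)
    except OSError as e:
        if not (e.errno == errno.EEXIST and os.path.isdir(path)):
            raise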
daskol/nls | nls/model.py | Problem.model | def model(self, *args, **kwargs):
"""
Priority of Arguments: Arguments passed in `kwargs` have the highest priority, 'params' key in `kwargs` has lower
priority than `kwargs` and dictionary arguments in `args` have the least priority. Other arguments are ignored.
Argument List:
model - set model type, default value 'default';
dx - default value '1.0e-1';
dt - default value '1.0e-3';
t0 - default value '0.0';
u0 - default value '1.0e-1';
order - default value '5';
pumping - default value ``;
!original_params - default value `{}`;
!dimless_params - default value `{}`;
"""
if 'filename' in kwargs:
return self.modelFromFile(kwargs['filename'])
if 'params' in kwargs:
params = kwargs.pop('params')
kwargs['model'] = 'default' if 'model' not in kwargs else kwargs['model']
kwargs['original_params'] = {} if 'original_params' not in kwargs else kwargs['original_params']
if 'R' not in kwargs['original_params']:
kwargs['original_params']['R'] = 0.0242057488654
if 'gamma' not in kwargs['original_params']:
kwargs['original_params']['gamma'] = 0.0242057488654
if 'g' not in kwargs['original_params']:
kwargs['original_params']['g'] = 0.00162178517398
if 'tilde_g' not in kwargs['original_params']:
kwargs['original_params']['tilde_g'] = 0.0169440242057
if 'gamma_R' not in kwargs['original_params']:
kwargs['original_params']['gamma_R'] = 0.242057488654
if kwargs.get('model') in ('1d', 'default', str(Model1D)):
return self.fabricateModel1D(*args, **kwargs)
elif kwargs.get('model') in ('2d', str(Model2D)):
return self.fabricateModel2D(*args, **kwargs)
else:
raise Exception('Unknown model passed!') | python | def model(self, *args, **kwargs):
"""
Priority of Arguments: Arguments passed in `kwargs` have the highest priority, 'params' key in `kwargs` has lower
priority than `kwargs` and dictionary arguments in `args` have the least priority. Other arguments are ignored.
Argument List:
model - set model type, default value 'default';
dx - default value '1.0e-1';
dt - default value '1.0e-3';
t0 - default value '0.0';
u0 - default value '1.0e-1';
order - default value '5';
pumping - default value ``;
!original_params - default value `{}`;
!dimless_params - default value `{}`;
"""
if 'filename' in kwargs:
return self.modelFromFile(kwargs['filename'])
if 'params' in kwargs:
params = kwargs.pop('params')
kwargs['model'] = 'default' if 'model' not in kwargs else kwargs['model']
kwargs['original_params'] = {} if 'original_params' not in kwargs else kwargs['original_params']
if 'R' not in kwargs['original_params']:
kwargs['original_params']['R'] = 0.0242057488654
if 'gamma' not in kwargs['original_params']:
kwargs['original_params']['gamma'] = 0.0242057488654
if 'g' not in kwargs['original_params']:
kwargs['original_params']['g'] = 0.00162178517398
if 'tilde_g' not in kwargs['original_params']:
kwargs['original_params']['tilde_g'] = 0.0169440242057
if 'gamma_R' not in kwargs['original_params']:
kwargs['original_params']['gamma_R'] = 0.242057488654
if kwargs.get('model') in ('1d', 'default', str(Model1D)):
return self.fabricateModel1D(*args, **kwargs)
elif kwargs.get('model') in ('2d', str(Model2D)):
return self.fabricateModel2D(*args, **kwargs)
else:
raise Exception('Unknown model passed!') | [
"def",
"model",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'filename'",
"in",
"kwargs",
":",
"return",
"self",
".",
"modelFromFile",
"(",
"kwargs",
"[",
"'filename'",
"]",
")",
"if",
"'params'",
"in",
"kwargs",
":",
"params",
"=",
"kwargs",
".",
"pop",
"(",
"'params'",
")",
"kwargs",
"[",
"'model'",
"]",
"=",
"'default'",
"if",
"'model'",
"not",
"in",
"kwargs",
"else",
"kwargs",
"[",
"'model'",
"]",
"kwargs",
"[",
"'original_params'",
"]",
"=",
"{",
"}",
"if",
"'original_params'",
"not",
"in",
"kwargs",
"else",
"kwargs",
"[",
"'original_params'",
"]",
"if",
"'R'",
"not",
"in",
"kwargs",
"[",
"'original_params'",
"]",
":",
"kwargs",
"[",
"'original_params'",
"]",
"[",
"'R'",
"]",
"=",
"0.0242057488654",
"if",
"'gamma'",
"not",
"in",
"kwargs",
"[",
"'original_params'",
"]",
":",
"kwargs",
"[",
"'original_params'",
"]",
"[",
"'gamma'",
"]",
"=",
"0.0242057488654",
"if",
"'g'",
"not",
"in",
"kwargs",
"[",
"'original_params'",
"]",
":",
"kwargs",
"[",
"'original_params'",
"]",
"[",
"'g'",
"]",
"=",
"0.00162178517398",
"if",
"'tilde_g'",
"not",
"in",
"kwargs",
"[",
"'original_params'",
"]",
":",
"kwargs",
"[",
"'original_params'",
"]",
"[",
"'tilde_g'",
"]",
"=",
"0.0169440242057",
"if",
"'gamma_R'",
"not",
"in",
"kwargs",
"[",
"'original_params'",
"]",
":",
"kwargs",
"[",
"'original_params'",
"]",
"[",
"'gamma_R'",
"]",
"=",
"0.242057488654",
"if",
"kwargs",
".",
"get",
"(",
"'model'",
")",
"in",
"(",
"'1d'",
",",
"'default'",
",",
"str",
"(",
"Model1D",
")",
")",
":",
"return",
"self",
".",
"fabricateModel1D",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"kwargs",
".",
"get",
"(",
"'model'",
")",
"in",
"(",
"'2d'",
",",
"str",
"(",
"Model2D",
")",
")",
":",
"return",
"self",
".",
"fabricateModel2D",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown model passed!'",
")"
] | Priority of Arguments: Arguments passed in `kwargs` have the highest priority, 'params' key in `kwargs` has lower
priority than `kwargs` and dictionary arguments in `args` have the least priority. Other arguments are ignored.
Argument List:
model - set model type, default value 'default';
dx - default value '1.0e-1';
dt - default value '1.0e-3';
t0 - default value '0.0';
u0 - default value '1.0e-1';
order - default value '5';
pumping - default value ``;
!original_params - default value `{}`;
!dimless_params - default value `{}`; | [
"Piority",
"of",
"Arguments",
":",
"Arguments",
"passed",
"in",
"kwargs",
"has",
"the",
"most",
"piority",
"param",
"key",
"in",
"kwargs",
"has",
"less",
"piority",
"than",
"kwargs",
"and",
"dictionary",
"arguments",
"in",
"args",
"have",
"the",
"least",
"piority",
".",
"Other",
"arguments",
"are",
"ignored",
".",
"Argument",
"List",
":",
"model",
"-",
"set",
"model",
"type",
"default",
"value",
"default",
";",
"dx",
"-",
"default",
"value",
"1",
".",
"0e",
"-",
"1",
";",
"dt",
"-",
"default",
"value",
"1",
".",
"0e",
"-",
"3",
";",
"t0",
"-",
"default",
"value",
"0",
".",
"0",
";",
"u0",
"-",
"default",
"value",
"1",
".",
"0e",
"-",
"1",
";",
"order",
"-",
"default",
"value",
"5",
";",
"pumping",
"-",
"default",
"value",
";",
"!original_params",
"-",
"default",
"value",
"{}",
";",
"!dimless_params",
"-",
"default",
"value",
"{}",
";"
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L32-L72 |
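A hedged call sketch of the factory: the keyword names follow the Argument List in the docstring, the values are illustrative, and Problem() is assumed to need no constructor arguments. Constants omitted from original_params fall back to the defaults hard-coded above:

problem = Problem()
model = problem.model(
    model='1d',
    dx=1.0e-1, dt=1.0e-3, t0=0.0, u0=1.0e-1, order=5,
    original_params={'R': 0.05},   # gamma, g, tilde_g, gamma_R take their defaults
)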
daskol/nls | nls/model.py | AbstractModel.getChemicalPotential | def getChemicalPotential(self, solution):
"""Call solver in order to calculate chemical potential.
"""
if isinstance(solution, Solution):
solution = solution.getSolution()
self.mu = self.solver.chemicalPotential(solution)
return self.mu | python | def getChemicalPotential(self, solution):
"""Call solver in order to calculate chemical potential.
"""
if isinstance(solution, Solution):
solution = solution.getSolution()
self.mu = self.solver.chemicalPotential(solution)
return self.mu | [
"def",
"getChemicalPotential",
"(",
"self",
",",
"solution",
")",
":",
"if",
"isinstance",
"(",
"solution",
",",
"Solution",
")",
":",
"solution",
"=",
"solution",
".",
"getSolution",
"(",
")",
"self",
".",
"mu",
"=",
"self",
".",
"solver",
".",
"chemicalPotential",
"(",
"solution",
")",
"return",
"self",
".",
"mu"
] | Call solver in order to calculate chemical potential. | [
"Call",
"solver",
"in",
"order",
"to",
"calculate",
"chemical",
"potential",
"."
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L196-L203 |
daskol/nls | nls/model.py | AbstractModel.store | def store(self, filename=None, label=None, desc=None, date=None):
"""Store object to mat-file. TODO: determine format specification
"""
date = date if date else datetime.now()
date = date.replace(microsecond=0).isoformat()
filename = filename if filename else date + '.mat'
matfile = {
'model': str(type(self)),
'date': date,
'dim': len(self.init_sol.shape),
'dimlesses': self.coeffs,
'init_solution': self.init_sol,
'num_iters': self.num_iters,
'num_nodes': self.num_nodes,
'order': self.order,
'originals': self.originals,
'pumping': self.getPumping(),
'spatial_step': self.dx,
'time_step': self.dt,
}
if desc:
matfile['desc'] = desc
if label:
matfile['label'] = label
savemat(filename, matfile) | python | def store(self, filename=None, label=None, desc=None, date=None):
"""Store object to mat-file. TODO: determine format specification
"""
date = date if date else datetime.now()
date = date.replace(microsecond=0).isoformat()
filename = filename if filename else date + '.mat'
matfile = {
'model': str(type(self)),
'date': date,
'dim': len(self.init_sol.shape),
'dimlesses': self.coeffs,
'init_solution': self.init_sol,
'num_iters': self.num_iters,
'num_nodes': self.num_nodes,
'order': self.order,
'originals': self.originals,
'pumping': self.getPumping(),
'spatial_step': self.dx,
'time_step': self.dt,
}
if desc:
matfile['desc'] = desc
if label:
matfile['label'] = label
savemat(filename, matfile) | [
"def",
"store",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"label",
"=",
"None",
",",
"desc",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"date",
"=",
"date",
"if",
"date",
"else",
"datetime",
".",
"now",
"(",
")",
"date",
"=",
"date",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
".",
"isoformat",
"(",
")",
"filename",
"=",
"filename",
"if",
"filename",
"else",
"date",
"+",
"'.mat'",
"matfile",
"=",
"{",
"'model'",
":",
"str",
"(",
"type",
"(",
"self",
")",
")",
",",
"'date'",
":",
"date",
",",
"'dim'",
":",
"len",
"(",
"self",
".",
"init_sol",
".",
"shape",
")",
",",
"'dimlesses'",
":",
"self",
".",
"coeffs",
",",
"'init_solution'",
":",
"self",
".",
"init_sol",
",",
"'num_iters'",
":",
"self",
".",
"num_iters",
",",
"'num_nodes'",
":",
"self",
".",
"num_nodes",
",",
"'order'",
":",
"self",
".",
"order",
",",
"'originals'",
":",
"self",
".",
"originals",
",",
"'pumping'",
":",
"self",
".",
"getPumping",
"(",
")",
",",
"'spatial_step'",
":",
"self",
".",
"dx",
",",
"'time_step'",
":",
"self",
".",
"dt",
",",
"}",
"if",
"desc",
":",
"matfile",
"[",
"'desc'",
"]",
"=",
"desc",
"if",
"label",
":",
"matfile",
"[",
"'label'",
"]",
"=",
"label",
"savemat",
"(",
"filename",
",",
"matfile",
")"
] | Store object to mat-file. TODO: determine format specification | [
"Store",
"object",
"to",
"mat",
"-",
"file",
".",
"TODO",
":",
"determine",
"format",
"specification"
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L257-L284 |
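The resulting mat-file can be inspected directly with scipy (the filename is illustrative). Note that loadmat returns scalars as 1x1 arrays, which is why restore() below unwraps them with [0, 0]:

from scipy.io import loadmat

matfile = loadmat('2019-01-01T00:00:00.mat')
print(matfile['model'])
print(matfile['num_nodes'][0, 0], matfile['time_step'][0, 0])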
daskol/nls | nls/model.py | AbstractModel.restore | def restore(self, filename):
"""Restore object from mat-file. TODO: determine format specification
"""
matfile = loadmat(filename)
matfile['originals'] = matfile['originals'][0, 0]
if matfile['dim'] == 1:
matfile['init_solution'] = matfile['init_solution'][0, :]
matfile['pumping'] = matfile['pumping'][0, :]
self.coeffs = matfile['dimlesses'][0, :]
self.init_sol = matfile['init_solution']
self.num_nodes = matfile['num_nodes'][0, 0]
self.num_iters = matfile['num_iters'][0, 0]
self.pumping = GridPumping(matfile['pumping'])
self.dx = matfile['spatial_step'][0, 0]
self.dt = matfile['time_step'][0, 0]
types = matfile['originals'].dtype
values = matfile['originals']
self.originals = dict(zip(types.names, (value[0, 0] for value in values)))
if 'desc' in matfile:
self.desc = str(matfile['desc'][0])
if 'label' in matfile:
self.label = str(matfile['label'][0])
return self | python | def restore(self, filename):
"""Restore object from mat-file. TODO: determine format specification
"""
matfile = loadmat(filename)
matfile['originals'] = matfile['originals'][0, 0]
if matfile['dim'] == 1:
matfile['init_solution'] = matfile['init_solution'][0, :]
matfile['pumping'] = matfile['pumping'][0, :]
self.coeffs = matfile['dimlesses'][0, :]
self.init_sol = matfile['init_solution']
self.num_nodes = matfile['num_nodes'][0, 0]
self.num_iters = matfile['num_iters'][0, 0]
self.pumping = GridPumping(matfile['pumping'])
self.dx = matfile['spatial_step'][0, 0]
self.dt = matfile['time_step'][0, 0]
types = matfile['originals'].dtype
values = matfile['originals']
self.originals = dict(zip(types.names, (value[0, 0] for value in values)))
if 'desc' in matfile:
self.desc = str(matfile['desc'][0])
if 'label' in matfile:
self.label = str(matfile['label'][0])
return self | [
"def",
"restore",
"(",
"self",
",",
"filename",
")",
":",
"matfile",
"=",
"loadmat",
"(",
"filename",
")",
"matfile",
"[",
"'originals'",
"]",
"=",
"matfile",
"[",
"'originals'",
"]",
"[",
"0",
",",
"0",
"]",
"if",
"matfile",
"[",
"'dim'",
"]",
"==",
"1",
":",
"matfile",
"[",
"'init_solution'",
"]",
"=",
"matfile",
"[",
"'init_solution'",
"]",
"[",
"0",
",",
":",
"]",
"matfile",
"[",
"'pumping'",
"]",
"=",
"matfile",
"[",
"'pumping'",
"]",
"[",
"0",
",",
":",
"]",
"self",
".",
"coeffs",
"=",
"matfile",
"[",
"'dimlesses'",
"]",
"[",
"0",
",",
":",
"]",
"self",
".",
"init_sol",
"=",
"matfile",
"[",
"'init_solution'",
"]",
"self",
".",
"num_nodes",
"=",
"matfile",
"[",
"'num_nodes'",
"]",
"[",
"0",
",",
"0",
"]",
"self",
".",
"num_iters",
"=",
"matfile",
"[",
"'num_iters'",
"]",
"[",
"0",
",",
"0",
"]",
"self",
".",
"pumping",
"=",
"GridPumping",
"(",
"matfile",
"[",
"'pumping'",
"]",
")",
"self",
".",
"dx",
"=",
"matfile",
"[",
"'spatial_step'",
"]",
"[",
"0",
",",
"0",
"]",
"self",
".",
"dt",
"=",
"matfile",
"[",
"'time_step'",
"]",
"[",
"0",
",",
"0",
"]",
"types",
"=",
"matfile",
"[",
"'originals'",
"]",
".",
"dtype",
"values",
"=",
"matfile",
"[",
"'originals'",
"]",
"self",
".",
"originals",
"=",
"dict",
"(",
"zip",
"(",
"types",
".",
"names",
",",
"(",
"value",
"[",
"0",
",",
"0",
"]",
"for",
"value",
"in",
"values",
")",
")",
")",
"if",
"'desc'",
"in",
"matfile",
":",
"self",
".",
"desc",
"=",
"str",
"(",
"matfile",
"[",
"'desc'",
"]",
"[",
"0",
"]",
")",
"if",
"'label'",
"in",
"matfile",
":",
"self",
".",
"label",
"=",
"str",
"(",
"matfile",
"[",
"'label'",
"]",
"[",
"0",
"]",
")",
"return",
"self"
] | Restore object from mat-file. TODO: determine format specification | [
"Restore",
"object",
"from",
"mat",
"-",
"file",
".",
"TODO",
":",
"determine",
"format",
"specification"
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L286-L314 |
daskol/nls | nls/model.py | Solution.getDampingIntegral | def getDampingIntegral(self):
"""Calculate integral of damping terms of hamiltonian using rectangular method.
"""
reservoir = self.getReservoir()
density = self.getDensity()
length = self.model.getSpatialStep()
if self.solution.ndim == 1:
nodes = self.model.getNumberOfNodes()
radius = linspace(0, nodes * self.model.getSpatialStep(), nodes)
integral = 2 * pi * sum((reservoir - 1.0) * density * radius * length)
elif self.solution.ndim == 2:
area = length ** 2
integral = sum(sum((reservoir - 1.0) * density * area))
return integral | python | def getDampingIntegral(self):
"""Calculate integral of damping terms of hamiltonian using rectangular method.
"""
reservoir = self.getReservoir()
density = self.getDensity()
length = self.model.getSpatialStep()
if self.solution.ndim == 1:
nodes = self.model.getNumberOfNodes()
radius = linspace(0, nodes * self.model.getSpatialStep(), nodes)
integral = 2 * pi * sum((reservoir - 1.0) * density * radius * length)
elif self.solution.ndim == 2:
area = length ** 2
integral = sum(sum((reservoir - 1.0) * density * area))
return integral | [
"def",
"getDampingIntegral",
"(",
"self",
")",
":",
"reservoir",
"=",
"self",
".",
"getReservoir",
"(",
")",
"density",
"=",
"self",
".",
"getDensity",
"(",
")",
"length",
"=",
"self",
".",
"model",
".",
"getSpatialStep",
"(",
")",
"if",
"self",
".",
"solution",
".",
"ndim",
"==",
"1",
":",
"nodes",
"=",
"self",
".",
"model",
".",
"getNumberOfNodes",
"(",
")",
"radius",
"=",
"linspace",
"(",
"0",
",",
"nodes",
"*",
"self",
".",
"model",
".",
"getSpatialStep",
"(",
")",
",",
"nodes",
")",
"integral",
"=",
"2",
"*",
"pi",
"*",
"sum",
"(",
"(",
"reservoir",
"-",
"1.0",
")",
"*",
"density",
"*",
"radius",
"*",
"length",
")",
"elif",
"self",
".",
"solution",
".",
"ndim",
"==",
"2",
":",
"area",
"=",
"length",
"**",
"2",
"integral",
"=",
"sum",
"(",
"sum",
"(",
"(",
"reservoir",
"-",
"1.0",
")",
"*",
"density",
"*",
"area",
")",
")",
"return",
"integral"
] | Calculate integral of damping terms of hamiltonian using rectangular method. | [
"Calculate",
"integral",
"of",
"damping",
"terms",
"of",
"hamiltonian",
"using",
"rectangular",
"method",
"."
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L350-L365 |
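The 1-D branch above is a rectangular-rule approximation of 2*pi times the integral of (reservoir - 1) * density * r dr; the same quadrature as a standalone numpy sketch with illustrative profiles:

from numpy import exp, linspace, pi

nodes, dx = 1000, 1.0e-1
r = linspace(0, nodes * dx, nodes)
reservoir = 1.0 + exp(-r ** 2)       # illustrative reservoir profile n_R(r)
density = exp(-r ** 2 / 2.0)         # illustrative condensate density
integral = 2 * pi * ((reservoir - 1.0) * density * r * dx).sum()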
daskol/nls | nls/model.py | Solution.store | def store(self, filename=None, label=None, desc=None, date=None):
"""Store object to mat-file. TODO: determine format specification
"""
date = datetime.now() if date is None else date
filename = filename if filename else date.replace(microsecond=0).isoformat() + '.mat'
def storeWithFileLikeObject(file_like):
content = {
'elapsed_time': self.elapsed_time,
'solution': self.solution,
'version': version(),
}
self.model.store(file_like, label, desc, date)
savemat(file_like, content, appendmat=True)
if isinstance(filename, file):
storeWithFileLikeObject(filename)
else:
with open(filename, 'wb') as f:
storeWithFileLikeObject(f) | python | def store(self, filename=None, label=None, desc=None, date=None):
"""Store object to mat-file. TODO: determine format specification
"""
date = datetime.now() if date is None else date
filename = filename if filename else date.replace(microsecond=0).isoformat() + '.mat'
def storeWithFileLikeObject(file_like):
content = {
'elapsed_time': self.elapsed_time,
'solution': self.solution,
'version': version(),
}
self.model.store(file_like, label, desc, date)
savemat(file_like, content, appendmat=True)
if isinstance(filename, file):
storeWithFileLikeObject(filename)
else:
with open(filename, 'wb') as f:
storeWithFileLikeObject(f) | [
"def",
"store",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"label",
"=",
"None",
",",
"desc",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"date",
"=",
"datetime",
".",
"now",
"(",
")",
"if",
"date",
"is",
"None",
"else",
"date",
"filename",
"=",
"filename",
"if",
"filename",
"else",
"date",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
".",
"isoformat",
"(",
")",
"+",
"'.mat'",
"def",
"storeWithFileLikeObject",
"(",
"file_like",
")",
":",
"content",
"=",
"{",
"'elapsed_time'",
":",
"self",
".",
"elapsed_time",
",",
"'solution'",
":",
"self",
".",
"solution",
",",
"'version'",
":",
"version",
"(",
")",
",",
"}",
"self",
".",
"model",
".",
"store",
"(",
"file_like",
",",
"label",
",",
"desc",
",",
"date",
")",
"savemat",
"(",
"file_like",
",",
"content",
",",
"appendmat",
"=",
"True",
")",
"if",
"isinstance",
"(",
"filename",
",",
"file",
")",
":",
"storeWithFileLikeObject",
"(",
"filename",
")",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"storeWithFileLikeObject",
"(",
"f",
")"
] | Store object to mat-file. TODO: determine format specification | [
"Store",
"object",
"to",
"mat",
"-",
"file",
".",
"TODO",
":",
"determine",
"format",
"specification"
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L500-L519 |
daskol/nls | nls/model.py | Solution.restore | def restore(self, filename):
"""Restore object from mat-file. TODO: determine format specification
"""
matfile = loadmat(filename)
if matfile['dim'] == 1:
matfile['solution'] = matfile['solution'][0, :]
self.elapsed_time = matfile['elapsed_time'][0, 0]
self.solution = matfile['solution']
return self | python | def restore(self, filename):
"""Restore object from mat-file. TODO: determine format specification
"""
matfile = loadmat(filename)
if matfile['dim'] == 1:
matfile['solution'] = matfile['solution'][0, :]
self.elapsed_time = matfile['elapsed_time'][0, 0]
self.solution = matfile['solution']
return self | [
"def",
"restore",
"(",
"self",
",",
"filename",
")",
":",
"matfile",
"=",
"loadmat",
"(",
"filename",
")",
"if",
"matfile",
"[",
"'dim'",
"]",
"==",
"1",
":",
"matfile",
"[",
"'solution'",
"]",
"=",
"matfile",
"[",
"'solution'",
"]",
"[",
"0",
",",
":",
"]",
"self",
".",
"elapsed_time",
"=",
"matfile",
"[",
"'elapsed_time'",
"]",
"[",
"0",
",",
"0",
"]",
"self",
".",
"solution",
"=",
"matfile",
"[",
"'solution'",
"]",
"return",
"self"
] | Restore object from mat-file. TODO: determine format specification | [
"Restore",
"object",
"from",
"mat",
"-",
"file",
".",
"TODO",
":",
"determine",
"format",
"specification"
] | train | https://github.com/daskol/nls/blob/00bb4555e4f56e222dc6f54faf2e286567519626/nls/model.py#L521-L532 |
fudge-py/fudge | fudge/util.py | fmt_val | def fmt_val(val, shorten=True):
"""Format a value for inclusion in an
informative text string.
"""
val = repr(val)
max = 50
if shorten:
if len(val) > max:
close = val[-1]
val = val[0:max-4] + "..."
if close in (">", "'", '"', ']', '}', ')'):
val = val + close
return val | python | def fmt_val(val, shorten=True):
"""Format a value for inclusion in an
informative text string.
"""
val = repr(val)
max = 50
if shorten:
if len(val) > max:
close = val[-1]
val = val[0:max-4] + "..."
if close in (">", "'", '"', ']', '}', ')'):
val = val + close
return val | [
"def",
"fmt_val",
"(",
"val",
",",
"shorten",
"=",
"True",
")",
":",
"val",
"=",
"repr",
"(",
"val",
")",
"max",
"=",
"50",
"if",
"shorten",
":",
"if",
"len",
"(",
"val",
")",
">",
"max",
":",
"close",
"=",
"val",
"[",
"-",
"1",
"]",
"val",
"=",
"val",
"[",
"0",
":",
"max",
"-",
"4",
"]",
"+",
"\"...\"",
"if",
"close",
"in",
"(",
"\">\"",
",",
"\"'\"",
",",
"'\"'",
",",
"']'",
",",
"'}'",
",",
"')'",
")",
":",
"val",
"=",
"val",
"+",
"close",
"return",
"val"
] | Format a value for inclusion in an
informative text string. | [
"Format",
"a",
"value",
"for",
"inclusion",
"in",
"an",
"informative",
"text",
"string",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/util.py#L15-L27 |
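Because the closing delimiter of the repr is re-attached after truncation, shortened values still read as balanced literals:

print(fmt_val('x' * 100))                 # repr cut to ~50 chars, closing quote restored
print(fmt_val('x' * 100, shorten=False))  # full 102-char repr, untouched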
fudge-py/fudge | fudge/util.py | fmt_dict_vals | def fmt_dict_vals(dict_vals, shorten=True):
"""Returns list of key=val pairs formatted
for inclusion in an informative text string.
"""
items = dict_vals.items()
if not items:
return [fmt_val(None, shorten=shorten)]
return ["%s=%s" % (k, fmt_val(v, shorten=shorten)) for k,v in items] | python | def fmt_dict_vals(dict_vals, shorten=True):
"""Returns list of key=val pairs formatted
for inclusion in an informative text string.
"""
items = dict_vals.items()
if not items:
return [fmt_val(None, shorten=shorten)]
return ["%s=%s" % (k, fmt_val(v, shorten=shorten)) for k,v in items] | [
"def",
"fmt_dict_vals",
"(",
"dict_vals",
",",
"shorten",
"=",
"True",
")",
":",
"items",
"=",
"dict_vals",
".",
"items",
"(",
")",
"if",
"not",
"items",
":",
"return",
"[",
"fmt_val",
"(",
"None",
",",
"shorten",
"=",
"shorten",
")",
"]",
"return",
"[",
"\"%s=%s\"",
"%",
"(",
"k",
",",
"fmt_val",
"(",
"v",
",",
"shorten",
"=",
"shorten",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
"]"
] | Returns list of key=val pairs formatted
for inclusion in an informative text string. | [
"Returns",
"list",
"of",
"key",
"=",
"val",
"pairs",
"formatted",
"for",
"inclusion",
"in",
"an",
"informative",
"text",
"string",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/util.py#L29-L36 |
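A small usage example (dict ordering is unspecified on old Pythons, so treat the output order as illustrative):

print(", ".join(fmt_dict_vals({'name': 'x' * 100, 'n': 3})))
# e.g. name='xxx...', n=3   -- each value is shortened individually
print(fmt_dict_vals({}))
# ['None']                  -- an empty dict formats as the repr of None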
Clinical-Genomics/trailblazer | trailblazer/mip/start.py | MipCli.build_command | def build_command(self, config, **kwargs):
"""Builds the command to execute MIP."""
command = ['perl', self.script, CLI_OPTIONS['config']['option'], config]
for key, value in kwargs.items():
# enable passing in flags as "False" - shouldn't add command
if value:
command.append(CLI_OPTIONS[key]['option'])
if value is True:
command.append(CLI_OPTIONS[key].get('default', '1'))
else:
command.append(value)
return command | python | def build_command(self, config, **kwargs):
"""Builds the command to execute MIP."""
command = ['perl', self.script, CLI_OPTIONS['config']['option'], config]
for key, value in kwargs.items():
# enable passing in flags as "False" - shouldn't add command
if value:
command.append(CLI_OPTIONS[key]['option'])
if value is True:
command.append(CLI_OPTIONS[key].get('default', '1'))
else:
command.append(value)
return command | [
"def",
"build_command",
"(",
"self",
",",
"config",
",",
"*",
"*",
"kwargs",
")",
":",
"command",
"=",
"[",
"'perl'",
",",
"self",
".",
"script",
",",
"CLI_OPTIONS",
"[",
"'config'",
"]",
"[",
"'option'",
"]",
",",
"config",
"]",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"# enable passing in flags as \"False\" - shouldn't add command",
"if",
"value",
":",
"command",
".",
"append",
"(",
"CLI_OPTIONS",
"[",
"key",
"]",
"[",
"'option'",
"]",
")",
"if",
"value",
"is",
"True",
":",
"command",
".",
"append",
"(",
"CLI_OPTIONS",
"[",
"key",
"]",
".",
"get",
"(",
"'default'",
",",
"'1'",
")",
")",
"else",
":",
"command",
".",
"append",
"(",
"value",
")",
"return",
"command"
] | Builds the command to execute MIP. | [
"Builds",
"the",
"command",
"to",
"execute",
"MIP",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/start.py#L48-L59 |
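A sketch of the flag handling. The concrete option strings live in the module's CLI_OPTIONS table, so the command shown in the comment is schematic, and the constructor signature is likewise an assumption:

cli = MipCli(script='/path/to/mip.pl')
cmd = cli.build_command('family_config.yaml', dryrun=True, email=False)
# True flags expand to "<option> <default>", string values to "<option> <value>",
# and False/empty values are skipped entirely, e.g.:
# ['perl', '/path/to/mip.pl', <config option>, 'family_config.yaml',
#  <dryrun option>, <dryrun default>]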
Clinical-Genomics/trailblazer | trailblazer/mip/start.py | MipCli.execute | def execute(self, command):
"""Start a new MIP run."""
process = subprocess.Popen(
command,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
)
return process | python | def execute(self, command):
"""Start a new MIP run."""
process = subprocess.Popen(
command,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
)
return process | [
"def",
"execute",
"(",
"self",
",",
"command",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"preexec_fn",
"=",
"lambda",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGPIPE",
",",
"signal",
".",
"SIG_DFL",
")",
")",
"return",
"process"
] | Start a new MIP run. | [
"Start",
"a",
"new",
"MIP",
"run",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/start.py#L63-L69 |
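The preexec_fn restores the default SIGPIPE disposition in the child, so MIP terminates cleanly if its output pipe closes instead of inheriting Python's ignore-SIGPIPE setting; the standalone equivalent (POSIX only):

import signal
import subprocess

def restore_sigpipe():
    # CPython sets SIGPIPE to SIG_IGN at startup; undo that for the child.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

process = subprocess.Popen(['yes'], stdout=subprocess.PIPE,
                           preexec_fn=restore_sigpipe)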
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages.update | def update(self):
"""
Update wiki pages
See class docstring for usage
"""
self.log.debug('starting the ``update`` method')
if "sherlock wiki root" not in self.settings:
print "Sherlock wiki settings not found in settings file"
return
staticTableInfo = self._get_table_infos()
viewInfo = self._get_view_infos()
streamedTableInfo = self._get_stream_view_infos()
self._create_md_tables(
tableData=staticTableInfo,
viewData=viewInfo,
streamData=streamedTableInfo
)
self._write_wiki_pages()
self._update_github()
self.log.debug('completed the ``update`` method')
return | python | def update(self):
"""
Update wiki pages
See class docstring for usage
"""
self.log.debug('starting the ``update`` method')
if "sherlock wiki root" not in self.settings:
print "Sherlock wiki settings not found in settings file"
return
staticTableInfo = self._get_table_infos()
viewInfo = self._get_view_infos()
streamedTableInfo = self._get_stream_view_infos()
self._create_md_tables(
tableData=staticTableInfo,
viewData=viewInfo,
streamData=streamedTableInfo
)
self._write_wiki_pages()
self._update_github()
self.log.debug('completed the ``update`` method')
return | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``update`` method'",
")",
"if",
"\"sherlock wiki root\"",
"not",
"in",
"self",
".",
"settings",
":",
"print",
"\"Sherlock wiki settings not found in settings file\"",
"return",
"staticTableInfo",
"=",
"self",
".",
"_get_table_infos",
"(",
")",
"viewInfo",
"=",
"self",
".",
"_get_view_infos",
"(",
")",
"streamedTableInfo",
"=",
"self",
".",
"_get_stream_view_infos",
"(",
")",
"self",
".",
"_create_md_tables",
"(",
"tableData",
"=",
"staticTableInfo",
",",
"viewData",
"=",
"viewInfo",
",",
"streamData",
"=",
"streamedTableInfo",
")",
"self",
".",
"_write_wiki_pages",
"(",
")",
"self",
".",
"_update_github",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``update`` method'",
")",
"return"
] | Update wiki pages
See class docstring for usage | [
"Update",
"wiki",
"pages"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L91-L115 |
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages._get_table_infos | def _get_table_infos(
self,
trimmed=False):
"""query the sherlock-catalogues database table metadata
"""
self.log.debug('starting the ``_get_table_infos`` method')
sqlQuery = u"""
SELECT * FROM crossmatch_catalogues.tcs_helper_catalogue_tables_info where legacy_table = 0 and table_name not like "legacy%%" and table_name not like "%%stream" order by number_of_rows desc;
""" % locals()
tableInfo = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
if trimmed:
cleanTable = []
for r in tableInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c]
cleanTable.append(orow)
tableInfo = cleanTable
self.log.debug('completed the ``_get_table_infos`` method')
return tableInfo | python | def _get_table_infos(
self,
trimmed=False):
"""query the sherlock-catalogues database table metadata
"""
self.log.debug('starting the ``_get_table_infos`` method')
sqlQuery = u"""
SELECT * FROM crossmatch_catalogues.tcs_helper_catalogue_tables_info where legacy_table = 0 and table_name not like "legacy%%" and table_name not like "%%stream" order by number_of_rows desc;
""" % locals()
tableInfo = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
if trimmed:
cleanTable = []
for r in tableInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c]
cleanTable.append(orow)
tableInfo = cleanTable
self.log.debug('completed the ``_get_table_infos`` method')
return tableInfo | [
"def",
"_get_table_infos",
"(",
"self",
",",
"trimmed",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_get_table_infos`` method'",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT * FROM crossmatch_catalogues.tcs_helper_catalogue_tables_info where legacy_table = 0 and table_name not like \"legacy%%\" and table_name not like \"%%stream\" order by number_of_rows desc;\n \"\"\"",
"%",
"locals",
"(",
")",
"tableInfo",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"if",
"trimmed",
":",
"cleanTable",
"=",
"[",
"]",
"for",
"r",
"in",
"tableInfo",
":",
"orow",
"=",
"collections",
".",
"OrderedDict",
"(",
"sorted",
"(",
"{",
"}",
".",
"items",
"(",
")",
")",
")",
"for",
"c",
"in",
"self",
".",
"basicColumns",
":",
"if",
"c",
"in",
"r",
":",
"orow",
"[",
"c",
"]",
"=",
"r",
"[",
"c",
"]",
"cleanTable",
".",
"append",
"(",
"orow",
")",
"tableInfo",
"=",
"cleanTable",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_get_table_infos`` method'",
")",
"return",
"tableInfo"
] | query the sherlock-catalogues database table metadata | [
"query",
"the",
"sherlock",
"-",
"catalogues",
"database",
"table",
"metadata"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L117-L145 |
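The trimmed branch projects each row onto self.basicColumns while keeping that column order (the sorted({}.items()) in the source is just a roundabout empty OrderedDict); the same projection as a standalone snippet with an illustrative column list:

import collections

basicColumns = ['table_name', 'description', 'number_of_rows']   # illustrative
rows = [{'table_name': 'tcs_cat_example', 'number_of_rows': 42, 'extra': 'dropped'}]

cleanTable = []
for r in rows:
    orow = collections.OrderedDict((c, r[c]) for c in basicColumns if c in r)
    cleanTable.append(orow)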
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages._get_view_infos | def _get_view_infos(
self,
trimmed=False):
"""query the sherlock-catalogues database view metadata
"""
self.log.debug('starting the ``_get_view_infos`` method')
sqlQuery = u"""
SELECT v.*, t.description as "master table" FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v, crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t where v.legacy_view = 0 and v.view_name not like "legacy%%" and t.id=v.table_id order by number_of_rows desc
""" % locals()
viewInfo = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
if trimmed:
cleanTable = []
for r in viewInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c]
cleanTable.append(orow)
viewInfo = cleanTable
self.log.debug('completed the ``_get_view_infos`` method')
return viewInfo | python | def _get_view_infos(
self,
trimmed=False):
"""query the sherlock-catalogues database view metadata
"""
self.log.debug('starting the ``_get_view_infos`` method')
sqlQuery = u"""
SELECT v.*, t.description as "master table" FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v, crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t where v.legacy_view = 0 and v.view_name not like "legacy%%" and t.id=v.table_id order by number_of_rows desc
""" % locals()
viewInfo = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
if trimmed:
cleanTable = []
for r in viewInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c]
cleanTable.append(orow)
viewInfo = cleanTable
self.log.debug('completed the ``_get_view_infos`` method')
return viewInfo | [
"def",
"_get_view_infos",
"(",
"self",
",",
"trimmed",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_get_view_infos`` method'",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT v.*, t.description as \"master table\" FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v, crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t where v.legacy_view = 0 and v.view_name not like \"legacy%%\" and t.id=v.table_id order by number_of_rows desc\n \"\"\"",
"%",
"locals",
"(",
")",
"viewInfo",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"if",
"trimmed",
":",
"cleanTable",
"=",
"[",
"]",
"for",
"r",
"in",
"viewInfo",
":",
"orow",
"=",
"collections",
".",
"OrderedDict",
"(",
"sorted",
"(",
"{",
"}",
".",
"items",
"(",
")",
")",
")",
"for",
"c",
"in",
"self",
".",
"basicColumns",
":",
"if",
"c",
"in",
"r",
":",
"orow",
"[",
"c",
"]",
"=",
"r",
"[",
"c",
"]",
"cleanTable",
".",
"append",
"(",
"orow",
")",
"viewInfo",
"=",
"cleanTable",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_get_view_infos`` method'",
")",
"return",
"viewInfo"
] | query the sherlock-catalogues database view metadata | [
"query",
"the",
"sherlock",
"-",
"catalogues",
"database",
"view",
"metadata"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L147-L175 |
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages._get_stream_view_infos | def _get_stream_view_infos(
self,
trimmed=False):
"""query the sherlock-catalogues database streamed data tables' metadata
"""
self.log.debug('starting the ``_get_stream_view_infos`` method')
sqlQuery = u"""
SELECT * FROM crossmatch_catalogues.tcs_helper_catalogue_tables_info where legacy_table = 0 and table_name not like "legacy%%" and table_name like "%%stream" order by number_of_rows desc;
""" % locals()
streamInfo = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
if trimmed:
cleanTable = []
for r in streamInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c]
cleanTable.append(orow)
streamInfo = cleanTable
self.log.debug('completed the ``_get_stream_view_infos`` method')
return streamInfo | python | def _get_stream_view_infos(
self,
trimmed=False):
"""query the sherlock-catalogues database streamed data tables' metadata
"""
self.log.debug('starting the ``_get_stream_view_infos`` method')
sqlQuery = u"""
SELECT * FROM crossmatch_catalogues.tcs_helper_catalogue_tables_info where legacy_table = 0 and table_name not like "legacy%%" and table_name like "%%stream" order by number_of_rows desc;
""" % locals()
streamInfo = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
if trimmed:
cleanTable = []
for r in streamInfo:
orow = collections.OrderedDict(sorted({}.items()))
for c in self.basicColumns:
if c in r:
orow[c] = r[c]
cleanTable.append(orow)
streamInfo = cleanTable
self.log.debug('completed the ``_get_stream_view_infos`` method')
return streamInfo | [
"def",
"_get_stream_view_infos",
"(",
"self",
",",
"trimmed",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_get_stream_view_infos`` method'",
")",
"sqlQuery",
"=",
"u\"\"\"\n SELECT * FROM crossmatch_catalogues.tcs_helper_catalogue_tables_info where legacy_table = 0 and table_name not like \"legacy%%\" and table_name like \"%%stream\" order by number_of_rows desc;\n \"\"\"",
"%",
"locals",
"(",
")",
"streamInfo",
"=",
"readquery",
"(",
"log",
"=",
"self",
".",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"self",
".",
"cataloguesDbConn",
",",
"quiet",
"=",
"False",
")",
"if",
"trimmed",
":",
"cleanTable",
"=",
"[",
"]",
"for",
"r",
"in",
"streamInfo",
":",
"orow",
"=",
"collections",
".",
"OrderedDict",
"(",
"sorted",
"(",
"{",
"}",
".",
"items",
"(",
")",
")",
")",
"for",
"c",
"in",
"self",
".",
"basicColumns",
":",
"if",
"c",
"in",
"r",
":",
"orow",
"[",
"c",
"]",
"=",
"r",
"[",
"c",
"]",
"cleanTable",
".",
"append",
"(",
"orow",
")",
"streamInfo",
"=",
"cleanTable",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_get_stream_view_infos`` method'",
")",
"return",
"streamInfo"
] | query the sherlock-catalogues database streamed data tables' metadata | [
"query",
"the",
"sherlock",
"-",
"catalogues",
"database",
"streamed",
"data",
"tables",
"metadata"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L177-L205 |
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages._create_md_tables | def _create_md_tables(
self,
tableData,
viewData,
streamData
):
"""generate markdown format tables from the database query results
**Key Arguments:**
- ``tableData`` -- the sherlock-catalogues database table metadata.
- ``viewData`` -- the sherlock-catalogues database view metadata.
- ``streamData`` -- the sherlock-catalogues database streamed data tables' metadata.
**Return:**
- None
"""
self.log.debug('starting the ``_create_md_tables`` method')
header = u"""
| <sub>Table Name</sub> | <sub>Description</sub> | <sub>Reference</sub> | <sub>Number Rows</sub> | <sub>Vizier</sub> | <sub>NED</sub> | <sub>Objects</sub> | <sub>Weight (1-10)</sub> |
| :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- |"""
rows = u""
for ti in tableData:
table_name = ti["table_name"]
description = ti["description"]
url = ti["url"]
number_of_rows = ti["number_of_rows"]
reference_url = ti["reference_url"]
reference_text = ti["reference_text"]
notes = ti["notes"]
vizier_link = ti["vizier_link"]
in_ned = ti["in_ned"]
object_types = ti["object_types"]
version_number = ti["version_number"]
last_updated = ti["last_updated"]
legacy_table = ti["legacy_table"]
old_table_name = ti["old_table_name"]
weight = ti["object_type_accuracy"]
number_of_rows = str(number_of_rows)
thisLen = len(number_of_rows)
newNumber = ""
count = 0
while count < thisLen:
count += 1
newNumber = number_of_rows[-count] + newNumber
if count % 3 == 0:
newNumber = "," + newNumber
if newNumber[0] == ",":
newNumber = newNumber[1:]
if vizier_link and len(vizier_link) and vizier_link != 0 and vizier_link != "0":
vizier_link = u"[✓](%(vizier_link)s)" % locals()
else:
vizier_link = u""
if in_ned:
in_ned = u"✓"
else:
in_ned = u""
rows += u"""
| <sub>%(table_name)s</sub> | <sub>[%(description)s](%(url)s)</sub> | <sub>[%(reference_text)s](%(reference_url)s)</sub> | <sub>%(newNumber)s</sub> | <sub>%(vizier_link)s</sub> | <sub>%(in_ned)s</sub> | <sub>%(object_types)s</sub> | <sub>%(weight)s</sub> |""" % locals()
self.mdTables = header + rows
header = u"""
| <sub>View Name</sub> | <sub>Number Rows</sub> | <sub>Object Type</sub> |
| :--- | :--- | :--- |"""
rows = u""
for ti in viewData:
view_name = ti["view_name"]
number_of_rows = ti["number_of_rows"]
object_type = ti["object_type"]
number_of_rows = str(number_of_rows)
thisLen = len(number_of_rows)
newNumber = ""
count = 0
while count < thisLen:
count += 1
newNumber = number_of_rows[-count] + newNumber
if count % 3 == 0:
newNumber = "," + newNumber
if newNumber[0] == ",":
newNumber = newNumber[1:]
rows += u"""
| <sub>%(view_name)s</sub> | <sub>%(newNumber)s</sub> | <sub>%(object_type)s</sub> |""" % locals()
self.mdViews = header + rows
header = u"""
| <sub>Table Name</sub> | <sub>Description</sub> | <sub>Reference</sub> | <sub>Number Rows</sub> | <sub>Objects</sub> |
| :--- | :--- | :--- | :--- | :--- | """
rows = u""
for ti in streamData:
table_name = ti["table_name"]
description = ti["description"]
url = ti["url"]
number_of_rows = ti["number_of_rows"]
reference_url = ti["reference_url"]
reference_text = ti["reference_text"]
notes = ti["notes"]
vizier_link = ti["vizier_link"]
in_ned = ti["in_ned"]
object_types = ti["object_types"]
version_number = ti["version_number"]
last_updated = ti["last_updated"]
legacy_table = ti["legacy_table"]
old_table_name = ti["old_table_name"]
number_of_rows = str(number_of_rows)
thisLen = len(number_of_rows)
newNumber = ""
count = 0
while count < thisLen:
count += 1
newNumber = number_of_rows[-count] + newNumber
if count % 3 == 0:
newNumber = "," + newNumber
if newNumber[0] == ",":
newNumber = newNumber[1:]
if len(vizier_link) and vizier_link != 0 and vizier_link != "0":
vizier_link = u"[✓](%(vizier_link)s)" % locals()
else:
vizier_link = u""
if in_ned:
in_ned = u"✓"
else:
in_ned = u""
rows += u"""
| <sub>%(table_name)s</sub> | <sub>[%(description)s](%(url)s)</sub> | <sub>[%(reference_text)s](%(reference_url)s)</sub> | <sub>%(newNumber)s</sub> | <sub>%(object_types)s</sub> |""" % locals()
self.mdStreams = header + rows
self.log.debug('completed the ``_create_md_tables`` method')
return | python | def _create_md_tables(
self,
tableData,
viewData,
streamData
):
"""generate markdown format tables from the database query results
**Key Arguments:**
- ``tableData`` -- the sherlock-catalogues database table metadata.
- ``viewData`` -- the sherlock-catalogues database view metadata.
- ``streamData`` -- the sherlock-catalogues database streamed data tables' metadata.
**Return:**
- None
"""
self.log.debug('starting the ``_create_md_tables`` method')
header = u"""
| <sub>Table Name</sub> | <sub>Description</sub> | <sub>Reference</sub> | <sub>Number Rows</sub> | <sub>Vizier</sub> | <sub>NED</sub> | <sub>Objects</sub> | <sub>Weight (1-10)</sub> |
| :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- |"""
rows = u""
for ti in tableData:
table_name = ti["table_name"]
description = ti["description"]
url = ti["url"]
number_of_rows = ti["number_of_rows"]
reference_url = ti["reference_url"]
reference_text = ti["reference_text"]
notes = ti["notes"]
vizier_link = ti["vizier_link"]
in_ned = ti["in_ned"]
object_types = ti["object_types"]
version_number = ti["version_number"]
last_updated = ti["last_updated"]
legacy_table = ti["legacy_table"]
old_table_name = ti["old_table_name"]
weight = ti["object_type_accuracy"]
number_of_rows = str(number_of_rows)
thisLen = len(number_of_rows)
newNumber = ""
count = 0
while count < thisLen:
count += 1
newNumber = number_of_rows[-count] + newNumber
if count % 3 == 0:
newNumber = "," + newNumber
if newNumber[0] == ",":
newNumber = newNumber[1:]
if vizier_link and len(vizier_link) and vizier_link != 0 and vizier_link != "0":
vizier_link = u"[✓](%(vizier_link)s)" % locals()
else:
vizier_link = u""
if in_ned:
in_ned = u"✓"
else:
in_ned = u""
rows += u"""
| <sub>%(table_name)s</sub> | <sub>[%(description)s](%(url)s)</sub> | <sub>[%(reference_text)s](%(reference_url)s)</sub> | <sub>%(newNumber)s</sub> | <sub>%(vizier_link)s</sub> | <sub>%(in_ned)s</sub> | <sub>%(object_types)s</sub> | <sub>%(weight)s</sub> |""" % locals()
self.mdTables = header + rows
header = u"""
| <sub>View Name</sub> | <sub>Number Rows</sub> | <sub>Object Type</sub> |
| :--- | :--- | :--- |"""
rows = u""
for ti in viewData:
view_name = ti["view_name"]
number_of_rows = ti["number_of_rows"]
object_type = ti["object_type"]
number_of_rows = str(number_of_rows)
thisLen = len(number_of_rows)
newNumber = ""
count = 0
while count < thisLen:
count += 1
newNumber = number_of_rows[-count] + newNumber
if count % 3 == 0:
newNumber = "," + newNumber
if newNumber[0] == ",":
newNumber = newNumber[1:]
rows += u"""
| <sub>%(view_name)s</sub> | <sub>%(newNumber)s</sub> | <sub>%(object_type)s</sub> |""" % locals()
self.mdViews = header + rows
header = u"""
| <sub>Table Name</sub> | <sub>Description</sub> | <sub>Reference</sub> | <sub>Number Rows</sub> | <sub>Objects</sub> |
| :--- | :--- | :--- | :--- | :--- | """
rows = u""
for ti in streamData:
table_name = ti["table_name"]
description = ti["description"]
url = ti["url"]
number_of_rows = ti["number_of_rows"]
reference_url = ti["reference_url"]
reference_text = ti["reference_text"]
notes = ti["notes"]
vizier_link = ti["vizier_link"]
in_ned = ti["in_ned"]
object_types = ti["object_types"]
version_number = ti["version_number"]
last_updated = ti["last_updated"]
legacy_table = ti["legacy_table"]
old_table_name = ti["old_table_name"]
number_of_rows = str(number_of_rows)
thisLen = len(number_of_rows)
newNumber = ""
count = 0
while count < thisLen:
count += 1
newNumber = number_of_rows[-count] + newNumber
if count % 3 == 0:
newNumber = "," + newNumber
if newNumber[0] == ",":
newNumber = newNumber[1:]
if len(vizier_link) and vizier_link != 0 and vizier_link != "0":
vizier_link = u"[✓](%(vizier_link)s)" % locals()
else:
vizier_link = u""
if in_ned:
in_ned = u"✓"
else:
in_ned = u""
rows += u"""
| <sub>%(table_name)s</sub> | <sub>[%(description)s](%(url)s)</sub> | <sub>[%(reference_text)s](%(reference_url)s)</sub> | <sub>%(newNumber)s</sub> | <sub>%(object_types)s</sub> |""" % locals()
self.mdStreams = header + rows
self.log.debug('completed the ``_create_md_tables`` method')
return | [
"def",
"_create_md_tables",
"(",
"self",
",",
"tableData",
",",
"viewData",
",",
"streamData",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_create_md_tables`` method'",
")",
"header",
"=",
"u\"\"\"\n| <sub>Table Name</sub> | <sub>Description</sub> | <sub>Reference</sub> | <sub>Number Rows</sub> | <sub>Vizier</sub> | <sub>NED</sub> | <sub>Objects</sub> | <sub>Weight (1-10)</sub> |\n| :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- |\"\"\"",
"rows",
"=",
"u\"\"",
"for",
"ti",
"in",
"tableData",
":",
"table_name",
"=",
"ti",
"[",
"\"table_name\"",
"]",
"description",
"=",
"ti",
"[",
"\"description\"",
"]",
"url",
"=",
"ti",
"[",
"\"url\"",
"]",
"number_of_rows",
"=",
"ti",
"[",
"\"number_of_rows\"",
"]",
"reference_url",
"=",
"ti",
"[",
"\"reference_url\"",
"]",
"reference_text",
"=",
"ti",
"[",
"\"reference_text\"",
"]",
"notes",
"=",
"ti",
"[",
"\"notes\"",
"]",
"vizier_link",
"=",
"ti",
"[",
"\"vizier_link\"",
"]",
"in_ned",
"=",
"ti",
"[",
"\"in_ned\"",
"]",
"object_types",
"=",
"ti",
"[",
"\"object_types\"",
"]",
"version_number",
"=",
"ti",
"[",
"\"version_number\"",
"]",
"last_updated",
"=",
"ti",
"[",
"\"last_updated\"",
"]",
"legacy_table",
"=",
"ti",
"[",
"\"legacy_table\"",
"]",
"old_table_name",
"=",
"ti",
"[",
"\"old_table_name\"",
"]",
"weight",
"=",
"ti",
"[",
"\"object_type_accuracy\"",
"]",
"number_of_rows",
"=",
"str",
"(",
"number_of_rows",
")",
"thisLen",
"=",
"len",
"(",
"number_of_rows",
")",
"newNumber",
"=",
"\"\"",
"count",
"=",
"0",
"while",
"count",
"<",
"thisLen",
":",
"count",
"+=",
"1",
"newNumber",
"=",
"number_of_rows",
"[",
"-",
"count",
"]",
"+",
"newNumber",
"if",
"count",
"%",
"3",
"==",
"0",
":",
"newNumber",
"=",
"\",\"",
"+",
"newNumber",
"if",
"newNumber",
"[",
"0",
"]",
"==",
"\",\"",
":",
"newNumber",
"=",
"newNumber",
"[",
"1",
":",
"]",
"if",
"vizier_link",
"and",
"len",
"(",
"vizier_link",
")",
"and",
"vizier_link",
"!=",
"0",
"and",
"vizier_link",
"!=",
"\"0\"",
":",
"vizier_link",
"=",
"u\"[✓](%(vizier_link)s)\" %",
"l",
"cals()",
"",
"",
"else",
":",
"vizier_link",
"=",
"u\"\"",
"if",
"in_ned",
":",
"in_ned",
"=",
"u\"✓\"",
"else",
":",
"in_ned",
"=",
"u\"\"",
"rows",
"+=",
"u\"\"\"\n| <sub>%(table_name)s</sub> | <sub>[%(description)s](%(url)s)</sub> | <sub>[%(reference_text)s](%(reference_url)s)</sub> | <sub>%(newNumber)s</sub> | <sub>%(vizier_link)s</sub> | <sub>%(in_ned)s</sub> | <sub>%(object_types)s</sub> | <sub>%(weight)s</sub> |\"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"mdTables",
"=",
"header",
"+",
"rows",
"header",
"=",
"u\"\"\"\n| <sub>View Name</sub> | <sub>Number Rows</sub> | <sub>Object Type</sub> |\n| :--- | :--- | :--- |\"\"\"",
"rows",
"=",
"u\"\"",
"for",
"ti",
"in",
"viewData",
":",
"view_name",
"=",
"ti",
"[",
"\"view_name\"",
"]",
"number_of_rows",
"=",
"ti",
"[",
"\"number_of_rows\"",
"]",
"object_type",
"=",
"ti",
"[",
"\"object_type\"",
"]",
"number_of_rows",
"=",
"str",
"(",
"number_of_rows",
")",
"thisLen",
"=",
"len",
"(",
"number_of_rows",
")",
"newNumber",
"=",
"\"\"",
"count",
"=",
"0",
"while",
"count",
"<",
"thisLen",
":",
"count",
"+=",
"1",
"newNumber",
"=",
"number_of_rows",
"[",
"-",
"count",
"]",
"+",
"newNumber",
"if",
"count",
"%",
"3",
"==",
"0",
":",
"newNumber",
"=",
"\",\"",
"+",
"newNumber",
"if",
"newNumber",
"[",
"0",
"]",
"==",
"\",\"",
":",
"newNumber",
"=",
"newNumber",
"[",
"1",
":",
"]",
"rows",
"+=",
"u\"\"\"\n| <sub>%(view_name)s</sub> | <sub>%(newNumber)s</sub> | <sub>%(object_type)s</sub> |\"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"mdViews",
"=",
"header",
"+",
"rows",
"header",
"=",
"u\"\"\"\n| <sub>Table Name</sub> | <sub>Description</sub> | <sub>Reference</sub> | <sub>Number Rows</sub> | <sub>Objects</sub> | \n| :--- | :--- | :--- | :--- | :--- | \"\"\"",
"rows",
"=",
"u\"\"",
"for",
"ti",
"in",
"streamData",
":",
"table_name",
"=",
"ti",
"[",
"\"table_name\"",
"]",
"description",
"=",
"ti",
"[",
"\"description\"",
"]",
"url",
"=",
"ti",
"[",
"\"url\"",
"]",
"number_of_rows",
"=",
"ti",
"[",
"\"number_of_rows\"",
"]",
"reference_url",
"=",
"ti",
"[",
"\"reference_url\"",
"]",
"reference_text",
"=",
"ti",
"[",
"\"reference_text\"",
"]",
"notes",
"=",
"ti",
"[",
"\"notes\"",
"]",
"vizier_link",
"=",
"ti",
"[",
"\"vizier_link\"",
"]",
"in_ned",
"=",
"ti",
"[",
"\"in_ned\"",
"]",
"object_types",
"=",
"ti",
"[",
"\"object_types\"",
"]",
"version_number",
"=",
"ti",
"[",
"\"version_number\"",
"]",
"last_updated",
"=",
"ti",
"[",
"\"last_updated\"",
"]",
"legacy_table",
"=",
"ti",
"[",
"\"legacy_table\"",
"]",
"old_table_name",
"=",
"ti",
"[",
"\"old_table_name\"",
"]",
"number_of_rows",
"=",
"str",
"(",
"number_of_rows",
")",
"thisLen",
"=",
"len",
"(",
"number_of_rows",
")",
"newNumber",
"=",
"\"\"",
"count",
"=",
"0",
"while",
"count",
"<",
"thisLen",
":",
"count",
"+=",
"1",
"newNumber",
"=",
"number_of_rows",
"[",
"-",
"count",
"]",
"+",
"newNumber",
"if",
"count",
"%",
"3",
"==",
"0",
":",
"newNumber",
"=",
"\",\"",
"+",
"newNumber",
"if",
"newNumber",
"[",
"0",
"]",
"==",
"\",\"",
":",
"newNumber",
"=",
"newNumber",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"vizier_link",
")",
"and",
"vizier_link",
"!=",
"0",
"and",
"vizier_link",
"!=",
"\"0\"",
":",
"vizier_link",
"=",
"u\"[✓](%(vizier_link)s)\" %",
"l",
"cals()",
"",
"",
"else",
":",
"vizier_link",
"=",
"u\"\"",
"if",
"in_ned",
":",
"in_ned",
"=",
"u\"✓\"",
"else",
":",
"in_ned",
"=",
"u\"\"",
"rows",
"+=",
"u\"\"\"\n| <sub>%(table_name)s</sub> | <sub>[%(description)s](%(url)s)</sub> | <sub>[%(reference_text)s](%(reference_url)s)</sub> | <sub>%(newNumber)s</sub> | <sub>%(object_types)s</sub> |\"\"\"",
"%",
"locals",
"(",
")",
"self",
".",
"mdStreams",
"=",
"header",
"+",
"rows",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_create_md_tables`` method'",
")",
"return"
] | generate markdown format tables from the database query results
**Key Arguments:**
- ``tableData`` -- the sherlock-catalogues database table metadata.
- ``viewData`` -- the sherlock-catalogues database view metadata.
- ``streamData`` -- the sherlock-catalogues database streamed data tables' metadata.
**Return:**
- None | [
"generate",
"markdown",
"format",
"tables",
"from",
"the",
"database",
"query",
"results"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L207-L350 |
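The digit-grouping while-loop that `_create_md_tables` repeats three times is equivalent to the standard-library format specifier. A minimal sketch (the helper name is ours, not sherlock's):

.. code-block:: python

    def group_digits(number_of_rows):
        # Same effect as the while-loop above: insert a comma every
        # three digits, counting from the right.
        return "{:,}".format(int(number_of_rows))

    assert group_digits("1234567") == "1,234,567"
    assert group_digits(999) == "999"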
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages._write_wiki_pages | def _write_wiki_pages(
self):
"""write the markdown formated content of the database tables' metadata to local wiki pages
"""
self.log.debug('starting the ``_write_wiki_pages`` method')
pathToWriteFile = self.settings[
"sherlock wiki root"] + "/Crossmatch-Catalogue Tables.md"
writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
lastUpdated = """Last Updated %(now)s
""" % locals()
writeFile.write(lastUpdated + self.mdTables)
writeFile.close()
pathToWriteFile = self.settings[
"sherlock wiki root"] + "/Crossmatch-Catalogue Views.md"
writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
lastUpdated = """Last Updated %(now)s
""" % locals()
writeFile.write(lastUpdated + self.mdViews)
writeFile.close()
pathToWriteFile = self.settings[
"sherlock wiki root"] + "/Crossmatch-Catalogue Streams.md"
writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
lastUpdated = """Last Updated %(now)s
""" % locals()
writeFile.write(lastUpdated + self.mdStreams)
writeFile.close()
self.log.debug('completed the ``_write_wiki_pages`` method')
return None | python | def _write_wiki_pages(
self):
"""write the markdown formated content of the database tables' metadata to local wiki pages
"""
self.log.debug('starting the ``_write_wiki_pages`` method')
pathToWriteFile = self.settings[
"sherlock wiki root"] + "/Crossmatch-Catalogue Tables.md"
writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
lastUpdated = """Last Updated %(now)s
""" % locals()
writeFile.write(lastUpdated + self.mdTables)
writeFile.close()
pathToWriteFile = self.settings[
"sherlock wiki root"] + "/Crossmatch-Catalogue Views.md"
writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
lastUpdated = """Last Updated %(now)s
""" % locals()
writeFile.write(lastUpdated + self.mdViews)
writeFile.close()
pathToWriteFile = self.settings[
"sherlock wiki root"] + "/Crossmatch-Catalogue Streams.md"
writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
lastUpdated = """Last Updated %(now)s
""" % locals()
writeFile.write(lastUpdated + self.mdStreams)
writeFile.close()
self.log.debug('completed the ``_write_wiki_pages`` method')
return None | [
"def",
"_write_wiki_pages",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_write_wiki_pages`` method'",
")",
"pathToWriteFile",
"=",
"self",
".",
"settings",
"[",
"\"sherlock wiki root\"",
"]",
"+",
"\"/Crossmatch-Catalogue Tables.md\"",
"writeFile",
"=",
"codecs",
".",
"open",
"(",
"pathToWriteFile",
",",
"encoding",
"=",
"'utf-8'",
",",
"mode",
"=",
"'w'",
")",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"now",
"=",
"now",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M\"",
")",
"lastUpdated",
"=",
"\"\"\"Last Updated %(now)s\n\"\"\"",
"%",
"locals",
"(",
")",
"writeFile",
".",
"write",
"(",
"lastUpdated",
"+",
"self",
".",
"mdTables",
")",
"writeFile",
".",
"close",
"(",
")",
"pathToWriteFile",
"=",
"self",
".",
"settings",
"[",
"\"sherlock wiki root\"",
"]",
"+",
"\"/Crossmatch-Catalogue Views.md\"",
"writeFile",
"=",
"codecs",
".",
"open",
"(",
"pathToWriteFile",
",",
"encoding",
"=",
"'utf-8'",
",",
"mode",
"=",
"'w'",
")",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"now",
"=",
"now",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M\"",
")",
"lastUpdated",
"=",
"\"\"\"Last Updated %(now)s\n\"\"\"",
"%",
"locals",
"(",
")",
"writeFile",
".",
"write",
"(",
"lastUpdated",
"+",
"self",
".",
"mdViews",
")",
"writeFile",
".",
"close",
"(",
")",
"pathToWriteFile",
"=",
"self",
".",
"settings",
"[",
"\"sherlock wiki root\"",
"]",
"+",
"\"/Crossmatch-Catalogue Streams.md\"",
"writeFile",
"=",
"codecs",
".",
"open",
"(",
"pathToWriteFile",
",",
"encoding",
"=",
"'utf-8'",
",",
"mode",
"=",
"'w'",
")",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"now",
"=",
"now",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M\"",
")",
"lastUpdated",
"=",
"\"\"\"Last Updated %(now)s\n\"\"\"",
"%",
"locals",
"(",
")",
"writeFile",
".",
"write",
"(",
"lastUpdated",
"+",
"self",
".",
"mdStreams",
")",
"writeFile",
".",
"close",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_write_wiki_pages`` method'",
")",
"return",
"None"
] | write the markdown formatted content of the database tables' metadata to local wiki pages | [
"write",
"the",
"markdown",
"formated",
"content",
"of",
"the",
"database",
"tables",
"metadata",
"to",
"local",
"wiki",
"pages"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L352-L392 |
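`_write_wiki_pages` repeats the same open/timestamp/write/close block once per page. A sketch of how that block could be factored (the helper name is ours; it assumes the same "Last Updated" header as above):

.. code-block:: python

    import codecs
    from datetime import datetime

    def write_wiki_page(path, body):
        # one timestamped page write, shared by all three pages
        now = datetime.now().strftime("%Y-%m-%d %H:%M")
        with codecs.open(path, encoding='utf-8', mode='w') as fh:
            fh.write("Last Updated %s\n" % now)
            fh.write(body)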
thespacedoctor/sherlock | sherlock/commonutils/update_wiki_pages.py | update_wiki_pages._update_github | def _update_github(
self):
"""commit the changes and push them to github
"""
self.log.debug('starting the ``_update_github`` method')
from subprocess import Popen, PIPE, STDOUT
gdir = self.settings["sherlock wiki root"]
cmd = """cd %(gdir)s && git add --all && git commit -m "x" && git pull origin master && git push origin master""" % locals()
p = Popen(cmd, stdout=PIPE, stdin=PIPE, shell=True)
output = p.communicate()[0]
print output
self.log.debug('output: %(output)s' % locals())
self.log.debug('completed the ``_update_github`` method')
return None | python | def _update_github(
self):
"""commit the changes and push them to github
"""
self.log.debug('starting the ``_update_github`` method')
from subprocess import Popen, PIPE, STDOUT
gdir = self.settings["sherlock wiki root"]
cmd = """cd %(gdir)s && git add --all && git commit -m "x" && git pull origin master && git push origin master""" % locals()
p = Popen(cmd, stdout=PIPE, stdin=PIPE, shell=True)
output = p.communicate()[0]
print output
self.log.debug('output: %(output)s' % locals())
self.log.debug('completed the ``_update_github`` method')
return None | [
"def",
"_update_github",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_update_github`` method'",
")",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
",",
"STDOUT",
"gdir",
"=",
"self",
".",
"settings",
"[",
"\"sherlock wiki root\"",
"]",
"cmd",
"=",
"\"\"\"cd %(gdir)s && git add --all && git commit -m \"x\" && git pull origin master && git push origin master\"\"\"",
"%",
"locals",
"(",
")",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stdin",
"=",
"PIPE",
",",
"shell",
"=",
"True",
")",
"output",
"=",
"p",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"print",
"output",
"self",
".",
"log",
".",
"debug",
"(",
"'output: %(output)s'",
"%",
"locals",
"(",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_update_github`` method'",
")",
"return",
"None"
] | commit the changes and push them to github | [
"commit",
"the",
"changes",
"and",
"push",
"them",
"to",
"github"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/update_wiki_pages.py#L394-L409 |
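`_update_github` shells out with a Python-2-style `Popen`/`print`. A hedged Python 3 sketch of the same git round-trip (the function name and the `subprocess.run` usage are ours, not sherlock's; requires Python 3.7+):

.. code-block:: python

    import subprocess

    def update_github(wiki_root):
        # same git commands as the Popen call above
        cmd = ('cd %s && git add --all && git commit -m "x" '
               '&& git pull origin master && git push origin master') % wiki_root
        result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        print(result.stdout)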
kakulukia/django-undeletable | django_undeletable/models.py | AbstractUser.email_user | def email_user(
self, subject, message, from_email=settings.DEFAULT_FROM_EMAIL, **kwargs
):
"""
Sends an email to this User.
If settings.EMAIL_OVERRIDE_ADDRESS is set, this mail will be redirected to the alternate mail address.
"""
receiver = self.email
if settings.EMAIL_OVERRIDE_ADDRESS:
receiver = settings.EMAIL_OVERRIDE_ADDRESS
send_mail(subject, message, from_email, [receiver], **kwargs) | python | def email_user(
self, subject, message, from_email=settings.DEFAULT_FROM_EMAIL, **kwargs
):
"""
Sends an email to this User.
If settings.EMAIL_OVERRIDE_ADDRESS is set, this mail will be redirected to the alternate mail address.
"""
receiver = self.email
if settings.EMAIL_OVERRIDE_ADDRESS:
receiver = settings.EMAIL_OVERRIDE_ADDRESS
send_mail(subject, message, from_email, [receiver], **kwargs) | [
"def",
"email_user",
"(",
"self",
",",
"subject",
",",
"message",
",",
"from_email",
"=",
"settings",
".",
"DEFAULT_FROM_EMAIL",
",",
"*",
"*",
"kwargs",
")",
":",
"receiver",
"=",
"self",
".",
"email",
"if",
"settings",
".",
"EMAIL_OVERRIDE_ADDRESS",
":",
"receiver",
"=",
"settings",
".",
"EMAIL_OVERRIDE_ADDRESS",
"send_mail",
"(",
"subject",
",",
"message",
",",
"from_email",
",",
"[",
"receiver",
"]",
",",
"*",
"*",
"kwargs",
")"
] | Sends an email to this User.
If settings.EMAIL_OVERRIDE_ADDRESS is set, this mail will be redirected to the alternate mail address. | [
"Sends",
"an",
"email",
"to",
"this",
"User",
".",
"If",
"settings",
".",
"EMAIL_OVERRIDE_ADDRESS",
"is",
"set",
"this",
"mail",
"will",
"be",
"redirected",
"to",
"the",
"alternate",
"mail",
"address",
"."
] | train | https://github.com/kakulukia/django-undeletable/blob/565ae7c07ba0279a8cf768a96046f0f4ecbc0467/django_undeletable/models.py#L213-L225 |
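The override rule in `email_user` is easy to miss, so here is a standalone sketch of just that rule (the setting value and helper name are hypothetical):

.. code-block:: python

    EMAIL_OVERRIDE_ADDRESS = "qa-inbox@example.com"  # stand-in for settings

    def pick_receiver(user_email, override=EMAIL_OVERRIDE_ADDRESS):
        # mirrors email_user(): a truthy override always wins
        return override if override else user_email

    assert pick_receiver("user@example.com") == "qa-inbox@example.com"
    assert pick_receiver("user@example.com", override=None) == "user@example.com"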
Clinical-Genomics/trailblazer | trailblazer/cli/clean.py | clean | def clean(context, days_ago, yes):
"""Clean up files from "old" analyses runs."""
number_of_days_ago = dt.datetime.now() - dt.timedelta(days=days_ago)
analyses = context.obj['store'].analyses(
status='completed',
before=number_of_days_ago,
deleted=False,
)
for analysis_obj in analyses:
LOG.debug(f"checking analysis: {analysis_obj.family} ({analysis_obj.id})")
latest_analysis = context.obj['store'].analyses(family=analysis_obj.family).first()
if analysis_obj != latest_analysis:
print(click.style(f"{analysis_obj.family}: family has been re-started", fg='yellow'))
else:
print(f"delete analysis: {analysis_obj.family} ({analysis_obj.id})")
context.invoke(delete, analysis_id=analysis_obj.id, yes=yes) | python | def clean(context, days_ago, yes):
"""Clean up files from "old" analyses runs."""
number_of_days_ago = dt.datetime.now() - dt.timedelta(days=days_ago)
analyses = context.obj['store'].analyses(
status='completed',
before=number_of_days_ago,
deleted=False,
)
for analysis_obj in analyses:
LOG.debug(f"checking analysis: {analysis_obj.family} ({analysis_obj.id})")
latest_analysis = context.obj['store'].analyses(family=analysis_obj.family).first()
if analysis_obj != latest_analysis:
print(click.style(f"{analysis_obj.family}: family has been re-started", fg='yellow'))
else:
print(f"delete analysis: {analysis_obj.family} ({analysis_obj.id})")
context.invoke(delete, analysis_id=analysis_obj.id, yes=yes) | [
"def",
"clean",
"(",
"context",
",",
"days_ago",
",",
"yes",
")",
":",
"number_of_days_ago",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"dt",
".",
"timedelta",
"(",
"days",
"=",
"days_ago",
")",
"analyses",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analyses",
"(",
"status",
"=",
"'completed'",
",",
"before",
"=",
"number_of_days_ago",
",",
"deleted",
"=",
"False",
",",
")",
"for",
"analysis_obj",
"in",
"analyses",
":",
"LOG",
".",
"debug",
"(",
"f\"checking analysis: {analysis_obj.family} ({analysis_obj.id})\"",
")",
"latest_analysis",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analyses",
"(",
"family",
"=",
"analysis_obj",
".",
"family",
")",
".",
"first",
"(",
")",
"if",
"analysis_obj",
"!=",
"latest_analysis",
":",
"print",
"(",
"click",
".",
"style",
"(",
"f\"{analysis_obj.family}: family has been re-started\"",
",",
"fg",
"=",
"'yellow'",
")",
")",
"else",
":",
"print",
"(",
"f\"delete analysis: {analysis_obj.family} ({analysis_obj.id})\"",
")",
"context",
".",
"invoke",
"(",
"delete",
",",
"analysis_id",
"=",
"analysis_obj",
".",
"id",
",",
"yes",
"=",
"yes",
")"
] | Clean up files from "old" analysis runs. | [
"Clean",
"up",
"files",
"from",
"old",
"analyses",
"runs",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/cli/clean.py#L14-L29 |
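`clean` only deletes an analysis when it is still the latest run for its family; superseded runs are reported as re-started instead. A stand-alone sketch of that guard, with lightweight stand-ins for the store objects:

.. code-block:: python

    class Analysis(object):
        def __init__(self, family, id):
            self.family, self.id = family, id

    runs = [Analysis('fam1', 2), Analysis('fam1', 1)]  # newest first, like store.analyses()

    def latest(family):
        # stand-in for store.analyses(family=...).first()
        return next(a for a in runs if a.family == family)

    for analysis_obj in runs:
        if analysis_obj is not latest(analysis_obj.family):
            print("%s: family has been re-started" % analysis_obj.family)
        else:
            print("delete analysis: %s (%s)" % (analysis_obj.family, analysis_obj.id))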
thespacedoctor/sherlock | sherlock/commonutils/get_crossmatch_catalogues_column_map.py | get_crossmatch_catalogues_column_map | def get_crossmatch_catalogues_column_map(
dbConn,
log):
"""*Query the sherlock-catalogues helper tables to generate a map of the important columns of each catalogue*
Within your sherlock-catalogues database you need to manually map the inhomogeneous column-names from the sherlock-catalogues to an internal homogeneous name-set which includes *ra*, *dec*, *redshift*, *object name*, *magnitude*, *filter* etc.
    The column-name map is set within the two database helper tables called `tcs_helper_catalogue_views_info` and `tcs_helper_catalogue_tables_info`. See the *'Checklist for Adding A New Reference Catalogue to the Sherlock Catalogues Database'* for more information.
.. todo::
- write a checklist for adding a new catalogue to the sherlock database and reference it from here (use the image below of the tcs_helper_catalogue_views_info table)
.. image:: https://farm5.staticflickr.com/4604/38429536400_eafa991580_o.png
:width: 200 px
**Key Arguments:**
- ``dbConn`` -- the sherlock-catalogues database connection
- ``log`` -- logger
**Return:**
        - ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Usage:**
To collect the column map dictionary of dictionaries from the catalogues database, use the ``get_crossmatch_catalogues_column_map`` function:
.. code-block:: python
from sherlock.commonutils import get_crossmatch_catalogues_column_map
colMaps = get_crossmatch_catalogues_column_map(
log=log,
dbConn=cataloguesDbConn
)
"""
log.debug('starting the ``get_crossmatch_catalogues_column_map`` function')
# GRAB THE NAMES OF THE IMPORTANT COLUMNS FROM DATABASE
sqlQuery = u"""
SELECT
*
FROM
tcs_helper_catalogue_views_info v,
tcs_helper_catalogue_tables_info t
WHERE
v.table_id = t.id
""" % locals()
rows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=dbConn,
quiet=False
)
colMaps = {}
for row in rows:
colMaps[row["view_name"]] = row
log.debug('completed the ``get_crossmatch_catalogues_column_map`` function')
return colMaps | python | def get_crossmatch_catalogues_column_map(
dbConn,
log):
"""*Query the sherlock-catalogues helper tables to generate a map of the important columns of each catalogue*
Within your sherlock-catalogues database you need to manually map the inhomogeneous column-names from the sherlock-catalogues to an internal homogeneous name-set which includes *ra*, *dec*, *redshift*, *object name*, *magnitude*, *filter* etc.
    The column-name map is set within the two database helper tables called `tcs_helper_catalogue_views_info` and `tcs_helper_catalogue_tables_info`. See the *'Checklist for Adding A New Reference Catalogue to the Sherlock Catalogues Database'* for more information.
.. todo::
- write a checklist for adding a new catalogue to the sherlock database and reference it from here (use the image below of the tcs_helper_catalogue_views_info table)
.. image:: https://farm5.staticflickr.com/4604/38429536400_eafa991580_o.png
:width: 200 px
**Key Arguments:**
- ``dbConn`` -- the sherlock-catalogues database connection
- ``log`` -- logger
**Return:**
        - ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Usage:**
To collect the column map dictionary of dictionaries from the catalogues database, use the ``get_crossmatch_catalogues_column_map`` function:
.. code-block:: python
from sherlock.commonutils import get_crossmatch_catalogues_column_map
colMaps = get_crossmatch_catalogues_column_map(
log=log,
dbConn=cataloguesDbConn
)
"""
log.debug('starting the ``get_crossmatch_catalogues_column_map`` function')
# GRAB THE NAMES OF THE IMPORTANT COLUMNS FROM DATABASE
sqlQuery = u"""
SELECT
*
FROM
tcs_helper_catalogue_views_info v,
tcs_helper_catalogue_tables_info t
WHERE
v.table_id = t.id
""" % locals()
rows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=dbConn,
quiet=False
)
colMaps = {}
for row in rows:
colMaps[row["view_name"]] = row
log.debug('completed the ``get_crossmatch_catalogues_column_map`` function')
return colMaps | [
"def",
"get_crossmatch_catalogues_column_map",
"(",
"dbConn",
",",
"log",
")",
":",
"log",
".",
"debug",
"(",
"'starting the ``get_crossmatch_catalogues_column_map`` function'",
")",
"# GRAB THE NAMES OF THE IMPORTANT COLUMNS FROM DATABASE",
"sqlQuery",
"=",
"u\"\"\"\n SELECT \n *\n FROM\n tcs_helper_catalogue_views_info v,\n tcs_helper_catalogue_tables_info t\n WHERE\n v.table_id = t.id\n \"\"\"",
"%",
"locals",
"(",
")",
"rows",
"=",
"readquery",
"(",
"log",
"=",
"log",
",",
"sqlQuery",
"=",
"sqlQuery",
",",
"dbConn",
"=",
"dbConn",
",",
"quiet",
"=",
"False",
")",
"colMaps",
"=",
"{",
"}",
"for",
"row",
"in",
"rows",
":",
"colMaps",
"[",
"row",
"[",
"\"view_name\"",
"]",
"]",
"=",
"row",
"log",
".",
"debug",
"(",
"'completed the ``get_crossmatch_catalogues_column_map`` function'",
")",
"return",
"colMaps"
] | *Query the sherlock-catalogues helper tables to generate a map of the important columns of each catalogue*
Within your sherlock-catalogues database you need to manually map the inhomogeneous column-names from the sherlock-catalogues to an internal homogeneous name-set which includes *ra*, *dec*, *redshift*, *object name*, *magnitude*, *filter* etc.
The column-name map is set within the two database helper tables called `tcs_helper_catalogue_views_info` and `tcs_helper_catalogue_tables_info`. See the *'Checklist for Adding A New Reference Catalogue to the Sherlock Catalogues Database'* for more information.
.. todo::
- write a checklist for adding a new catalogue to the sherlock database and reference it from here (use the image below of the tcs_helper_catalogue_views_info table)
.. image:: https://farm5.staticflickr.com/4604/38429536400_eafa991580_o.png
:width: 200 px
**Key Arguments:**
- ``dbConn`` -- the sherlock-catalogues database connection
- ``log`` -- logger
**Return:**
- ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictionary map as value (`{view_name: {columnMap}}`).
**Usage:**
To collect the column map dictionary of dictionaries from the catalogues database, use the ``get_crossmatch_catalogues_column_map`` function:
.. code-block:: python
from sherlock.commonutils import get_crossmatch_catalogues_column_map
colMaps = get_crossmatch_catalogues_column_map(
log=log,
dbConn=cataloguesDbConn
) | [
"*",
"Query",
"the",
"sherlock",
"-",
"catalogues",
"helper",
"tables",
"to",
"generate",
"a",
"map",
"of",
"the",
"important",
"columns",
"of",
"each",
"catalogue",
"*"
] | train | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/commonutils/get_crossmatch_catalogues_column_map.py#L20-L77 |
Clinical-Genomics/trailblazer | trailblazer/mip/miplog.py | job_ids | def job_ids(log_stream):
"""Grep out all lines with scancel example."""
id_rows = [line for line in log_stream if 'scancel' in line]
jobs = [id_row.strip()[-7:-1] for id_row in id_rows]
return jobs | python | def job_ids(log_stream):
"""Grep out all lines with scancel example."""
id_rows = [line for line in log_stream if 'scancel' in line]
jobs = [id_row.strip()[-7:-1] for id_row in id_rows]
return jobs | [
"def",
"job_ids",
"(",
"log_stream",
")",
":",
"id_rows",
"=",
"[",
"line",
"for",
"line",
"in",
"log_stream",
"if",
"'scancel'",
"in",
"line",
"]",
"jobs",
"=",
"[",
"id_row",
".",
"strip",
"(",
")",
"[",
"-",
"7",
":",
"-",
"1",
"]",
"for",
"id_row",
"in",
"id_rows",
"]",
"return",
"jobs"
] | Grep out all lines with a scancel example. | [
"Grep",
"out",
"all",
"lines",
"with",
"scancel",
"example",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/miplog.py#L3-L7 |
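A worked example for `job_ids` defined above. Note the `[-7:-1]` slice keeps the six characters immediately before the last one, so these sketched log lines (the real MIP log layout is assumed, not verified) place a six-digit job id just before a closing parenthesis:

.. code-block:: python

    log_stream = [
        "You can cancel it (scancel 123456)",
        "unrelated progress line",
        "You can cancel it (scancel 654321)",
    ]
    assert job_ids(log_stream) == ['123456', '654321']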
ARMmbed/autoversion | src/auto_version/config.py | get_or_create_config | def get_or_create_config(path, config):
"""Using TOML format, load config from given path, or write out example based on defaults"""
if os.path.isfile(path):
with open(path) as fh:
_LOG.debug("loading config from %s", os.path.abspath(path))
config._inflate(toml.load(fh))
else:
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
with open(path, "w") as fh:
toml.dump(config._deflate(), fh) | python | def get_or_create_config(path, config):
"""Using TOML format, load config from given path, or write out example based on defaults"""
if os.path.isfile(path):
with open(path) as fh:
_LOG.debug("loading config from %s", os.path.abspath(path))
config._inflate(toml.load(fh))
else:
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
with open(path, "w") as fh:
toml.dump(config._deflate(), fh) | [
"def",
"get_or_create_config",
"(",
"path",
",",
"config",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"fh",
":",
"_LOG",
".",
"debug",
"(",
"\"loading config from %s\"",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"config",
".",
"_inflate",
"(",
"toml",
".",
"load",
"(",
"fh",
")",
")",
"else",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
"except",
"OSError",
":",
"pass",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"fh",
":",
"toml",
".",
"dump",
"(",
"config",
".",
"_deflate",
"(",
")",
",",
"fh",
")"
] | Using TOML format, load config from given path, or write out example based on defaults | [
"Using",
"TOML",
"format",
"load",
"config",
"from",
"given",
"path",
"or",
"write",
"out",
"example",
"based",
"on",
"defaults"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/config.py#L81-L93 |
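On first run, `get_or_create_config` writes the deflated defaults out as TOML. A sketch of the resulting on-disk shape, assuming `Constants.CONFIG_KEY` is the section name and using hypothetical default keys:

.. code-block:: python

    import toml

    example = {"AutoVersionConfig": {"RELEASED_VALUE": "released",
                                     "VERSION_FIELD": "__version__"}}
    print(toml.dumps(example))
    # prints, approximately:
    # [AutoVersionConfig]
    # RELEASED_VALUE = "released"
    # VERSION_FIELD = "__version__"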
ARMmbed/autoversion | src/auto_version/config.py | AutoVersionConfig._deflate | def _deflate(cls):
"""Prepare for serialisation - returns a dictionary"""
data = {k: v for k, v in vars(cls).items() if not k.startswith("_")}
return {Constants.CONFIG_KEY: data} | python | def _deflate(cls):
"""Prepare for serialisation - returns a dictionary"""
data = {k: v for k, v in vars(cls).items() if not k.startswith("_")}
return {Constants.CONFIG_KEY: data} | [
"def",
"_deflate",
"(",
"cls",
")",
":",
"data",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"vars",
"(",
"cls",
")",
".",
"items",
"(",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"\"_\"",
")",
"}",
"return",
"{",
"Constants",
".",
"CONFIG_KEY",
":",
"data",
"}"
] | Prepare for serialisation - returns a dictionary | [
"Prepare",
"for",
"serialisation",
"-",
"returns",
"a",
"dictionary"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/config.py#L68-L71 |
ARMmbed/autoversion | src/auto_version/config.py | AutoVersionConfig._inflate | def _inflate(cls, data):
"""Update config by deserialising input dictionary"""
for k, v in data[Constants.CONFIG_KEY].items():
setattr(cls, k, v)
return cls._deflate() | python | def _inflate(cls, data):
"""Update config by deserialising input dictionary"""
for k, v in data[Constants.CONFIG_KEY].items():
setattr(cls, k, v)
return cls._deflate() | [
"def",
"_inflate",
"(",
"cls",
",",
"data",
")",
":",
"for",
"k",
",",
"v",
"in",
"data",
"[",
"Constants",
".",
"CONFIG_KEY",
"]",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"cls",
",",
"k",
",",
"v",
")",
"return",
"cls",
".",
"_deflate",
"(",
")"
] | Update config by deserialising input dictionary | [
"Update",
"config",
"by",
"deserialising",
"input",
"dictionary"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/config.py#L74-L78 |
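`_deflate` and `_inflate` round-trip the public class attributes. A self-contained sketch with a stand-in config class (the names are hypothetical and the `Constants.CONFIG_KEY` wrapper is omitted):

.. code-block:: python

    class DemoConfig(object):
        release = False
        tag = "v1.0.0"

    # what _deflate() collects: public class attributes only
    data = {k: v for k, v in vars(DemoConfig).items() if not k.startswith("_")}
    assert data == {"release": False, "tag": "v1.0.0"}

    # what _inflate() does: write the values back onto the class
    for k, v in {"release": True}.items():
        setattr(DemoConfig, k, v)
    assert DemoConfig.release is True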
fudge-py/fudge | fudge/patcher.py | with_patched_object | def with_patched_object(obj, attr_name, patched_value):
"""Decorator that patches an object before the decorated method
is called and restores it afterwards.
This is a wrapper around :func:`fudge.patcher.patch_object`
Example::
>>> from fudge import with_patched_object
>>> class Session:
... state = 'clean'
...
>>> @with_patched_object(Session, "state", "dirty")
... def test():
... print Session.state
...
>>> test()
dirty
>>> print Session.state
clean
"""
def patcher(method):
@wraps(method)
def method_call(*m_args, **m_kw):
patched_obj = patch_object(obj, attr_name, patched_value)
try:
return method(*m_args, **m_kw)
finally:
patched_obj.restore()
return method_call
return patcher | python | def with_patched_object(obj, attr_name, patched_value):
"""Decorator that patches an object before the decorated method
is called and restores it afterwards.
This is a wrapper around :func:`fudge.patcher.patch_object`
Example::
>>> from fudge import with_patched_object
>>> class Session:
... state = 'clean'
...
>>> @with_patched_object(Session, "state", "dirty")
... def test():
... print Session.state
...
>>> test()
dirty
>>> print Session.state
clean
"""
def patcher(method):
@wraps(method)
def method_call(*m_args, **m_kw):
patched_obj = patch_object(obj, attr_name, patched_value)
try:
return method(*m_args, **m_kw)
finally:
patched_obj.restore()
return method_call
return patcher | [
"def",
"with_patched_object",
"(",
"obj",
",",
"attr_name",
",",
"patched_value",
")",
":",
"def",
"patcher",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"method_call",
"(",
"*",
"m_args",
",",
"*",
"*",
"m_kw",
")",
":",
"patched_obj",
"=",
"patch_object",
"(",
"obj",
",",
"attr_name",
",",
"patched_value",
")",
"try",
":",
"return",
"method",
"(",
"*",
"m_args",
",",
"*",
"*",
"m_kw",
")",
"finally",
":",
"patched_obj",
".",
"restore",
"(",
")",
"return",
"method_call",
"return",
"patcher"
] | Decorator that patches an object before the decorated method
is called and restores it afterwards.
This is a wrapper around :func:`fudge.patcher.patch_object`
Example::
>>> from fudge import with_patched_object
>>> class Session:
... state = 'clean'
...
>>> @with_patched_object(Session, "state", "dirty")
... def test():
... print Session.state
...
>>> test()
dirty
>>> print Session.state
clean | [
"Decorator",
"that",
"patches",
"an",
"object",
"before",
"the",
"decorated",
"method",
"is",
"called",
"and",
"restores",
"it",
"afterwards",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/patcher.py#L136-L167 |
fudge-py/fudge | fudge/patcher.py | patch_object | def patch_object(obj, attr_name, patched_value):
"""Patches an object and returns an instance of :class:`fudge.patcher.PatchHandler` for later restoration.
Note that if *obj* is not an object but a path to a module then it will be imported.
You may want to use a more convenient wrapper :func:`with_patched_object` or :func:`patched_context`
Example::
>>> from fudge import patch_object
>>> class Session:
... state = 'clean'
...
>>> patched_session = patch_object(Session, "state", "dirty")
>>> Session.state
'dirty'
>>> patched_session.restore()
>>> Session.state
'clean'
Here is another example showing how to patch multiple objects at once::
>>> class Session:
... state = 'clean'
...
>>> class config:
... session_strategy = 'database'
...
>>> patches = [
... patch_object(config, "session_strategy", "filesystem"),
... patch_object(Session, "state", "dirty")
... ]
>>> try:
... # your app under test would run here ...
... print "(while patched)"
... print "config.session_strategy=%r" % config.session_strategy
... print "Session.state=%r" % Session.state
... finally:
... for p in patches:
... p.restore()
... print "(patches restored)"
(while patched)
config.session_strategy='filesystem'
Session.state='dirty'
(patches restored)
>>> config.session_strategy
'database'
>>> Session.state
'clean'
"""
if isinstance(obj, (str, unicode)):
obj_path = adjusted_path = obj
done = False
exc = None
at_top_level = False
while not done:
try:
obj = __import__(adjusted_path)
done = True
except ImportError:
                # Handle paths that traverse object attributes.
# Such as: smtplib.SMTP.connect
# smtplib <- module to import
adjusted_path = adjusted_path.rsplit('.', 1)[0]
if not exc:
exc = sys.exc_info()
if at_top_level:
# We're at the top level module and it doesn't exist.
# Raise the first exception since it will make more sense:
etype, val, tb = exc
raise etype, val, tb
if not adjusted_path.count('.'):
at_top_level = True
for part in obj_path.split('.')[1:]:
obj = getattr(obj, part)
handle = PatchHandler(obj, attr_name)
handle.patch(patched_value)
return handle | python | def patch_object(obj, attr_name, patched_value):
"""Patches an object and returns an instance of :class:`fudge.patcher.PatchHandler` for later restoration.
Note that if *obj* is not an object but a path to a module then it will be imported.
You may want to use a more convenient wrapper :func:`with_patched_object` or :func:`patched_context`
Example::
>>> from fudge import patch_object
>>> class Session:
... state = 'clean'
...
>>> patched_session = patch_object(Session, "state", "dirty")
>>> Session.state
'dirty'
>>> patched_session.restore()
>>> Session.state
'clean'
Here is another example showing how to patch multiple objects at once::
>>> class Session:
... state = 'clean'
...
>>> class config:
... session_strategy = 'database'
...
>>> patches = [
... patch_object(config, "session_strategy", "filesystem"),
... patch_object(Session, "state", "dirty")
... ]
>>> try:
... # your app under test would run here ...
... print "(while patched)"
... print "config.session_strategy=%r" % config.session_strategy
... print "Session.state=%r" % Session.state
... finally:
... for p in patches:
... p.restore()
... print "(patches restored)"
(while patched)
config.session_strategy='filesystem'
Session.state='dirty'
(patches restored)
>>> config.session_strategy
'database'
>>> Session.state
'clean'
"""
if isinstance(obj, (str, unicode)):
obj_path = adjusted_path = obj
done = False
exc = None
at_top_level = False
while not done:
try:
obj = __import__(adjusted_path)
done = True
except ImportError:
                # Handle paths that traverse object attributes.
# Such as: smtplib.SMTP.connect
# smtplib <- module to import
adjusted_path = adjusted_path.rsplit('.', 1)[0]
if not exc:
exc = sys.exc_info()
if at_top_level:
# We're at the top level module and it doesn't exist.
# Raise the first exception since it will make more sense:
etype, val, tb = exc
raise etype, val, tb
if not adjusted_path.count('.'):
at_top_level = True
for part in obj_path.split('.')[1:]:
obj = getattr(obj, part)
handle = PatchHandler(obj, attr_name)
handle.patch(patched_value)
return handle | [
"def",
"patch_object",
"(",
"obj",
",",
"attr_name",
",",
"patched_value",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"obj_path",
"=",
"adjusted_path",
"=",
"obj",
"done",
"=",
"False",
"exc",
"=",
"None",
"at_top_level",
"=",
"False",
"while",
"not",
"done",
":",
"try",
":",
"obj",
"=",
"__import__",
"(",
"adjusted_path",
")",
"done",
"=",
"True",
"except",
"ImportError",
":",
"# Handle paths that traveerse object attributes.",
"# Such as: smtplib.SMTP.connect",
"# smtplib <- module to import",
"adjusted_path",
"=",
"adjusted_path",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"if",
"not",
"exc",
":",
"exc",
"=",
"sys",
".",
"exc_info",
"(",
")",
"if",
"at_top_level",
":",
"# We're at the top level module and it doesn't exist.",
"# Raise the first exception since it will make more sense:",
"etype",
",",
"val",
",",
"tb",
"=",
"exc",
"raise",
"etype",
",",
"val",
",",
"tb",
"if",
"not",
"adjusted_path",
".",
"count",
"(",
"'.'",
")",
":",
"at_top_level",
"=",
"True",
"for",
"part",
"in",
"obj_path",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
":",
"obj",
"=",
"getattr",
"(",
"obj",
",",
"part",
")",
"handle",
"=",
"PatchHandler",
"(",
"obj",
",",
"attr_name",
")",
"handle",
".",
"patch",
"(",
"patched_value",
")",
"return",
"handle"
] | Patches an object and returns an instance of :class:`fudge.patcher.PatchHandler` for later restoration.
Note that if *obj* is not an object but a path to a module then it will be imported.
You may want to use a more convenient wrapper :func:`with_patched_object` or :func:`patched_context`
Example::
>>> from fudge import patch_object
>>> class Session:
... state = 'clean'
...
>>> patched_session = patch_object(Session, "state", "dirty")
>>> Session.state
'dirty'
>>> patched_session.restore()
>>> Session.state
'clean'
Here is another example showing how to patch multiple objects at once::
>>> class Session:
... state = 'clean'
...
>>> class config:
... session_strategy = 'database'
...
>>> patches = [
... patch_object(config, "session_strategy", "filesystem"),
... patch_object(Session, "state", "dirty")
... ]
>>> try:
... # your app under test would run here ...
... print "(while patched)"
... print "config.session_strategy=%r" % config.session_strategy
... print "Session.state=%r" % Session.state
... finally:
... for p in patches:
... p.restore()
... print "(patches restored)"
(while patched)
config.session_strategy='filesystem'
Session.state='dirty'
(patches restored)
>>> config.session_strategy
'database'
>>> Session.state
'clean' | [
"Patches",
"an",
"object",
"and",
"returns",
"an",
"instance",
"of",
":",
"class",
":",
"fudge",
".",
"patcher",
".",
"PatchHandler",
"for",
"later",
"restoration",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/patcher.py#L208-L287 |
fudge-py/fudge | fudge/patcher.py | PatchHandler.patch | def patch(self, patched_value):
"""Set a new value for the attribute of the object."""
try:
if self.getter:
setattr(self.getter_class, self.attr_name, patched_value)
else:
setattr(self.orig_object, self.attr_name, patched_value)
except TypeError:
# Workaround for patching builtin objects:
proxy_name = 'fudge_proxy_%s_%s_%s' % (
self.orig_object.__module__,
self.orig_object.__name__,
patched_value.__class__.__name__
)
self.proxy_object = type(proxy_name, (self.orig_object,),
{self.attr_name: patched_value})
mod = sys.modules[self.orig_object.__module__]
setattr(mod, self.orig_object.__name__, self.proxy_object) | python | def patch(self, patched_value):
"""Set a new value for the attribute of the object."""
try:
if self.getter:
setattr(self.getter_class, self.attr_name, patched_value)
else:
setattr(self.orig_object, self.attr_name, patched_value)
except TypeError:
# Workaround for patching builtin objects:
proxy_name = 'fudge_proxy_%s_%s_%s' % (
self.orig_object.__module__,
self.orig_object.__name__,
patched_value.__class__.__name__
)
self.proxy_object = type(proxy_name, (self.orig_object,),
{self.attr_name: patched_value})
mod = sys.modules[self.orig_object.__module__]
setattr(mod, self.orig_object.__name__, self.proxy_object) | [
"def",
"patch",
"(",
"self",
",",
"patched_value",
")",
":",
"try",
":",
"if",
"self",
".",
"getter",
":",
"setattr",
"(",
"self",
".",
"getter_class",
",",
"self",
".",
"attr_name",
",",
"patched_value",
")",
"else",
":",
"setattr",
"(",
"self",
".",
"orig_object",
",",
"self",
".",
"attr_name",
",",
"patched_value",
")",
"except",
"TypeError",
":",
"# Workaround for patching builtin objects:",
"proxy_name",
"=",
"'fudge_proxy_%s_%s_%s'",
"%",
"(",
"self",
".",
"orig_object",
".",
"__module__",
",",
"self",
".",
"orig_object",
".",
"__name__",
",",
"patched_value",
".",
"__class__",
".",
"__name__",
")",
"self",
".",
"proxy_object",
"=",
"type",
"(",
"proxy_name",
",",
"(",
"self",
".",
"orig_object",
",",
")",
",",
"{",
"self",
".",
"attr_name",
":",
"patched_value",
"}",
")",
"mod",
"=",
"sys",
".",
"modules",
"[",
"self",
".",
"orig_object",
".",
"__module__",
"]",
"setattr",
"(",
"mod",
",",
"self",
".",
"orig_object",
".",
"__name__",
",",
"self",
".",
"proxy_object",
")"
] | Set a new value for the attribute of the object. | [
"Set",
"a",
"new",
"value",
"for",
"the",
"attribute",
"of",
"the",
"object",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/patcher.py#L309-L326 |
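The `TypeError` fallback in `patch` works around classes whose attributes cannot be set directly by swapping in a dynamically built subclass. The subclassing trick in isolation (a sketch; the `Original` class is illustrative):

.. code-block:: python

    class Original(object):
        label = "real"

    # same type(name, bases, dict) call as the fallback above
    Proxy = type("fudge_proxy_demo", (Original,), {"label": "patched"})
    assert Proxy.label == "patched"
    assert Original.label == "real"  # the original class is untouched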
fudge-py/fudge | fudge/patcher.py | PatchHandler.restore | def restore(self):
"""Restore the saved value for the attribute of the object."""
if self.proxy_object is None:
if self.getter:
setattr(self.getter_class, self.attr_name, self.getter)
elif self.is_local:
setattr(self.orig_object, self.attr_name, self.orig_value)
else:
# Was not a local, safe to delete:
delattr(self.orig_object, self.attr_name)
else:
setattr(sys.modules[self.orig_object.__module__],
self.orig_object.__name__,
self.orig_object) | python | def restore(self):
"""Restore the saved value for the attribute of the object."""
if self.proxy_object is None:
if self.getter:
setattr(self.getter_class, self.attr_name, self.getter)
elif self.is_local:
setattr(self.orig_object, self.attr_name, self.orig_value)
else:
# Was not a local, safe to delete:
delattr(self.orig_object, self.attr_name)
else:
setattr(sys.modules[self.orig_object.__module__],
self.orig_object.__name__,
self.orig_object) | [
"def",
"restore",
"(",
"self",
")",
":",
"if",
"self",
".",
"proxy_object",
"is",
"None",
":",
"if",
"self",
".",
"getter",
":",
"setattr",
"(",
"self",
".",
"getter_class",
",",
"self",
".",
"attr_name",
",",
"self",
".",
"getter",
")",
"elif",
"self",
".",
"is_local",
":",
"setattr",
"(",
"self",
".",
"orig_object",
",",
"self",
".",
"attr_name",
",",
"self",
".",
"orig_value",
")",
"else",
":",
"# Was not a local, safe to delete:",
"delattr",
"(",
"self",
".",
"orig_object",
",",
"self",
".",
"attr_name",
")",
"else",
":",
"setattr",
"(",
"sys",
".",
"modules",
"[",
"self",
".",
"orig_object",
".",
"__module__",
"]",
",",
"self",
".",
"orig_object",
".",
"__name__",
",",
"self",
".",
"orig_object",
")"
] | Restore the saved value for the attribute of the object. | [
"Restore",
"the",
"saved",
"value",
"for",
"the",
"attribute",
"of",
"the",
"object",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/patcher.py#L328-L341 |
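Putting `patch` and `restore` together, assuming the `PatchHandler` constructor records the original value as `patch_object`'s usage above implies (the `Session` class comes from the fudge docstrings):

.. code-block:: python

    class Session:
        state = 'clean'

    handle = PatchHandler(Session, 'state')
    handle.patch('dirty')
    assert Session.state == 'dirty'
    handle.restore()
    assert Session.state == 'clean'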
Clinical-Genomics/trailblazer | trailblazer/mip/files.py | parse_config | def parse_config(data: dict) -> dict:
"""Parse MIP config file.
Args:
data (dict): raw YAML input from MIP analysis config file
Returns:
dict: parsed data
"""
return {
'email': data.get('email'),
'family': data['family_id'],
'samples': [{
'id': sample_id,
'type': analysis_type,
} for sample_id, analysis_type in data['analysis_type'].items()],
'config_path': data['config_file_analysis'],
'is_dryrun': True if 'dry_run_all' in data else False,
'log_path': data['log_file'],
'out_dir': data['outdata_dir'],
'priority': data['slurm_quality_of_service'],
'sampleinfo_path': data['sample_info_file'],
} | python | def parse_config(data: dict) -> dict:
"""Parse MIP config file.
Args:
data (dict): raw YAML input from MIP analysis config file
Returns:
dict: parsed data
"""
return {
'email': data.get('email'),
'family': data['family_id'],
'samples': [{
'id': sample_id,
'type': analysis_type,
} for sample_id, analysis_type in data['analysis_type'].items()],
'config_path': data['config_file_analysis'],
'is_dryrun': True if 'dry_run_all' in data else False,
'log_path': data['log_file'],
'out_dir': data['outdata_dir'],
'priority': data['slurm_quality_of_service'],
'sampleinfo_path': data['sample_info_file'],
} | [
"def",
"parse_config",
"(",
"data",
":",
"dict",
")",
"->",
"dict",
":",
"return",
"{",
"'email'",
":",
"data",
".",
"get",
"(",
"'email'",
")",
",",
"'family'",
":",
"data",
"[",
"'family_id'",
"]",
",",
"'samples'",
":",
"[",
"{",
"'id'",
":",
"sample_id",
",",
"'type'",
":",
"analysis_type",
",",
"}",
"for",
"sample_id",
",",
"analysis_type",
"in",
"data",
"[",
"'analysis_type'",
"]",
".",
"items",
"(",
")",
"]",
",",
"'config_path'",
":",
"data",
"[",
"'config_file_analysis'",
"]",
",",
"'is_dryrun'",
":",
"True",
"if",
"'dry_run_all'",
"in",
"data",
"else",
"False",
",",
"'log_path'",
":",
"data",
"[",
"'log_file'",
"]",
",",
"'out_dir'",
":",
"data",
"[",
"'outdata_dir'",
"]",
",",
"'priority'",
":",
"data",
"[",
"'slurm_quality_of_service'",
"]",
",",
"'sampleinfo_path'",
":",
"data",
"[",
"'sample_info_file'",
"]",
",",
"}"
] | Parse MIP config file.
Args:
data (dict): raw YAML input from MIP analysis config file
Returns:
dict: parsed data | [
"Parse",
"MIP",
"config",
"file",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/files.py#L9-L31 |
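A worked example for `parse_config` defined above, using a hypothetical minimal MIP config payload:

.. code-block:: python

    raw = {
        'family_id': 'family1',
        'analysis_type': {'sample1': 'wgs'},
        'config_file_analysis': '/path/config.yaml',
        'log_file': '/path/mip.log',
        'outdata_dir': '/path/out',
        'slurm_quality_of_service': 'normal',
        'sample_info_file': '/path/sampleinfo.yaml',
    }
    parsed = parse_config(raw)
    assert parsed['family'] == 'family1'
    assert parsed['samples'] == [{'id': 'sample1', 'type': 'wgs'}]
    assert parsed['is_dryrun'] is False
    assert parsed['email'] is None  # optional, read with .get()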
Clinical-Genomics/trailblazer | trailblazer/mip/files.py | parse_sampleinfo | def parse_sampleinfo(data: dict) -> dict:
"""Parse MIP sample info file.
Args:
data (dict): raw YAML input from MIP qc sample info file
Returns:
dict: parsed data
"""
genome_build = data['human_genome_build']
genome_build_str = f"{genome_build['source']}{genome_build['version']}"
if 'svdb' in data['program']:
svdb_outpath = (f"{data['program']['svdb']['path']}")
else:
svdb_outpath = ''
outdata = {
'date': data['analysis_date'],
'family': data['family'],
'genome_build': genome_build_str,
'rank_model_version': data['program']['genmod']['rank_model']['version'],
'is_finished': True if data['analysisrunstatus'] == 'finished' else False,
'pedigree_path': data['pedigree_minimal'],
'peddy': {
'ped': (data['program']['peddy']['peddy']['path'] if
'peddy' in data['program'] else None),
'ped_check': (data['program']['peddy']['ped_check']['path'] if
'peddy' in data['program'] else None),
'sex_check': (data['program']['peddy']['sex_check']['path'] if
'peddy' in data['program'] else None),
},
'qcmetrics_path': data['program']['qccollect']['path'],
'samples': [],
'snv': {
'bcf': data['most_complete_bcf']['path'],
'clinical_vcf': data['vcf_binary_file']['clinical']['path'],
'gbcf': data['gbcf_file']['path'],
'research_vcf': data['vcf_binary_file']['research']['path'],
},
'svdb_outpath': svdb_outpath,
'sv': {
'bcf': data.get('sv_bcf_file', {}).get('path'),
'clinical_vcf': (data['sv_vcf_binary_file']['clinical']['path'] if
'sv_vcf_binary_file' in data else None),
'merged': svdb_outpath,
'research_vcf': (data['sv_vcf_binary_file']['research']['path'] if
'sv_vcf_binary_file' in data else None),
},
'version': data['mip_version'],
}
for sample_id, sample_data in data['sample'].items():
sample = {
'id': sample_id,
'bam': sample_data['most_complete_bam']['path'],
'sambamba': list(sample_data['program']['sambamba_depth'].values())[0]['path'],
'sex': sample_data['sex'],
# subsample mt is only for wgs data
'subsample_mt': (list(sample_data['program']['samtools_subsample_mt'].values())[0]['path'] if
'samtools_subsample_mt' in sample_data['program'] else None),
'vcf2cytosure': list(sample_data['program']['vcf2cytosure'].values())[0]['path'],
}
chanjo_sexcheck = list(sample_data['program']['chanjo_sexcheck'].values())[0]
sample['chanjo_sexcheck'] = chanjo_sexcheck['path']
outdata['samples'].append(sample)
return outdata | python | def parse_sampleinfo(data: dict) -> dict:
"""Parse MIP sample info file.
Args:
data (dict): raw YAML input from MIP qc sample info file
Returns:
dict: parsed data
"""
genome_build = data['human_genome_build']
genome_build_str = f"{genome_build['source']}{genome_build['version']}"
if 'svdb' in data['program']:
svdb_outpath = (f"{data['program']['svdb']['path']}")
else:
svdb_outpath = ''
outdata = {
'date': data['analysis_date'],
'family': data['family'],
'genome_build': genome_build_str,
'rank_model_version': data['program']['genmod']['rank_model']['version'],
'is_finished': True if data['analysisrunstatus'] == 'finished' else False,
'pedigree_path': data['pedigree_minimal'],
'peddy': {
'ped': (data['program']['peddy']['peddy']['path'] if
'peddy' in data['program'] else None),
'ped_check': (data['program']['peddy']['ped_check']['path'] if
'peddy' in data['program'] else None),
'sex_check': (data['program']['peddy']['sex_check']['path'] if
'peddy' in data['program'] else None),
},
'qcmetrics_path': data['program']['qccollect']['path'],
'samples': [],
'snv': {
'bcf': data['most_complete_bcf']['path'],
'clinical_vcf': data['vcf_binary_file']['clinical']['path'],
'gbcf': data['gbcf_file']['path'],
'research_vcf': data['vcf_binary_file']['research']['path'],
},
'svdb_outpath': svdb_outpath,
'sv': {
'bcf': data.get('sv_bcf_file', {}).get('path'),
'clinical_vcf': (data['sv_vcf_binary_file']['clinical']['path'] if
'sv_vcf_binary_file' in data else None),
'merged': svdb_outpath,
'research_vcf': (data['sv_vcf_binary_file']['research']['path'] if
'sv_vcf_binary_file' in data else None),
},
'version': data['mip_version'],
}
for sample_id, sample_data in data['sample'].items():
sample = {
'id': sample_id,
'bam': sample_data['most_complete_bam']['path'],
'sambamba': list(sample_data['program']['sambamba_depth'].values())[0]['path'],
'sex': sample_data['sex'],
# subsample mt is only for wgs data
'subsample_mt': (list(sample_data['program']['samtools_subsample_mt'].values())[0]['path'] if
'samtools_subsample_mt' in sample_data['program'] else None),
'vcf2cytosure': list(sample_data['program']['vcf2cytosure'].values())[0]['path'],
}
chanjo_sexcheck = list(sample_data['program']['chanjo_sexcheck'].values())[0]
sample['chanjo_sexcheck'] = chanjo_sexcheck['path']
outdata['samples'].append(sample)
return outdata | [
"def",
"parse_sampleinfo",
"(",
"data",
":",
"dict",
")",
"->",
"dict",
":",
"genome_build",
"=",
"data",
"[",
"'human_genome_build'",
"]",
"genome_build_str",
"=",
"f\"{genome_build['source']}{genome_build['version']}\"",
"if",
"'svdb'",
"in",
"data",
"[",
"'program'",
"]",
":",
"svdb_outpath",
"=",
"(",
"f\"{data['program']['svdb']['path']}\"",
")",
"else",
":",
"svdb_outpath",
"=",
"''",
"outdata",
"=",
"{",
"'date'",
":",
"data",
"[",
"'analysis_date'",
"]",
",",
"'family'",
":",
"data",
"[",
"'family'",
"]",
",",
"'genome_build'",
":",
"genome_build_str",
",",
"'rank_model_version'",
":",
"data",
"[",
"'program'",
"]",
"[",
"'genmod'",
"]",
"[",
"'rank_model'",
"]",
"[",
"'version'",
"]",
",",
"'is_finished'",
":",
"True",
"if",
"data",
"[",
"'analysisrunstatus'",
"]",
"==",
"'finished'",
"else",
"False",
",",
"'pedigree_path'",
":",
"data",
"[",
"'pedigree_minimal'",
"]",
",",
"'peddy'",
":",
"{",
"'ped'",
":",
"(",
"data",
"[",
"'program'",
"]",
"[",
"'peddy'",
"]",
"[",
"'peddy'",
"]",
"[",
"'path'",
"]",
"if",
"'peddy'",
"in",
"data",
"[",
"'program'",
"]",
"else",
"None",
")",
",",
"'ped_check'",
":",
"(",
"data",
"[",
"'program'",
"]",
"[",
"'peddy'",
"]",
"[",
"'ped_check'",
"]",
"[",
"'path'",
"]",
"if",
"'peddy'",
"in",
"data",
"[",
"'program'",
"]",
"else",
"None",
")",
",",
"'sex_check'",
":",
"(",
"data",
"[",
"'program'",
"]",
"[",
"'peddy'",
"]",
"[",
"'sex_check'",
"]",
"[",
"'path'",
"]",
"if",
"'peddy'",
"in",
"data",
"[",
"'program'",
"]",
"else",
"None",
")",
",",
"}",
",",
"'qcmetrics_path'",
":",
"data",
"[",
"'program'",
"]",
"[",
"'qccollect'",
"]",
"[",
"'path'",
"]",
",",
"'samples'",
":",
"[",
"]",
",",
"'snv'",
":",
"{",
"'bcf'",
":",
"data",
"[",
"'most_complete_bcf'",
"]",
"[",
"'path'",
"]",
",",
"'clinical_vcf'",
":",
"data",
"[",
"'vcf_binary_file'",
"]",
"[",
"'clinical'",
"]",
"[",
"'path'",
"]",
",",
"'gbcf'",
":",
"data",
"[",
"'gbcf_file'",
"]",
"[",
"'path'",
"]",
",",
"'research_vcf'",
":",
"data",
"[",
"'vcf_binary_file'",
"]",
"[",
"'research'",
"]",
"[",
"'path'",
"]",
",",
"}",
",",
"'svdb_outpath'",
":",
"svdb_outpath",
",",
"'sv'",
":",
"{",
"'bcf'",
":",
"data",
".",
"get",
"(",
"'sv_bcf_file'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'path'",
")",
",",
"'clinical_vcf'",
":",
"(",
"data",
"[",
"'sv_vcf_binary_file'",
"]",
"[",
"'clinical'",
"]",
"[",
"'path'",
"]",
"if",
"'sv_vcf_binary_file'",
"in",
"data",
"else",
"None",
")",
",",
"'merged'",
":",
"svdb_outpath",
",",
"'research_vcf'",
":",
"(",
"data",
"[",
"'sv_vcf_binary_file'",
"]",
"[",
"'research'",
"]",
"[",
"'path'",
"]",
"if",
"'sv_vcf_binary_file'",
"in",
"data",
"else",
"None",
")",
",",
"}",
",",
"'version'",
":",
"data",
"[",
"'mip_version'",
"]",
",",
"}",
"for",
"sample_id",
",",
"sample_data",
"in",
"data",
"[",
"'sample'",
"]",
".",
"items",
"(",
")",
":",
"sample",
"=",
"{",
"'id'",
":",
"sample_id",
",",
"'bam'",
":",
"sample_data",
"[",
"'most_complete_bam'",
"]",
"[",
"'path'",
"]",
",",
"'sambamba'",
":",
"list",
"(",
"sample_data",
"[",
"'program'",
"]",
"[",
"'sambamba_depth'",
"]",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"[",
"'path'",
"]",
",",
"'sex'",
":",
"sample_data",
"[",
"'sex'",
"]",
",",
"# subsample mt is only for wgs data",
"'subsample_mt'",
":",
"(",
"list",
"(",
"sample_data",
"[",
"'program'",
"]",
"[",
"'samtools_subsample_mt'",
"]",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"[",
"'path'",
"]",
"if",
"'samtools_subsample_mt'",
"in",
"sample_data",
"[",
"'program'",
"]",
"else",
"None",
")",
",",
"'vcf2cytosure'",
":",
"list",
"(",
"sample_data",
"[",
"'program'",
"]",
"[",
"'vcf2cytosure'",
"]",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"[",
"'path'",
"]",
",",
"}",
"chanjo_sexcheck",
"=",
"list",
"(",
"sample_data",
"[",
"'program'",
"]",
"[",
"'chanjo_sexcheck'",
"]",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"sample",
"[",
"'chanjo_sexcheck'",
"]",
"=",
"chanjo_sexcheck",
"[",
"'path'",
"]",
"outdata",
"[",
"'samples'",
"]",
".",
"append",
"(",
"sample",
")",
"return",
"outdata"
] | Parse MIP sample info file.
Args:
data (dict): raw YAML input from MIP qc sample info file
Returns:
dict: parsed data | [
"Parse",
"MIP",
"sample",
"info",
"file",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/files.py#L34-L100 |
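A minimal driver sketch for the parser above; the YAML file name and the use of PyYAML's safe_load are illustrative assumptions, not part of this record.

import yaml  # assumed loader; any YAML parser yielding plain dicts works

with open('case_qc_sample_info.yaml') as handle:  # hypothetical path
    raw = yaml.safe_load(handle)

parsed = parse_sampleinfo(raw)
print(parsed['family'], parsed['is_finished'])
for sample in parsed['samples']:
    print(sample['id'], sample['bam'])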
Clinical-Genomics/trailblazer | trailblazer/mip/files.py | parse_qcmetrics | def parse_qcmetrics(metrics: dict) -> dict:
"""Parse MIP qc metrics file.
Args:
metrics (dict): raw YAML input from MIP qc metrics file
Returns:
dict: parsed data
"""
data = {
'versions': {
'freebayes': metrics['program']['freebayes']['version'],
'gatk': metrics['program']['gatk']['version'],
'manta': metrics['program'].get('manta', {}).get('version'),
'bcftools': metrics['program']['bcftools']['version'],
'vep': metrics['program']['varianteffectpredictor']['version'],
},
'samples': [],
}
plink_samples = {}
plink_sexcheck = metrics['program'].get('plink_sexcheck', {}).get('sample_sexcheck')
if isinstance(plink_sexcheck, str):
sample_id, sex_number = plink_sexcheck.strip().split(':', 1)
plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
elif isinstance(plink_sexcheck, list):
for sample_raw in plink_sexcheck:
sample_id, sex_number = sample_raw.split(':', 1)
plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
for sample_id, sample_metrics in metrics['sample'].items():
## Bam stats metrics
bam_stats = [values['bamstats'] for key, values in sample_metrics.items()
if key[:-1].endswith('.lane')]
total_reads = sum(int(bam_stat['raw_total_sequences']) for bam_stat in bam_stats)
total_mapped = sum(int(bam_stat['reads_mapped']) for bam_stat in bam_stats)
## Picard metrics
main_key = [key for key in sample_metrics.keys() if '_lanes_' in key][0]
hs_metrics = sample_metrics[main_key]['collecthsmetrics']['header']['data']
multiple_inst_metrics = sample_metrics[main_key]['collectmultiplemetricsinsertsize']['header']['data']
multiple_metrics = sample_metrics[main_key]['collectmultiplemetrics']['header']['pair']
sample_data = {
'at_dropout': hs_metrics['AT_DROPOUT'],
'completeness_target': {
10: hs_metrics['PCT_TARGET_BASES_10X'],
20: hs_metrics['PCT_TARGET_BASES_20X'],
50: hs_metrics['PCT_TARGET_BASES_50X'],
100: hs_metrics['PCT_TARGET_BASES_100X'],
},
'duplicates': float(sample_metrics[main_key]['markduplicates']['fraction_duplicates']),
'gc_dropout': hs_metrics['GC_DROPOUT'],
'id': sample_id,
'median_insert_size': multiple_inst_metrics['MEDIAN_INSERT_SIZE'],
'mapped': total_mapped / total_reads,
'plink_sex': plink_samples.get(sample_id),
'predicted_sex': sample_metrics[main_key]['chanjo_sexcheck']['gender'],
'reads': total_reads,
'insert_size_standard_deviation': float(multiple_inst_metrics['STANDARD_DEVIATION']),
'strand_balance': float(multiple_metrics['STRAND_BALANCE']),
'target_coverage': float(hs_metrics['MEAN_TARGET_COVERAGE']),
}
data['samples'].append(sample_data)
return data | python | def parse_qcmetrics(metrics: dict) -> dict:
"""Parse MIP qc metrics file.
Args:
metrics (dict): raw YAML input from MIP qc metrics file
Returns:
dict: parsed data
"""
data = {
'versions': {
'freebayes': metrics['program']['freebayes']['version'],
'gatk': metrics['program']['gatk']['version'],
'manta': metrics['program'].get('manta', {}).get('version'),
'bcftools': metrics['program']['bcftools']['version'],
'vep': metrics['program']['varianteffectpredictor']['version'],
},
'samples': [],
}
plink_samples = {}
plink_sexcheck = metrics['program'].get('plink_sexcheck', {}).get('sample_sexcheck')
if isinstance(plink_sexcheck, str):
sample_id, sex_number = plink_sexcheck.strip().split(':', 1)
plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
elif isinstance(plink_sexcheck, list):
for sample_raw in plink_sexcheck:
sample_id, sex_number = sample_raw.split(':', 1)
plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
for sample_id, sample_metrics in metrics['sample'].items():
## Bam stats metrics
bam_stats = [values['bamstats'] for key, values in sample_metrics.items()
if key[:-1].endswith('.lane')]
total_reads = sum(int(bam_stat['raw_total_sequences']) for bam_stat in bam_stats)
total_mapped = sum(int(bam_stat['reads_mapped']) for bam_stat in bam_stats)
## Picard metrics
main_key = [key for key in sample_metrics.keys() if '_lanes_' in key][0]
hs_metrics = sample_metrics[main_key]['collecthsmetrics']['header']['data']
multiple_inst_metrics = sample_metrics[main_key]['collectmultiplemetricsinsertsize']['header']['data']
multiple_metrics = sample_metrics[main_key]['collectmultiplemetrics']['header']['pair']
sample_data = {
'at_dropout': hs_metrics['AT_DROPOUT'],
'completeness_target': {
10: hs_metrics['PCT_TARGET_BASES_10X'],
20: hs_metrics['PCT_TARGET_BASES_20X'],
50: hs_metrics['PCT_TARGET_BASES_50X'],
100: hs_metrics['PCT_TARGET_BASES_100X'],
},
'duplicates': float(sample_metrics[main_key]['markduplicates']['fraction_duplicates']),
'gc_dropout': hs_metrics['GC_DROPOUT'],
'id': sample_id,
'median_insert_size': multiple_inst_metrics['MEDIAN_INSERT_SIZE'],
'mapped': total_mapped / total_reads,
'plink_sex': plink_samples.get(sample_id),
'predicted_sex': sample_metrics[main_key]['chanjo_sexcheck']['gender'],
'reads': total_reads,
'insert_size_standard_deviation': float(multiple_inst_metrics['STANDARD_DEVIATION']),
'strand_balance': float(multiple_metrics['STRAND_BALANCE']),
'target_coverage': float(hs_metrics['MEAN_TARGET_COVERAGE']),
}
data['samples'].append(sample_data)
return data | [
"def",
"parse_qcmetrics",
"(",
"metrics",
":",
"dict",
")",
"->",
"dict",
":",
"data",
"=",
"{",
"'versions'",
":",
"{",
"'freebayes'",
":",
"metrics",
"[",
"'program'",
"]",
"[",
"'freebayes'",
"]",
"[",
"'version'",
"]",
",",
"'gatk'",
":",
"metrics",
"[",
"'program'",
"]",
"[",
"'gatk'",
"]",
"[",
"'version'",
"]",
",",
"'manta'",
":",
"metrics",
"[",
"'program'",
"]",
".",
"get",
"(",
"'manta'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'version'",
")",
",",
"'bcftools'",
":",
"metrics",
"[",
"'program'",
"]",
"[",
"'bcftools'",
"]",
"[",
"'version'",
"]",
",",
"'vep'",
":",
"metrics",
"[",
"'program'",
"]",
"[",
"'varianteffectpredictor'",
"]",
"[",
"'version'",
"]",
",",
"}",
",",
"'samples'",
":",
"[",
"]",
",",
"}",
"plink_samples",
"=",
"{",
"}",
"plink_sexcheck",
"=",
"metrics",
"[",
"'program'",
"]",
".",
"get",
"(",
"'plink_sexcheck'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'sample_sexcheck'",
")",
"if",
"isinstance",
"(",
"plink_sexcheck",
",",
"str",
")",
":",
"sample_id",
",",
"sex_number",
"=",
"plink_sexcheck",
".",
"strip",
"(",
")",
".",
"split",
"(",
"':'",
",",
"1",
")",
"plink_samples",
"[",
"sample_id",
"]",
"=",
"PED_SEX_MAP",
".",
"get",
"(",
"int",
"(",
"sex_number",
")",
")",
"elif",
"isinstance",
"(",
"plink_sexcheck",
",",
"list",
")",
":",
"for",
"sample_raw",
"in",
"plink_sexcheck",
":",
"sample_id",
",",
"sex_number",
"=",
"sample_raw",
".",
"split",
"(",
"':'",
",",
"1",
")",
"plink_samples",
"[",
"sample_id",
"]",
"=",
"PED_SEX_MAP",
".",
"get",
"(",
"int",
"(",
"sex_number",
")",
")",
"for",
"sample_id",
",",
"sample_metrics",
"in",
"metrics",
"[",
"'sample'",
"]",
".",
"items",
"(",
")",
":",
"## Bam stats metrics",
"bam_stats",
"=",
"[",
"values",
"[",
"'bamstats'",
"]",
"for",
"key",
",",
"values",
"in",
"sample_metrics",
".",
"items",
"(",
")",
"if",
"key",
"[",
":",
"-",
"1",
"]",
".",
"endswith",
"(",
"'.lane'",
")",
"]",
"total_reads",
"=",
"sum",
"(",
"int",
"(",
"bam_stat",
"[",
"'raw_total_sequences'",
"]",
")",
"for",
"bam_stat",
"in",
"bam_stats",
")",
"total_mapped",
"=",
"sum",
"(",
"int",
"(",
"bam_stat",
"[",
"'reads_mapped'",
"]",
")",
"for",
"bam_stat",
"in",
"bam_stats",
")",
"## Picard metrics",
"main_key",
"=",
"[",
"key",
"for",
"key",
"in",
"sample_metrics",
".",
"keys",
"(",
")",
"if",
"'_lanes_'",
"in",
"key",
"]",
"[",
"0",
"]",
"hs_metrics",
"=",
"sample_metrics",
"[",
"main_key",
"]",
"[",
"'collecthsmetrics'",
"]",
"[",
"'header'",
"]",
"[",
"'data'",
"]",
"multiple_inst_metrics",
"=",
"sample_metrics",
"[",
"main_key",
"]",
"[",
"'collectmultiplemetricsinsertsize'",
"]",
"[",
"'header'",
"]",
"[",
"'data'",
"]",
"multiple_metrics",
"=",
"sample_metrics",
"[",
"main_key",
"]",
"[",
"'collectmultiplemetrics'",
"]",
"[",
"'header'",
"]",
"[",
"'pair'",
"]",
"sample_data",
"=",
"{",
"'at_dropout'",
":",
"hs_metrics",
"[",
"'AT_DROPOUT'",
"]",
",",
"'completeness_target'",
":",
"{",
"10",
":",
"hs_metrics",
"[",
"'PCT_TARGET_BASES_10X'",
"]",
",",
"20",
":",
"hs_metrics",
"[",
"'PCT_TARGET_BASES_20X'",
"]",
",",
"50",
":",
"hs_metrics",
"[",
"'PCT_TARGET_BASES_50X'",
"]",
",",
"100",
":",
"hs_metrics",
"[",
"'PCT_TARGET_BASES_100X'",
"]",
",",
"}",
",",
"'duplicates'",
":",
"float",
"(",
"sample_metrics",
"[",
"main_key",
"]",
"[",
"'markduplicates'",
"]",
"[",
"'fraction_duplicates'",
"]",
")",
",",
"'gc_dropout'",
":",
"hs_metrics",
"[",
"'GC_DROPOUT'",
"]",
",",
"'id'",
":",
"sample_id",
",",
"'median_insert_size'",
":",
"multiple_inst_metrics",
"[",
"'MEDIAN_INSERT_SIZE'",
"]",
",",
"'mapped'",
":",
"total_mapped",
"/",
"total_reads",
",",
"'plink_sex'",
":",
"plink_samples",
".",
"get",
"(",
"sample_id",
")",
",",
"'predicted_sex'",
":",
"sample_metrics",
"[",
"main_key",
"]",
"[",
"'chanjo_sexcheck'",
"]",
"[",
"'gender'",
"]",
",",
"'reads'",
":",
"total_reads",
",",
"'insert_size_standard_deviation'",
":",
"float",
"(",
"multiple_inst_metrics",
"[",
"'STANDARD_DEVIATION'",
"]",
")",
",",
"'strand_balance'",
":",
"float",
"(",
"multiple_metrics",
"[",
"'STRAND_BALANCE'",
"]",
")",
",",
"'target_coverage'",
":",
"float",
"(",
"hs_metrics",
"[",
"'MEAN_TARGET_COVERAGE'",
"]",
")",
",",
"}",
"data",
"[",
"'samples'",
"]",
".",
"append",
"(",
"sample_data",
")",
"return",
"data"
] | Parse MIP qc metrics file.
Args:
metrics (dict): raw YAML input from MIP qc metrics file
Returns:
dict: parsed data | [
"Parse",
"MIP",
"qc",
"metrics",
"file",
".",
"Args",
":",
"metrics",
"(",
"dict",
")",
":",
"raw",
"YAML",
"input",
"from",
"MIP",
"qc",
"metrics",
"file"
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/files.py#L103-L168 |
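The same driving pattern applies here; again the loader and the file name are assumptions.

import yaml

with open('case_qc_metrics.yaml') as handle:  # hypothetical path
    metrics = yaml.safe_load(handle)

qc_data = parse_qcmetrics(metrics)
print(qc_data['versions']['gatk'])
for sample in qc_data['samples']:
    # 'mapped' is total_mapped / total_reads, i.e. a 0-1 fraction
    print(sample['id'], sample['mapped'], sample['duplicates'])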
Clinical-Genomics/trailblazer | trailblazer/mip/files.py | parse_peddy_sexcheck | def parse_peddy_sexcheck(handle: TextIO):
"""Parse Peddy sexcheck output."""
data = {}
samples = csv.DictReader(handle)
for sample in samples:
data[sample['sample_id']] = {
'predicted_sex': sample['predicted_sex'],
'het_ratio': float(sample['het_ratio']),
'error': True if sample['error'] == 'True' else False,
}
return data | python | def parse_peddy_sexcheck(handle: TextIO):
"""Parse Peddy sexcheck output."""
data = {}
samples = csv.DictReader(handle)
for sample in samples:
data[sample['sample_id']] = {
'predicted_sex': sample['predicted_sex'],
'het_ratio': float(sample['het_ratio']),
'error': True if sample['error'] == 'True' else False,
}
return data | [
"def",
"parse_peddy_sexcheck",
"(",
"handle",
":",
"TextIO",
")",
":",
"data",
"=",
"{",
"}",
"samples",
"=",
"csv",
".",
"DictReader",
"(",
"handle",
")",
"for",
"sample",
"in",
"samples",
":",
"data",
"[",
"sample",
"[",
"'sample_id'",
"]",
"]",
"=",
"{",
"'predicted_sex'",
":",
"sample",
"[",
"'predicted_sex'",
"]",
",",
"'het_ratio'",
":",
"float",
"(",
"sample",
"[",
"'het_ratio'",
"]",
")",
",",
"'error'",
":",
"True",
"if",
"sample",
"[",
"'error'",
"]",
"==",
"'True'",
"else",
"False",
",",
"}",
"return",
"data"
] | Parse Peddy sexcheck output. | [
"Parse",
"Peddy",
"sexcheck",
"output",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/files.py#L171-L181 |
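A short usage sketch; the report file name is an assumption, but peddy's sex-check output is comma-separated, matching the plain csv.DictReader above.

with open('case.sex_check.csv') as handle:  # hypothetical path
    sexcheck = parse_peddy_sexcheck(handle)

for sample_id, info in sexcheck.items():
    print(sample_id, info['predicted_sex'], info['het_ratio'], info['error'])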
Clinical-Genomics/trailblazer | trailblazer/mip/files.py | parse_chanjo_sexcheck | def parse_chanjo_sexcheck(handle: TextIO):
"""Parse Chanjo sex-check output."""
samples = csv.DictReader(handle, delimiter='\t')
for sample in samples:
return {
'predicted_sex': sample['sex'],
'x_coverage': float(sample['#X_coverage']),
'y_coverage': float(sample['Y_coverage']),
} | python | def parse_chanjo_sexcheck(handle: TextIO):
"""Parse Chanjo sex-check output."""
samples = csv.DictReader(handle, delimiter='\t')
for sample in samples:
return {
'predicted_sex': sample['sex'],
'x_coverage': float(sample['#X_coverage']),
'y_coverage': float(sample['Y_coverage']),
} | [
"def",
"parse_chanjo_sexcheck",
"(",
"handle",
":",
"TextIO",
")",
":",
"samples",
"=",
"csv",
".",
"DictReader",
"(",
"handle",
",",
"delimiter",
"=",
"'\\t'",
")",
"for",
"sample",
"in",
"samples",
":",
"return",
"{",
"'predicted_sex'",
":",
"sample",
"[",
"'sex'",
"]",
",",
"'x_coverage'",
":",
"float",
"(",
"sample",
"[",
"'#X_coverage'",
"]",
")",
",",
"'y_coverage'",
":",
"float",
"(",
"sample",
"[",
"'Y_coverage'",
"]",
")",
",",
"}"
] | Parse Chanjo sex-check output. | [
"Parse",
"Chanjo",
"sex",
"-",
"check",
"output",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/files.py#L184-L192 |
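Note the return sits inside the loop, so only the first data row of the tab-separated file is parsed, and None comes back for a file with no data rows. A sketch with an assumed file name:

with open('sample.sexcheck.tsv') as handle:  # hypothetical path
    result = parse_chanjo_sexcheck(handle)

if result:
    print(result['predicted_sex'], result['x_coverage'], result['y_coverage'])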
ozgurgunes/django-manifest | manifest/core/decorators.py | owner_required | def owner_required(Model=None):
"""
Usage:
@owner_required(Entry)
def manage_entry(request, pk=None, object=None):
@owner_required()
def entry_delete(*args, **kwargs):
kwargs["post_delete_redirect"] = reverse('manage_blogs')
return delete_object(*args, **kwargs)
"""
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
user = request.user
grant = False
model = Model
mod_edit = False
if 'pk' in kwargs:
pk = int(kwargs['pk'])
if model:
mod_edit = True
elif 'model' in kwargs:
model = kwargs['model']
object = get_object_or_404(model, pk=pk)
if user.is_superuser:
grant = True
else:
if user.__class__ == model:
grant = pk == user.id
else:
names = [rel.get_accessor_name()
for rel
in user._meta.get_all_related_objects()
if rel.model == model]
if names:
grant = pk in [o.id
for o
in getattr(user, names[0]).all()]
if not grant:
return HttpResponseForbidden('Forbidden')
if mod_edit:
kwargs['object'] = object
return view_func(request, *args, **kwargs)
return wraps(view_func)(_wrapped_view)
return decorator | python | def owner_required(Model=None):
"""
Usage:
@owner_required(Entry)
def manage_entry(request, pk=None, object=None):
@owner_required()
def entry_delete(*args, **kwargs):
kwargs["post_delete_redirect"] = reverse('manage_blogs')
return delete_object(*args, **kwargs)
"""
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
user = request.user
grant = False
model = Model
mod_edit = False
if 'pk' in kwargs:
pk = int(kwargs['pk'])
if model:
mod_edit = True
elif 'model' in kwargs:
model = kwargs['model']
object = get_object_or_404(model, pk=pk)
if user.is_superuser:
grant = True
else:
if user.__class__ == model:
grant = pk == user.id
else:
names = [rel.get_accessor_name()
for rel
in user._meta.get_all_related_objects()
if rel.model == model]
if names:
grant = pk in [o.id
for o
in getattr(user, names[0]).all()]
if not grant:
return HttpResponseForbidden('Forbidden')
if mod_edit:
kwargs['object'] = object
return view_func(request, *args, **kwargs)
return wraps(view_func)(_wrapped_view)
return decorator | [
"def",
"owner_required",
"(",
"Model",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"view_func",
")",
":",
"def",
"_wrapped_view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"user",
"=",
"request",
".",
"user",
"grant",
"=",
"False",
"model",
"=",
"Model",
"mod_edit",
"=",
"False",
"if",
"'pk'",
"in",
"kwargs",
":",
"pk",
"=",
"int",
"(",
"kwargs",
"[",
"'pk'",
"]",
")",
"if",
"model",
":",
"mod_edit",
"=",
"True",
"elif",
"'model'",
"in",
"kwargs",
":",
"model",
"=",
"kwargs",
"[",
"'model'",
"]",
"object",
"=",
"get_object_or_404",
"(",
"model",
",",
"pk",
"=",
"pk",
")",
"if",
"user",
".",
"is_superuser",
":",
"grant",
"=",
"True",
"else",
":",
"if",
"user",
".",
"__class__",
"==",
"model",
":",
"grant",
"=",
"pk",
"==",
"user",
".",
"id",
"else",
":",
"names",
"=",
"[",
"rel",
".",
"get_accessor_name",
"(",
")",
"for",
"rel",
"in",
"user",
".",
"_meta",
".",
"get_all_related_objects",
"(",
")",
"if",
"rel",
".",
"model",
"==",
"model",
"]",
"if",
"names",
":",
"grant",
"=",
"pk",
"in",
"[",
"o",
".",
"id",
"for",
"o",
"in",
"getattr",
"(",
"user",
",",
"names",
"[",
"0",
"]",
")",
".",
"all",
"(",
")",
"]",
"if",
"not",
"grant",
":",
"return",
"HttpResponseForbidden",
"(",
"'Forbidden'",
")",
"if",
"mod_edit",
":",
"kwargs",
"[",
"'object'",
"]",
"=",
"object",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wraps",
"(",
"view_func",
")",
"(",
"_wrapped_view",
")",
"return",
"decorator"
] | Usage:
@owner_required(Entry)
def manage_entry(request, pk=None, object=None):
@owner_required()
def entry_delete(*args, **kwargs):
kwargs["post_delete_redirect"] = reverse('manage_blogs')
return delete_object(*args, **kwargs) | [
"Usage",
":"
] | train | https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/core/decorators.py#L7-L52 |
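A view decorated per the docstring's own pattern; Entry and the template path are hypothetical, and since the code relies on _meta.get_all_related_objects() (removed in Django 1.10) this decorator targets older Django versions.

from django.shortcuts import render

@owner_required(Entry)  # Entry: a hypothetical model related to the user
def manage_entry(request, pk=None, object=None):
    # the decorator injects the fetched instance as 'object'
    return render(request, 'blog/manage_entry.html', {'entry': object})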
capless/valley | valley/utils/imports.py | import_util | def import_util(imp):
'''
    Lazily imports a util (class,
    function, or variable) from a module, given
    a string.
@param imp:
'''
mod_name, obj_name = imp.rsplit('.', 1)
mod = importlib.import_module(mod_name)
return getattr(mod, obj_name) | python | def import_util(imp):
'''
    Lazily imports a util (class,
    function, or variable) from a module, given
    a string.
@param imp:
'''
mod_name, obj_name = imp.rsplit('.', 1)
mod = importlib.import_module(mod_name)
return getattr(mod, obj_name) | [
"def",
"import_util",
"(",
"imp",
")",
":",
"mod_name",
",",
"obj_name",
"=",
"imp",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"mod_name",
")",
"return",
"getattr",
"(",
"mod",
",",
"obj_name",
")"
] | Lazily imports a util (class,
    function, or variable) from a module, given
    a string.
@param imp: | [
"Lazily",
"imports",
"a",
"utils",
"(",
"class",
"function",
"or",
"variable",
")",
"from",
"a",
"module",
")",
"from",
"a",
"string",
"."
] | train | https://github.com/capless/valley/blob/491e4203e428a9e92264e204d44a1df96a570bbc/valley/utils/imports.py#L4-L14 |
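Usage is a one-liner; a standard-library example:

join = import_util('os.path.join')
print(join('tmp', 'file.txt'))  # tmp/file.txt on POSIX

# a missing module raises ImportError, a missing attribute AttributeError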
ARMmbed/autoversion | src/auto_version/cli.py | get_cli | def get_cli():
"""Load cli options"""
parser = argparse.ArgumentParser(
prog="auto_version",
description="auto version v%s: a tool to control version numbers" % __version__,
)
parser.add_argument(
"--target",
action="append",
default=[],
help="Files containing version info. "
"Assumes unique variable names between files. (default: %s)."
% (config.targets,),
)
parser.add_argument(
"--bump",
choices=SemVerSigFig,
help="Bumps the specified part of SemVer string. "
"Use this locally to correctly modify the version file.",
)
parser.add_argument(
"--news",
"--file-triggers",
action="store_true",
dest="file_triggers",
help="Detects need to bump based on presence of files (as specified in config).",
)
parser.add_argument(
"--set",
help="Set the SemVer string. Use this locally to set the project version explicitly.",
)
parser.add_argument(
"--set-patch-count",
action="store_true",
help="Sets the patch number to the commit count.",
)
parser.add_argument(
"--lock",
action="store_true",
help="Locks the SemVer string. "
"Lock will remain for another call to autoversion before being cleared.",
)
parser.add_argument(
"--release",
action="store_true",
default=False,
help="Marks as a release build, which flags the build as released.",
)
parser.add_argument(
"--version",
action="store_true",
default=False,
help="Prints the version of auto_version itself (self-version).",
)
parser.add_argument("--config", help="Configuration file path.")
parser.add_argument(
"-v",
"--verbosity",
action="count",
default=0,
help="increase output verbosity. " "can be specified multiple times",
)
return parser.parse_known_args() | python | def get_cli():
"""Load cli options"""
parser = argparse.ArgumentParser(
prog="auto_version",
description="auto version v%s: a tool to control version numbers" % __version__,
)
parser.add_argument(
"--target",
action="append",
default=[],
help="Files containing version info. "
"Assumes unique variable names between files. (default: %s)."
% (config.targets,),
)
parser.add_argument(
"--bump",
choices=SemVerSigFig,
help="Bumps the specified part of SemVer string. "
"Use this locally to correctly modify the version file.",
)
parser.add_argument(
"--news",
"--file-triggers",
action="store_true",
dest="file_triggers",
help="Detects need to bump based on presence of files (as specified in config).",
)
parser.add_argument(
"--set",
help="Set the SemVer string. Use this locally to set the project version explicitly.",
)
parser.add_argument(
"--set-patch-count",
action="store_true",
help="Sets the patch number to the commit count.",
)
parser.add_argument(
"--lock",
action="store_true",
help="Locks the SemVer string. "
"Lock will remain for another call to autoversion before being cleared.",
)
parser.add_argument(
"--release",
action="store_true",
default=False,
help="Marks as a release build, which flags the build as released.",
)
parser.add_argument(
"--version",
action="store_true",
default=False,
help="Prints the version of auto_version itself (self-version).",
)
parser.add_argument("--config", help="Configuration file path.")
parser.add_argument(
"-v",
"--verbosity",
action="count",
default=0,
help="increase output verbosity. " "can be specified multiple times",
)
return parser.parse_known_args() | [
"def",
"get_cli",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"auto_version\"",
",",
"description",
"=",
"\"auto version v%s: a tool to control version numbers\"",
"%",
"__version__",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--target\"",
",",
"action",
"=",
"\"append\"",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"Files containing version info. \"",
"\"Assumes unique variable names between files. (default: %s).\"",
"%",
"(",
"config",
".",
"targets",
",",
")",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--bump\"",
",",
"choices",
"=",
"SemVerSigFig",
",",
"help",
"=",
"\"Bumps the specified part of SemVer string. \"",
"\"Use this locally to correctly modify the version file.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--news\"",
",",
"\"--file-triggers\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"file_triggers\"",
",",
"help",
"=",
"\"Detects need to bump based on presence of files (as specified in config).\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--set\"",
",",
"help",
"=",
"\"Set the SemVer string. Use this locally to set the project version explicitly.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--set-patch-count\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Sets the patch number to the commit count.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--lock\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Locks the SemVer string. \"",
"\"Lock will remain for another call to autoversion before being cleared.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--release\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Marks as a release build, which flags the build as released.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--version\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Prints the version of auto_version itself (self-version).\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--config\"",
",",
"help",
"=",
"\"Configuration file path.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbosity\"",
",",
"action",
"=",
"\"count\"",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"increase output verbosity. \"",
"\"can be specified multiple times\"",
",",
")",
"return",
"parser",
".",
"parse_known_args",
"(",
")"
] | Load cli options | [
"Load",
"cli",
"options"
] | train | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/cli.py#L9-L71 |
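Because the function ends with parse_known_args(), callers get a (namespace, leftover-argv) pair; a sketch of consuming it:

args, extra = get_cli()
if args.version:
    print(__version__)  # module-level import, as in the source above
elif args.bump:
    print('bumping the %s field' % args.bump)
if extra:
    print('ignoring unknown arguments:', extra)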
bskinn/stdio-mgr | src/stdio_mgr/stdio_mgr.py | stdio_mgr | def stdio_mgr(in_str=""):
r"""Subsitute temporary text buffers for `stdio` in a managed context.
Context manager.
Substitutes empty :cls:`~io.StringIO`\ s for
:cls:`sys.stdout` and :cls:`sys.stderr`,
and a :cls:`TeeStdin` for :cls:`sys.stdin` within the managed context.
Upon exiting the context, the original stream objects are restored
within :mod:`sys`, and the temporary streams are closed.
Parameters
----------
in_str
|str| *(optional)* -- Initialization text for
the :cls:`TeeStdin` substitution for `stdin`.
Default is an empty string.
Yields
------
in_
:cls:`TeeStdin` -- Temporary stream for `stdin`.
out_
:cls:`~io.StringIO` -- Temporary stream for `stdout`,
initially empty.
err_
:cls:`~io.StringIO` -- Temporary stream for `stderr`,
initially empty.
"""
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
new_stdout = StringIO()
new_stderr = StringIO()
new_stdin = TeeStdin(new_stdout, in_str)
sys.stdin = new_stdin
sys.stdout = new_stdout
sys.stderr = new_stderr
yield new_stdin, new_stdout, new_stderr
sys.stdin = old_stdin
sys.stdout = old_stdout
sys.stderr = old_stderr
new_stdin.close()
new_stdout.close()
new_stderr.close() | python | def stdio_mgr(in_str=""):
r"""Subsitute temporary text buffers for `stdio` in a managed context.
Context manager.
Substitutes empty :cls:`~io.StringIO`\ s for
:cls:`sys.stdout` and :cls:`sys.stderr`,
and a :cls:`TeeStdin` for :cls:`sys.stdin` within the managed context.
Upon exiting the context, the original stream objects are restored
within :mod:`sys`, and the temporary streams are closed.
Parameters
----------
in_str
|str| *(optional)* -- Initialization text for
the :cls:`TeeStdin` substitution for `stdin`.
Default is an empty string.
Yields
------
in_
:cls:`TeeStdin` -- Temporary stream for `stdin`.
out_
:cls:`~io.StringIO` -- Temporary stream for `stdout`,
initially empty.
err_
:cls:`~io.StringIO` -- Temporary stream for `stderr`,
initially empty.
"""
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
new_stdout = StringIO()
new_stderr = StringIO()
new_stdin = TeeStdin(new_stdout, in_str)
sys.stdin = new_stdin
sys.stdout = new_stdout
sys.stderr = new_stderr
yield new_stdin, new_stdout, new_stderr
sys.stdin = old_stdin
sys.stdout = old_stdout
sys.stderr = old_stderr
new_stdin.close()
new_stdout.close()
new_stderr.close() | [
"def",
"stdio_mgr",
"(",
"in_str",
"=",
"\"\"",
")",
":",
"old_stdin",
"=",
"sys",
".",
"stdin",
"old_stdout",
"=",
"sys",
".",
"stdout",
"old_stderr",
"=",
"sys",
".",
"stderr",
"new_stdout",
"=",
"StringIO",
"(",
")",
"new_stderr",
"=",
"StringIO",
"(",
")",
"new_stdin",
"=",
"TeeStdin",
"(",
"new_stdout",
",",
"in_str",
")",
"sys",
".",
"stdin",
"=",
"new_stdin",
"sys",
".",
"stdout",
"=",
"new_stdout",
"sys",
".",
"stderr",
"=",
"new_stderr",
"yield",
"new_stdin",
",",
"new_stdout",
",",
"new_stderr",
"sys",
".",
"stdin",
"=",
"old_stdin",
"sys",
".",
"stdout",
"=",
"old_stdout",
"sys",
".",
"stderr",
"=",
"old_stderr",
"new_stdin",
".",
"close",
"(",
")",
"new_stdout",
".",
"close",
"(",
")",
"new_stderr",
".",
"close",
"(",
")"
] | r"""Subsitute temporary text buffers for `stdio` in a managed context.
Context manager.
Substitutes empty :cls:`~io.StringIO`\ s for
:cls:`sys.stdout` and :cls:`sys.stderr`,
and a :cls:`TeeStdin` for :cls:`sys.stdin` within the managed context.
Upon exiting the context, the original stream objects are restored
within :mod:`sys`, and the temporary streams are closed.
Parameters
----------
in_str
|str| *(optional)* -- Initialization text for
the :cls:`TeeStdin` substitution for `stdin`.
Default is an empty string.
Yields
------
in_
:cls:`TeeStdin` -- Temporary stream for `stdin`.
out_
:cls:`~io.StringIO` -- Temporary stream for `stdout`,
initially empty.
err_
:cls:`~io.StringIO` -- Temporary stream for `stderr`,
initially empty. | [
"r",
"Subsitute",
"temporary",
"text",
"buffers",
"for",
"stdio",
"in",
"a",
"managed",
"context",
"."
] | train | https://github.com/bskinn/stdio-mgr/blob/dff9f326528aac67d7ca0dc0a86ce3dffa3e0d39/src/stdio_mgr/stdio_mgr.py#L139-L196 |
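A usage sketch; because the temporary streams are closed on exit, any getvalue() call must happen inside the with block:

with stdio_mgr('some input\n') as (in_, out_, err_):
    print('engine output')
    reply = input()             # served from the TeeStdin substitute
    captured = out_.getvalue()  # teed stdin plus the printed text

print(reply)  # 'some input'
print(captured)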
andrewda/frc-livescore | livescore/simpleocr_utils/opencv_utils.py | draw_segments | def draw_segments(image, segments, color=(255, 0, 0), line_width=1):
"""draws segments on image"""
for segment in segments:
x, y, w, h = segment
cv2.rectangle(image, (x, y), (x + w, y + h), color, line_width) | python | def draw_segments(image, segments, color=(255, 0, 0), line_width=1):
"""draws segments on image"""
for segment in segments:
x, y, w, h = segment
cv2.rectangle(image, (x, y), (x + w, y + h), color, line_width) | [
"def",
"draw_segments",
"(",
"image",
",",
"segments",
",",
"color",
"=",
"(",
"255",
",",
"0",
",",
"0",
")",
",",
"line_width",
"=",
"1",
")",
":",
"for",
"segment",
"in",
"segments",
":",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"segment",
"cv2",
".",
"rectangle",
"(",
"image",
",",
"(",
"x",
",",
"y",
")",
",",
"(",
"x",
"+",
"w",
",",
"y",
"+",
"h",
")",
",",
"color",
",",
"line_width",
")"
] | draws segments on image | [
"draws",
"segments",
"on",
"image"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/opencv_utils.py#L105-L109 |
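A sketch of calling the helper on a blank canvas (draw_lines in the next record works the same way with a list of y positions); segments are (x, y, w, h) rows:

import cv2
import numpy

img = numpy.zeros((100, 200, 3), dtype=numpy.uint8)
segs = numpy.array([[10, 10, 30, 20], [50, 15, 25, 25]])
draw_segments(img, segs, color=(0, 255, 0), line_width=2)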
andrewda/frc-livescore | livescore/simpleocr_utils/opencv_utils.py | draw_lines | def draw_lines(image, ys, color=(255, 0, 0), line_width=1):
"""draws horizontal lines"""
for y in ys:
cv2.line(image, (0, y), (image.shape[1], y), color, line_width) | python | def draw_lines(image, ys, color=(255, 0, 0), line_width=1):
"""draws horizontal lines"""
for y in ys:
cv2.line(image, (0, y), (image.shape[1], y), color, line_width) | [
"def",
"draw_lines",
"(",
"image",
",",
"ys",
",",
"color",
"=",
"(",
"255",
",",
"0",
",",
"0",
")",
",",
"line_width",
"=",
"1",
")",
":",
"for",
"y",
"in",
"ys",
":",
"cv2",
".",
"line",
"(",
"image",
",",
"(",
"0",
",",
"y",
")",
",",
"(",
"image",
".",
"shape",
"[",
"1",
"]",
",",
"y",
")",
",",
"color",
",",
"line_width",
")"
] | draws horizontal lines | [
"draws",
"horizontal",
"lines"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/opencv_utils.py#L112-L115 |
Clinical-Genomics/trailblazer | trailblazer/mip/sacct.py | time_to_sec | def time_to_sec(time_str: str) -> int:
"""Convert time in string format to seconds.
Skipping seconds since sometimes the last column is truncated
for entries where >10 days.
"""
total_sec = 0
if '-' in time_str:
# parse out days
days, time_str = time_str.split('-')
total_sec += (int(days) * 24 * 60 * 60)
# parse out the hours and mins (skip seconds)
hours_min_raw = time_str.split(':')[:-1]
time_parts = [int(round(float(val))) for val in hours_min_raw]
total_sec += time_parts[-1] * 60 # minutes
if len(time_parts) > 1:
total_sec += time_parts[-2] * 60 * 60 # hours
return total_sec | python | def time_to_sec(time_str: str) -> int:
"""Convert time in string format to seconds.
Skipping seconds since sometimes the last column is truncated
for entries where >10 days.
"""
total_sec = 0
if '-' in time_str:
# parse out days
days, time_str = time_str.split('-')
total_sec += (int(days) * 24 * 60 * 60)
# parse out the hours and mins (skip seconds)
hours_min_raw = time_str.split(':')[:-1]
time_parts = [int(round(float(val))) for val in hours_min_raw]
total_sec += time_parts[-1] * 60 # minutes
if len(time_parts) > 1:
total_sec += time_parts[-2] * 60 * 60 # hours
return total_sec | [
"def",
"time_to_sec",
"(",
"time_str",
":",
"str",
")",
"->",
"int",
":",
"total_sec",
"=",
"0",
"if",
"'-'",
"in",
"time_str",
":",
"# parse out days",
"days",
",",
"time_str",
"=",
"time_str",
".",
"split",
"(",
"'-'",
")",
"total_sec",
"+=",
"(",
"int",
"(",
"days",
")",
"*",
"24",
"*",
"60",
"*",
"60",
")",
"# parse out the hours and mins (skip seconds)",
"hours_min_raw",
"=",
"time_str",
".",
"split",
"(",
"':'",
")",
"[",
":",
"-",
"1",
"]",
"time_parts",
"=",
"[",
"int",
"(",
"round",
"(",
"float",
"(",
"val",
")",
")",
")",
"for",
"val",
"in",
"hours_min_raw",
"]",
"total_sec",
"+=",
"time_parts",
"[",
"-",
"1",
"]",
"*",
"60",
"# minutes",
"if",
"len",
"(",
"time_parts",
")",
">",
"1",
":",
"total_sec",
"+=",
"time_parts",
"[",
"-",
"2",
"]",
"*",
"60",
"*",
"60",
"# hours",
"return",
"total_sec"
] | Convert time in string format to seconds.
Skipping seconds since sometimes the last column is truncated
for entries where >10 days. | [
"Convert",
"time",
"in",
"string",
"format",
"to",
"seconds",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/sacct.py#L9-L27 |
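Worked examples of the two SLURM duration formats handled (the seconds column is deliberately dropped):

time_to_sec('1-02:30:15')  # 1*86400 + 2*3600 + 30*60 = 95400
time_to_sec('00:45:59')    # 45*60 = 2700 (the 59 s are skipped)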
Clinical-Genomics/trailblazer | trailblazer/mip/sacct.py | convert_job | def convert_job(row: list) -> dict:
"""Convert sacct row to dict."""
state = row[-2]
start_time_raw = row[-4]
end_time_raw = row[-3]
if state not in ('PENDING', 'CANCELLED'):
start_time = datetime.strptime(start_time_raw, '%Y-%m-%dT%H:%M:%S')
if state != 'RUNNING':
end_time = datetime.strptime(end_time_raw, '%Y-%m-%dT%H:%M:%S')
else:
end_time = None
else:
start_time = end_time = None
# parse name of job
job_name = row[1]
step_name, step_context = job_name.rstrip('_BOTH').rstrip('_SV').rsplit('_', 1)
return {
'id': int(row[0]),
'name': job_name,
'step': step_name,
'context': step_context,
'state': state,
'start': start_time,
'end': end_time,
'elapsed': time_to_sec(row[-5]),
'cpu': time_to_sec(row[-6]),
'is_completed': state == 'COMPLETED',
} | python | def convert_job(row: list) -> dict:
"""Convert sacct row to dict."""
state = row[-2]
start_time_raw = row[-4]
end_time_raw = row[-3]
if state not in ('PENDING', 'CANCELLED'):
start_time = datetime.strptime(start_time_raw, '%Y-%m-%dT%H:%M:%S')
if state != 'RUNNING':
end_time = datetime.strptime(end_time_raw, '%Y-%m-%dT%H:%M:%S')
else:
end_time = None
else:
start_time = end_time = None
# parse name of job
job_name = row[1]
step_name, step_context = job_name.rstrip('_BOTH').rstrip('_SV').rsplit('_', 1)
return {
'id': int(row[0]),
'name': job_name,
'step': step_name,
'context': step_context,
'state': state,
'start': start_time,
'end': end_time,
'elapsed': time_to_sec(row[-5]),
'cpu': time_to_sec(row[-6]),
'is_completed': state == 'COMPLETED',
} | [
"def",
"convert_job",
"(",
"row",
":",
"list",
")",
"->",
"dict",
":",
"state",
"=",
"row",
"[",
"-",
"2",
"]",
"start_time_raw",
"=",
"row",
"[",
"-",
"4",
"]",
"end_time_raw",
"=",
"row",
"[",
"-",
"3",
"]",
"if",
"state",
"not",
"in",
"(",
"'PENDING'",
",",
"'CANCELLED'",
")",
":",
"start_time",
"=",
"datetime",
".",
"strptime",
"(",
"start_time_raw",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"if",
"state",
"!=",
"'RUNNING'",
":",
"end_time",
"=",
"datetime",
".",
"strptime",
"(",
"end_time_raw",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"else",
":",
"end_time",
"=",
"None",
"else",
":",
"start_time",
"=",
"end_time",
"=",
"None",
"# parse name of job",
"job_name",
"=",
"row",
"[",
"1",
"]",
"step_name",
",",
"step_context",
"=",
"job_name",
".",
"rstrip",
"(",
"'_BOTH'",
")",
".",
"rstrip",
"(",
"'_SV'",
")",
".",
"rsplit",
"(",
"'_'",
",",
"1",
")",
"return",
"{",
"'id'",
":",
"int",
"(",
"row",
"[",
"0",
"]",
")",
",",
"'name'",
":",
"job_name",
",",
"'step'",
":",
"step_name",
",",
"'context'",
":",
"step_context",
",",
"'state'",
":",
"state",
",",
"'start'",
":",
"start_time",
",",
"'end'",
":",
"end_time",
",",
"'elapsed'",
":",
"time_to_sec",
"(",
"row",
"[",
"-",
"5",
"]",
")",
",",
"'cpu'",
":",
"time_to_sec",
"(",
"row",
"[",
"-",
"6",
"]",
")",
",",
"'is_completed'",
":",
"state",
"==",
"'COMPLETED'",
",",
"}"
] | Convert sacct row to dict. | [
"Convert",
"sacct",
"row",
"to",
"dict",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/sacct.py#L30-L59 |
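One caveat worth flagging: str.rstrip('_BOTH') strips any trailing characters from the set {_, B, O, T, H} rather than the literal suffix, so step names ending in those letters can lose characters. A suffix-safe variant (a sketch, not the repository's code) could read:

def strip_suffixes(name, suffixes=('_BOTH', '_SV')):
    """Remove literal suffixes, avoiding rstrip's character-set semantics."""
    for suffix in suffixes:
        if name.endswith(suffix):
            name = name[:-len(suffix)]
    return name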
Clinical-Genomics/trailblazer | trailblazer/mip/sacct.py | parse_sacct | def parse_sacct(sacct_stream):
"""Parse out information from sacct status output."""
rows = (line.split() for line in sacct_stream)
# filter rows that begin with a SLURM job id
relevant_rows = (row for row in rows if row[0].isdigit())
jobs = [convert_job(row) for row in relevant_rows]
return jobs | python | def parse_sacct(sacct_stream):
"""Parse out information from sacct status output."""
rows = (line.split() for line in sacct_stream)
# filter rows that begin with a SLURM job id
relevant_rows = (row for row in rows if row[0].isdigit())
jobs = [convert_job(row) for row in relevant_rows]
return jobs | [
"def",
"parse_sacct",
"(",
"sacct_stream",
")",
":",
"rows",
"=",
"(",
"line",
".",
"split",
"(",
")",
"for",
"line",
"in",
"sacct_stream",
")",
"# filter rows that begin with a SLURM job id",
"relevant_rows",
"=",
"(",
"row",
"for",
"row",
"in",
"rows",
"if",
"row",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
")",
"jobs",
"=",
"[",
"convert_job",
"(",
"row",
")",
"for",
"row",
"in",
"relevant_rows",
"]",
"return",
"jobs"
] | Parse out information from sacct status output. | [
"Parse",
"out",
"information",
"from",
"sacct",
"status",
"output",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/sacct.py#L62-L68 |
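The parser accepts any iterable of lines; obtaining the text is up to the caller. One hypothetical source (the sacct flags and column layout must match what convert_job expects):

import subprocess

output = subprocess.check_output(['sacct', '--jobs', '12345'], text=True)
jobs = parse_sacct(output.splitlines())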
Clinical-Genomics/trailblazer | trailblazer/mip/sacct.py | filter_jobs | def filter_jobs(sacct_jobs, failed=True):
"""Filter jobs that have a FAILED etc. status."""
categories = FAILED_CATEGORIES if failed else NORMAL_CATEGORIES
filtered_jobs = [job for job in sacct_jobs if job['state'] in categories]
return filtered_jobs | python | def filter_jobs(sacct_jobs, failed=True):
"""Filter jobs that have a FAILED etc. status."""
categories = FAILED_CATEGORIES if failed else NORMAL_CATEGORIES
filtered_jobs = [job for job in sacct_jobs if job['state'] in categories]
return filtered_jobs | [
"def",
"filter_jobs",
"(",
"sacct_jobs",
",",
"failed",
"=",
"True",
")",
":",
"categories",
"=",
"FAILED_CATEGORIES",
"if",
"failed",
"else",
"NORMAL_CATEGORIES",
"filtered_jobs",
"=",
"[",
"job",
"for",
"job",
"in",
"sacct_jobs",
"if",
"job",
"[",
"'state'",
"]",
"in",
"categories",
"]",
"return",
"filtered_jobs"
] | Filter jobs that have a FAILED etc. status. | [
"Filter",
"jobs",
"that",
"have",
"a",
"FAILED",
"etc",
".",
"status",
"."
] | train | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/sacct.py#L71-L75 |
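FAILED_CATEGORIES and NORMAL_CATEGORIES are module-level constants not shown in this record; usage is one call either way:

failed_jobs = filter_jobs(jobs)                # keep FAILED-like states
normal_jobs = filter_jobs(jobs, failed=False)  # keep COMPLETED etc.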
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation_aux.py | guess_segments_lines | def guess_segments_lines(segments, lines, nearline_tolerance=5.0):
"""
    given segments, outputs an array of line numbers, or -1 if it
doesn't belong to any
"""
ys = segments[:, 1]
    closeness = numpy.abs(numpy.subtract.outer(ys, lines))  # each row a y, each column a distance to each line
line_of_y = numpy.argmin(closeness, axis=1)
distance = numpy.min(closeness, axis=1)
bad = distance > numpy.mean(distance) + nearline_tolerance * numpy.std(distance)
line_of_y[bad] = -1
return line_of_y | python | def guess_segments_lines(segments, lines, nearline_tolerance=5.0):
"""
    given segments, outputs an array of line numbers, or -1 if it
doesn't belong to any
"""
ys = segments[:, 1]
    closeness = numpy.abs(numpy.subtract.outer(ys, lines))  # each row a y, each column a distance to each line
line_of_y = numpy.argmin(closeness, axis=1)
distance = numpy.min(closeness, axis=1)
bad = distance > numpy.mean(distance) + nearline_tolerance * numpy.std(distance)
line_of_y[bad] = -1
return line_of_y | [
"def",
"guess_segments_lines",
"(",
"segments",
",",
"lines",
",",
"nearline_tolerance",
"=",
"5.0",
")",
":",
"ys",
"=",
"segments",
"[",
":",
",",
"1",
"]",
"closeness",
"=",
"numpy",
".",
"abs",
"(",
"numpy",
".",
"subtract",
".",
"outer",
"(",
"ys",
",",
"lines",
")",
")",
"# each row a y, each collumn a distance to each line",
"line_of_y",
"=",
"numpy",
".",
"argmin",
"(",
"closeness",
",",
"axis",
"=",
"1",
")",
"distance",
"=",
"numpy",
".",
"min",
"(",
"closeness",
",",
"axis",
"=",
"1",
")",
"bad",
"=",
"distance",
">",
"numpy",
".",
"mean",
"(",
"distance",
")",
"+",
"nearline_tolerance",
"*",
"numpy",
".",
"std",
"(",
"distance",
")",
"line_of_y",
"[",
"bad",
"]",
"=",
"-",
"1",
"return",
"line_of_y"
] | given segments, outputs an array of line numbers, or -1 if it
doesn't belong to any | [
"given",
"segments",
"outputs",
"a",
"array",
"of",
"line",
"numbers",
"or",
"-",
"1",
"if",
"it",
"doesn",
"t",
"belong",
"to",
"any"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation_aux.py#L88-L99 |
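A tiny worked example; segments are (x, y, w, h) rows and only the y column matters here:

import numpy

segments = numpy.array([[0, 9, 5, 5], [20, 11, 5, 5], [0, 50, 5, 5]])
lines = numpy.array([10, 50])
guess_segments_lines(segments, lines)  # -> array([0, 0, 1])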
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation_aux.py | contained_segments_matrix | def contained_segments_matrix(segments):
"""
    gives an n*n matrix m, n=len(segments), in which m[i,j] means
segments[i] is contained inside segments[j]
"""
x1, y1 = segments[:, 0], segments[:, 1]
x2, y2 = x1 + segments[:, 2], y1 + segments[:, 3]
n = len(segments)
x1so, x2so, y1so, y2so = list(map(numpy.argsort, (x1, x2, y1, y2)))
x1soi, x2soi, y1soi, y2soi = list(map(numpy.argsort, (x1so, x2so, y1so, y2so))) # inverse transformations
    # let rows be x1 and columns be x2. this array represents where x1<x2
o1 = numpy.triu(numpy.ones((n, n)), k=1).astype(bool)
    # let rows be x1 and columns be x2. this array represents where x1>x2
o2 = numpy.tril(numpy.ones((n, n)), k=0).astype(bool)
a_inside_b_x = o2[x1soi][:, x1soi] * o1[x2soi][:, x2soi] # (x1[a]>x1[b] and x2[a]<x2[b])
a_inside_b_y = o2[y1soi][:, y1soi] * o1[y2soi][:, y2soi] # (y1[a]>y1[b] and y2[a]<y2[b])
a_inside_b = a_inside_b_x * a_inside_b_y
return a_inside_b | python | def contained_segments_matrix(segments):
"""
    gives an n*n matrix m, n=len(segments), in which m[i,j] means
segments[i] is contained inside segments[j]
"""
x1, y1 = segments[:, 0], segments[:, 1]
x2, y2 = x1 + segments[:, 2], y1 + segments[:, 3]
n = len(segments)
x1so, x2so, y1so, y2so = list(map(numpy.argsort, (x1, x2, y1, y2)))
x1soi, x2soi, y1soi, y2soi = list(map(numpy.argsort, (x1so, x2so, y1so, y2so))) # inverse transformations
    # let rows be x1 and columns be x2. this array represents where x1<x2
o1 = numpy.triu(numpy.ones((n, n)), k=1).astype(bool)
    # let rows be x1 and columns be x2. this array represents where x1>x2
o2 = numpy.tril(numpy.ones((n, n)), k=0).astype(bool)
a_inside_b_x = o2[x1soi][:, x1soi] * o1[x2soi][:, x2soi] # (x1[a]>x1[b] and x2[a]<x2[b])
a_inside_b_y = o2[y1soi][:, y1soi] * o1[y2soi][:, y2soi] # (y1[a]>y1[b] and y2[a]<y2[b])
a_inside_b = a_inside_b_x * a_inside_b_y
return a_inside_b | [
"def",
"contained_segments_matrix",
"(",
"segments",
")",
":",
"x1",
",",
"y1",
"=",
"segments",
"[",
":",
",",
"0",
"]",
",",
"segments",
"[",
":",
",",
"1",
"]",
"x2",
",",
"y2",
"=",
"x1",
"+",
"segments",
"[",
":",
",",
"2",
"]",
",",
"y1",
"+",
"segments",
"[",
":",
",",
"3",
"]",
"n",
"=",
"len",
"(",
"segments",
")",
"x1so",
",",
"x2so",
",",
"y1so",
",",
"y2so",
"=",
"list",
"(",
"map",
"(",
"numpy",
".",
"argsort",
",",
"(",
"x1",
",",
"x2",
",",
"y1",
",",
"y2",
")",
")",
")",
"x1soi",
",",
"x2soi",
",",
"y1soi",
",",
"y2soi",
"=",
"list",
"(",
"map",
"(",
"numpy",
".",
"argsort",
",",
"(",
"x1so",
",",
"x2so",
",",
"y1so",
",",
"y2so",
")",
")",
")",
"# inverse transformations",
"# let rows be x1 and collumns be x2. this array represents where x1<x2",
"o1",
"=",
"numpy",
".",
"triu",
"(",
"numpy",
".",
"ones",
"(",
"(",
"n",
",",
"n",
")",
")",
",",
"k",
"=",
"1",
")",
".",
"astype",
"(",
"bool",
")",
"# let rows be x1 and collumns be x2. this array represents where x1>x2",
"o2",
"=",
"numpy",
".",
"tril",
"(",
"numpy",
".",
"ones",
"(",
"(",
"n",
",",
"n",
")",
")",
",",
"k",
"=",
"0",
")",
".",
"astype",
"(",
"bool",
")",
"a_inside_b_x",
"=",
"o2",
"[",
"x1soi",
"]",
"[",
":",
",",
"x1soi",
"]",
"*",
"o1",
"[",
"x2soi",
"]",
"[",
":",
",",
"x2soi",
"]",
"# (x1[a]>x1[b] and x2[a]<x2[b])",
"a_inside_b_y",
"=",
"o2",
"[",
"y1soi",
"]",
"[",
":",
",",
"y1soi",
"]",
"*",
"o1",
"[",
"y2soi",
"]",
"[",
":",
",",
"y2soi",
"]",
"# (y1[a]>y1[b] and y2[a]<y2[b])",
"a_inside_b",
"=",
"a_inside_b_x",
"*",
"a_inside_b_y",
"return",
"a_inside_b"
] | gives an n*n matrix m, n=len(segments), in which m[i,j] means
segments[i] is contained inside segments[j] | [
"givens",
"a",
"n",
"*",
"n",
"matrix",
"m",
"n",
"=",
"len",
"(",
"segments",
")",
"in",
"which",
"m",
"[",
"i",
"j",
"]",
"means",
"segments",
"[",
"i",
"]",
"is",
"contained",
"inside",
"segments",
"[",
"j",
"]"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation_aux.py#L102-L120 |
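A small check of the containment semantics, where m[i, j] is True when segment i lies strictly inside segment j:

import numpy

segs = numpy.array([[2, 2, 3, 3],     # x1=2, y1=2, x2=5, y2=5
                    [0, 0, 10, 10]])  # x1=0, y1=0, x2=10, y2=10
m = contained_segments_matrix(segs)
print(m[0, 1], m[1, 0])  # True False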
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation_aux.py | SegmentOrderer._process | def _process(self, segments):
"""sort segments in read order - left to right, up to down"""
# sort_f= lambda r: max_line_width*(r[1]/max_line_height)+r[0]
# segments= sorted(segments, key=sort_f)
# segments= segments_to_numpy( segments )
# return segments
mlh, mlw = self.max_line_height, self.max_line_width
s = segments.astype(numpy.uint32) # prevent overflows
order = mlw * (s[:, 1] // mlh) + s[:, 0]
sort_order = numpy.argsort(order)
return segments[sort_order] | python | def _process(self, segments):
"""sort segments in read order - left to right, up to down"""
# sort_f= lambda r: max_line_width*(r[1]/max_line_height)+r[0]
# segments= sorted(segments, key=sort_f)
# segments= segments_to_numpy( segments )
# return segments
mlh, mlw = self.max_line_height, self.max_line_width
s = segments.astype(numpy.uint32) # prevent overflows
order = mlw * (s[:, 1] // mlh) + s[:, 0]
sort_order = numpy.argsort(order)
return segments[sort_order] | [
"def",
"_process",
"(",
"self",
",",
"segments",
")",
":",
"# sort_f= lambda r: max_line_width*(r[1]/max_line_height)+r[0]",
"# segments= sorted(segments, key=sort_f)",
"# segments= segments_to_numpy( segments )",
"# return segments",
"mlh",
",",
"mlw",
"=",
"self",
".",
"max_line_height",
",",
"self",
".",
"max_line_width",
"s",
"=",
"segments",
".",
"astype",
"(",
"numpy",
".",
"uint32",
")",
"# prevent overflows",
"order",
"=",
"mlw",
"*",
"(",
"s",
"[",
":",
",",
"1",
"]",
"//",
"mlh",
")",
"+",
"s",
"[",
":",
",",
"0",
"]",
"sort_order",
"=",
"numpy",
".",
"argsort",
"(",
"order",
")",
"return",
"segments",
"[",
"sort_order",
"]"
] | sort segments in read order - left to right, top to bottom | [
"sort",
"segments",
"in",
"read",
"order",
"-",
"left",
"to",
"right",
"up",
"to",
"down"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation_aux.py#L11-L21 |
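The sort key is max_line_width * (y // max_line_height) + x. The class setup is not part of this record, so the sketch below fakes the two attributes purely to exercise _process:

import numpy

orderer = SegmentOrderer.__new__(SegmentOrderer)  # bypass the unknown __init__
orderer.max_line_height, orderer.max_line_width = 20, 10000

segs = numpy.array([[50, 5, 4, 4], [10, 5, 4, 4], [10, 30, 4, 4]])
orderer._process(segs)  # rows sort by y//20, then x: [10,5], [50,5], [10,30]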
andrewda/frc-livescore | livescore/simpleocr_utils/segmentation_aux.py | LineFinder._guess_lines | def _guess_lines(ys, max_lines=50, confidence_minimum=0.0):
"""guesses and returns text inter-line distance, number of lines, y_position of first line"""
ys = ys.astype(numpy.float32)
compactness_list, means_list, diffs, deviations = [], [], [], []
start_n = 1
for k in range(start_n, min(len(ys), max_lines)):
compactness, classified_points, means = cv2.kmeans(data=ys, K=k, bestLabels=None, criteria=(
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10), attempts=2, flags=cv2.KMEANS_PP_CENTERS)
means = numpy.sort(means, axis=0)
means_list.append(means)
compactness_list.append(compactness)
if k < 3:
tmp1 = [1, 2, 500, 550] # forge data for bad clusters
else:
# calculate the center of each cluster. Assuming lines are equally spaced...
tmp1 = numpy.diff(means, axis=0) # diff will be equal or very similar
tmp2 = numpy.std(tmp1) / numpy.mean(means) # so variance is minimal
tmp3 = numpy.sum((tmp1 - numpy.mean(tmp1)) ** 2) # root mean square deviation, more sensitive than std
diffs.append(tmp1)
deviations.append(tmp3)
compactness_list = numpy.diff(
numpy.log(numpy.array(compactness_list) + 0.01)) # sum small amount to avoid log(0)
deviations = numpy.array(deviations[1:])
deviations[0] = numpy.mean(deviations[1:])
compactness_list = (compactness_list - numpy.mean(compactness_list)) / numpy.std(compactness_list)
deviations = (deviations - numpy.mean(deviations)) / numpy.std(deviations)
aglomerated_metric = 0.1 * compactness_list + 0.9 * deviations
i = numpy.argmin(aglomerated_metric) + 1
lines = means_list[i]
# calculate confidence
betterness = numpy.sort(aglomerated_metric, axis=0)
confidence = (betterness[1] - betterness[0]) / (betterness[2] - betterness[1])
if confidence < confidence_minimum:
raise Exception("low confidence")
return lines | python | def _guess_lines(ys, max_lines=50, confidence_minimum=0.0):
"""guesses and returns text inter-line distance, number of lines, y_position of first line"""
ys = ys.astype(numpy.float32)
compactness_list, means_list, diffs, deviations = [], [], [], []
start_n = 1
for k in range(start_n, min(len(ys), max_lines)):
compactness, classified_points, means = cv2.kmeans(data=ys, K=k, bestLabels=None, criteria=(
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10), attempts=2, flags=cv2.KMEANS_PP_CENTERS)
means = numpy.sort(means, axis=0)
means_list.append(means)
compactness_list.append(compactness)
if k < 3:
tmp1 = [1, 2, 500, 550] # forge data for bad clusters
else:
# calculate the center of each cluster. Assuming lines are equally spaced...
tmp1 = numpy.diff(means, axis=0) # diff will be equal or very similar
tmp2 = numpy.std(tmp1) / numpy.mean(means) # so variance is minimal
tmp3 = numpy.sum((tmp1 - numpy.mean(tmp1)) ** 2) # root mean square deviation, more sensitive than std
diffs.append(tmp1)
deviations.append(tmp3)
compactness_list = numpy.diff(
numpy.log(numpy.array(compactness_list) + 0.01)) # sum small amount to avoid log(0)
deviations = numpy.array(deviations[1:])
deviations[0] = numpy.mean(deviations[1:])
compactness_list = (compactness_list - numpy.mean(compactness_list)) / numpy.std(compactness_list)
deviations = (deviations - numpy.mean(deviations)) / numpy.std(deviations)
aglomerated_metric = 0.1 * compactness_list + 0.9 * deviations
i = numpy.argmin(aglomerated_metric) + 1
lines = means_list[i]
# calculate confidence
betterness = numpy.sort(aglomerated_metric, axis=0)
confidence = (betterness[1] - betterness[0]) / (betterness[2] - betterness[1])
if confidence < confidence_minimum:
raise Exception("low confidence")
return lines | [
"def",
"_guess_lines",
"(",
"ys",
",",
"max_lines",
"=",
"50",
",",
"confidence_minimum",
"=",
"0.0",
")",
":",
"ys",
"=",
"ys",
".",
"astype",
"(",
"numpy",
".",
"float32",
")",
"compactness_list",
",",
"means_list",
",",
"diffs",
",",
"deviations",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"start_n",
"=",
"1",
"for",
"k",
"in",
"range",
"(",
"start_n",
",",
"min",
"(",
"len",
"(",
"ys",
")",
",",
"max_lines",
")",
")",
":",
"compactness",
",",
"classified_points",
",",
"means",
"=",
"cv2",
".",
"kmeans",
"(",
"data",
"=",
"ys",
",",
"K",
"=",
"k",
",",
"bestLabels",
"=",
"None",
",",
"criteria",
"=",
"(",
"cv2",
".",
"TERM_CRITERIA_EPS",
"|",
"cv2",
".",
"TERM_CRITERIA_MAX_ITER",
",",
"1",
",",
"10",
")",
",",
"attempts",
"=",
"2",
",",
"flags",
"=",
"cv2",
".",
"KMEANS_PP_CENTERS",
")",
"means",
"=",
"numpy",
".",
"sort",
"(",
"means",
",",
"axis",
"=",
"0",
")",
"means_list",
".",
"append",
"(",
"means",
")",
"compactness_list",
".",
"append",
"(",
"compactness",
")",
"if",
"k",
"<",
"3",
":",
"tmp1",
"=",
"[",
"1",
",",
"2",
",",
"500",
",",
"550",
"]",
"# forge data for bad clusters",
"else",
":",
"# calculate the center of each cluster. Assuming lines are equally spaced...",
"tmp1",
"=",
"numpy",
".",
"diff",
"(",
"means",
",",
"axis",
"=",
"0",
")",
"# diff will be equal or very similar",
"tmp2",
"=",
"numpy",
".",
"std",
"(",
"tmp1",
")",
"/",
"numpy",
".",
"mean",
"(",
"means",
")",
"# so variance is minimal",
"tmp3",
"=",
"numpy",
".",
"sum",
"(",
"(",
"tmp1",
"-",
"numpy",
".",
"mean",
"(",
"tmp1",
")",
")",
"**",
"2",
")",
"# root mean square deviation, more sensitive than std",
"diffs",
".",
"append",
"(",
"tmp1",
")",
"deviations",
".",
"append",
"(",
"tmp3",
")",
"compactness_list",
"=",
"numpy",
".",
"diff",
"(",
"numpy",
".",
"log",
"(",
"numpy",
".",
"array",
"(",
"compactness_list",
")",
"+",
"0.01",
")",
")",
"# sum small amount to avoid log(0)",
"deviations",
"=",
"numpy",
".",
"array",
"(",
"deviations",
"[",
"1",
":",
"]",
")",
"deviations",
"[",
"0",
"]",
"=",
"numpy",
".",
"mean",
"(",
"deviations",
"[",
"1",
":",
"]",
")",
"compactness_list",
"=",
"(",
"compactness_list",
"-",
"numpy",
".",
"mean",
"(",
"compactness_list",
")",
")",
"/",
"numpy",
".",
"std",
"(",
"compactness_list",
")",
"deviations",
"=",
"(",
"deviations",
"-",
"numpy",
".",
"mean",
"(",
"deviations",
")",
")",
"/",
"numpy",
".",
"std",
"(",
"deviations",
")",
"aglomerated_metric",
"=",
"0.1",
"*",
"compactness_list",
"+",
"0.9",
"*",
"deviations",
"i",
"=",
"numpy",
".",
"argmin",
"(",
"aglomerated_metric",
")",
"+",
"1",
"lines",
"=",
"means_list",
"[",
"i",
"]",
"# calculate confidence",
"betterness",
"=",
"numpy",
".",
"sort",
"(",
"aglomerated_metric",
",",
"axis",
"=",
"0",
")",
"confidence",
"=",
"(",
"betterness",
"[",
"1",
"]",
"-",
"betterness",
"[",
"0",
"]",
")",
"/",
"(",
"betterness",
"[",
"2",
"]",
"-",
"betterness",
"[",
"1",
"]",
")",
"if",
"confidence",
"<",
"confidence_minimum",
":",
"raise",
"Exception",
"(",
"\"low confidence\"",
")",
"return",
"lines"
] | guesses and returns text inter-line distance, number of lines, y_position of first line | [
"guesses",
"and",
"returns",
"text",
"inter",
"-",
"line",
"distance",
"number",
"of",
"lines",
"y_position",
"of",
"first",
"line"
] | train | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/segmentation_aux.py#L26-L63 |
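A quick way to exercise the function above is to feed it synthetic glyph y-coordinates. This is a minimal sketch, assuming `_guess_lines` is importable from the module path given in the record and that NumPy and OpenCV are installed; the line positions (100/160/220) are made up for illustration.

```python
import numpy
from livescore.simpleocr_utils.segmentation_aux import _guess_lines  # path from the record

rng = numpy.random.default_rng(0)
# y-coordinates of segment centers scattered around three equally spaced lines,
# shaped as a column vector because cv2.kmeans expects an (N, dims) float array
ys = numpy.concatenate(
    [rng.normal(y, 2.0, size=40) for y in (100.0, 160.0, 220.0)]
).reshape(-1, 1)

centers = _guess_lines(ys, max_lines=10, confidence_minimum=0.0)
print(centers)  # roughly [[100.], [160.], [220.]] -- sorted cluster centers
```

The agglomerated metric weights even spacing (0.9) much more heavily than the drop in k-means compactness (0.1), which is why the correct cluster count wins even though compactness keeps improving as k grows.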
pytroll/posttroll | posttroll/message_broadcaster.py | DesignatedReceiversSender._send_to_address | def _send_to_address(self, address, data, timeout=10):
"""send data to *address* and *port* without verification of response.
"""
# Socket to talk to server
socket = get_context().socket(REQ)
try:
socket.setsockopt(LINGER, timeout * 1000)
if address.find(":") == -1:
socket.connect("tcp://%s:%d" % (address, self.default_port))
else:
socket.connect("tcp://%s" % address)
socket.send_string(data)
message = socket.recv_string()
if message != "ok":
LOGGER.warn("invalid acknowledge received: %s" % message)
finally:
socket.close() | python | def _send_to_address(self, address, data, timeout=10):
"""send data to *address* and *port* without verification of response.
"""
# Socket to talk to server
socket = get_context().socket(REQ)
try:
socket.setsockopt(LINGER, timeout * 1000)
if address.find(":") == -1:
socket.connect("tcp://%s:%d" % (address, self.default_port))
else:
socket.connect("tcp://%s" % address)
socket.send_string(data)
message = socket.recv_string()
if message != "ok":
LOGGER.warn("invalid acknowledge received: %s" % message)
finally:
socket.close() | [
"def",
"_send_to_address",
"(",
"self",
",",
"address",
",",
"data",
",",
"timeout",
"=",
"10",
")",
":",
"# Socket to talk to server",
"socket",
"=",
"get_context",
"(",
")",
".",
"socket",
"(",
"REQ",
")",
"try",
":",
"socket",
".",
"setsockopt",
"(",
"LINGER",
",",
"timeout",
"*",
"1000",
")",
"if",
"address",
".",
"find",
"(",
"\":\"",
")",
"==",
"-",
"1",
":",
"socket",
".",
"connect",
"(",
"\"tcp://%s:%d\"",
"%",
"(",
"address",
",",
"self",
".",
"default_port",
")",
")",
"else",
":",
"socket",
".",
"connect",
"(",
"\"tcp://%s\"",
"%",
"address",
")",
"socket",
".",
"send_string",
"(",
"data",
")",
"message",
"=",
"socket",
".",
"recv_string",
"(",
")",
"if",
"message",
"!=",
"\"ok\"",
":",
"LOGGER",
".",
"warn",
"(",
"\"invalid acknowledge received: %s\"",
"%",
"message",
")",
"finally",
":",
"socket",
".",
"close",
"(",
")"
] | send data to *address* and *port* without verification of response. | [
"send",
"data",
"to",
"*",
"address",
"*",
"and",
"*",
"port",
"*",
"without",
"verification",
"of",
"response",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message_broadcaster.py#L55-L72 |
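For readers outside the posttroll codebase, the handshake above boils down to a REQ socket that sends one string and expects the literal reply "ok". Below is a standalone sketch of the same pattern (not the library API; the port value is a made-up placeholder, and `RCVTIMEO` is added because `LINGER` only bounds `close()`, not the blocking receive):

```python
import zmq

def send_with_ack(address, data, timeout=10, default_port=21200):
    # default_port is an invented placeholder for this sketch
    socket = zmq.Context.instance().socket(zmq.REQ)
    try:
        socket.setsockopt(zmq.LINGER, timeout * 1000)    # bounds close() on unsent data
        socket.setsockopt(zmq.RCVTIMEO, timeout * 1000)  # bounds the wait for the reply
        if ":" not in address:
            socket.connect("tcp://%s:%d" % (address, default_port))
        else:
            socket.connect("tcp://%s" % address)
        socket.send_string(data)
        if socket.recv_string() != "ok":  # raises zmq.Again if the reply times out
            print("invalid acknowledge received")
    finally:
        socket.close()
```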
pytroll/posttroll | posttroll/message_broadcaster.py | MessageBroadcaster._run | def _run(self):
"""Broadcasts forever.
"""
self._is_running = True
network_fail = False
try:
while self._do_run:
try:
if network_fail is True:
LOGGER.info("Network connection re-established!")
network_fail = False
self._sender(self._message)
except IOError as err:
if err.errno == errno.ENETUNREACH:
LOGGER.error("Network unreachable. "
"Trying again in %d s.",
self._interval)
network_fail = True
else:
raise
time.sleep(self._interval)
finally:
self._is_running = False
self._sender.close() | python | def _run(self):
"""Broadcasts forever.
"""
self._is_running = True
network_fail = False
try:
while self._do_run:
try:
if network_fail is True:
LOGGER.info("Network connection re-established!")
network_fail = False
self._sender(self._message)
except IOError as err:
if err.errno == errno.ENETUNREACH:
LOGGER.error("Network unreachable. "
"Trying again in %d s.",
self._interval)
network_fail = True
else:
raise
time.sleep(self._interval)
finally:
self._is_running = False
self._sender.close() | [
"def",
"_run",
"(",
"self",
")",
":",
"self",
".",
"_is_running",
"=",
"True",
"network_fail",
"=",
"False",
"try",
":",
"while",
"self",
".",
"_do_run",
":",
"try",
":",
"if",
"network_fail",
"is",
"True",
":",
"LOGGER",
".",
"info",
"(",
"\"Network connection re-established!\"",
")",
"network_fail",
"=",
"False",
"self",
".",
"_sender",
"(",
"self",
".",
"_message",
")",
"except",
"IOError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errno",
".",
"ENETUNREACH",
":",
"LOGGER",
".",
"error",
"(",
"\"Network unreachable. \"",
"\"Trying again in %d s.\"",
",",
"self",
".",
"_interval",
")",
"network_fail",
"=",
"True",
"else",
":",
"raise",
"time",
".",
"sleep",
"(",
"self",
".",
"_interval",
")",
"finally",
":",
"self",
".",
"_is_running",
"=",
"False",
"self",
".",
"_sender",
".",
"close",
"(",
")"
] | Broadcasts forever. | [
"Broadcasts",
"forever",
"."
] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message_broadcaster.py#L127-L150 |
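The interesting part of `_run` is its error policy: `ENETUNREACH` is swallowed and retried on the next tick, while any other I/O error propagates. The same pattern, distilled into a free function (a sketch, not the library API):

```python
import errno
import time

def broadcast_forever(send, message, interval=2.0):
    network_fail = False
    while True:
        try:
            if network_fail:
                print("Network connection re-established!")
                network_fail = False
            send(message)
        except IOError as err:
            if err.errno == errno.ENETUNREACH:
                # transient outage: note it and keep trying on the next tick
                print("Network unreachable. Trying again in %s s." % interval)
                network_fail = True
            else:
                raise  # any other I/O error is fatal
        time.sleep(interval)
```

`broadcast_forever(print, "beacon", 1.0)` would emit "beacon" once a second, pausing gracefully while the network is unreachable.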
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.from_json | def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json) | python | def from_json(self, json):
"""Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data.
"""
res_type = json['sys']['type']
if ResourceType.Array.value == res_type:
return self.create_array(json)
elif ResourceType.Entry.value == res_type:
return self.create_entry(json)
elif ResourceType.Asset.value == res_type:
return ResourceFactory.create_asset(json)
elif ResourceType.ContentType.value == res_type:
return ResourceFactory.create_content_type(json)
elif ResourceType.Space.value == res_type:
return ResourceFactory.create_space(json) | [
"def",
"from_json",
"(",
"self",
",",
"json",
")",
":",
"res_type",
"=",
"json",
"[",
"'sys'",
"]",
"[",
"'type'",
"]",
"if",
"ResourceType",
".",
"Array",
".",
"value",
"==",
"res_type",
":",
"return",
"self",
".",
"create_array",
"(",
"json",
")",
"elif",
"ResourceType",
".",
"Entry",
".",
"value",
"==",
"res_type",
":",
"return",
"self",
".",
"create_entry",
"(",
"json",
")",
"elif",
"ResourceType",
".",
"Asset",
".",
"value",
"==",
"res_type",
":",
"return",
"ResourceFactory",
".",
"create_asset",
"(",
"json",
")",
"elif",
"ResourceType",
".",
"ContentType",
".",
"value",
"==",
"res_type",
":",
"return",
"ResourceFactory",
".",
"create_content_type",
"(",
"json",
")",
"elif",
"ResourceType",
".",
"Space",
".",
"value",
"==",
"res_type",
":",
"return",
"ResourceFactory",
".",
"create_space",
"(",
"json",
")"
] | Create resource out of JSON data.
:param json: JSON dict.
:return: Resource with a type defined by the given JSON data. | [
"Create",
"resource",
"out",
"of",
"JSON",
"data",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L34-L51 |
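`from_json` is a type dispatch on `sys.type`. A table-driven equivalent reads the same way and makes the mapping explicit; this is a sketch only, assuming each `ResourceType` member's `.value` is the literal type string of its branch:

```python
def from_json(self, json):
    dispatch = {
        'Array': self.create_array,
        'Entry': self.create_entry,
        'Asset': ResourceFactory.create_asset,
        'ContentType': ResourceFactory.create_content_type,
        'Space': ResourceFactory.create_space,
    }
    handler = dispatch.get(json['sys']['type'])
    # like the if/elif chain, an unknown type falls through to None
    return handler(json) if handler else None
```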
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_entry | def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result | python | def create_entry(self, json):
"""Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance.
"""
sys = json['sys']
ct = sys['contentType']['sys']['id']
fields = json['fields']
raw_fields = copy.deepcopy(fields)
# Replace links with :class:`.resources.ResourceLink` objects.
for k, v in fields.items():
link = ResourceFactory._extract_link(v)
if link is not None:
fields[k] = link
elif isinstance(v, list):
for idx, ele in enumerate(v):
link = ResourceFactory._extract_link(ele)
if link is not None:
v[idx] = link
if ct in self.entries_mapping:
clazz = self.entries_mapping[ct]
result = clazz()
for k, v in clazz.__entry_fields__.items():
field_value = fields.get(v.field_id)
if field_value is not None:
setattr(result, k, ResourceFactory.convert_value(field_value, v))
else:
result = Entry()
result.sys = sys
result.fields = fields
result.raw_fields = raw_fields
return result | [
"def",
"create_entry",
"(",
"self",
",",
"json",
")",
":",
"sys",
"=",
"json",
"[",
"'sys'",
"]",
"ct",
"=",
"sys",
"[",
"'contentType'",
"]",
"[",
"'sys'",
"]",
"[",
"'id'",
"]",
"fields",
"=",
"json",
"[",
"'fields'",
"]",
"raw_fields",
"=",
"copy",
".",
"deepcopy",
"(",
"fields",
")",
"# Replace links with :class:`.resources.ResourceLink` objects.",
"for",
"k",
",",
"v",
"in",
"fields",
".",
"items",
"(",
")",
":",
"link",
"=",
"ResourceFactory",
".",
"_extract_link",
"(",
"v",
")",
"if",
"link",
"is",
"not",
"None",
":",
"fields",
"[",
"k",
"]",
"=",
"link",
"elif",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"for",
"idx",
",",
"ele",
"in",
"enumerate",
"(",
"v",
")",
":",
"link",
"=",
"ResourceFactory",
".",
"_extract_link",
"(",
"ele",
")",
"if",
"link",
"is",
"not",
"None",
":",
"v",
"[",
"idx",
"]",
"=",
"link",
"if",
"ct",
"in",
"self",
".",
"entries_mapping",
":",
"clazz",
"=",
"self",
".",
"entries_mapping",
"[",
"ct",
"]",
"result",
"=",
"clazz",
"(",
")",
"for",
"k",
",",
"v",
"in",
"clazz",
".",
"__entry_fields__",
".",
"items",
"(",
")",
":",
"field_value",
"=",
"fields",
".",
"get",
"(",
"v",
".",
"field_id",
")",
"if",
"field_value",
"is",
"not",
"None",
":",
"setattr",
"(",
"result",
",",
"k",
",",
"ResourceFactory",
".",
"convert_value",
"(",
"field_value",
",",
"v",
")",
")",
"else",
":",
"result",
"=",
"Entry",
"(",
")",
"result",
".",
"sys",
"=",
"sys",
"result",
".",
"fields",
"=",
"fields",
"result",
".",
"raw_fields",
"=",
"raw_fields",
"return",
"result"
] | Create :class:`.resources.Entry` from JSON.
:param json: JSON dict.
:return: Entry instance. | [
"Create",
":",
"class",
":",
".",
"resources",
".",
"Entry",
"from",
"JSON",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L64-L101 |
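The JSON shape `create_entry` consumes looks like the sample below. The ids and field names are invented for illustration; the nested link dict is what `_extract_link` replaces with a `ResourceLink`:

```python
entry_json = {
    'sys': {
        'type': 'Entry',
        'id': 'entry-1',
        'contentType': {'sys': {'id': 'post'}},  # 'post' not in entries_mapping,
    },                                           # so a plain Entry is returned
    'fields': {
        'title': 'Hello',
        'image': {'sys': {'type': 'Link', 'linkType': 'Asset', 'id': 'img-1'}},
    },
}

entry = factory.create_entry(entry_json)   # `factory` is a ResourceFactory
# entry.fields['image'] is now a ResourceLink; entry.raw_fields keeps the raw dict
```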
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_asset | def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result | python | def create_asset(json):
"""Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
"""
result = Asset(json['sys'])
file_dict = json['fields']['file']
result.fields = json['fields']
result.url = file_dict['url']
result.mimeType = file_dict['contentType']
return result | [
"def",
"create_asset",
"(",
"json",
")",
":",
"result",
"=",
"Asset",
"(",
"json",
"[",
"'sys'",
"]",
")",
"file_dict",
"=",
"json",
"[",
"'fields'",
"]",
"[",
"'file'",
"]",
"result",
".",
"fields",
"=",
"json",
"[",
"'fields'",
"]",
"result",
".",
"url",
"=",
"file_dict",
"[",
"'url'",
"]",
"result",
".",
"mimeType",
"=",
"file_dict",
"[",
"'contentType'",
"]",
"return",
"result"
] | Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance. | [
"Create",
":",
"class",
":",
".",
"resources",
".",
"Asset",
"from",
"JSON",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L104-L115 |
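`create_asset` only needs `sys` plus a `fields.file` dict carrying `url` and `contentType`; a minimal, hand-written sample (values are illustrative):

```python
asset_json = {
    'sys': {'type': 'Asset', 'id': 'img-1'},
    'fields': {
        'file': {
            'url': '//images.example.com/cat.png',
            'contentType': 'image/png',
        },
    },
}

asset = ResourceFactory.create_asset(asset_json)  # static, no factory instance needed
print(asset.url, asset.mimeType)                  # //images.example.com/cat.png image/png
```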
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_content_type | def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result | python | def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result | [
"def",
"create_content_type",
"(",
"json",
")",
":",
"result",
"=",
"ContentType",
"(",
"json",
"[",
"'sys'",
"]",
")",
"for",
"field",
"in",
"json",
"[",
"'fields'",
"]",
":",
"field_id",
"=",
"field",
"[",
"'id'",
"]",
"del",
"field",
"[",
"'id'",
"]",
"result",
".",
"fields",
"[",
"field_id",
"]",
"=",
"field",
"result",
".",
"name",
"=",
"json",
"[",
"'name'",
"]",
"result",
".",
"display_field",
"=",
"json",
".",
"get",
"(",
"'displayField'",
")",
"return",
"result"
] | Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance. | [
"Create",
":",
"class",
":",
".",
"resource",
".",
"ContentType",
"from",
"JSON",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L118-L134 |
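A minimal payload for `create_content_type`; note how each field's `id` key is popped and reused as the key in `result.fields` (values invented for illustration):

```python
ct_json = {
    'sys': {'type': 'ContentType', 'id': 'post'},
    'name': 'Post',
    'displayField': 'title',
    'fields': [
        {'id': 'title', 'name': 'Title', 'type': 'Text'},
    ],
}

ct = ResourceFactory.create_content_type(ct_json)
print(ct.fields['title'])   # {'name': 'Title', 'type': 'Text'} -- 'id' was removed
```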
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.convert_value | def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value | python | def convert_value(value, field):
"""Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value.
"""
clz = field.field_type
if clz is Boolean:
if not isinstance(value, bool):
return bool(value)
elif clz is Date:
if not isinstance(value, str):
value = str(value)
return parser.parse(value)
elif clz is Number:
if not isinstance(value, int):
return int(value)
elif clz is Object:
if not isinstance(value, dict):
return ast.literal_eval(value)
elif clz is Text or clz is Symbol:
if not isinstance(value, str):
return str(value)
elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
if not isinstance(value, list):
return [value]
# No need to convert :class:`.fields.Link` types as the expected value
# should be of type :class:`.resources.ResourceLink` for links.
return value | [
"def",
"convert_value",
"(",
"value",
",",
"field",
")",
":",
"clz",
"=",
"field",
".",
"field_type",
"if",
"clz",
"is",
"Boolean",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"bool",
"(",
"value",
")",
"elif",
"clz",
"is",
"Date",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"return",
"parser",
".",
"parse",
"(",
"value",
")",
"elif",
"clz",
"is",
"Number",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"int",
"(",
"value",
")",
"elif",
"clz",
"is",
"Object",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"ast",
".",
"literal_eval",
"(",
"value",
")",
"elif",
"clz",
"is",
"Text",
"or",
"clz",
"is",
"Symbol",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"clz",
"is",
"List",
"or",
"clz",
"is",
"MultipleAssets",
"or",
"clz",
"is",
"MultipleEntries",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"[",
"value",
"]",
"# No need to convert :class:`.fields.Link` types as the expected value",
"# should be of type :class:`.resources.ResourceLink` for links.",
"return",
"value"
] | Given a :class:`.fields.Field` and a value, ensure that the value matches the given type, otherwise
attempt to convert it.
:param value: field value.
:param field: :class:`.fields.Field` instance.
:return: Result value. | [
"Given",
"a",
":",
"class",
":",
".",
"fields",
".",
"Field",
"and",
"a",
"value",
"ensure",
"that",
"the",
"value",
"matches",
"the",
"given",
"type",
"otherwise",
"attempt",
"to",
"convert",
"it",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L148-L186 |
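Because `convert_value` only reads `field.field_type`, any object carrying that attribute is enough to demonstrate the coercions. The sketch below uses a stand-in namespace rather than the real `Field` class (whose constructor is not shown in this record), and the import path for `Text`, `Date`, and `List` is an assumption:

```python
from types import SimpleNamespace
from contentful.cda.fields import Text, Date, List  # import path assumed

assert ResourceFactory.convert_value(123, SimpleNamespace(field_type=Text)) == '123'
assert ResourceFactory.convert_value('x', SimpleNamespace(field_type=List)) == ['x']

dt = ResourceFactory.convert_value('2014-01-02', SimpleNamespace(field_type=Date))
print(dt.year)  # 2014 -- strings are run through dateutil's parser
```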
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.process_array_items | def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed) | python | def process_array_items(self, array, json):
"""Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
for item in json['items']:
key = None
processed = self.from_json(item)
if isinstance(processed, Asset):
key = 'Asset'
elif isinstance(processed, Entry):
key = 'Entry'
if key is not None:
array.items_mapped[key][processed.sys['id']] = processed
array.items.append(processed) | [
"def",
"process_array_items",
"(",
"self",
",",
"array",
",",
"json",
")",
":",
"for",
"item",
"in",
"json",
"[",
"'items'",
"]",
":",
"key",
"=",
"None",
"processed",
"=",
"self",
".",
"from_json",
"(",
"item",
")",
"if",
"isinstance",
"(",
"processed",
",",
"Asset",
")",
":",
"key",
"=",
"'Asset'",
"elif",
"isinstance",
"(",
"processed",
",",
"Entry",
")",
":",
"key",
"=",
"'Entry'",
"if",
"key",
"is",
"not",
"None",
":",
"array",
".",
"items_mapped",
"[",
"key",
"]",
"[",
"processed",
".",
"sys",
"[",
"'id'",
"]",
"]",
"=",
"processed",
"array",
".",
"items",
".",
"append",
"(",
"processed",
")"
] | Iterate through all `items` and create a resource for each.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary. | [
"Iterate",
"through",
"all",
"items",
"and",
"create",
"a",
"resource",
"for",
"each",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L189-L209 |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.process_array_includes | def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed | python | def process_array_includes(self, array, json):
"""Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary.
"""
includes = json.get('includes') or {}
for key in array.items_mapped.keys():
if key in includes:
for resource in includes[key]:
processed = self.from_json(resource)
array.items_mapped[key][processed.sys['id']] = processed | [
"def",
"process_array_includes",
"(",
"self",
",",
"array",
",",
"json",
")",
":",
"includes",
"=",
"json",
".",
"get",
"(",
"'includes'",
")",
"or",
"{",
"}",
"for",
"key",
"in",
"array",
".",
"items_mapped",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"includes",
":",
"for",
"resource",
"in",
"includes",
"[",
"key",
"]",
":",
"processed",
"=",
"self",
".",
"from_json",
"(",
"resource",
")",
"array",
".",
"items_mapped",
"[",
"key",
"]",
"[",
"processed",
".",
"sys",
"[",
"'id'",
"]",
"]",
"=",
"processed"
] | Iterate through all `includes` and create a resource for every item.
In addition map the resources under the `items_mapped` by the resource id and type.
:param array: Array resource.
:param json: Raw JSON dictionary. | [
"Iterate",
"through",
"all",
"includes",
"and",
"create",
"a",
"resource",
"for",
"every",
"item",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L211-L224 |
contentful-labs/contentful.py | contentful/cda/serialization.py | ResourceFactory.create_array | def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result | python | def create_array(self, json):
"""Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance.
"""
result = Array(json['sys'])
result.total = json['total']
result.skip = json['skip']
result.limit = json['limit']
result.items = []
result.items_mapped = {'Asset': {}, 'Entry': {}}
self.process_array_items(result, json)
self.process_array_includes(result, json)
return result | [
"def",
"create_array",
"(",
"self",
",",
"json",
")",
":",
"result",
"=",
"Array",
"(",
"json",
"[",
"'sys'",
"]",
")",
"result",
".",
"total",
"=",
"json",
"[",
"'total'",
"]",
"result",
".",
"skip",
"=",
"json",
"[",
"'skip'",
"]",
"result",
".",
"limit",
"=",
"json",
"[",
"'limit'",
"]",
"result",
".",
"items",
"=",
"[",
"]",
"result",
".",
"items_mapped",
"=",
"{",
"'Asset'",
":",
"{",
"}",
",",
"'Entry'",
":",
"{",
"}",
"}",
"self",
".",
"process_array_items",
"(",
"result",
",",
"json",
")",
"self",
".",
"process_array_includes",
"(",
"result",
",",
"json",
")",
"return",
"result"
] | Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance. | [
"Create",
":",
"class",
":",
".",
"resources",
".",
"Array",
"from",
"JSON",
"."
] | train | https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/serialization.py#L226-L242 |
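Putting the three array helpers together: an `Array` payload carries paging info, `items`, and optional `includes`, and `create_array` indexes everything by resource id. A hand-written sample, reusing the illustrative `entry_json`/`asset_json` shapes sketched above:

```python
array_json = {
    'sys': {'type': 'Array'},
    'total': 1, 'skip': 0, 'limit': 100,
    'items': [entry_json],                 # handled by process_array_items
    'includes': {'Asset': [asset_json]},   # handled by process_array_includes
}

array = factory.create_array(array_json)
print(array.total, len(array.items))       # 1 1
print(list(array.items_mapped['Asset']))   # ['img-1'] -- includes indexed by id
```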
fudge-py/fudge | fudge/__init__.py | with_fakes | def with_fakes(method):
"""Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards.
"""
@wraps(method)
def apply_clear_and_verify(*args, **kw):
clear_calls()
method(*args, **kw)
verify() # if no exceptions
return apply_clear_and_verify | python | def with_fakes(method):
"""Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards.
"""
@wraps(method)
def apply_clear_and_verify(*args, **kw):
clear_calls()
method(*args, **kw)
verify() # if no exceptions
return apply_clear_and_verify | [
"def",
"with_fakes",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"apply_clear_and_verify",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"clear_calls",
"(",
")",
"method",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"verify",
"(",
")",
"# if no exceptions",
"return",
"apply_clear_and_verify"
] | Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards. | [
"Decorator",
"that",
"calls",
":",
"func",
":",
"fudge",
".",
"clear_calls",
"before",
"method",
"()",
"and",
":",
"func",
":",
"fudge",
".",
"verify",
"afterwards",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L162-L170 |
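In practice the decorator wraps each test function so that every fake starts from a clean slate and is verified afterwards; a sketch of typical use:

```python
import fudge

@fudge.with_fakes
def test_greeting():
    greeter = fudge.Fake('greeter').expects('hello').with_args('world')
    greeter.hello('world')
    # clear_calls() ran before the body; verify() runs after it and would
    # raise AssertionError if hello('world') had not been called
```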
fudge-py/fudge | fudge/__init__.py | Registry.clear_calls | def clear_calls(self):
"""Clears out any calls that were made on previously
registered fake objects and resets all call stacks.
You do not need to use this directly. Use fudge.clear_calls()
"""
self.clear_actual_calls()
for stack in self.call_stacks:
stack.reset()
for fake, call_order in self.get_expected_call_order().items():
call_order.reset_calls() | python | def clear_calls(self):
"""Clears out any calls that were made on previously
registered fake objects and resets all call stacks.
You do not need to use this directly. Use fudge.clear_calls()
"""
self.clear_actual_calls()
for stack in self.call_stacks:
stack.reset()
for fake, call_order in self.get_expected_call_order().items():
call_order.reset_calls() | [
"def",
"clear_calls",
"(",
"self",
")",
":",
"self",
".",
"clear_actual_calls",
"(",
")",
"for",
"stack",
"in",
"self",
".",
"call_stacks",
":",
"stack",
".",
"reset",
"(",
")",
"for",
"fake",
",",
"call_order",
"in",
"self",
".",
"get_expected_call_order",
"(",
")",
".",
"items",
"(",
")",
":",
"call_order",
".",
"reset_calls",
"(",
")"
] | Clears out any calls that were made on previously
registered fake objects and resets all call stacks.
You do not need to use this directly. Use fudge.clear_calls() | [
"Clears",
"out",
"any",
"calls",
"that",
"were",
"made",
"on",
"previously",
"registered",
"fake",
"objects",
"and",
"resets",
"all",
"call",
"stacks",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L43-L53 |
fudge-py/fudge | fudge/__init__.py | Registry.verify | def verify(self):
"""Ensure all expected calls were called,
raise AssertionError otherwise.
You do not need to use this directly. Use fudge.verify()
"""
try:
for exp in self.get_expected_calls():
exp.assert_called()
exp.assert_times_called()
for fake, call_order in self.get_expected_call_order().items():
call_order.assert_order_met(finalize=True)
finally:
self.clear_calls() | python | def verify(self):
"""Ensure all expected calls were called,
raise AssertionError otherwise.
You do not need to use this directly. Use fudge.verify()
"""
try:
for exp in self.get_expected_calls():
exp.assert_called()
exp.assert_times_called()
for fake, call_order in self.get_expected_call_order().items():
call_order.assert_order_met(finalize=True)
finally:
self.clear_calls() | [
"def",
"verify",
"(",
"self",
")",
":",
"try",
":",
"for",
"exp",
"in",
"self",
".",
"get_expected_calls",
"(",
")",
":",
"exp",
".",
"assert_called",
"(",
")",
"exp",
".",
"assert_times_called",
"(",
")",
"for",
"fake",
",",
"call_order",
"in",
"self",
".",
"get_expected_call_order",
"(",
")",
".",
"items",
"(",
")",
":",
"call_order",
".",
"assert_order_met",
"(",
"finalize",
"=",
"True",
")",
"finally",
":",
"self",
".",
"clear_calls",
"(",
")"
] | Ensure all expected calls were called,
raise AssertionError otherwise.
You do not need to use this directly. Use fudge.verify() | [
"Ensure",
"all",
"expected",
"calls",
"were",
"called",
"raise",
"AssertionError",
"otherwise",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L87-L100 |
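Outside the decorator, the same lifecycle is driven by hand: `clear_calls()` before exercising code, `verify()` afterwards. A sketch of the failure path:

```python
import fudge

db = fudge.Fake('db').expects('connect')

fudge.clear_calls()            # reset call stacks and expected orders
try:
    fudge.verify()             # connect() was never made
except AssertionError as exc:
    print(exc)                 # fake:db.connect() was not called
finally:
    fudge.clear_expectations() # so this fake does not leak into other tests
```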
fudge-py/fudge | fudge/__init__.py | ExpectedCallOrder.assert_order_met | def assert_order_met(self, finalize=False):
"""assert that calls have been made in the right order."""
error = None
actual_call_len = len(self._actual_calls)
expected_call_len = len(self._call_order)
if actual_call_len == 0:
error = "Not enough calls were made"
else:
for i,call in enumerate(self._call_order):
if actual_call_len < i+1:
if not finalize:
# we're not done asserting calls so
# forget about not having enough calls
continue
calls_made = len(self._actual_calls)
if calls_made == 1:
error = "Only 1 call was made"
else:
error = "Only %s calls were made" % calls_made
break
ac_call = self._actual_calls[i]
if ac_call is not call:
error = "Call #%s was %r" % (i+1, ac_call)
break
if not error:
if actual_call_len > expected_call_len:
# only show the first extra call since this
# will be triggered before all calls are finished:
error = "#%s %s was unexpected" % (
expected_call_len+1,
self._actual_calls[expected_call_len]
)
if error:
msg = "%s; Expected: %s" % (
error, self._repr_call_list(self._call_order))
raise AssertionError(msg) | python | def assert_order_met(self, finalize=False):
"""assert that calls have been made in the right order."""
error = None
actual_call_len = len(self._actual_calls)
expected_call_len = len(self._call_order)
if actual_call_len == 0:
error = "Not enough calls were made"
else:
for i,call in enumerate(self._call_order):
if actual_call_len < i+1:
if not finalize:
# we're not done asserting calls so
# forget about not having enough calls
continue
calls_made = len(self._actual_calls)
if calls_made == 1:
error = "Only 1 call was made"
else:
error = "Only %s calls were made" % calls_made
break
ac_call = self._actual_calls[i]
if ac_call is not call:
error = "Call #%s was %r" % (i+1, ac_call)
break
if not error:
if actual_call_len > expected_call_len:
# only show the first extra call since this
# will be triggered before all calls are finished:
error = "#%s %s was unexpected" % (
expected_call_len+1,
self._actual_calls[expected_call_len]
)
if error:
msg = "%s; Expected: %s" % (
error, self._repr_call_list(self._call_order))
raise AssertionError(msg) | [
"def",
"assert_order_met",
"(",
"self",
",",
"finalize",
"=",
"False",
")",
":",
"error",
"=",
"None",
"actual_call_len",
"=",
"len",
"(",
"self",
".",
"_actual_calls",
")",
"expected_call_len",
"=",
"len",
"(",
"self",
".",
"_call_order",
")",
"if",
"actual_call_len",
"==",
"0",
":",
"error",
"=",
"\"Not enough calls were made\"",
"else",
":",
"for",
"i",
",",
"call",
"in",
"enumerate",
"(",
"self",
".",
"_call_order",
")",
":",
"if",
"actual_call_len",
"<",
"i",
"+",
"1",
":",
"if",
"not",
"finalize",
":",
"# we're not done asserting calls so",
"# forget about not having enough calls",
"continue",
"calls_made",
"=",
"len",
"(",
"self",
".",
"_actual_calls",
")",
"if",
"calls_made",
"==",
"1",
":",
"error",
"=",
"\"Only 1 call was made\"",
"else",
":",
"error",
"=",
"\"Only %s calls were made\"",
"%",
"calls_made",
"break",
"ac_call",
"=",
"self",
".",
"_actual_calls",
"[",
"i",
"]",
"if",
"ac_call",
"is",
"not",
"call",
":",
"error",
"=",
"\"Call #%s was %r\"",
"%",
"(",
"i",
"+",
"1",
",",
"ac_call",
")",
"break",
"if",
"not",
"error",
":",
"if",
"actual_call_len",
">",
"expected_call_len",
":",
"# only show the first extra call since this",
"# will be triggered before all calls are finished:",
"error",
"=",
"\"#%s %s was unexpected\"",
"%",
"(",
"expected_call_len",
"+",
"1",
",",
"self",
".",
"_actual_calls",
"[",
"expected_call_len",
"]",
")",
"if",
"error",
":",
"msg",
"=",
"\"%s; Expected: %s\"",
"%",
"(",
"error",
",",
"self",
".",
"_repr_call_list",
"(",
"self",
".",
"_call_order",
")",
")",
"raise",
"AssertionError",
"(",
"msg",
")"
] | assert that calls have been made in the right order. | [
"assert",
"that",
"calls",
"have",
"been",
"made",
"in",
"the",
"right",
"order",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L482-L522 |
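`assert_order_met` is what fires behind `remember_order()`; its "not enough calls" branch (taken only when `finalize=True`, i.e. from `verify()`) can be seen through the public API:

```python
import fudge

db = fudge.Fake('db').remember_order().expects('insert').expects('update')
fudge.clear_calls()
db.insert()                    # first expected call made, second omitted
try:
    fudge.verify()             # finalize=True path of assert_order_met
except AssertionError as exc:
    print(exc)                 # Only 1 call was made; Expected: #1 fake:db.insert(), ...
finally:
    fudge.clear_expectations()
```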
fudge-py/fudge | fudge/__init__.py | Fake.expects_call | def expects_call(self):
"""The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
self._callable = ExpectedCall(self, call_name=self._name,
callable=True)
return self | python | def expects_call(self):
"""The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
self._callable = ExpectedCall(self, call_name=self._name,
callable=True)
return self | [
"def",
"expects_call",
"(",
"self",
")",
":",
"self",
".",
"_callable",
"=",
"ExpectedCall",
"(",
"self",
",",
"call_name",
"=",
"self",
".",
"_name",
",",
"callable",
"=",
"True",
")",
"return",
"self"
] | The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations() | [
"The",
"fake",
"must",
"be",
"called",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L777-L805 |
fudge-py/fudge | fudge/__init__.py | Fake.is_callable | def is_callable(self):
"""The fake can be called.
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = Fake('os.remove').is_callable()
>>> remove('some/path')
"""
self._callable = Call(self, call_name=self._name, callable=True)
return self | python | def is_callable(self):
"""The fake can be called.
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = Fake('os.remove').is_callable()
>>> remove('some/path')
"""
self._callable = Call(self, call_name=self._name, callable=True)
return self | [
"def",
"is_callable",
"(",
"self",
")",
":",
"self",
".",
"_callable",
"=",
"Call",
"(",
"self",
",",
"call_name",
"=",
"self",
".",
"_name",
",",
"callable",
"=",
"True",
")",
"return",
"self"
] | The fake can be called.
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = Fake('os.remove').is_callable()
>>> remove('some/path') | [
"The",
"fake",
"can",
"be",
"called",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L807-L819 |
fudge-py/fudge | fudge/__init__.py | Fake.calls | def calls(self, call):
"""Redefine a call.
The fake method will execute your function. I.E.::
>>> f = Fake().provides('hello').calls(lambda: 'Why, hello there')
>>> f.hello()
'Why, hello there'
"""
exp = self._get_current_call()
exp.call_replacement = call
return self | python | def calls(self, call):
"""Redefine a call.
The fake method will execute your function. I.E.::
>>> f = Fake().provides('hello').calls(lambda: 'Why, hello there')
>>> f.hello()
'Why, hello there'
"""
exp = self._get_current_call()
exp.call_replacement = call
return self | [
"def",
"calls",
"(",
"self",
",",
"call",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"exp",
".",
"call_replacement",
"=",
"call",
"return",
"self"
] | Redefine a call.
The fake method will execute your function. I.E.::
>>> f = Fake().provides('hello').calls(lambda: 'Why, hello there')
>>> f.hello()
'Why, hello there' | [
"Redefine",
"a",
"call",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L832-L844 |
fudge-py/fudge | fudge/__init__.py | Fake.expects | def expects(self, call_name):
"""Expect a call.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
If the method *call_name* is never called, then raise an error. I.E.::
>>> session = Fake('session').expects('open').expects('close')
>>> session.open()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:session.close() was not called
.. note::
If you want to also verify the order these calls are made in,
use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`
after ``expects(...)``, each new call will be part of the expected order
Declaring ``expects()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name)
self._last_declared_call_name = call_name
c = ExpectedCall(self, call_name, call_order=self._expected_call_order)
self._declare_call(call_name, c)
return self | python | def expects(self, call_name):
"""Expect a call.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
If the method *call_name* is never called, then raise an error. I.E.::
>>> session = Fake('session').expects('open').expects('close')
>>> session.open()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:session.close() was not called
.. note::
If you want to also verify the order these calls are made in,
use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`
after ``expects(...)``, each new call will be part of the expected order
Declaring ``expects()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name)
self._last_declared_call_name = call_name
c = ExpectedCall(self, call_name, call_order=self._expected_call_order)
self._declare_call(call_name, c)
return self | [
"def",
"expects",
"(",
"self",
",",
"call_name",
")",
":",
"if",
"call_name",
"in",
"self",
".",
"_declared_calls",
":",
"return",
"self",
".",
"next_call",
"(",
"for_method",
"=",
"call_name",
")",
"self",
".",
"_last_declared_call_name",
"=",
"call_name",
"c",
"=",
"ExpectedCall",
"(",
"self",
",",
"call_name",
",",
"call_order",
"=",
"self",
".",
"_expected_call_order",
")",
"self",
".",
"_declare_call",
"(",
"call_name",
",",
"c",
")",
"return",
"self"
] | Expect a call.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
If the method *call_name* is never called, then raise an error. I.E.::
>>> session = Fake('session').expects('open').expects('close')
>>> session.open()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:session.close() was not called
.. note::
If you want to also verify the order these calls are made in,
use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`
after ``expects(...)``, each new call will be part of the expected order
Declaring ``expects()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call` | [
"Expect",
"a",
"call",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L846-L879 |
fudge-py/fudge | fudge/__init__.py | Fake.next_call | def next_call(self, for_method=None):
"""Start expecting or providing multiple calls.
.. note:: next_call() cannot be used in combination with :func:`fudge.Fake.times_called`
Up until calling this method, calls are infinite.
For example, before next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f.status()
'Awake!'
>>> f.status()
'Awake!'
After next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f = f.next_call().returns('Asleep')
>>> f = f.next_call().returns('Dreaming')
>>> f.status()
'Awake!'
>>> f.status()
'Asleep'
>>> f.status()
'Dreaming'
>>> f.status()
Traceback (most recent call last):
...
AssertionError: This attribute of fake:unnamed can only be called 3 time(s). Call reset() if necessary or fudge.clear_calls().
If you need to affect the next call of something other than the last declared call,
use ``next_call(for_method="other_call")``. Here is an example using getters and setters
on a session object ::
>>> from fudge import Fake
>>> sess = Fake('session').provides('get_count').returns(1)
>>> sess = sess.provides('set_count').with_args(5)
Now go back and adjust return values for get_count() ::
>>> sess = sess.next_call(for_method='get_count').returns(5)
This allows these calls to be made ::
>>> sess.get_count()
1
>>> sess.set_count(5)
>>> sess.get_count()
5
When using :func:`fudge.Fake.remember_order` in combination with :func:`fudge.Fake.expects` and :func:`fudge.Fake.next_call` each new call will be part of the expected order.
"""
last_call_name = self._last_declared_call_name
if for_method:
if for_method not in self._declared_calls:
raise FakeDeclarationError(
"next_call(for_method=%r) is not possible; "
"declare expects(%r) or provides(%r) first" % (
for_method, for_method, for_method))
else:
# set this for the local function:
last_call_name = for_method
# reset this for subsequent methods:
self._last_declared_call_name = last_call_name
if last_call_name:
exp = self._declared_calls[last_call_name]
elif self._callable:
exp = self._callable
else:
raise FakeDeclarationError('next_call() must follow provides(), '
'expects() or is_callable()')
if getattr(exp, 'expected_times_called', None) is not None:
raise FakeDeclarationError("Cannot use next_call() in combination with times_called()")
if not isinstance(exp, CallStack):
# lazily create a stack with the last defined
# expected call as the first on the stack:
stack = CallStack(self, initial_calls=[exp],
expected=isinstance(exp, ExpectedCall),
call_name=exp.call_name)
# replace the old call obj using the same name:
if last_call_name:
self._declare_call(last_call_name, stack)
elif self._callable:
self._callable = stack
else:
stack = exp
# hmm, we need a copy here so that the last call
# falls off the stack.
if stack.expected:
next_call = ExpectedCall(self, call_name=exp.call_name, call_order=self._expected_call_order)
else:
next_call = Call(self, call_name=exp.call_name)
stack.add_call(next_call)
return self | python | def next_call(self, for_method=None):
"""Start expecting or providing multiple calls.
.. note:: next_call() cannot be used in combination with :func:`fudge.Fake.times_called`
Up until calling this method, calls are infinite.
For example, before next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f.status()
'Awake!'
>>> f.status()
'Awake!'
After next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f = f.next_call().returns('Asleep')
>>> f = f.next_call().returns('Dreaming')
>>> f.status()
'Awake!'
>>> f.status()
'Asleep'
>>> f.status()
'Dreaming'
>>> f.status()
Traceback (most recent call last):
...
AssertionError: This attribute of fake:unnamed can only be called 3 time(s). Call reset() if necessary or fudge.clear_calls().
If you need to affect the next call of something other than the last declared call,
use ``next_call(for_method="other_call")``. Here is an example using getters and setters
on a session object ::
>>> from fudge import Fake
>>> sess = Fake('session').provides('get_count').returns(1)
>>> sess = sess.provides('set_count').with_args(5)
Now go back and adjust return values for get_count() ::
>>> sess = sess.next_call(for_method='get_count').returns(5)
This allows these calls to be made ::
>>> sess.get_count()
1
>>> sess.set_count(5)
>>> sess.get_count()
5
When using :func:`fudge.Fake.remember_order` in combination with :func:`fudge.Fake.expects` and :func:`fudge.Fake.next_call` each new call will be part of the expected order.
"""
last_call_name = self._last_declared_call_name
if for_method:
if for_method not in self._declared_calls:
raise FakeDeclarationError(
"next_call(for_method=%r) is not possible; "
"declare expects(%r) or provides(%r) first" % (
for_method, for_method, for_method))
else:
# set this for the local function:
last_call_name = for_method
# reset this for subsequent methods:
self._last_declared_call_name = last_call_name
if last_call_name:
exp = self._declared_calls[last_call_name]
elif self._callable:
exp = self._callable
else:
raise FakeDeclarationError('next_call() must follow provides(), '
'expects() or is_callable()')
if getattr(exp, 'expected_times_called', None) is not None:
raise FakeDeclarationError("Cannot use next_call() in combination with times_called()")
if not isinstance(exp, CallStack):
# lazily create a stack with the last defined
# expected call as the first on the stack:
stack = CallStack(self, initial_calls=[exp],
expected=isinstance(exp, ExpectedCall),
call_name=exp.call_name)
# replace the old call obj using the same name:
if last_call_name:
self._declare_call(last_call_name, stack)
elif self._callable:
self._callable = stack
else:
stack = exp
# hmm, we need a copy here so that the last call
# falls off the stack.
if stack.expected:
next_call = ExpectedCall(self, call_name=exp.call_name, call_order=self._expected_call_order)
else:
next_call = Call(self, call_name=exp.call_name)
stack.add_call(next_call)
return self | [
"def",
"next_call",
"(",
"self",
",",
"for_method",
"=",
"None",
")",
":",
"last_call_name",
"=",
"self",
".",
"_last_declared_call_name",
"if",
"for_method",
":",
"if",
"for_method",
"not",
"in",
"self",
".",
"_declared_calls",
":",
"raise",
"FakeDeclarationError",
"(",
"\"next_call(for_method=%r) is not possible; \"",
"\"declare expects(%r) or provides(%r) first\"",
"%",
"(",
"for_method",
",",
"for_method",
",",
"for_method",
")",
")",
"else",
":",
"# set this for the local function:",
"last_call_name",
"=",
"for_method",
"# reset this for subsequent methods:",
"self",
".",
"_last_declared_call_name",
"=",
"last_call_name",
"if",
"last_call_name",
":",
"exp",
"=",
"self",
".",
"_declared_calls",
"[",
"last_call_name",
"]",
"elif",
"self",
".",
"_callable",
":",
"exp",
"=",
"self",
".",
"_callable",
"else",
":",
"raise",
"FakeDeclarationError",
"(",
"'next_call() must follow provides(), '",
"'expects() or is_callable()'",
")",
"if",
"getattr",
"(",
"exp",
",",
"'expected_times_called'",
",",
"None",
")",
"is",
"not",
"None",
":",
"raise",
"FakeDeclarationError",
"(",
"\"Cannot use next_call() in combination with times_called()\"",
")",
"if",
"not",
"isinstance",
"(",
"exp",
",",
"CallStack",
")",
":",
"# lazily create a stack with the last defined",
"# expected call as the first on the stack:",
"stack",
"=",
"CallStack",
"(",
"self",
",",
"initial_calls",
"=",
"[",
"exp",
"]",
",",
"expected",
"=",
"isinstance",
"(",
"exp",
",",
"ExpectedCall",
")",
",",
"call_name",
"=",
"exp",
".",
"call_name",
")",
"# replace the old call obj using the same name:",
"if",
"last_call_name",
":",
"self",
".",
"_declare_call",
"(",
"last_call_name",
",",
"stack",
")",
"elif",
"self",
".",
"_callable",
":",
"self",
".",
"_callable",
"=",
"stack",
"else",
":",
"stack",
"=",
"exp",
"# hmm, we need a copy here so that the last call",
"# falls off the stack.",
"if",
"stack",
".",
"expected",
":",
"next_call",
"=",
"ExpectedCall",
"(",
"self",
",",
"call_name",
"=",
"exp",
".",
"call_name",
",",
"call_order",
"=",
"self",
".",
"_expected_call_order",
")",
"else",
":",
"next_call",
"=",
"Call",
"(",
"self",
",",
"call_name",
"=",
"exp",
".",
"call_name",
")",
"stack",
".",
"add_call",
"(",
"next_call",
")",
"return",
"self"
] | Start expecting or providing multiple calls.
.. note:: next_call() cannot be used in combination with :func:`fudge.Fake.times_called`
Up until calling this method, calls are infinite.
For example, before next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f.status()
'Awake!'
>>> f.status()
'Awake!'
After next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f = f.next_call().returns('Asleep')
>>> f = f.next_call().returns('Dreaming')
>>> f.status()
'Awake!'
>>> f.status()
'Asleep'
>>> f.status()
'Dreaming'
>>> f.status()
Traceback (most recent call last):
...
AssertionError: This attribute of fake:unnamed can only be called 3 time(s). Call reset() if necessary or fudge.clear_calls().
If you need to affect the next call of something other than the last declared call,
use ``next_call(for_method="other_call")``. Here is an example using getters and setters
on a session object ::
>>> from fudge import Fake
>>> sess = Fake('session').provides('get_count').returns(1)
>>> sess = sess.provides('set_count').with_args(5)
Now go back and adjust return values for get_count() ::
>>> sess = sess.next_call(for_method='get_count').returns(5)
This allows these calls to be made ::
>>> sess.get_count()
1
>>> sess.set_count(5)
>>> sess.get_count()
5
When using :func:`fudge.Fake.remember_order` in combination with :func:`fudge.Fake.expects` and :func:`fudge.Fake.next_call` each new call will be part of the expected order. | [
"Start",
"expecting",
"or",
"providing",
"multiple",
"calls",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L914-L1016 |
fudge-py/fudge | fudge/__init__.py | Fake.provides | def provides(self, call_name):
"""Provide a call.
The call acts as a stub -- no error is raised if it is not called.::
>>> session = Fake('session').provides('open').provides('close')
>>> import fudge
>>> fudge.clear_expectations() # from any previously declared fakes
>>> fudge.clear_calls()
>>> session.open()
>>> fudge.verify() # close() not called but no error
Declaring ``provides()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name)
self._last_declared_call_name = call_name
c = Call(self, call_name)
self._declare_call(call_name, c)
return self | python | def provides(self, call_name):
"""Provide a call.
The call acts as a stub -- no error is raised if it is not called.::
>>> session = Fake('session').provides('open').provides('close')
>>> import fudge
>>> fudge.clear_expectations() # from any previously declared fakes
>>> fudge.clear_calls()
>>> session.open()
>>> fudge.verify() # close() not called but no error
Declaring ``provides()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name)
self._last_declared_call_name = call_name
c = Call(self, call_name)
self._declare_call(call_name, c)
return self | [
"def",
"provides",
"(",
"self",
",",
"call_name",
")",
":",
"if",
"call_name",
"in",
"self",
".",
"_declared_calls",
":",
"return",
"self",
".",
"next_call",
"(",
"for_method",
"=",
"call_name",
")",
"self",
".",
"_last_declared_call_name",
"=",
"call_name",
"c",
"=",
"Call",
"(",
"self",
",",
"call_name",
")",
"self",
".",
"_declare_call",
"(",
"call_name",
",",
"c",
")",
"return",
"self"
] | Provide a call.
The call acts as a stub -- no error is raised if it is not called.::
>>> session = Fake('session').provides('open').provides('close')
>>> import fudge
>>> fudge.clear_expectations() # from any previously declared fakes
>>> fudge.clear_calls()
>>> session.open()
>>> fudge.verify() # close() not called but no error
Declaring ``provides()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call` | [
"Provide",
"a",
"call",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1018-L1040 |
fudge-py/fudge | fudge/__init__.py | Fake.raises | def raises(self, exc):
"""Set last call to raise an exception class or instance.
For example::
>>> import fudge
>>> db = fudge.Fake('db').provides('insert').raises(ValueError("not enough parameters for insert"))
>>> db.insert()
Traceback (most recent call last):
...
ValueError: not enough parameters for insert
"""
exp = self._get_current_call()
exp.exception_to_raise = exc
return self | python | def raises(self, exc):
"""Set last call to raise an exception class or instance.
For example::
>>> import fudge
>>> db = fudge.Fake('db').provides('insert').raises(ValueError("not enough parameters for insert"))
>>> db.insert()
Traceback (most recent call last):
...
ValueError: not enough parameters for insert
"""
exp = self._get_current_call()
exp.exception_to_raise = exc
return self | [
"def",
"raises",
"(",
"self",
",",
"exc",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"exp",
".",
"exception_to_raise",
"=",
"exc",
"return",
"self"
] | Set last call to raise an exception class or instance.
For example::
>>> import fudge
>>> db = fudge.Fake('db').provides('insert').raises(ValueError("not enough parameters for insert"))
>>> db.insert()
Traceback (most recent call last):
...
ValueError: not enough parameters for insert | [
"Set",
"last",
"call",
"to",
"raise",
"an",
"exception",
"class",
"or",
"instance",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1042-L1057 |
fudge-py/fudge | fudge/__init__.py | Fake.remember_order | def remember_order(self):
"""Verify that subsequent :func:`fudge.Fake.expects` are called in the right order.
For example::
>>> import fudge
>>> db = fudge.Fake('db').remember_order().expects('insert').expects('update')
>>> db.update()
Traceback (most recent call last):
...
AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end
>>> fudge.clear_expectations()
When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added
to the expected order of calls ::
>>> import fudge
>>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1)
>>> sess = sess.expects("set_id").with_args(5)
>>> sess = sess.next_call(for_method="get_id").returns(5)
Multiple calls to ``get_id()`` are now expected ::
>>> sess.get_id()
1
>>> sess.set_id(5)
>>> sess.get_id()
5
>>> fudge.verify()
>>> fudge.clear_expectations()
"""
if self._callable:
raise FakeDeclarationError(
"remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)")
self._expected_call_order = ExpectedCallOrder(self)
registry.remember_expected_call_order(self._expected_call_order)
return self | python | def remember_order(self):
"""Verify that subsequent :func:`fudge.Fake.expects` are called in the right order.
For example::
>>> import fudge
>>> db = fudge.Fake('db').remember_order().expects('insert').expects('update')
>>> db.update()
Traceback (most recent call last):
...
AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end
>>> fudge.clear_expectations()
When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added
to the expected order of calls ::
>>> import fudge
>>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1)
>>> sess = sess.expects("set_id").with_args(5)
>>> sess = sess.next_call(for_method="get_id").returns(5)
Multiple calls to ``get_id()`` are now expected ::
>>> sess.get_id()
1
>>> sess.set_id(5)
>>> sess.get_id()
5
>>> fudge.verify()
>>> fudge.clear_expectations()
"""
if self._callable:
raise FakeDeclarationError(
"remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)")
self._expected_call_order = ExpectedCallOrder(self)
registry.remember_expected_call_order(self._expected_call_order)
return self | [
"def",
"remember_order",
"(",
"self",
")",
":",
"if",
"self",
".",
"_callable",
":",
"raise",
"FakeDeclarationError",
"(",
"\"remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)\"",
")",
"self",
".",
"_expected_call_order",
"=",
"ExpectedCallOrder",
"(",
"self",
")",
"registry",
".",
"remember_expected_call_order",
"(",
"self",
".",
"_expected_call_order",
")",
"return",
"self"
] | Verify that subsequent :func:`fudge.Fake.expects` are called in the right order.
For example::
>>> import fudge
>>> db = fudge.Fake('db').remember_order().expects('insert').expects('update')
>>> db.update()
Traceback (most recent call last):
...
AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end
>>> fudge.clear_expectations()
When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added
to the expected order of calls ::
>>> import fudge
>>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1)
>>> sess = sess.expects("set_id").with_args(5)
>>> sess = sess.next_call(for_method="get_id").returns(5)
Multiple calls to ``get_id()`` are now expected ::
>>> sess.get_id()
1
>>> sess.set_id(5)
>>> sess.get_id()
5
>>> fudge.verify()
>>> fudge.clear_expectations() | [
"Verify",
"that",
"subsequent",
":",
"func",
":",
"fudge",
".",
"Fake",
".",
"expects",
"are",
"called",
"in",
"the",
"right",
"order",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1059-L1096 |
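For contrast with the failing example in the docstring above, a minimal passing sketch in which the calls arrive in the declared order (the ``db`` method names are hypothetical):
>>> import fudge
>>> db = fudge.Fake('db').remember_order().expects('insert').expects('commit')
>>> db.insert()
>>> db.commit()
>>> fudge.verify()
>>> fudge.clear_expectations()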
fudge-py/fudge | fudge/__init__.py | Fake.returns | def returns(self, val):
"""Set the last call to return a value.
Set a static value to return when a method is called. I.E.::
>>> f = Fake().provides('get_number').returns(64)
>>> f.get_number()
64
"""
exp = self._get_current_call()
exp.return_val = val
return self | python | def returns(self, val):
"""Set the last call to return a value.
Set a static value to return when a method is called. I.E.::
>>> f = Fake().provides('get_number').returns(64)
>>> f.get_number()
64
"""
exp = self._get_current_call()
exp.return_val = val
return self | [
"def",
"returns",
"(",
"self",
",",
"val",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"exp",
".",
"return_val",
"=",
"val",
"return",
"self"
] | Set the last call to return a value.
Set a static value to return when a method is called. I.E.::
>>> f = Fake().provides('get_number').returns(64)
>>> f.get_number()
64 | [
"Set",
"the",
"last",
"call",
"to",
"return",
"a",
"value",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1098-L1110 |
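Presumably ``returns()`` also combines with the argument declarations documented further down (``with_args``); a hedged sketch with hypothetical names:
>>> import fudge
>>> fudge.clear_expectations()
>>> geo = fudge.Fake('geo').provides('lookup').with_args('SE').returns('Sweden')
>>> geo.lookup('SE')
'Sweden'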
fudge-py/fudge | fudge/__init__.py | Fake.returns_fake | def returns_fake(self, *args, **kwargs):
"""Set the last call to return a new :class:`fudge.Fake`.
Any given arguments are passed to the :class:`fudge.Fake` constructor
Take note that this is different from the cascading nature of
other methods. This will return an instance of the *new* Fake,
not self, so you should be careful to store its return value in a new
variable.
I.E.::
>>> session = Fake('session')
>>> query = session.provides('query').returns_fake(name="Query")
>>> assert query is not session
>>> query = query.provides('one').returns(['object'])
>>> session.query().one()
['object']
"""
exp = self._get_current_call()
endpoint = kwargs.get('name', exp.call_name)
name = self._endpoint_name(endpoint)
kwargs['name'] = '%s()' % name
fake = self.__class__(*args, **kwargs)
exp.return_val = fake
return fake | python | def returns_fake(self, *args, **kwargs):
"""Set the last call to return a new :class:`fudge.Fake`.
Any given arguments are passed to the :class:`fudge.Fake` constructor
Take note that this is different from the cascading nature of
other methods. This will return an instance of the *new* Fake,
not self, so you should be careful to store its return value in a new
variable.
I.E.::
>>> session = Fake('session')
>>> query = session.provides('query').returns_fake(name="Query")
>>> assert query is not session
>>> query = query.provides('one').returns(['object'])
>>> session.query().one()
['object']
"""
exp = self._get_current_call()
endpoint = kwargs.get('name', exp.call_name)
name = self._endpoint_name(endpoint)
kwargs['name'] = '%s()' % name
fake = self.__class__(*args, **kwargs)
exp.return_val = fake
return fake | [
"def",
"returns_fake",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"endpoint",
"=",
"kwargs",
".",
"get",
"(",
"'name'",
",",
"exp",
".",
"call_name",
")",
"name",
"=",
"self",
".",
"_endpoint_name",
"(",
"endpoint",
")",
"kwargs",
"[",
"'name'",
"]",
"=",
"'%s()'",
"%",
"name",
"fake",
"=",
"self",
".",
"__class__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"exp",
".",
"return_val",
"=",
"fake",
"return",
"fake"
] | Set the last call to return a new :class:`fudge.Fake`.
Any given arguments are passed to the :class:`fudge.Fake` constructor
Take note that this is different from the cascading nature of
other methods. This will return an instance of the *new* Fake,
not self, so you should be careful to store its return value in a new
variable.
I.E.::
>>> session = Fake('session')
>>> query = session.provides('query').returns_fake(name="Query")
>>> assert query is not session
>>> query = query.provides('one').returns(['object'])
>>> session.query().one()
['object'] | [
"Set",
"the",
"last",
"call",
"to",
"return",
"a",
"new",
":",
"class",
":",
"fudge",
".",
"Fake",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1112-L1139 |
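Because ``returns_fake()`` returns the *new* fake rather than ``self``, deeper chains are built one level at a time; a sketch mirroring the docstring's pattern, with hypothetical names:
>>> import fudge
>>> http = fudge.Fake('http')
>>> response = http.provides('get').returns_fake(name='Response')
>>> response = response.provides('json').returns({'ok': True})
>>> http.get().json()
{'ok': True}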
fudge-py/fudge | fudge/__init__.py | Fake.times_called | def times_called(self, n):
"""Set the number of times an object can be called.
When working with provided calls, you'll only see an
error if the expected call count is exceeded ::
>>> auth = Fake('auth').provides('login').times_called(1)
>>> auth.login()
>>> auth.login()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 2 time(s). Expected 1.
When working with expected calls, you'll see an error if
the call count is never met ::
>>> import fudge
>>> auth = fudge.Fake('auth').expects('login').times_called(2)
>>> auth.login()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 1 time(s). Expected 2.
.. note:: This cannot be used in combination with :func:`fudge.Fake.next_call`
"""
if self._last_declared_call_name:
actual_last_call = self._declared_calls[self._last_declared_call_name]
if isinstance(actual_last_call, CallStack):
raise FakeDeclarationError("Cannot use times_called() in combination with next_call()")
# else: # self._callable is in effect
exp = self._get_current_call()
exp.expected_times_called = n
return self | python | def times_called(self, n):
"""Set the number of times an object can be called.
When working with provided calls, you'll only see an
error if the expected call count is exceeded ::
>>> auth = Fake('auth').provides('login').times_called(1)
>>> auth.login()
>>> auth.login()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 2 time(s). Expected 1.
When working with expected calls, you'll see an error if
the call count is never met ::
>>> import fudge
>>> auth = fudge.Fake('auth').expects('login').times_called(2)
>>> auth.login()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 1 time(s). Expected 2.
.. note:: This cannot be used in combination with :func:`fudge.Fake.next_call`
"""
if self._last_declared_call_name:
actual_last_call = self._declared_calls[self._last_declared_call_name]
if isinstance(actual_last_call, CallStack):
raise FakeDeclarationError("Cannot use times_called() in combination with next_call()")
# else: # self._callable is in effect
exp = self._get_current_call()
exp.expected_times_called = n
return self | [
"def",
"times_called",
"(",
"self",
",",
"n",
")",
":",
"if",
"self",
".",
"_last_declared_call_name",
":",
"actual_last_call",
"=",
"self",
".",
"_declared_calls",
"[",
"self",
".",
"_last_declared_call_name",
"]",
"if",
"isinstance",
"(",
"actual_last_call",
",",
"CallStack",
")",
":",
"raise",
"FakeDeclarationError",
"(",
"\"Cannot use times_called() in combination with next_call()\"",
")",
"# else: # self._callable is in effect",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"exp",
".",
"expected_times_called",
"=",
"n",
"return",
"self"
] | Set the number of times an object can be called.
When working with provided calls, you'll only see an
error if the expected call count is exceeded ::
>>> auth = Fake('auth').provides('login').times_called(1)
>>> auth.login()
>>> auth.login()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 2 time(s). Expected 1.
When working with expected calls, you'll see an error if
the call count is never met ::
>>> import fudge
>>> auth = fudge.Fake('auth').expects('login').times_called(2)
>>> auth.login()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 1 time(s). Expected 2.
.. note:: This cannot be used in combination with :func:`fudge.Fake.next_call` | [
"Set",
"the",
"number",
"of",
"times",
"an",
"object",
"can",
"be",
"called",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1141-L1176 |
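The passing counterpart of the docstring's failing expected-call case, as a short sketch:
>>> import fudge
>>> fudge.clear_expectations()
>>> auth = fudge.Fake('auth').expects('login').times_called(2)
>>> auth.login()
>>> auth.login()
>>> fudge.verify()
>>> fudge.clear_expectations()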
fudge-py/fudge | fudge/__init__.py | Fake.with_args | def with_args(self, *args, **kwargs):
"""Set the last call to expect specific argument values.
The app under test must send all declared arguments and keyword arguments
otherwise your test will raise an AssertionError. For example:
.. doctest::
>>> import fudge
>>> counter = fudge.Fake('counter').expects('increment').with_args(25, table='hits')
>>> counter.increment(24, table='clicks')
Traceback (most recent call last):
...
AssertionError: fake:counter.increment(25, table='hits') was called unexpectedly with args (24, table='clicks')
If you need to work with dynamic argument values
consider using :func:`fudge.Fake.with_matching_args` to make looser declarations.
You can also use :mod:`fudge.inspector` functions. Here is an example of providing
a more flexible ``with_args()`` declaration using inspectors:
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. doctest::
>>> import fudge
>>> from fudge.inspector import arg
>>> counter = fudge.Fake('counter')
>>> counter = counter.expects('increment').with_args(
... arg.any(),
... table=arg.endswith("hits"))
...
The above declaration would allow you to call counter like this:
.. doctest::
>>> counter.increment(999, table="image_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_calls()
Or like this:
.. doctest::
>>> counter.increment(22, table="user_profile_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
exp = self._get_current_call()
if args:
exp.expected_args = args
if kwargs:
exp.expected_kwargs = kwargs
return self | python | def with_args(self, *args, **kwargs):
"""Set the last call to expect specific argument values.
The app under test must send all declared arguments and keyword arguments
otherwise your test will raise an AssertionError. For example:
.. doctest::
>>> import fudge
>>> counter = fudge.Fake('counter').expects('increment').with_args(25, table='hits')
>>> counter.increment(24, table='clicks')
Traceback (most recent call last):
...
AssertionError: fake:counter.increment(25, table='hits') was called unexpectedly with args (24, table='clicks')
If you need to work with dynamic argument values
consider using :func:`fudge.Fake.with_matching_args` to make looser declarations.
You can also use :mod:`fudge.inspector` functions. Here is an example of providing
a more flexible ``with_args()`` declaration using inspectors:
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. doctest::
>>> import fudge
>>> from fudge.inspector import arg
>>> counter = fudge.Fake('counter')
>>> counter = counter.expects('increment').with_args(
... arg.any(),
... table=arg.endswith("hits"))
...
The above declaration would allow you to call counter like this:
.. doctest::
>>> counter.increment(999, table="image_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_calls()
Or like this:
.. doctest::
>>> counter.increment(22, table="user_profile_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
exp = self._get_current_call()
if args:
exp.expected_args = args
if kwargs:
exp.expected_kwargs = kwargs
return self | [
"def",
"with_args",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"if",
"args",
":",
"exp",
".",
"expected_args",
"=",
"args",
"if",
"kwargs",
":",
"exp",
".",
"expected_kwargs",
"=",
"kwargs",
"return",
"self"
] | Set the last call to expect specific argument values.
The app under test must send all declared arguments and keyword arguments
otherwise your test will raise an AssertionError. For example:
.. doctest::
>>> import fudge
>>> counter = fudge.Fake('counter').expects('increment').with_args(25, table='hits')
>>> counter.increment(24, table='clicks')
Traceback (most recent call last):
...
AssertionError: fake:counter.increment(25, table='hits') was called unexpectedly with args (24, table='clicks')
If you need to work with dynamic argument values
consider using :func:`fudge.Fake.with_matching_args` to make looser declarations.
You can also use :mod:`fudge.inspector` functions. Here is an example of providing
a more flexible ``with_args()`` declaration using inspectors:
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. doctest::
>>> import fudge
>>> from fudge.inspector import arg
>>> counter = fudge.Fake('counter')
>>> counter = counter.expects('increment').with_args(
... arg.any(),
... table=arg.endswith("hits"))
...
The above declaration would allow you to call counter like this:
.. doctest::
>>> counter.increment(999, table="image_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_calls()
Or like this:
.. doctest::
>>> counter.increment(22, table="user_profile_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_expectations() | [
"Set",
"the",
"last",
"call",
"to",
"expect",
"specific",
"argument",
"values",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1178-L1243 |
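The error string in ``remember_order()`` above mentions ``Fake(expect_call=True)``; assuming ``with_args()`` applies to such callable fakes as well, a hedged sketch (fake name and values are hypothetical):
>>> import fudge
>>> fudge.clear_expectations()
>>> greet = fudge.Fake('greet', expect_call=True).with_args('world').returns('hi, world')
>>> greet('world')
'hi, world'
>>> fudge.verify()
>>> fudge.clear_expectations()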
fudge-py/fudge | fudge/__init__.py | Fake.with_matching_args | def with_matching_args(self, *args, **kwargs):
"""Set the last call to expect specific argument values if those arguments exist.
Unlike :func:`fudge.Fake.with_args` use this if you want to only declare
expectations about matching arguments. Any unknown keyword arguments
used by the app under test will be allowed.
For example, you can declare positional arguments but ignore keyword arguments:
.. doctest::
>>> import fudge
>>> db = fudge.Fake('db').expects('transaction').with_matching_args('insert')
With this declaration, any keyword argument is allowed:
.. doctest::
>>> db.transaction('insert', isolation_level='lock')
>>> db.transaction('insert', isolation_level='shared')
>>> db.transaction('insert', retry_on_error=True)
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. note::
you may get more mileage out of :mod:`fudge.inspector` functions as
described in :func:`fudge.Fake.with_args`
"""
exp = self._get_current_call()
if args:
exp.expected_matching_args = args
if kwargs:
exp.expected_matching_kwargs = kwargs
return self | python | def with_matching_args(self, *args, **kwargs):
"""Set the last call to expect specific argument values if those arguments exist.
Unlike :func:`fudge.Fake.with_args` use this if you want to only declare
expectations about matching arguments. Any unknown keyword arguments
used by the app under test will be allowed.
For example, you can declare positional arguments but ignore keyword arguments:
.. doctest::
>>> import fudge
>>> db = fudge.Fake('db').expects('transaction').with_matching_args('insert')
With this declaration, any keyword argument is allowed:
.. doctest::
>>> db.transaction('insert', isolation_level='lock')
>>> db.transaction('insert', isolation_level='shared')
>>> db.transaction('insert', retry_on_error=True)
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. note::
you may get more mileage out of :mod:`fudge.inspector` functions as
described in :func:`fudge.Fake.with_args`
"""
exp = self._get_current_call()
if args:
exp.expected_matching_args = args
if kwargs:
exp.expected_matching_kwargs = kwargs
return self | [
"def",
"with_matching_args",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"if",
"args",
":",
"exp",
".",
"expected_matching_args",
"=",
"args",
"if",
"kwargs",
":",
"exp",
".",
"expected_matching_kwargs",
"=",
"kwargs",
"return",
"self"
] | Set the last call to expect specific argument values if those arguments exist.
Unlike :func:`fudge.Fake.with_args` use this if you want to only declare
expectations about matching arguments. Any unknown keyword arguments
used by the app under test will be allowed.
For example, you can declare positional arguments but ignore keyword arguments:
.. doctest::
>>> import fudge
>>> db = fudge.Fake('db').expects('transaction').with_matching_args('insert')
With this declaration, any keyword argument is allowed:
.. doctest::
>>> db.transaction('insert', isolation_level='lock')
>>> db.transaction('insert', isolation_level='shared')
>>> db.transaction('insert', retry_on_error=True)
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. note::
you may get more mileage out of :mod:`fudge.inspector` functions as
described in :func:`fudge.Fake.with_args` | [
"Set",
"the",
"last",
"call",
"to",
"expect",
"specific",
"argument",
"values",
"if",
"those",
"arguments",
"exist",
"."
] | train | https://github.com/fudge-py/fudge/blob/b283fbc1a41900f3f5845b10b8c2ef9136a67ebc/fudge/__init__.py#L1245-L1283 |
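A condensed restatement of the docstring's semantics: declared positional args are checked, while undeclared keyword args pass through unchecked (names hypothetical):
>>> import fudge
>>> fudge.clear_expectations()
>>> log = fudge.Fake('log').expects('write').with_matching_args('disk full')
>>> log.write('disk full', level='error')
>>> fudge.verify()
>>> fudge.clear_expectations()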