| repository_name (stringlengths 7-54) | func_path_in_repository (stringlengths 4-175) | func_name (stringlengths 1-129) | whole_func_string (stringlengths 91-50.9k) | language (stringclasses, 1 value) | func_code_string (stringlengths 91-50.9k) | func_code_tokens (sequence) | func_documentation_string (stringlengths 1-31.6k) | func_documentation_tokens (sequence) | split_name (stringclasses, 1 value) | func_code_url (stringlengths 89-268) | score (float64, 0-0.09) |
---|---|---|---|---|---|---|---|---|---|---|---|
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getdoc | def getdoc(obj):
"""
Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
It's argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
It's docstring
"""
docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''
# Most of the time doc will only contain ascii characters, but there are
# some docstrings that contain non-ascii characters. Not all source files
# declare their encoding in the first line, so querying for that might not
# yield anything, either. So assume the most commonly used
# multi-byte file encoding (which also covers ascii).
try:
docstring = to_text_string(docstring)
except:
pass
# Doc dict keys
doc = {'name': '',
'argspec': '',
'note': '',
'docstring': docstring}
if callable(obj):
try:
name = obj.__name__
except AttributeError:
doc['docstring'] = docstring
return doc
if inspect.ismethod(obj):
imclass = get_meth_class(obj)
if get_meth_class_inst(obj) is not None:
doc['note'] = 'Method of %s instance' \
% get_meth_class_inst(obj).__class__.__name__
else:
doc['note'] = 'Unbound %s method' % imclass.__name__
obj = get_meth_func(obj)
elif hasattr(obj, '__module__'):
doc['note'] = 'Function of %s module' % obj.__module__
else:
doc['note'] = 'Function'
doc['name'] = obj.__name__
if inspect.isfunction(obj):
if PY2:
args, varargs, varkw, defaults = inspect.getargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults,
formatvalue=lambda o:'='+repr(o))
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations) = inspect.getfullargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations, formatvalue=lambda o:'='+repr(o))
if name == '<lambda>':
doc['name'] = name + ' lambda '
doc['argspec'] = doc['argspec'][1:-1] # remove parentheses
else:
argspec = getargspecfromtext(doc['docstring'])
if argspec:
doc['argspec'] = argspec
# Many scipy and numpy docstrings begin with a function
# signature on the first line. This ends up begin redundant
# when we are using title and argspec to create the
# rich text "Definition:" field. We'll carefully remove this
# redundancy but only under a strict set of conditions:
# Remove the starting charaters of the 'doc' portion *iff*
# the non-whitespace characters on the first line
# match *exactly* the combined function title
# and argspec we determined above.
signature = doc['name'] + doc['argspec']
docstring_blocks = doc['docstring'].split("\n\n")
first_block = docstring_blocks[0].strip()
if first_block == signature:
doc['docstring'] = doc['docstring'].replace(
signature, '', 1).lstrip()
else:
doc['argspec'] = '(...)'
# Remove self from argspec
argspec = doc['argspec']
doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')
return doc | python | def getdoc(obj):
"""
Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
It's argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
It's docstring
"""
docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''
# Most of the time doc will only contain ascii characters, but there are
# some docstrings that contain non-ascii characters. Not all source files
# declare their encoding in the first line, so querying for that might not
# yield anything, either. So assume the most commonly used
# multi-byte file encoding (which also covers ascii).
try:
docstring = to_text_string(docstring)
except:
pass
# Doc dict keys
doc = {'name': '',
'argspec': '',
'note': '',
'docstring': docstring}
if callable(obj):
try:
name = obj.__name__
except AttributeError:
doc['docstring'] = docstring
return doc
if inspect.ismethod(obj):
imclass = get_meth_class(obj)
if get_meth_class_inst(obj) is not None:
doc['note'] = 'Method of %s instance' \
% get_meth_class_inst(obj).__class__.__name__
else:
doc['note'] = 'Unbound %s method' % imclass.__name__
obj = get_meth_func(obj)
elif hasattr(obj, '__module__'):
doc['note'] = 'Function of %s module' % obj.__module__
else:
doc['note'] = 'Function'
doc['name'] = obj.__name__
if inspect.isfunction(obj):
if PY2:
args, varargs, varkw, defaults = inspect.getargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults,
formatvalue=lambda o:'='+repr(o))
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations) = inspect.getfullargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations, formatvalue=lambda o:'='+repr(o))
if name == '<lambda>':
doc['name'] = name + ' lambda '
doc['argspec'] = doc['argspec'][1:-1] # remove parentheses
else:
argspec = getargspecfromtext(doc['docstring'])
if argspec:
doc['argspec'] = argspec
# Many scipy and numpy docstrings begin with a function
# signature on the first line. This ends up begin redundant
# when we are using title and argspec to create the
# rich text "Definition:" field. We'll carefully remove this
# redundancy but only under a strict set of conditions:
# Remove the starting charaters of the 'doc' portion *iff*
# the non-whitespace characters on the first line
# match *exactly* the combined function title
# and argspec we determined above.
signature = doc['name'] + doc['argspec']
docstring_blocks = doc['docstring'].split("\n\n")
first_block = docstring_blocks[0].strip()
if first_block == signature:
doc['docstring'] = doc['docstring'].replace(
signature, '', 1).lstrip()
else:
doc['argspec'] = '(...)'
# Remove self from argspec
argspec = doc['argspec']
doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')
return doc | [
"def",
"getdoc",
"(",
"obj",
")",
":",
"docstring",
"=",
"inspect",
".",
"getdoc",
"(",
"obj",
")",
"or",
"inspect",
".",
"getcomments",
"(",
"obj",
")",
"or",
"''",
"# Most of the time doc will only contain ascii characters, but there are",
"# some docstrings that contain non-ascii characters. Not all source files",
"# declare their encoding in the first line, so querying for that might not",
"# yield anything, either. So assume the most commonly used",
"# multi-byte file encoding (which also covers ascii). ",
"try",
":",
"docstring",
"=",
"to_text_string",
"(",
"docstring",
")",
"except",
":",
"pass",
"# Doc dict keys",
"doc",
"=",
"{",
"'name'",
":",
"''",
",",
"'argspec'",
":",
"''",
",",
"'note'",
":",
"''",
",",
"'docstring'",
":",
"docstring",
"}",
"if",
"callable",
"(",
"obj",
")",
":",
"try",
":",
"name",
"=",
"obj",
".",
"__name__",
"except",
"AttributeError",
":",
"doc",
"[",
"'docstring'",
"]",
"=",
"docstring",
"return",
"doc",
"if",
"inspect",
".",
"ismethod",
"(",
"obj",
")",
":",
"imclass",
"=",
"get_meth_class",
"(",
"obj",
")",
"if",
"get_meth_class_inst",
"(",
"obj",
")",
"is",
"not",
"None",
":",
"doc",
"[",
"'note'",
"]",
"=",
"'Method of %s instance'",
"%",
"get_meth_class_inst",
"(",
"obj",
")",
".",
"__class__",
".",
"__name__",
"else",
":",
"doc",
"[",
"'note'",
"]",
"=",
"'Unbound %s method'",
"%",
"imclass",
".",
"__name__",
"obj",
"=",
"get_meth_func",
"(",
"obj",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"'__module__'",
")",
":",
"doc",
"[",
"'note'",
"]",
"=",
"'Function of %s module'",
"%",
"obj",
".",
"__module__",
"else",
":",
"doc",
"[",
"'note'",
"]",
"=",
"'Function'",
"doc",
"[",
"'name'",
"]",
"=",
"obj",
".",
"__name__",
"if",
"inspect",
".",
"isfunction",
"(",
"obj",
")",
":",
"if",
"PY2",
":",
"args",
",",
"varargs",
",",
"varkw",
",",
"defaults",
"=",
"inspect",
".",
"getargspec",
"(",
"obj",
")",
"doc",
"[",
"'argspec'",
"]",
"=",
"inspect",
".",
"formatargspec",
"(",
"args",
",",
"varargs",
",",
"varkw",
",",
"defaults",
",",
"formatvalue",
"=",
"lambda",
"o",
":",
"'='",
"+",
"repr",
"(",
"o",
")",
")",
"else",
":",
"(",
"args",
",",
"varargs",
",",
"varkw",
",",
"defaults",
",",
"kwonlyargs",
",",
"kwonlydefaults",
",",
"annotations",
")",
"=",
"inspect",
".",
"getfullargspec",
"(",
"obj",
")",
"doc",
"[",
"'argspec'",
"]",
"=",
"inspect",
".",
"formatargspec",
"(",
"args",
",",
"varargs",
",",
"varkw",
",",
"defaults",
",",
"kwonlyargs",
",",
"kwonlydefaults",
",",
"annotations",
",",
"formatvalue",
"=",
"lambda",
"o",
":",
"'='",
"+",
"repr",
"(",
"o",
")",
")",
"if",
"name",
"==",
"'<lambda>'",
":",
"doc",
"[",
"'name'",
"]",
"=",
"name",
"+",
"' lambda '",
"doc",
"[",
"'argspec'",
"]",
"=",
"doc",
"[",
"'argspec'",
"]",
"[",
"1",
":",
"-",
"1",
"]",
"# remove parentheses",
"else",
":",
"argspec",
"=",
"getargspecfromtext",
"(",
"doc",
"[",
"'docstring'",
"]",
")",
"if",
"argspec",
":",
"doc",
"[",
"'argspec'",
"]",
"=",
"argspec",
"# Many scipy and numpy docstrings begin with a function",
"# signature on the first line. This ends up begin redundant",
"# when we are using title and argspec to create the",
"# rich text \"Definition:\" field. We'll carefully remove this",
"# redundancy but only under a strict set of conditions:",
"# Remove the starting charaters of the 'doc' portion *iff*",
"# the non-whitespace characters on the first line ",
"# match *exactly* the combined function title ",
"# and argspec we determined above.",
"signature",
"=",
"doc",
"[",
"'name'",
"]",
"+",
"doc",
"[",
"'argspec'",
"]",
"docstring_blocks",
"=",
"doc",
"[",
"'docstring'",
"]",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"first_block",
"=",
"docstring_blocks",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"first_block",
"==",
"signature",
":",
"doc",
"[",
"'docstring'",
"]",
"=",
"doc",
"[",
"'docstring'",
"]",
".",
"replace",
"(",
"signature",
",",
"''",
",",
"1",
")",
".",
"lstrip",
"(",
")",
"else",
":",
"doc",
"[",
"'argspec'",
"]",
"=",
"'(...)'",
"# Remove self from argspec",
"argspec",
"=",
"doc",
"[",
"'argspec'",
"]",
"doc",
"[",
"'argspec'",
"]",
"=",
"argspec",
".",
"replace",
"(",
"'(self)'",
",",
"'()'",
")",
".",
"replace",
"(",
"'(self, '",
",",
"'('",
")",
"return",
"doc"
] | Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
It's argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
It's docstring | [
"Return",
"text",
"documentation",
"from",
"an",
"object",
".",
"This",
"comes",
"in",
"a",
"form",
"of",
"dictionary",
"with",
"four",
"keys",
":"
] | train | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L63-L157 | 0.003794 |
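A minimal usage sketch for the `getdoc` helper in the row above. It assumes `spyder-kernels` is installed; the inspected target (`math.hypot`) and the values in the comments are illustrative, not taken from the dataset.

```python
# Illustrative sketch: run getdoc() against a standard-library callable.
from spyder_kernels.utils.dochelpers import getdoc  # module path from the row above
import math

info = getdoc(math.hypot)
print(info['name'])            # 'hypot'
print(info['note'])            # e.g. 'Function of math module'
print(info['argspec'])         # signature text, or '(...)' if none could be recovered
print(info['docstring'][:60])  # start of the docstring
```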
pandas-dev/pandas | pandas/io/sql.py | SQLDatabase.read_table | def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize) | python | def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize) | [
"def",
"read_table",
"(",
"self",
",",
"table_name",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"parse_dates",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"chunksize",
"=",
"None",
")",
":",
"table",
"=",
"SQLTable",
"(",
"table_name",
",",
"self",
",",
"index",
"=",
"index_col",
",",
"schema",
"=",
"schema",
")",
"return",
"table",
".",
"read",
"(",
"coerce_float",
"=",
"coerce_float",
",",
"parse_dates",
"=",
"parse_dates",
",",
"columns",
"=",
"columns",
",",
"chunksize",
"=",
"chunksize",
")"
] | Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query | [
"Read",
"SQL",
"database",
"table",
"into",
"a",
"DataFrame",
"."
] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L982-L1029 | 0.00189 |
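`SQLDatabase.read_table` above is a pandas-internal method; the public entry point its docstring lists under *See Also* is `pandas.read_sql_table`. A hedged sketch of that public call follows: the SQLite file, table name, and column names are made-up placeholders, and SQLAlchemy is assumed to be installed.

```python
# Sketch of the public wrapper referenced in "See Also" above.
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///example.db")       # hypothetical database file
df = pd.read_sql_table(
    "measurements",                                   # hypothetical table name
    con=engine,
    index_col="id",
    parse_dates={"recorded_at": {"utc": True}},       # dict-of-kwargs form from the docstring
    columns=["recorded_at", "value"],
)
print(df.head())
```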
fermiPy/fermipy | fermipy/diffuse/name_policy.py | NameFactory.ccube | def ccube(self, **kwargs):
""" return the name of a counts cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
localpath = NameFactory.ccube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | python | def ccube(self, **kwargs):
""" return the name of a counts cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
localpath = NameFactory.ccube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | [
"def",
"ccube",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs_copy",
"=",
"self",
".",
"base_dict",
".",
"copy",
"(",
")",
"kwargs_copy",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"kwargs_copy",
"[",
"'dataset'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'dataset'",
",",
"self",
".",
"dataset",
"(",
"*",
"*",
"kwargs",
")",
")",
"kwargs_copy",
"[",
"'component'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'component'",
",",
"self",
".",
"component",
"(",
"*",
"*",
"kwargs",
")",
")",
"localpath",
"=",
"NameFactory",
".",
"ccube_format",
".",
"format",
"(",
"*",
"*",
"kwargs_copy",
")",
"if",
"kwargs",
".",
"get",
"(",
"'fullpath'",
",",
"False",
")",
":",
"return",
"self",
".",
"fullpath",
"(",
"localpath",
"=",
"localpath",
")",
"return",
"localpath"
] | return the name of a counts cube file | [
"return",
"the",
"name",
"of",
"a",
"counts",
"cube",
"file"
] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L363-L374 | 0.003745 |
wummel/patool | patoolib/util.py | get_nt_7z_dir | def get_nt_7z_dir ():
"""Return 7-Zip directory from registry, or an empty string."""
# Python 3.x renamed the _winreg module to winreg
try:
import _winreg as winreg
except ImportError:
import winreg
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\7-Zip")
try:
return winreg.QueryValueEx(key, "Path")[0]
finally:
winreg.CloseKey(key)
except WindowsError:
return "" | python | def get_nt_7z_dir ():
"""Return 7-Zip directory from registry, or an empty string."""
# Python 3.x renamed the _winreg module to winreg
try:
import _winreg as winreg
except ImportError:
import winreg
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\7-Zip")
try:
return winreg.QueryValueEx(key, "Path")[0]
finally:
winreg.CloseKey(key)
except WindowsError:
return "" | [
"def",
"get_nt_7z_dir",
"(",
")",
":",
"# Python 3.x renamed the _winreg module to winreg",
"try",
":",
"import",
"_winreg",
"as",
"winreg",
"except",
"ImportError",
":",
"import",
"winreg",
"try",
":",
"key",
"=",
"winreg",
".",
"OpenKey",
"(",
"winreg",
".",
"HKEY_LOCAL_MACHINE",
",",
"r\"SOFTWARE\\7-Zip\"",
")",
"try",
":",
"return",
"winreg",
".",
"QueryValueEx",
"(",
"key",
",",
"\"Path\"",
")",
"[",
"0",
"]",
"finally",
":",
"winreg",
".",
"CloseKey",
"(",
"key",
")",
"except",
"WindowsError",
":",
"return",
"\"\""
] | Return 7-Zip directory from registry, or an empty string. | [
"Return",
"7",
"-",
"Zip",
"directory",
"from",
"registry",
"or",
"an",
"empty",
"string",
"."
] | train | https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/util.py#L629-L643 | 0.004202 |
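A short, Windows-only usage sketch for the registry helper above. The import path comes from the row; joining `7z.exe` onto the returned directory is an assumption about typical caller behaviour, not something shown in the row.

```python
# Illustrative only (Windows): locate the 7z executable via the helper above.
import os
from patoolib.util import get_nt_7z_dir  # module path from the row above

seven_zip_dir = get_nt_7z_dir()
if seven_zip_dir:
    print(os.path.join(seven_zip_dir, "7z.exe"))
else:
    print("7-Zip not found under HKEY_LOCAL_MACHINE\\SOFTWARE\\7-Zip")
```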
PyCQA/astroid | astroid/brain/brain_builtin_inference.py | infer_isinstance | def infer_isinstance(callnode, context=None):
"""Infer isinstance calls
:param nodes.Call callnode: an isinstance call
:param InferenceContext: context for call
(currently unused but is a common interface for inference)
:rtype nodes.Const: Boolean Const value of isinstance call
:raises UseInferenceDefault: If the node cannot be inferred
"""
call = arguments.CallSite.from_call(callnode)
if call.keyword_arguments:
# isinstance doesn't support keyword arguments
raise UseInferenceDefault("TypeError: isinstance() takes no keyword arguments")
if len(call.positional_arguments) != 2:
raise UseInferenceDefault(
"Expected two arguments, got {count}".format(
count=len(call.positional_arguments)
)
)
# The left hand argument is the obj to be checked
obj_node, class_or_tuple_node = call.positional_arguments
# The right hand argument is the class(es) that the given
# obj is to be check is an instance of
try:
class_container = _class_or_tuple_to_container(
class_or_tuple_node, context=context
)
except InferenceError:
raise UseInferenceDefault
try:
isinstance_bool = helpers.object_isinstance(obj_node, class_container, context)
except AstroidTypeError as exc:
raise UseInferenceDefault("TypeError: " + str(exc))
except MroError as exc:
raise UseInferenceDefault from exc
if isinstance_bool is util.Uninferable:
raise UseInferenceDefault
return nodes.Const(isinstance_bool) | python | def infer_isinstance(callnode, context=None):
"""Infer isinstance calls
:param nodes.Call callnode: an isinstance call
:param InferenceContext: context for call
(currently unused but is a common interface for inference)
:rtype nodes.Const: Boolean Const value of isinstance call
:raises UseInferenceDefault: If the node cannot be inferred
"""
call = arguments.CallSite.from_call(callnode)
if call.keyword_arguments:
# isinstance doesn't support keyword arguments
raise UseInferenceDefault("TypeError: isinstance() takes no keyword arguments")
if len(call.positional_arguments) != 2:
raise UseInferenceDefault(
"Expected two arguments, got {count}".format(
count=len(call.positional_arguments)
)
)
# The left hand argument is the obj to be checked
obj_node, class_or_tuple_node = call.positional_arguments
# The right hand argument is the class(es) that the given
# obj is to be check is an instance of
try:
class_container = _class_or_tuple_to_container(
class_or_tuple_node, context=context
)
except InferenceError:
raise UseInferenceDefault
try:
isinstance_bool = helpers.object_isinstance(obj_node, class_container, context)
except AstroidTypeError as exc:
raise UseInferenceDefault("TypeError: " + str(exc))
except MroError as exc:
raise UseInferenceDefault from exc
if isinstance_bool is util.Uninferable:
raise UseInferenceDefault
return nodes.Const(isinstance_bool) | [
"def",
"infer_isinstance",
"(",
"callnode",
",",
"context",
"=",
"None",
")",
":",
"call",
"=",
"arguments",
".",
"CallSite",
".",
"from_call",
"(",
"callnode",
")",
"if",
"call",
".",
"keyword_arguments",
":",
"# isinstance doesn't support keyword arguments",
"raise",
"UseInferenceDefault",
"(",
"\"TypeError: isinstance() takes no keyword arguments\"",
")",
"if",
"len",
"(",
"call",
".",
"positional_arguments",
")",
"!=",
"2",
":",
"raise",
"UseInferenceDefault",
"(",
"\"Expected two arguments, got {count}\"",
".",
"format",
"(",
"count",
"=",
"len",
"(",
"call",
".",
"positional_arguments",
")",
")",
")",
"# The left hand argument is the obj to be checked",
"obj_node",
",",
"class_or_tuple_node",
"=",
"call",
".",
"positional_arguments",
"# The right hand argument is the class(es) that the given",
"# obj is to be check is an instance of",
"try",
":",
"class_container",
"=",
"_class_or_tuple_to_container",
"(",
"class_or_tuple_node",
",",
"context",
"=",
"context",
")",
"except",
"InferenceError",
":",
"raise",
"UseInferenceDefault",
"try",
":",
"isinstance_bool",
"=",
"helpers",
".",
"object_isinstance",
"(",
"obj_node",
",",
"class_container",
",",
"context",
")",
"except",
"AstroidTypeError",
"as",
"exc",
":",
"raise",
"UseInferenceDefault",
"(",
"\"TypeError: \"",
"+",
"str",
"(",
"exc",
")",
")",
"except",
"MroError",
"as",
"exc",
":",
"raise",
"UseInferenceDefault",
"from",
"exc",
"if",
"isinstance_bool",
"is",
"util",
".",
"Uninferable",
":",
"raise",
"UseInferenceDefault",
"return",
"nodes",
".",
"Const",
"(",
"isinstance_bool",
")"
] | Infer isinstance calls
:param nodes.Call callnode: an isinstance call
:param InferenceContext: context for call
(currently unused but is a common interface for inference)
:rtype nodes.Const: Boolean Const value of isinstance call
:raises UseInferenceDefault: If the node cannot be inferred | [
"Infer",
"isinstance",
"calls"
] | train | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_builtin_inference.py#L605-L643 | 0.001873 |
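The inference tip above is what lets astroid fold a literal `isinstance()` call into a boolean constant. A hedged sketch of the observable effect follows; it assumes astroid is installed with this brain plugin registered (it is by default), and the exact printed representation can differ between versions.

```python
# Sketch: astroid inferring a literal isinstance() call down to a Const node.
import astroid

node = astroid.extract_node("isinstance(1, int)")
inferred = next(node.infer())
print(inferred.value)   # expected: True (a nodes.Const wrapping a bool)
```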
mesbahamin/chronophore | chronophore/controller.py | sign_out | def sign_out(entry, time_out=None, forgot=False):
"""Sign out of an existing entry in the timesheet. If the user
forgot to sign out, flag the entry.
:param entry: `models.Entry` object. The entry to sign out.
:param time_out: (optional) `datetime.time` object. Specify the sign out time.
:param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten.
:return: The signed out entry.
""" # noqa
if time_out is None:
time_out = datetime.today().time()
if forgot:
entry.forgot_sign_out = True
logger.info(
'{} forgot to sign out on {}.'.format(entry.user_id, entry.date)
)
else:
entry.time_out = time_out
logger.info('{} ({}) signed out.'.format(entry.user_id, entry.user_type))
return entry | python | def sign_out(entry, time_out=None, forgot=False):
"""Sign out of an existing entry in the timesheet. If the user
forgot to sign out, flag the entry.
:param entry: `models.Entry` object. The entry to sign out.
:param time_out: (optional) `datetime.time` object. Specify the sign out time.
:param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten.
:return: The signed out entry.
""" # noqa
if time_out is None:
time_out = datetime.today().time()
if forgot:
entry.forgot_sign_out = True
logger.info(
'{} forgot to sign out on {}.'.format(entry.user_id, entry.date)
)
else:
entry.time_out = time_out
logger.info('{} ({}) signed out.'.format(entry.user_id, entry.user_type))
return entry | [
"def",
"sign_out",
"(",
"entry",
",",
"time_out",
"=",
"None",
",",
"forgot",
"=",
"False",
")",
":",
"# noqa",
"if",
"time_out",
"is",
"None",
":",
"time_out",
"=",
"datetime",
".",
"today",
"(",
")",
".",
"time",
"(",
")",
"if",
"forgot",
":",
"entry",
".",
"forgot_sign_out",
"=",
"True",
"logger",
".",
"info",
"(",
"'{} forgot to sign out on {}.'",
".",
"format",
"(",
"entry",
".",
"user_id",
",",
"entry",
".",
"date",
")",
")",
"else",
":",
"entry",
".",
"time_out",
"=",
"time_out",
"logger",
".",
"info",
"(",
"'{} ({}) signed out.'",
".",
"format",
"(",
"entry",
".",
"user_id",
",",
"entry",
".",
"user_type",
")",
")",
"return",
"entry"
] | Sign out of an existing entry in the timesheet. If the user
forgot to sign out, flag the entry.
:param entry: `models.Entry` object. The entry to sign out.
:param time_out: (optional) `datetime.time` object. Specify the sign out time.
:param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten.
:return: The signed out entry. | [
"Sign",
"out",
"of",
"an",
"existing",
"entry",
"in",
"the",
"timesheet",
".",
"If",
"the",
"user",
"forgot",
"to",
"sign",
"out",
"flag",
"the",
"entry",
"."
] | train | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L177-L199 | 0.002427 |
KnorrFG/pyparadigm | pyparadigm/eventlistener.py | EventListener.listen_until_return | def listen_until_return(self, *temporary_handlers, timeout=0):
"""Calls listen repeatedly until listen returns something else than None.
Then returns listen's result. If timeout is not zero listen_until_return
stops after timeout seconds and returns None."""
start = time.time()
while timeout == 0 or time.time() - start < timeout:
res = self.listen(*temporary_handlers)
if res is not None:
return res | python | def listen_until_return(self, *temporary_handlers, timeout=0):
"""Calls listen repeatedly until listen returns something else than None.
Then returns listen's result. If timeout is not zero listen_until_return
stops after timeout seconds and returns None."""
start = time.time()
while timeout == 0 or time.time() - start < timeout:
res = self.listen(*temporary_handlers)
if res is not None:
return res | [
"def",
"listen_until_return",
"(",
"self",
",",
"*",
"temporary_handlers",
",",
"timeout",
"=",
"0",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"timeout",
"==",
"0",
"or",
"time",
".",
"time",
"(",
")",
"-",
"start",
"<",
"timeout",
":",
"res",
"=",
"self",
".",
"listen",
"(",
"*",
"temporary_handlers",
")",
"if",
"res",
"is",
"not",
"None",
":",
"return",
"res"
] | Calls listen repeatedly until listen returns something else than None.
Then returns listen's result. If timeout is not zero listen_until_return
stops after timeout seconds and returns None. | [
"Calls",
"listen",
"repeatedly",
"until",
"listen",
"returns",
"something",
"else",
"than",
"None",
".",
"Then",
"returns",
"listen",
"s",
"result",
".",
"If",
"timeout",
"is",
"not",
"zero",
"listen_until_return",
"stops",
"after",
"timeout",
"seconds",
"and",
"returns",
"None",
"."
] | train | https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/eventlistener.py#L227-L235 | 0.008316 |
LaoLiulaoliu/pgwrapper | pgwrapper/pgpool.py | PGPool._create_connection | def _create_connection(self):
""".. :py:method::
If we hava several hosts, we can random choice one to connect
"""
db = psycopg2.connect(database=self.dbname,
user=self.user, password=self.password,
host=self.host, port=self.port)
if 'psycopg2.extras' in sys.modules:
psycopg2.extras.register_hstore(db)
return db | python | def _create_connection(self):
""".. :py:method::
If we hava several hosts, we can random choice one to connect
"""
db = psycopg2.connect(database=self.dbname,
user=self.user, password=self.password,
host=self.host, port=self.port)
if 'psycopg2.extras' in sys.modules:
psycopg2.extras.register_hstore(db)
return db | [
"def",
"_create_connection",
"(",
"self",
")",
":",
"db",
"=",
"psycopg2",
".",
"connect",
"(",
"database",
"=",
"self",
".",
"dbname",
",",
"user",
"=",
"self",
".",
"user",
",",
"password",
"=",
"self",
".",
"password",
",",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
")",
"if",
"'psycopg2.extras'",
"in",
"sys",
".",
"modules",
":",
"psycopg2",
".",
"extras",
".",
"register_hstore",
"(",
"db",
")",
"return",
"db"
] | .. :py:method::
If we hava several hosts, we can random choice one to connect | [
"..",
":",
"py",
":",
"method",
"::"
] | train | https://github.com/LaoLiulaoliu/pgwrapper/blob/063a164713b79bfadb56a01c4ae19911f508d01e/pgwrapper/pgpool.py#L63-L73 | 0.009009 |
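The private method above is a thin wrapper around `psycopg2.connect`. A standalone equivalent of what it does is sketched below; the connection parameters are placeholders, and the hstore registration mirrors the module check in the method.

```python
# Standalone equivalent of _create_connection (all parameters are placeholders).
import psycopg2
import psycopg2.extras

db = psycopg2.connect(database="mydb", user="me", password="secret",
                      host="localhost", port=5432)
psycopg2.extras.register_hstore(db)  # dict <-> hstore adaptation, as in the method
```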
gwpy/gwpy | gwpy/plot/tex.py | label_to_latex | def label_to_latex(text):
# pylint: disable=anomalous-backslash-in-string
r"""Convert text into a latex-passable representation.
This method just escapes the following reserved LaTeX characters:
% \ _ ~ &, whilst trying to avoid doubly-escaping already escaped
characters
Parameters
----------
text : `str`
input text to convert
Returns
-------
tex : `str`
a modified version of the input text with all unescaped reserved
latex characters escaped
Examples
--------
>>> from gwpy.plot.tex import label_to_latex
>>> label_to_latex('normal text')
'normal text'
>>> label_to_latex('$1 + 2 = 3$')
'$1 + 2 = 3$'
>>> label_to_latex('H1:ABC-DEF_GHI')
'H1:ABC-DEF\\_GHI'
>>> label_to_latex('H1:ABC-DEF\_GHI')
'H1:ABC-DEF\\_GHI'
"""
if text is None:
return ''
out = []
x = None
# loop over matches in reverse order and replace
for m in re_latex_control.finditer(text):
a, b = m.span()
char = m.group()[0]
out.append(text[x:a])
out.append(r'\%s' % char)
x = b
if not x: # no match
return text
# append prefix and return joined components
out.append(text[b:])
return ''.join(out) | python | def label_to_latex(text):
# pylint: disable=anomalous-backslash-in-string
r"""Convert text into a latex-passable representation.
This method just escapes the following reserved LaTeX characters:
% \ _ ~ &, whilst trying to avoid doubly-escaping already escaped
characters
Parameters
----------
text : `str`
input text to convert
Returns
-------
tex : `str`
a modified version of the input text with all unescaped reserved
latex characters escaped
Examples
--------
>>> from gwpy.plot.tex import label_to_latex
>>> label_to_latex('normal text')
'normal text'
>>> label_to_latex('$1 + 2 = 3$')
'$1 + 2 = 3$'
>>> label_to_latex('H1:ABC-DEF_GHI')
'H1:ABC-DEF\\_GHI'
>>> label_to_latex('H1:ABC-DEF\_GHI')
'H1:ABC-DEF\\_GHI'
"""
if text is None:
return ''
out = []
x = None
# loop over matches in reverse order and replace
for m in re_latex_control.finditer(text):
a, b = m.span()
char = m.group()[0]
out.append(text[x:a])
out.append(r'\%s' % char)
x = b
if not x: # no match
return text
# append prefix and return joined components
out.append(text[b:])
return ''.join(out) | [
"def",
"label_to_latex",
"(",
"text",
")",
":",
"# pylint: disable=anomalous-backslash-in-string",
"if",
"text",
"is",
"None",
":",
"return",
"''",
"out",
"=",
"[",
"]",
"x",
"=",
"None",
"# loop over matches in reverse order and replace",
"for",
"m",
"in",
"re_latex_control",
".",
"finditer",
"(",
"text",
")",
":",
"a",
",",
"b",
"=",
"m",
".",
"span",
"(",
")",
"char",
"=",
"m",
".",
"group",
"(",
")",
"[",
"0",
"]",
"out",
".",
"append",
"(",
"text",
"[",
"x",
":",
"a",
"]",
")",
"out",
".",
"append",
"(",
"r'\\%s'",
"%",
"char",
")",
"x",
"=",
"b",
"if",
"not",
"x",
":",
"# no match",
"return",
"text",
"# append prefix and return joined components",
"out",
".",
"append",
"(",
"text",
"[",
"b",
":",
"]",
")",
"return",
"''",
".",
"join",
"(",
"out",
")"
] | r"""Convert text into a latex-passable representation.
This method just escapes the following reserved LaTeX characters:
% \ _ ~ &, whilst trying to avoid doubly-escaping already escaped
characters
Parameters
----------
text : `str`
input text to convert
Returns
-------
tex : `str`
a modified version of the input text with all unescaped reserved
latex characters escaped
Examples
--------
>>> from gwpy.plot.tex import label_to_latex
>>> label_to_latex('normal text')
'normal text'
>>> label_to_latex('$1 + 2 = 3$')
'$1 + 2 = 3$'
>>> label_to_latex('H1:ABC-DEF_GHI')
'H1:ABC-DEF\\_GHI'
>>> label_to_latex('H1:ABC-DEF\_GHI')
'H1:ABC-DEF\\_GHI' | [
"r",
"Convert",
"text",
"into",
"a",
"latex",
"-",
"passable",
"representation",
"."
] | train | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/tex.py#L112-L158 | 0.000781 |
AguaClara/aguaclara | aguaclara/research/environmental_processes_analysis.py | alpha0_carbonate | def alpha0_carbonate(pH):
"""Calculate the fraction of total carbonates in carbonic acid form (H2CO3)
:param pH: pH of the system
:type pH: float
:return: Fraction of carbonates in carbonic acid form (H2CO3)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import alpha0_carbonate
>>> round(alpha0_carbonate(10), 7)
<Quantity(0.00015, 'dimensionless')>
"""
alpha0_carbonate = 1/(1+(K1_carbonate/invpH(pH)) *
(1+(K2_carbonate/invpH(pH))))
return alpha0_carbonate | python | def alpha0_carbonate(pH):
"""Calculate the fraction of total carbonates in carbonic acid form (H2CO3)
:param pH: pH of the system
:type pH: float
:return: Fraction of carbonates in carbonic acid form (H2CO3)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import alpha0_carbonate
>>> round(alpha0_carbonate(10), 7)
<Quantity(0.00015, 'dimensionless')>
"""
alpha0_carbonate = 1/(1+(K1_carbonate/invpH(pH)) *
(1+(K2_carbonate/invpH(pH))))
return alpha0_carbonate | [
"def",
"alpha0_carbonate",
"(",
"pH",
")",
":",
"alpha0_carbonate",
"=",
"1",
"/",
"(",
"1",
"+",
"(",
"K1_carbonate",
"/",
"invpH",
"(",
"pH",
")",
")",
"*",
"(",
"1",
"+",
"(",
"K2_carbonate",
"/",
"invpH",
"(",
"pH",
")",
")",
")",
")",
"return",
"alpha0_carbonate"
] | Calculate the fraction of total carbonates in carbonic acid form (H2CO3)
:param pH: pH of the system
:type pH: float
:return: Fraction of carbonates in carbonic acid form (H2CO3)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import alpha0_carbonate
>>> round(alpha0_carbonate(10), 7)
<Quantity(0.00015, 'dimensionless')> | [
"Calculate",
"the",
"fraction",
"of",
"total",
"carbonates",
"in",
"carbonic",
"acid",
"form",
"(",
"H2CO3",
")"
] | train | https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/research/environmental_processes_analysis.py#L38-L55 | 0.00346 |
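Written out as an equation, the fraction computed above is the standard carbonate speciation expression, with $[\mathrm{H^+}]$ standing for `invpH(pH)` (taken here to be the hydrogen ion concentration $10^{-\mathrm{pH}}$; that helper is not shown in this row):

$$
\alpha_0 = \frac{1}{1 + \dfrac{K_1}{[\mathrm{H^+}]}\left(1 + \dfrac{K_2}{[\mathrm{H^+}]}\right)}
$$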
thieman/dagobah | dagobah/core/core.py | Dagobah.set_backend | def set_backend(self, backend):
""" Manually set backend after construction. """
self.backend = backend
self.dagobah_id = self.backend.get_new_dagobah_id()
for job in self.jobs:
job.backend = backend
for task in job.tasks.values():
task.backend = backend
self.commit(cascade=True) | python | def set_backend(self, backend):
""" Manually set backend after construction. """
self.backend = backend
self.dagobah_id = self.backend.get_new_dagobah_id()
for job in self.jobs:
job.backend = backend
for task in job.tasks.values():
task.backend = backend
self.commit(cascade=True) | [
"def",
"set_backend",
"(",
"self",
",",
"backend",
")",
":",
"self",
".",
"backend",
"=",
"backend",
"self",
".",
"dagobah_id",
"=",
"self",
".",
"backend",
".",
"get_new_dagobah_id",
"(",
")",
"for",
"job",
"in",
"self",
".",
"jobs",
":",
"job",
".",
"backend",
"=",
"backend",
"for",
"task",
"in",
"job",
".",
"tasks",
".",
"values",
"(",
")",
":",
"task",
".",
"backend",
"=",
"backend",
"self",
".",
"commit",
"(",
"cascade",
"=",
"True",
")"
] | Manually set backend after construction. | [
"Manually",
"set",
"backend",
"after",
"construction",
"."
] | train | https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L56-L67 | 0.00551 |
mrstephenneal/mysql-toolkit | mysql/toolkit/components/operations/export.py | Export.dump_table | def dump_table(self, table, drop_statement=True):
"""Export a table structure and data to SQL file for backup or later import."""
create_statement = self.get_table_definition(table)
data = self.select_all(table)
statements = ['\n', sql_file_comment(''),
sql_file_comment('Table structure and data dump for {0}'.format(table)), sql_file_comment('')]
if drop_statement:
statements.append('\nDROP TABLE IF EXISTS {0};'.format(wrap(table)))
statements.append('{0};\n'.format(create_statement))
if len(data) > 0:
statements.append('{0};'.format(insert_statement(table, self.get_columns(table), data)))
return '\n'.join(statements) | python | def dump_table(self, table, drop_statement=True):
"""Export a table structure and data to SQL file for backup or later import."""
create_statement = self.get_table_definition(table)
data = self.select_all(table)
statements = ['\n', sql_file_comment(''),
sql_file_comment('Table structure and data dump for {0}'.format(table)), sql_file_comment('')]
if drop_statement:
statements.append('\nDROP TABLE IF EXISTS {0};'.format(wrap(table)))
statements.append('{0};\n'.format(create_statement))
if len(data) > 0:
statements.append('{0};'.format(insert_statement(table, self.get_columns(table), data)))
return '\n'.join(statements) | [
"def",
"dump_table",
"(",
"self",
",",
"table",
",",
"drop_statement",
"=",
"True",
")",
":",
"create_statement",
"=",
"self",
".",
"get_table_definition",
"(",
"table",
")",
"data",
"=",
"self",
".",
"select_all",
"(",
"table",
")",
"statements",
"=",
"[",
"'\\n'",
",",
"sql_file_comment",
"(",
"''",
")",
",",
"sql_file_comment",
"(",
"'Table structure and data dump for {0}'",
".",
"format",
"(",
"table",
")",
")",
",",
"sql_file_comment",
"(",
"''",
")",
"]",
"if",
"drop_statement",
":",
"statements",
".",
"append",
"(",
"'\\nDROP TABLE IF EXISTS {0};'",
".",
"format",
"(",
"wrap",
"(",
"table",
")",
")",
")",
"statements",
".",
"append",
"(",
"'{0};\\n'",
".",
"format",
"(",
"create_statement",
")",
")",
"if",
"len",
"(",
"data",
")",
">",
"0",
":",
"statements",
".",
"append",
"(",
"'{0};'",
".",
"format",
"(",
"insert_statement",
"(",
"table",
",",
"self",
".",
"get_columns",
"(",
"table",
")",
",",
"data",
")",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"statements",
")"
] | Export a table structure and data to SQL file for backup or later import. | [
"Export",
"a",
"table",
"structure",
"and",
"data",
"to",
"SQL",
"file",
"for",
"backup",
"or",
"later",
"import",
"."
] | train | https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/operations/export.py#L42-L53 | 0.008163 |
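A hedged usage sketch for the export method above. The name `sql` stands in for an already-connected mysql-toolkit object that exposes `dump_table` (how that object is built is not shown in this row), and the table and file names are invented.

```python
# Hypothetical usage: write a single-table SQL dump to disk.
# `sql` is assumed to be a connected mysql-toolkit instance providing dump_table().
dump = sql.dump_table("customers", drop_statement=True)
with open("customers_backup.sql", "w", encoding="utf-8") as fh:
    fh.write(dump)
```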
gbowerman/azurerm | examples/get_vmss.py | main | def main():
'''Main routine.'''
# process arguments
if len(sys.argv) < 3:
usage()
rgname = sys.argv[1]
vmss_name = sys.argv[2]
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
print('Printing VMSS details\n')
vmssget = azurerm.get_vmss(
access_token, subscription_id, rgname, vmss_name)
name = vmssget['name']
capacity = vmssget['sku']['capacity']
location = vmssget['location']
offer = \
vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']
sku = vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']
print(json.dumps(vmssget, sort_keys=False, indent=2, separators=(',', ': ')))
print('Name: ' + name + ', capacity: ' + str(capacity) + ', ' + location + ', ' + offer + ', '
+ sku)
print('Printing VMSS instance view')
instance_view = azurerm.get_vmss_instance_view(
access_token, subscription_id, rgname, vmss_name)
print(json.dumps(instance_view, sort_keys=False,
indent=2, separators=(',', ': ')))
'''
print('Listing VMSS VMs')
vmss_vms = azurerm.list_vmss_vms(access_token, subscription_id, rg, vmss)
print(json.dumps(vmss_vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vm in vmss_vms['value']:
instanceId = vm['instanceId']
vminstance_view = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rg, vmss,
instanceId)
print('VM ' + str(instanceId) + ' instance view')
print(json.dumps(vminstance_view, sort_keys=False, indent=2, separators=(',', ': ')))
''' | python | def main():
'''Main routine.'''
# process arguments
if len(sys.argv) < 3:
usage()
rgname = sys.argv[1]
vmss_name = sys.argv[2]
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
print('Printing VMSS details\n')
vmssget = azurerm.get_vmss(
access_token, subscription_id, rgname, vmss_name)
name = vmssget['name']
capacity = vmssget['sku']['capacity']
location = vmssget['location']
offer = \
vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']
sku = vmssget['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']
print(json.dumps(vmssget, sort_keys=False, indent=2, separators=(',', ': ')))
print('Name: ' + name + ', capacity: ' + str(capacity) + ', ' + location + ', ' + offer + ', '
+ sku)
print('Printing VMSS instance view')
instance_view = azurerm.get_vmss_instance_view(
access_token, subscription_id, rgname, vmss_name)
print(json.dumps(instance_view, sort_keys=False,
indent=2, separators=(',', ': ')))
'''
print('Listing VMSS VMs')
vmss_vms = azurerm.list_vmss_vms(access_token, subscription_id, rg, vmss)
print(json.dumps(vmss_vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vm in vmss_vms['value']:
instanceId = vm['instanceId']
vminstance_view = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rg, vmss,
instanceId)
print('VM ' + str(instanceId) + ' instance view')
print(json.dumps(vminstance_view, sort_keys=False, indent=2, separators=(',', ': ')))
''' | [
"def",
"main",
"(",
")",
":",
"# process arguments",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<",
"3",
":",
"usage",
"(",
")",
"rgname",
"=",
"sys",
".",
"argv",
"[",
"1",
"]",
"vmss_name",
"=",
"sys",
".",
"argv",
"[",
"2",
"]",
"# Load Azure app defaults",
"try",
":",
"with",
"open",
"(",
"'azurermconfig.json'",
")",
"as",
"config_file",
":",
"config_data",
"=",
"json",
".",
"load",
"(",
"config_file",
")",
"except",
"FileNotFoundError",
":",
"sys",
".",
"exit",
"(",
"'Error: Expecting azurermconfig.json in current folder'",
")",
"tenant_id",
"=",
"config_data",
"[",
"'tenantId'",
"]",
"app_id",
"=",
"config_data",
"[",
"'appId'",
"]",
"app_secret",
"=",
"config_data",
"[",
"'appSecret'",
"]",
"subscription_id",
"=",
"config_data",
"[",
"'subscriptionId'",
"]",
"access_token",
"=",
"azurerm",
".",
"get_access_token",
"(",
"tenant_id",
",",
"app_id",
",",
"app_secret",
")",
"print",
"(",
"'Printing VMSS details\\n'",
")",
"vmssget",
"=",
"azurerm",
".",
"get_vmss",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"vmss_name",
")",
"name",
"=",
"vmssget",
"[",
"'name'",
"]",
"capacity",
"=",
"vmssget",
"[",
"'sku'",
"]",
"[",
"'capacity'",
"]",
"location",
"=",
"vmssget",
"[",
"'location'",
"]",
"offer",
"=",
"vmssget",
"[",
"'properties'",
"]",
"[",
"'virtualMachineProfile'",
"]",
"[",
"'storageProfile'",
"]",
"[",
"'imageReference'",
"]",
"[",
"'offer'",
"]",
"sku",
"=",
"vmssget",
"[",
"'properties'",
"]",
"[",
"'virtualMachineProfile'",
"]",
"[",
"'storageProfile'",
"]",
"[",
"'imageReference'",
"]",
"[",
"'sku'",
"]",
"print",
"(",
"json",
".",
"dumps",
"(",
"vmssget",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"print",
"(",
"'Name: '",
"+",
"name",
"+",
"', capacity: '",
"+",
"str",
"(",
"capacity",
")",
"+",
"', '",
"+",
"location",
"+",
"', '",
"+",
"offer",
"+",
"', '",
"+",
"sku",
")",
"print",
"(",
"'Printing VMSS instance view'",
")",
"instance_view",
"=",
"azurerm",
".",
"get_vmss_instance_view",
"(",
"access_token",
",",
"subscription_id",
",",
"rgname",
",",
"vmss_name",
")",
"print",
"(",
"json",
".",
"dumps",
"(",
"instance_view",
",",
"sort_keys",
"=",
"False",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"'''\n print('Listing VMSS VMs')\n vmss_vms = azurerm.list_vmss_vms(access_token, subscription_id, rg, vmss)\n print(json.dumps(vmss_vms, sort_keys=False, indent=2, separators=(',', ': ')))\n for vm in vmss_vms['value']:\n instanceId = vm['instanceId']\n vminstance_view = azurerm.get_vmss_vm_instance_view(access_token, subscription_id, rg, vmss,\n instanceId)\n print('VM ' + str(instanceId) + ' instance view')\n print(json.dumps(vminstance_view, sort_keys=False, indent=2, separators=(',', ': ')))\n '''"
] | Main routine. | [
"Main",
"routine",
"."
] | train | https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/get_vmss.py#L13-L63 | 0.004174 |
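The script in the row above expects an `azurermconfig.json` file in the working directory and reads exactly four keys from it: `tenantId`, `appId`, `appSecret`, and `subscriptionId`. The sketch below writes a minimal file of that shape; every value is a placeholder.

```python
# Minimal azurermconfig.json, inferred only from the keys the script reads.
import json

config = {
    "tenantId": "00000000-0000-0000-0000-000000000000",       # placeholder
    "appId": "00000000-0000-0000-0000-000000000000",          # placeholder
    "appSecret": "replace-with-app-secret",                   # placeholder
    "subscriptionId": "00000000-0000-0000-0000-000000000000"  # placeholder
}

with open("azurermconfig.json", "w") as config_file:
    json.dump(config, config_file, indent=4)
```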
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/model.py | ShardState.reset_for_retry | def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.input_finished = False
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False | python | def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.input_finished = False
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False | [
"def",
"reset_for_retry",
"(",
"self",
")",
":",
"self",
".",
"retries",
"+=",
"1",
"self",
".",
"last_work_item",
"=",
"\"\"",
"self",
".",
"active",
"=",
"True",
"self",
".",
"result_status",
"=",
"None",
"self",
".",
"input_finished",
"=",
"False",
"self",
".",
"counters_map",
"=",
"CountersMap",
"(",
")",
"self",
".",
"slice_id",
"=",
"0",
"self",
".",
"slice_start_time",
"=",
"None",
"self",
".",
"slice_request_id",
"=",
"None",
"self",
".",
"slice_retries",
"=",
"0",
"self",
".",
"acquired_once",
"=",
"False"
] | Reset self for shard retry. | [
"Reset",
"self",
"for",
"shard",
"retry",
"."
] | train | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L973-L985 | 0.002604 |
Alignak-monitoring/alignak | alignak/external_command.py | ExternalCommandManager.change_custom_contact_var | def change_custom_contact_var(self, contact, varname, varvalue):
"""Change custom contact variable
Format of the line that triggers function call::
CHANGE_CUSTOM_CONTACT_VAR;<contact_name>;<varname>;<varvalue>
:param contact: contact to edit
:type contact: alignak.objects.contact.Contact
:param varname: variable name to change
:type varname: str
:param varvalue: variable new value
:type varvalue: str
:return: None
"""
if varname.upper() in contact.customs:
contact.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
contact.customs[varname.upper()] = varvalue
self.send_an_element(contact.get_update_status_brok()) | python | def change_custom_contact_var(self, contact, varname, varvalue):
"""Change custom contact variable
Format of the line that triggers function call::
CHANGE_CUSTOM_CONTACT_VAR;<contact_name>;<varname>;<varvalue>
:param contact: contact to edit
:type contact: alignak.objects.contact.Contact
:param varname: variable name to change
:type varname: str
:param varvalue: variable new value
:type varvalue: str
:return: None
"""
if varname.upper() in contact.customs:
contact.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
contact.customs[varname.upper()] = varvalue
self.send_an_element(contact.get_update_status_brok()) | [
"def",
"change_custom_contact_var",
"(",
"self",
",",
"contact",
",",
"varname",
",",
"varvalue",
")",
":",
"if",
"varname",
".",
"upper",
"(",
")",
"in",
"contact",
".",
"customs",
":",
"contact",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_CUSTOM_VARIABLE\"",
"]",
".",
"value",
"contact",
".",
"customs",
"[",
"varname",
".",
"upper",
"(",
")",
"]",
"=",
"varvalue",
"self",
".",
"send_an_element",
"(",
"contact",
".",
"get_update_status_brok",
"(",
")",
")"
] | Change custom contact variable
Format of the line that triggers function call::
CHANGE_CUSTOM_CONTACT_VAR;<contact_name>;<varname>;<varvalue>
:param contact: contact to edit
:type contact: alignak.objects.contact.Contact
:param varname: variable name to change
:type varname: str
:param varvalue: variable new value
:type varvalue: str
:return: None | [
"Change",
"custom",
"contact",
"variable",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1207-L1224 | 0.003896 |
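For reference, the trigger format quoted in the docstring above is what Alignak reads from its external command interface; a submitted line normally carries a bracketed timestamp prefix, as sketched below. The timestamp, contact name, variable name, and value are all placeholders (custom variable names are conventionally prefixed with an underscore).

```
[1570000000] CHANGE_CUSTOM_CONTACT_VAR;jdoe;_PHONE;+1-555-0100
```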
quantumlib/Cirq | cirq/protocols/apply_unitary.py | apply_unitary | def apply_unitary(unitary_value: Any,
args: ApplyUnitaryArgs,
default: TDefault = RaiseTypeErrorIfNotProvided
) -> Union[np.ndarray, TDefault]:
"""High performance left-multiplication of a unitary effect onto a tensor.
If `unitary_value` defines an `_apply_unitary_` method, that method will be
used to apply `unitary_value`'s unitary effect to the target tensor.
Otherwise, if `unitary_value` defines a `_unitary_` method, its unitary
matrix will be retrieved and applied using a generic method. Otherwise the
application fails, and either an exception is raised or the specified
default value is returned.
Args:
unitary_value: The value with a unitary effect to apply to the target.
args: A mutable `cirq.ApplyUnitaryArgs` object describing the target
tensor, available workspace, and axes to operate on. The attributes
of this object will be mutated as part of computing the result.
default: What should be returned if `unitary_value` doesn't have a
unitary effect. If not specified, a TypeError is raised instead of
returning a default value.
Returns:
If the receiving object is not able to apply its unitary effect,
the specified default value is returned (or a TypeError is raised). If
this occurs, then `target_tensor` should not have been mutated.
If the receiving object was able to work inline, directly
mutating target_tensor it will return target_tensor. The caller is
responsible for checking if the result is target_tensor.
If the receiving object wrote its output over available_buffer, the
result will be available_buffer. The caller is responsible for
checking if the result is available_buffer (and e.g. swapping
the buffer for the target tensor before the next call).
The receiving object may also write its output over a new buffer
that it created, in which case that new array is returned.
Raises:
TypeError: `unitary_value` doesn't have a unitary effect and `default`
wasn't specified.
"""
# Check if the specialized method is present.
func = getattr(unitary_value, '_apply_unitary_', None)
if func is not None:
result = func(args)
if result is not NotImplemented and result is not None:
return result
# Fallback to using the object's _unitary_ matrix.
matrix = unitary(unitary_value, None)
if matrix is not None:
# Special case for single-qubit operations.
if matrix.shape == (2, 2):
zero = args.subspace_index(0)
one = args.subspace_index(1)
return linalg.apply_matrix_to_slices(args.target_tensor,
matrix,
[zero, one],
out=args.available_buffer)
# Fallback to np.einsum for the general case.
return linalg.targeted_left_multiply(
matrix.astype(args.target_tensor.dtype).reshape(
(2,) * (2 * len(args.axes))),
args.target_tensor,
args.axes,
out=args.available_buffer)
# Don't know how to apply. Fallback to specified default behavior.
if default is not RaiseTypeErrorIfNotProvided:
return default
raise TypeError(
"object of type '{}' has no _apply_unitary_ or _unitary_ methods "
"(or they returned None or NotImplemented).".format(
type(unitary_value))) | python | def apply_unitary(unitary_value: Any,
args: ApplyUnitaryArgs,
default: TDefault = RaiseTypeErrorIfNotProvided
) -> Union[np.ndarray, TDefault]:
"""High performance left-multiplication of a unitary effect onto a tensor.
If `unitary_value` defines an `_apply_unitary_` method, that method will be
used to apply `unitary_value`'s unitary effect to the target tensor.
Otherwise, if `unitary_value` defines a `_unitary_` method, its unitary
matrix will be retrieved and applied using a generic method. Otherwise the
application fails, and either an exception is raised or the specified
default value is returned.
Args:
unitary_value: The value with a unitary effect to apply to the target.
args: A mutable `cirq.ApplyUnitaryArgs` object describing the target
tensor, available workspace, and axes to operate on. The attributes
of this object will be mutated as part of computing the result.
default: What should be returned if `unitary_value` doesn't have a
unitary effect. If not specified, a TypeError is raised instead of
returning a default value.
Returns:
If the receiving object is not able to apply its unitary effect,
the specified default value is returned (or a TypeError is raised). If
this occurs, then `target_tensor` should not have been mutated.
If the receiving object was able to work inline, directly
mutating target_tensor it will return target_tensor. The caller is
responsible for checking if the result is target_tensor.
If the receiving object wrote its output over available_buffer, the
result will be available_buffer. The caller is responsible for
checking if the result is available_buffer (and e.g. swapping
the buffer for the target tensor before the next call).
The receiving object may also write its output over a new buffer
that it created, in which case that new array is returned.
Raises:
TypeError: `unitary_value` doesn't have a unitary effect and `default`
wasn't specified.
"""
# Check if the specialized method is present.
func = getattr(unitary_value, '_apply_unitary_', None)
if func is not None:
result = func(args)
if result is not NotImplemented and result is not None:
return result
# Fallback to using the object's _unitary_ matrix.
matrix = unitary(unitary_value, None)
if matrix is not None:
# Special case for single-qubit operations.
if matrix.shape == (2, 2):
zero = args.subspace_index(0)
one = args.subspace_index(1)
return linalg.apply_matrix_to_slices(args.target_tensor,
matrix,
[zero, one],
out=args.available_buffer)
# Fallback to np.einsum for the general case.
return linalg.targeted_left_multiply(
matrix.astype(args.target_tensor.dtype).reshape(
(2,) * (2 * len(args.axes))),
args.target_tensor,
args.axes,
out=args.available_buffer)
# Don't know how to apply. Fallback to specified default behavior.
if default is not RaiseTypeErrorIfNotProvided:
return default
raise TypeError(
"object of type '{}' has no _apply_unitary_ or _unitary_ methods "
"(or they returned None or NotImplemented).".format(
type(unitary_value))) | [
"def",
"apply_unitary",
"(",
"unitary_value",
":",
"Any",
",",
"args",
":",
"ApplyUnitaryArgs",
",",
"default",
":",
"TDefault",
"=",
"RaiseTypeErrorIfNotProvided",
")",
"->",
"Union",
"[",
"np",
".",
"ndarray",
",",
"TDefault",
"]",
":",
"# Check if the specialized method is present.",
"func",
"=",
"getattr",
"(",
"unitary_value",
",",
"'_apply_unitary_'",
",",
"None",
")",
"if",
"func",
"is",
"not",
"None",
":",
"result",
"=",
"func",
"(",
"args",
")",
"if",
"result",
"is",
"not",
"NotImplemented",
"and",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"# Fallback to using the object's _unitary_ matrix.",
"matrix",
"=",
"unitary",
"(",
"unitary_value",
",",
"None",
")",
"if",
"matrix",
"is",
"not",
"None",
":",
"# Special case for single-qubit operations.",
"if",
"matrix",
".",
"shape",
"==",
"(",
"2",
",",
"2",
")",
":",
"zero",
"=",
"args",
".",
"subspace_index",
"(",
"0",
")",
"one",
"=",
"args",
".",
"subspace_index",
"(",
"1",
")",
"return",
"linalg",
".",
"apply_matrix_to_slices",
"(",
"args",
".",
"target_tensor",
",",
"matrix",
",",
"[",
"zero",
",",
"one",
"]",
",",
"out",
"=",
"args",
".",
"available_buffer",
")",
"# Fallback to np.einsum for the general case.",
"return",
"linalg",
".",
"targeted_left_multiply",
"(",
"matrix",
".",
"astype",
"(",
"args",
".",
"target_tensor",
".",
"dtype",
")",
".",
"reshape",
"(",
"(",
"2",
",",
")",
"*",
"(",
"2",
"*",
"len",
"(",
"args",
".",
"axes",
")",
")",
")",
",",
"args",
".",
"target_tensor",
",",
"args",
".",
"axes",
",",
"out",
"=",
"args",
".",
"available_buffer",
")",
"# Don't know how to apply. Fallback to specified default behavior.",
"if",
"default",
"is",
"not",
"RaiseTypeErrorIfNotProvided",
":",
"return",
"default",
"raise",
"TypeError",
"(",
"\"object of type '{}' has no _apply_unitary_ or _unitary_ methods \"",
"\"(or they returned None or NotImplemented).\"",
".",
"format",
"(",
"type",
"(",
"unitary_value",
")",
")",
")"
] | High performance left-multiplication of a unitary effect onto a tensor.
If `unitary_value` defines an `_apply_unitary_` method, that method will be
used to apply `unitary_value`'s unitary effect to the target tensor.
Otherwise, if `unitary_value` defines a `_unitary_` method, its unitary
matrix will be retrieved and applied using a generic method. Otherwise the
application fails, and either an exception is raised or the specified
default value is returned.
Args:
unitary_value: The value with a unitary effect to apply to the target.
args: A mutable `cirq.ApplyUnitaryArgs` object describing the target
tensor, available workspace, and axes to operate on. The attributes
of this object will be mutated as part of computing the result.
default: What should be returned if `unitary_value` doesn't have a
unitary effect. If not specified, a TypeError is raised instead of
returning a default value.
Returns:
If the receiving object is not able to apply its unitary effect,
the specified default value is returned (or a TypeError is raised). If
this occurs, then `target_tensor` should not have been mutated.
If the receiving object was able to work inline, directly
mutating target_tensor it will return target_tensor. The caller is
responsible for checking if the result is target_tensor.
If the receiving object wrote its output over available_buffer, the
result will be available_buffer. The caller is responsible for
checking if the result is available_buffer (and e.g. swapping
the buffer for the target tensor before the next call).
The receiving object may also write its output over a new buffer
that it created, in which case that new array is returned.
Raises:
TypeError: `unitary_value` doesn't have a unitary effect and `default`
wasn't specified. | [
"High",
"performance",
"left",
"-",
"multiplication",
"of",
"a",
"unitary",
"effect",
"onto",
"a",
"tensor",
"."
] | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/apply_unitary.py#L161-L238 | 0.000274 |
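A minimal usage sketch for the `apply_unitary` record above, assuming a cirq version where `cirq.apply_unitary`, `cirq.ApplyUnitaryArgs` and the `cirq.X` gate are exposed at the package top level; the state and axes are illustrative:

import numpy as np
import cirq

# Two-qubit state |00> stored as a (2, 2) tensor; axis k corresponds to qubit k.
state = np.zeros((2, 2), dtype=np.complex64)
state[0, 0] = 1

args = cirq.ApplyUnitaryArgs(
    target_tensor=state,
    available_buffer=np.empty_like(state),
    axes=(0,))                      # apply the gate to the first qubit only

result = cirq.apply_unitary(cirq.X, args)
# result may alias state, the workspace buffer, or a freshly allocated array,
# so always continue working with the returned tensor.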
cthorey/pdsimage | pdsimage/PDS_Extractor.py | WacMap._format_name_map | def _format_name_map(self, lonc, latc):
''' Return the name of the map in the good format '''
return '_'.join(['WAC', 'GLOBAL'] +
['E' + latc + lonc, "{0:0>3}".format(self.ppd) + 'P']) | python | def _format_name_map(self, lonc, latc):
''' Return the name of the map in the good format '''
return '_'.join(['WAC', 'GLOBAL'] +
['E' + latc + lonc, "{0:0>3}".format(self.ppd) + 'P']) | [
"def",
"_format_name_map",
"(",
"self",
",",
"lonc",
",",
"latc",
")",
":",
"return",
"'_'",
".",
"join",
"(",
"[",
"'WAC'",
",",
"'GLOBAL'",
"]",
"+",
"[",
"'E'",
"+",
"latc",
"+",
"lonc",
",",
"\"{0:0>3}\"",
".",
"format",
"(",
"self",
".",
"ppd",
")",
"+",
"'P'",
"]",
")"
] | Return the name of the map in the good format | [
"Return",
"the",
"name",
"of",
"the",
"map",
"in",
"the",
"good",
"format"
] | train | https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L730-L734 | 0.008889 |
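A quick worked example of the naming scheme implemented above, with illustrative pre-formatted coordinate strings latc='450', lonc='300' and ppd=128:

>>> '_'.join(['WAC', 'GLOBAL'] + ['E' + '450' + '300', '{0:0>3}'.format(128) + 'P'])
'WAC_GLOBAL_E450300_128P'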
Zsailer/kubeconf | kubeconf/kubeconf.py | KubeConf.print_users | def print_users(self, names=False):
"""Print users"""
users = self.get_users()
if names:
users = [user['name'] for user in users]
pprint.pprint(users) | python | def print_users(self, names=False):
"""Print users"""
users = self.get_users()
if names:
users = [user['name'] for user in users]
pprint.pprint(users) | [
"def",
"print_users",
"(",
"self",
",",
"names",
"=",
"False",
")",
":",
"users",
"=",
"self",
".",
"get_users",
"(",
")",
"if",
"names",
":",
"users",
"=",
"[",
"user",
"[",
"'name'",
"]",
"for",
"user",
"in",
"users",
"]",
"pprint",
".",
"pprint",
"(",
"users",
")"
] | Print users | [
"Print",
"users"
] | train | https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L228-L233 | 0.010309 |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py | PX4_update | def PX4_update(IMU, ATT):
'''implement full DCM using PX4 native SD log data'''
global px4_state
if px4_state is None:
px4_state = PX4_State(degrees(ATT.Roll), degrees(ATT.Pitch), degrees(ATT.Yaw), IMU._timestamp)
gyro = Vector3(IMU.GyroX, IMU.GyroY, IMU.GyroZ)
accel = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)
px4_state.update(gyro, accel, IMU._timestamp)
return px4_state | python | def PX4_update(IMU, ATT):
'''implement full DCM using PX4 native SD log data'''
global px4_state
if px4_state is None:
px4_state = PX4_State(degrees(ATT.Roll), degrees(ATT.Pitch), degrees(ATT.Yaw), IMU._timestamp)
gyro = Vector3(IMU.GyroX, IMU.GyroY, IMU.GyroZ)
accel = Vector3(IMU.AccX, IMU.AccY, IMU.AccZ)
px4_state.update(gyro, accel, IMU._timestamp)
return px4_state | [
"def",
"PX4_update",
"(",
"IMU",
",",
"ATT",
")",
":",
"global",
"px4_state",
"if",
"px4_state",
"is",
"None",
":",
"px4_state",
"=",
"PX4_State",
"(",
"degrees",
"(",
"ATT",
".",
"Roll",
")",
",",
"degrees",
"(",
"ATT",
".",
"Pitch",
")",
",",
"degrees",
"(",
"ATT",
".",
"Yaw",
")",
",",
"IMU",
".",
"_timestamp",
")",
"gyro",
"=",
"Vector3",
"(",
"IMU",
".",
"GyroX",
",",
"IMU",
".",
"GyroY",
",",
"IMU",
".",
"GyroZ",
")",
"accel",
"=",
"Vector3",
"(",
"IMU",
".",
"AccX",
",",
"IMU",
".",
"AccY",
",",
"IMU",
".",
"AccZ",
")",
"px4_state",
".",
"update",
"(",
"gyro",
",",
"accel",
",",
"IMU",
".",
"_timestamp",
")",
"return",
"px4_state"
] | implement full DCM using PX4 native SD log data | [
"implement",
"full",
"DCM",
"using",
"PX4",
"native",
"SD",
"log",
"data"
] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L853-L862 | 0.007353 |
hollenstein/maspy | maspy/auxiliary.py | returnSplineList | def returnSplineList(dependentVar, independentVar, subsetPercentage=0.4,
cycles=10, minKnotPoints=10, initialKnots=200,
splineOrder=2, terminalExpansion=0.1
):
""" #TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring
"""
expansions = ddict(list)
expansionArea = (independentVar[-1] - independentVar[0]) * terminalExpansion
#adds 100 data points at both ends of the dependent and independent array
for i in range(100):
expansions['indUp'].append(independentVar[-1] + expansionArea/100*i)
expansions['indDown'].append(independentVar[0] -
expansionArea/100*(100-i+1)
)
expansions['depUp'].append(dependentVar[-1])
expansions['depDown'].append(dependentVar[0])
dependentVar = numpy.array(expansions['depDown'] + list(dependentVar) +
expansions['depUp'], dtype=numpy.float64
)
independentVar = numpy.array(expansions['indDown'] + list(independentVar) +
expansions['indUp'], dtype=numpy.float64
)
splineList = list()
for cycle in range(cycles):
subset = sorted(random.sample(range(len(dependentVar)),
int(len(dependentVar) * subsetPercentage)
)
)
terminalExpansion
dependentSubset = dependentVar[subset]
independentSubset = independentVar[subset]
minIndVar = independentSubset[minKnotPoints]
maxIndVar = independentSubset[-minKnotPoints]
knots = [float(i) * (maxIndVar-minIndVar) / initialKnots + minIndVar
for i in range(1, initialKnots)
]
## remove knots with less then minKnotPoints data points ##
lastKnot = knots[0]
newKnotList = [lastKnot]
for knotPos in range(1,len(knots)):
nextKnot = knots[knotPos]
numHits = (len(independentSubset[(independentSubset >= lastKnot) &
(independentSubset <= nextKnot)])
)
if numHits >= minKnotPoints:
newKnotList.append(nextKnot)
lastKnot = nextKnot
knots = newKnotList
spline = LSQUnivariateSpline(independentSubset, dependentSubset, knots,
k=splineOrder)
splineList.append(spline)
return splineList | python | def returnSplineList(dependentVar, independentVar, subsetPercentage=0.4,
cycles=10, minKnotPoints=10, initialKnots=200,
splineOrder=2, terminalExpansion=0.1
):
""" #TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring
"""
expansions = ddict(list)
expansionArea = (independentVar[-1] - independentVar[0]) * terminalExpansion
#adds 100 data points at both ends of the dependent and independent array
for i in range(100):
expansions['indUp'].append(independentVar[-1] + expansionArea/100*i)
expansions['indDown'].append(independentVar[0] -
expansionArea/100*(100-i+1)
)
expansions['depUp'].append(dependentVar[-1])
expansions['depDown'].append(dependentVar[0])
dependentVar = numpy.array(expansions['depDown'] + list(dependentVar) +
expansions['depUp'], dtype=numpy.float64
)
independentVar = numpy.array(expansions['indDown'] + list(independentVar) +
expansions['indUp'], dtype=numpy.float64
)
splineList = list()
for cycle in range(cycles):
subset = sorted(random.sample(range(len(dependentVar)),
int(len(dependentVar) * subsetPercentage)
)
)
terminalExpansion
dependentSubset = dependentVar[subset]
independentSubset = independentVar[subset]
minIndVar = independentSubset[minKnotPoints]
maxIndVar = independentSubset[-minKnotPoints]
knots = [float(i) * (maxIndVar-minIndVar) / initialKnots + minIndVar
for i in range(1, initialKnots)
]
## remove knots with less then minKnotPoints data points ##
lastKnot = knots[0]
newKnotList = [lastKnot]
for knotPos in range(1,len(knots)):
nextKnot = knots[knotPos]
numHits = (len(independentSubset[(independentSubset >= lastKnot) &
(independentSubset <= nextKnot)])
)
if numHits >= minKnotPoints:
newKnotList.append(nextKnot)
lastKnot = nextKnot
knots = newKnotList
spline = LSQUnivariateSpline(independentSubset, dependentSubset, knots,
k=splineOrder)
splineList.append(spline)
return splineList | [
"def",
"returnSplineList",
"(",
"dependentVar",
",",
"independentVar",
",",
"subsetPercentage",
"=",
"0.4",
",",
"cycles",
"=",
"10",
",",
"minKnotPoints",
"=",
"10",
",",
"initialKnots",
"=",
"200",
",",
"splineOrder",
"=",
"2",
",",
"terminalExpansion",
"=",
"0.1",
")",
":",
"expansions",
"=",
"ddict",
"(",
"list",
")",
"expansionArea",
"=",
"(",
"independentVar",
"[",
"-",
"1",
"]",
"-",
"independentVar",
"[",
"0",
"]",
")",
"*",
"terminalExpansion",
"#adds 100 data points at both ends of the dependent and independent array",
"for",
"i",
"in",
"range",
"(",
"100",
")",
":",
"expansions",
"[",
"'indUp'",
"]",
".",
"append",
"(",
"independentVar",
"[",
"-",
"1",
"]",
"+",
"expansionArea",
"/",
"100",
"*",
"i",
")",
"expansions",
"[",
"'indDown'",
"]",
".",
"append",
"(",
"independentVar",
"[",
"0",
"]",
"-",
"expansionArea",
"/",
"100",
"*",
"(",
"100",
"-",
"i",
"+",
"1",
")",
")",
"expansions",
"[",
"'depUp'",
"]",
".",
"append",
"(",
"dependentVar",
"[",
"-",
"1",
"]",
")",
"expansions",
"[",
"'depDown'",
"]",
".",
"append",
"(",
"dependentVar",
"[",
"0",
"]",
")",
"dependentVar",
"=",
"numpy",
".",
"array",
"(",
"expansions",
"[",
"'depDown'",
"]",
"+",
"list",
"(",
"dependentVar",
")",
"+",
"expansions",
"[",
"'depUp'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"float64",
")",
"independentVar",
"=",
"numpy",
".",
"array",
"(",
"expansions",
"[",
"'indDown'",
"]",
"+",
"list",
"(",
"independentVar",
")",
"+",
"expansions",
"[",
"'indUp'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"float64",
")",
"splineList",
"=",
"list",
"(",
")",
"for",
"cycle",
"in",
"range",
"(",
"cycles",
")",
":",
"subset",
"=",
"sorted",
"(",
"random",
".",
"sample",
"(",
"range",
"(",
"len",
"(",
"dependentVar",
")",
")",
",",
"int",
"(",
"len",
"(",
"dependentVar",
")",
"*",
"subsetPercentage",
")",
")",
")",
"terminalExpansion",
"dependentSubset",
"=",
"dependentVar",
"[",
"subset",
"]",
"independentSubset",
"=",
"independentVar",
"[",
"subset",
"]",
"minIndVar",
"=",
"independentSubset",
"[",
"minKnotPoints",
"]",
"maxIndVar",
"=",
"independentSubset",
"[",
"-",
"minKnotPoints",
"]",
"knots",
"=",
"[",
"float",
"(",
"i",
")",
"*",
"(",
"maxIndVar",
"-",
"minIndVar",
")",
"/",
"initialKnots",
"+",
"minIndVar",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"initialKnots",
")",
"]",
"## remove knots with less then minKnotPoints data points ##",
"lastKnot",
"=",
"knots",
"[",
"0",
"]",
"newKnotList",
"=",
"[",
"lastKnot",
"]",
"for",
"knotPos",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"knots",
")",
")",
":",
"nextKnot",
"=",
"knots",
"[",
"knotPos",
"]",
"numHits",
"=",
"(",
"len",
"(",
"independentSubset",
"[",
"(",
"independentSubset",
">=",
"lastKnot",
")",
"&",
"(",
"independentSubset",
"<=",
"nextKnot",
")",
"]",
")",
")",
"if",
"numHits",
">=",
"minKnotPoints",
":",
"newKnotList",
".",
"append",
"(",
"nextKnot",
")",
"lastKnot",
"=",
"nextKnot",
"knots",
"=",
"newKnotList",
"spline",
"=",
"LSQUnivariateSpline",
"(",
"independentSubset",
",",
"dependentSubset",
",",
"knots",
",",
"k",
"=",
"splineOrder",
")",
"splineList",
".",
"append",
"(",
"spline",
")",
"return",
"splineList"
] | #TODO: docstring
Note: Expects sorted arrays.
:param dependentVar: #TODO: docstring
:param independentVar: #TODO: docstring
:param subsetPercentage: #TODO: docstring
:param cycles: #TODO: docstring
:param minKnotPoints: #TODO: docstring
:param initialKnots: #TODO: docstring
:param splineOrder: #TODO: docstring
:param terminalExpansion: expand subsets on both sides
:returns: #TODO: docstring | [
"#TODO",
":",
"docstring"
] | train | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/auxiliary.py#L733-L803 | 0.001688 |
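A hedged usage sketch for the spline-ensemble fit above; the import path follows the repository layout (maspy/auxiliary.py) and the toy calibration data are made up:

import numpy as np
from maspy import auxiliary as aux

x = np.linspace(0, 100, 2000)                       # sorted independent variable
y = np.sin(x / 10.) + np.random.normal(0, 0.1, x.size)   # noisy dependent variable

splines = aux.returnSplineList(y, x, subsetPercentage=0.4, cycles=10)
smoothed = np.mean([spline(x) for spline in splines], axis=0)   # ensemble average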
pallets/werkzeug | src/werkzeug/_reloader.py | _iter_module_files | def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, "__file__", None)
if filename:
if os.path.isdir(filename) and os.path.exists(
os.path.join(filename, "__init__.py")
):
filename = os.path.join(filename, "__init__.py")
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
yield filename | python | def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, "__file__", None)
if filename:
if os.path.isdir(filename) and os.path.exists(
os.path.join(filename, "__init__.py")
):
filename = os.path.join(filename, "__init__.py")
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
yield filename | [
"def",
"_iter_module_files",
"(",
")",
":",
"# The list call is necessary on Python 3 in case the module",
"# dictionary modifies during iteration.",
"for",
"module",
"in",
"list",
"(",
"sys",
".",
"modules",
".",
"values",
"(",
")",
")",
":",
"if",
"module",
"is",
"None",
":",
"continue",
"filename",
"=",
"getattr",
"(",
"module",
",",
"\"__file__\"",
",",
"None",
")",
"if",
"filename",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filename",
",",
"\"__init__.py\"",
")",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filename",
",",
"\"__init__.py\"",
")",
"old",
"=",
"None",
"while",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"old",
"=",
"filename",
"filename",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"filename",
"==",
"old",
":",
"break",
"else",
":",
"if",
"filename",
"[",
"-",
"4",
":",
"]",
"in",
"(",
"\".pyc\"",
",",
"\".pyo\"",
")",
":",
"filename",
"=",
"filename",
"[",
":",
"-",
"1",
"]",
"yield",
"filename"
] | This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package. | [
"This",
"iterates",
"over",
"all",
"relevant",
"Python",
"files",
".",
"It",
"goes",
"through",
"all",
"loaded",
"files",
"from",
"modules",
"all",
"files",
"in",
"folders",
"of",
"already",
"loaded",
"modules",
"as",
"well",
"as",
"all",
"files",
"reachable",
"through",
"a",
"package",
"."
] | train | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/_reloader.py#L14-L40 | 0.000931 |
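A hedged sketch of how a stat-based reloader typically consumes such an iterator: poll the modification time of every module file and report the first change. `_iter_module_files` itself is private, so this is an illustration of the pattern rather than Werkzeug's public API:

import os
import time

def wait_for_change(iter_files, interval=1.0):
    mtimes = {}
    while True:
        for filename in iter_files():
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:
                continue
            old = mtimes.setdefault(filename, mtime)
            if mtime > old:
                return filename          # caller triggers the restart
        time.sleep(interval)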
log2timeline/dfdatetime | dfdatetime/precisions.py | SecondsPrecisionHelper.CopyToDateTimeString | def CopyToDateTimeString(cls, time_elements_tuple, fraction_of_second):
"""Copies the time elements and fraction of second to a string.
Args:
time_elements_tuple (tuple[int, int, int, int, int, int]):
time elements, contains year, month, day of month, hours, minutes and
seconds.
fraction_of_second (decimal.Decimal): fraction of second, which must be a
value between 0.0 and 1.0.
Returns:
str: date and time value formatted as:
YYYY-MM-DD hh:mm:ss
Raises:
ValueError: if the fraction of second is out of bounds.
"""
if fraction_of_second < 0.0 or fraction_of_second >= 1.0:
raise ValueError('Fraction of second value: {0:f} out of bounds.'.format(
fraction_of_second))
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(
time_elements_tuple[0], time_elements_tuple[1], time_elements_tuple[2],
time_elements_tuple[3], time_elements_tuple[4], time_elements_tuple[5]) | python | def CopyToDateTimeString(cls, time_elements_tuple, fraction_of_second):
"""Copies the time elements and fraction of second to a string.
Args:
time_elements_tuple (tuple[int, int, int, int, int, int]):
time elements, contains year, month, day of month, hours, minutes and
seconds.
fraction_of_second (decimal.Decimal): fraction of second, which must be a
value between 0.0 and 1.0.
Returns:
str: date and time value formatted as:
YYYY-MM-DD hh:mm:ss
Raises:
ValueError: if the fraction of second is out of bounds.
"""
if fraction_of_second < 0.0 or fraction_of_second >= 1.0:
raise ValueError('Fraction of second value: {0:f} out of bounds.'.format(
fraction_of_second))
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(
time_elements_tuple[0], time_elements_tuple[1], time_elements_tuple[2],
time_elements_tuple[3], time_elements_tuple[4], time_elements_tuple[5]) | [
"def",
"CopyToDateTimeString",
"(",
"cls",
",",
"time_elements_tuple",
",",
"fraction_of_second",
")",
":",
"if",
"fraction_of_second",
"<",
"0.0",
"or",
"fraction_of_second",
">=",
"1.0",
":",
"raise",
"ValueError",
"(",
"'Fraction of second value: {0:f} out of bounds.'",
".",
"format",
"(",
"fraction_of_second",
")",
")",
"return",
"'{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'",
".",
"format",
"(",
"time_elements_tuple",
"[",
"0",
"]",
",",
"time_elements_tuple",
"[",
"1",
"]",
",",
"time_elements_tuple",
"[",
"2",
"]",
",",
"time_elements_tuple",
"[",
"3",
"]",
",",
"time_elements_tuple",
"[",
"4",
"]",
",",
"time_elements_tuple",
"[",
"5",
"]",
")"
] | Copies the time elements and fraction of second to a string.
Args:
time_elements_tuple (tuple[int, int, int, int, int, int]):
time elements, contains year, month, day of month, hours, minutes and
seconds.
fraction_of_second (decimal.Decimal): fraction of second, which must be a
value between 0.0 and 1.0.
Returns:
str: date and time value formatted as:
YYYY-MM-DD hh:mm:ss
Raises:
ValueError: if the fraction of second is out of bounds. | [
"Copies",
"the",
"time",
"elements",
"and",
"fraction",
"of",
"second",
"to",
"a",
"string",
"."
] | train | https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/precisions.py#L78-L101 | 0.001988 |
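A hedged usage sketch of the helper above; it assumes the class is importable from dfdatetime.precisions and, as the `cls` argument suggests, that the method behaves as a classmethod:

import decimal
from dfdatetime import precisions

time_string = precisions.SecondsPrecisionHelper.CopyToDateTimeString(
    (2024, 2, 29, 13, 5, 9), decimal.Decimal('0.25'))
# -> '2024-02-29 13:05:09'   (the fraction is discarded at seconds precision)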
PMEAL/OpenPNM | openpnm/topotools/topotools.py | tri_to_am | def tri_to_am(tri):
r"""
Given a Delaunay Triangulation object from Scipy's ``spatial`` module,
converts to a sparse adjacency matrix network representation.
Parameters
----------
tri : Delaunay Triangulation Object
This object is produced by ``scipy.spatial.Delaunay``
Returns
-------
A sparse adjacency matrix in COO format. The network is undirected
and unweighted, so the adjacency matrix is upper-triangular and all the
weights are set to 1.
"""
# Create an empty list-of-list matrix
lil = sprs.lil_matrix((tri.npoints, tri.npoints))
# Scan through Delaunay triangulation to retrieve pairs
indices, indptr = tri.vertex_neighbor_vertices
for k in range(tri.npoints):
lil.rows[k] = indptr[indices[k]:indices[k+1]]
# Convert to coo format
lil.data = lil.rows # Just a dummy array to make things work properly
coo = lil.tocoo()
# Set weights to 1's
coo.data = sp.ones_like(coo.data)
# Remove diagonal, and convert to csr remove duplicates
am = sp.sparse.triu(A=coo, k=1, format='csr')
# The convert back to COO and return
am = am.tocoo()
return am | python | def tri_to_am(tri):
r"""
Given a Delaunay Triangulation object from Scipy's ``spatial`` module,
converts to a sparse adjacency matrix network representation.
Parameters
----------
tri : Delaunay Triangulation Object
This object is produced by ``scipy.spatial.Delaunay``
Returns
-------
A sparse adjacency matrix in COO format. The network is undirected
and unweighted, so the adjacency matrix is upper-triangular and all the
weights are set to 1.
"""
# Create an empty list-of-list matrix
lil = sprs.lil_matrix((tri.npoints, tri.npoints))
# Scan through Delaunay triangulation to retrieve pairs
indices, indptr = tri.vertex_neighbor_vertices
for k in range(tri.npoints):
lil.rows[k] = indptr[indices[k]:indices[k+1]]
# Convert to coo format
lil.data = lil.rows # Just a dummy array to make things work properly
coo = lil.tocoo()
# Set weights to 1's
coo.data = sp.ones_like(coo.data)
# Remove diagonal, and convert to csr remove duplicates
am = sp.sparse.triu(A=coo, k=1, format='csr')
# The convert back to COO and return
am = am.tocoo()
return am | [
"def",
"tri_to_am",
"(",
"tri",
")",
":",
"# Create an empty list-of-list matrix",
"lil",
"=",
"sprs",
".",
"lil_matrix",
"(",
"(",
"tri",
".",
"npoints",
",",
"tri",
".",
"npoints",
")",
")",
"# Scan through Delaunay triangulation to retrieve pairs",
"indices",
",",
"indptr",
"=",
"tri",
".",
"vertex_neighbor_vertices",
"for",
"k",
"in",
"range",
"(",
"tri",
".",
"npoints",
")",
":",
"lil",
".",
"rows",
"[",
"k",
"]",
"=",
"indptr",
"[",
"indices",
"[",
"k",
"]",
":",
"indices",
"[",
"k",
"+",
"1",
"]",
"]",
"# Convert to coo format",
"lil",
".",
"data",
"=",
"lil",
".",
"rows",
"# Just a dummy array to make things work properly",
"coo",
"=",
"lil",
".",
"tocoo",
"(",
")",
"# Set weights to 1's",
"coo",
".",
"data",
"=",
"sp",
".",
"ones_like",
"(",
"coo",
".",
"data",
")",
"# Remove diagonal, and convert to csr remove duplicates",
"am",
"=",
"sp",
".",
"sparse",
".",
"triu",
"(",
"A",
"=",
"coo",
",",
"k",
"=",
"1",
",",
"format",
"=",
"'csr'",
")",
"# The convert back to COO and return",
"am",
"=",
"am",
".",
"tocoo",
"(",
")",
"return",
"am"
] | r"""
Given a Delaunay Triangulation object from Scipy's ``spatial`` module,
converts to a sparse adjacency matrix network representation.
Parameters
----------
tri : Delaunay Triangulation Object
This object is produced by ``scipy.spatial.Delaunay``
Returns
-------
A sparse adjacency matrix in COO format. The network is undirected
and unweighted, so the adjacency matrix is upper-triangular and all the
weights are set to 1. | [
"r",
"Given",
"a",
"Delaunay",
"Triangulation",
"object",
"from",
"Scipy",
"s",
"spatial",
"module",
"converts",
"to",
"a",
"sparse",
"adjacency",
"matrix",
"network",
"representation",
"."
] | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/topotools/topotools.py#L460-L492 | 0.00085 |
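A hedged usage sketch for the conversion above, assuming `tri_to_am` is re-exported from `openpnm.topotools`; the point cloud is random:

import numpy as np
from scipy.spatial import Delaunay
import openpnm as op

points = np.random.rand(50, 3)
tri = Delaunay(points)
am = op.topotools.tri_to_am(tri)          # sparse COO, upper-triangular, weights all 1
conns = np.vstack((am.row, am.col)).T     # pairwise connections between points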
atmos-python/atmos | atmos/plot.py | SkewTAxes.plot_dry_adiabats | def plot_dry_adiabats(self, p=None, theta=None, **kwargs):
r'''Plot dry adiabats.
Adds dry adiabats (lines of constant potential temperature) to the
plot. The default style of these lines is dashed red lines with an
alpha value of 0.5. These can be overridden using keyword arguments.
Parameters
----------
p : array_like, optional
1-dimensional array of pressure values to be included in the dry
adiabats. If not specified, they will be linearly distributed
across the current plotted pressure range.
theta : array_like, optional
1-dimensional array of potential temperature values for dry
adiabats. By default these will be generated based on the current
temperature limits.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
    See Also
--------
plot_moist_adiabats
`matplotlib.collections.LineCollection`
`metpy.calc.dry_lapse`
'''
for artist in self._dry_adiabats:
artist.remove()
self._dry_adiabats = []
# Determine set of starting temps if necessary
if theta is None:
xmin, xmax = self.get_xlim()
theta = np.arange(xmin, xmax + 201, 10)
# Get pressure levels based on ylims if necessary
if p is None:
p = np.linspace(*self.get_ylim())
# Assemble into data for plotting
t = calculate('T', theta=theta[:, None], p=p, p_units='hPa',
T_units='degC', theta_units='degC')
linedata = [np.vstack((ti, p)).T for ti in t]
# Add to plot
kwargs.setdefault('clip_on', True)
kwargs.setdefault('colors', '#A65300')
kwargs.setdefault('linestyles', '-')
kwargs.setdefault('alpha', 1)
kwargs.setdefault('linewidth', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._dry_adiabats.append(collection)
self.add_collection(collection)
theta = theta.flatten()
T_label = calculate('T', p=140, p_units='hPa', theta=theta,
T_units='degC', theta_units='degC')
for i in range(len(theta)):
text = self.text(
T_label[i], 140, '{:.0f}'.format(theta[i]),
fontsize=8, ha='left', va='center', rotation=-60,
color='#A65300', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'alpha': 0,
}, zorder=1.2)
text.set_clip_on(True)
self._dry_adiabats.append(text) | python | def plot_dry_adiabats(self, p=None, theta=None, **kwargs):
r'''Plot dry adiabats.
Adds dry adiabats (lines of constant potential temperature) to the
plot. The default style of these lines is dashed red lines with an
alpha value of 0.5. These can be overridden using keyword arguments.
Parameters
----------
p : array_like, optional
1-dimensional array of pressure values to be included in the dry
adiabats. If not specified, they will be linearly distributed
across the current plotted pressure range.
theta : array_like, optional
1-dimensional array of potential temperature values for dry
adiabats. By default these will be generated based on the current
temperature limits.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
    See Also
--------
plot_moist_adiabats
`matplotlib.collections.LineCollection`
`metpy.calc.dry_lapse`
'''
for artist in self._dry_adiabats:
artist.remove()
self._dry_adiabats = []
# Determine set of starting temps if necessary
if theta is None:
xmin, xmax = self.get_xlim()
theta = np.arange(xmin, xmax + 201, 10)
# Get pressure levels based on ylims if necessary
if p is None:
p = np.linspace(*self.get_ylim())
# Assemble into data for plotting
t = calculate('T', theta=theta[:, None], p=p, p_units='hPa',
T_units='degC', theta_units='degC')
linedata = [np.vstack((ti, p)).T for ti in t]
# Add to plot
kwargs.setdefault('clip_on', True)
kwargs.setdefault('colors', '#A65300')
kwargs.setdefault('linestyles', '-')
kwargs.setdefault('alpha', 1)
kwargs.setdefault('linewidth', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._dry_adiabats.append(collection)
self.add_collection(collection)
theta = theta.flatten()
T_label = calculate('T', p=140, p_units='hPa', theta=theta,
T_units='degC', theta_units='degC')
for i in range(len(theta)):
text = self.text(
T_label[i], 140, '{:.0f}'.format(theta[i]),
fontsize=8, ha='left', va='center', rotation=-60,
color='#A65300', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'alpha': 0,
}, zorder=1.2)
text.set_clip_on(True)
self._dry_adiabats.append(text) | [
"def",
"plot_dry_adiabats",
"(",
"self",
",",
"p",
"=",
"None",
",",
"theta",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"artist",
"in",
"self",
".",
"_dry_adiabats",
":",
"artist",
".",
"remove",
"(",
")",
"self",
".",
"_dry_adiabats",
"=",
"[",
"]",
"# Determine set of starting temps if necessary",
"if",
"theta",
"is",
"None",
":",
"xmin",
",",
"xmax",
"=",
"self",
".",
"get_xlim",
"(",
")",
"theta",
"=",
"np",
".",
"arange",
"(",
"xmin",
",",
"xmax",
"+",
"201",
",",
"10",
")",
"# Get pressure levels based on ylims if necessary",
"if",
"p",
"is",
"None",
":",
"p",
"=",
"np",
".",
"linspace",
"(",
"*",
"self",
".",
"get_ylim",
"(",
")",
")",
"# Assemble into data for plotting",
"t",
"=",
"calculate",
"(",
"'T'",
",",
"theta",
"=",
"theta",
"[",
":",
",",
"None",
"]",
",",
"p",
"=",
"p",
",",
"p_units",
"=",
"'hPa'",
",",
"T_units",
"=",
"'degC'",
",",
"theta_units",
"=",
"'degC'",
")",
"linedata",
"=",
"[",
"np",
".",
"vstack",
"(",
"(",
"ti",
",",
"p",
")",
")",
".",
"T",
"for",
"ti",
"in",
"t",
"]",
"# Add to plot",
"kwargs",
".",
"setdefault",
"(",
"'clip_on'",
",",
"True",
")",
"kwargs",
".",
"setdefault",
"(",
"'colors'",
",",
"'#A65300'",
")",
"kwargs",
".",
"setdefault",
"(",
"'linestyles'",
",",
"'-'",
")",
"kwargs",
".",
"setdefault",
"(",
"'alpha'",
",",
"1",
")",
"kwargs",
".",
"setdefault",
"(",
"'linewidth'",
",",
"0.5",
")",
"kwargs",
".",
"setdefault",
"(",
"'zorder'",
",",
"1.1",
")",
"collection",
"=",
"LineCollection",
"(",
"linedata",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_dry_adiabats",
".",
"append",
"(",
"collection",
")",
"self",
".",
"add_collection",
"(",
"collection",
")",
"theta",
"=",
"theta",
".",
"flatten",
"(",
")",
"T_label",
"=",
"calculate",
"(",
"'T'",
",",
"p",
"=",
"140",
",",
"p_units",
"=",
"'hPa'",
",",
"theta",
"=",
"theta",
",",
"T_units",
"=",
"'degC'",
",",
"theta_units",
"=",
"'degC'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"theta",
")",
")",
":",
"text",
"=",
"self",
".",
"text",
"(",
"T_label",
"[",
"i",
"]",
",",
"140",
",",
"'{:.0f}'",
".",
"format",
"(",
"theta",
"[",
"i",
"]",
")",
",",
"fontsize",
"=",
"8",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'center'",
",",
"rotation",
"=",
"-",
"60",
",",
"color",
"=",
"'#A65300'",
",",
"bbox",
"=",
"{",
"'facecolor'",
":",
"'w'",
",",
"'edgecolor'",
":",
"'w'",
",",
"'alpha'",
":",
"0",
",",
"}",
",",
"zorder",
"=",
"1.2",
")",
"text",
".",
"set_clip_on",
"(",
"True",
")",
"self",
".",
"_dry_adiabats",
".",
"append",
"(",
"text",
")"
] | r'''Plot dry adiabats.
Adds dry adiabats (lines of constant potential temperature) to the
plot. The default style of these lines is dashed red lines with an
alpha value of 0.5. These can be overridden using keyword arguments.
Parameters
----------
p : array_like, optional
1-dimensional array of pressure values to be included in the dry
adiabats. If not specified, they will be linearly distributed
across the current plotted pressure range.
theta : array_like, optional
1-dimensional array of potential temperature values for dry
adiabats. By default these will be generated based on the current
temperature limits.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
    See Also
--------
plot_moist_adiabats
`matplotlib.collections.LineCollection`
`metpy.calc.dry_lapse` | [
"r",
"Plot",
"dry",
"adiabats",
"."
] | train | https://github.com/atmos-python/atmos/blob/f4af8eaca23cce881bde979599d15d322fc1935e/atmos/plot.py#L323-L389 | 0.000736 |
datacamp/shellwhat | shellwhat/checks/check_funcs.py | has_output | def has_output(state,
text,
incorrect_msg="The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.",
fixed=False,
strip_ansi=True):
"""Check whether student output contains specific text.
Before you use ``has_output()``, have a look at ``has_expr_output()`` or ``has_expr_error()``;
they might be more fit for your use case.
Args:
state: State instance describing student and solution code. Can be omitted if used with ``Ex()``.
text : text that student output must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student output.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
strip_ansi: whether to remove ANSI escape codes from output
:Example:
Suppose the solution requires you to do: ::
echo 'this is a printout!'
The following SCT can be written: ::
Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out')
Submissions that would pass: ::
echo 'this is a print out'
test='this is a printout!' && echo $test
Submissions that would fail: ::
echo 'this is a wrong printout'
"""
stu_output = state.student_result
if strip_ansi: stu_output = _strip_ansi(stu_output)
# either simple text matching or regex test
res = text in stu_output if fixed else re.search(text, stu_output)
if not res:
_msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text, 'fixed': fixed })
state.do_test(_msg)
return state | python | def has_output(state,
text,
incorrect_msg="The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.",
fixed=False,
strip_ansi=True):
"""Check whether student output contains specific text.
Before you use ``has_output()``, have a look at ``has_expr_output()`` or ``has_expr_error()``;
they might be more fit for your use case.
Args:
state: State instance describing student and solution code. Can be omitted if used with ``Ex()``.
text : text that student output must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student output.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
strip_ansi: whether to remove ANSI escape codes from output
:Example:
Suppose the solution requires you to do: ::
echo 'this is a printout!'
The following SCT can be written: ::
Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out')
Submissions that would pass: ::
echo 'this is a print out'
test='this is a printout!' && echo $test
Submissions that would fail: ::
echo 'this is a wrong printout'
"""
stu_output = state.student_result
if strip_ansi: stu_output = _strip_ansi(stu_output)
# either simple text matching or regex test
res = text in stu_output if fixed else re.search(text, stu_output)
if not res:
_msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text, 'fixed': fixed })
state.do_test(_msg)
return state | [
"def",
"has_output",
"(",
"state",
",",
"text",
",",
"incorrect_msg",
"=",
"\"The checker expected to find {{'' if fixed else 'the pattern '}}`{{text}}` in the output of your command.\"",
",",
"fixed",
"=",
"False",
",",
"strip_ansi",
"=",
"True",
")",
":",
"stu_output",
"=",
"state",
".",
"student_result",
"if",
"strip_ansi",
":",
"stu_output",
"=",
"_strip_ansi",
"(",
"stu_output",
")",
"# either simple text matching or regex test",
"res",
"=",
"text",
"in",
"stu_output",
"if",
"fixed",
"else",
"re",
".",
"search",
"(",
"text",
",",
"stu_output",
")",
"if",
"not",
"res",
":",
"_msg",
"=",
"state",
".",
"build_message",
"(",
"incorrect_msg",
",",
"fmt_kwargs",
"=",
"{",
"'text'",
":",
"text",
",",
"'fixed'",
":",
"fixed",
"}",
")",
"state",
".",
"do_test",
"(",
"_msg",
")",
"return",
"state"
] | Check whether student output contains specific text.
Before you use ``has_output()``, have a look at ``has_expr_output()`` or ``has_expr_error()``;
they might be more fit for your use case.
Args:
state: State instance describing student and solution code. Can be omitted if used with ``Ex()``.
text : text that student output must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student output.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
strip_ansi: whether to remove ANSI escape codes from output
:Example:
Suppose the solution requires you to do: ::
echo 'this is a printout!'
The following SCT can be written: ::
Ex().has_output(r'this\\s+is\\s+a\\s+print\\s*out')
Submissions that would pass: ::
echo 'this is a print out'
test='this is a printout!' && echo $test
Submissions that would fail: ::
echo 'this is a wrong printout' | [
"Check",
"whether",
"student",
"output",
"contains",
"specific",
"text",
"."
] | train | https://github.com/datacamp/shellwhat/blob/ee2f875e3db0eb06d69cc946c8e9700e0edceea2/shellwhat/checks/check_funcs.py#L62-L111 | 0.006064 |
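One more hedged SCT sketch in the spirit of the docstring above, this time matching a literal string instead of a regex; `Ex()` comes from the same SCT framework used in the docstring examples:

Ex().has_output("Permission denied", fixed=True,
                incorrect_msg="Have another look at the file permissions.")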
sliem/barrett | example/plot.py | plot_oneD | def plot_oneD(dataset, vars, filename, bins=60):
""" Plot 1D marginalised posteriors for the 'vars' of interest."""
n = len(vars)
fig, axes = plt.subplots(nrows=n,
ncols=1,
sharex=False,
sharey=False)
for i, x in enumerate(vars):
ax = axes[i]
P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins)
P.plot(ax)
ax.set_xlabel(labels(x))
ax.set_yticklabels([])
fig.set_size_inches(4, 4*n)
fig.savefig(filename, dpi=200, bbox_inches='tight')
plt.close(fig) | python | def plot_oneD(dataset, vars, filename, bins=60):
""" Plot 1D marginalised posteriors for the 'vars' of interest."""
n = len(vars)
fig, axes = plt.subplots(nrows=n,
ncols=1,
sharex=False,
sharey=False)
for i, x in enumerate(vars):
ax = axes[i]
P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins)
P.plot(ax)
ax.set_xlabel(labels(x))
ax.set_yticklabels([])
fig.set_size_inches(4, 4*n)
fig.savefig(filename, dpi=200, bbox_inches='tight')
plt.close(fig) | [
"def",
"plot_oneD",
"(",
"dataset",
",",
"vars",
",",
"filename",
",",
"bins",
"=",
"60",
")",
":",
"n",
"=",
"len",
"(",
"vars",
")",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"n",
",",
"ncols",
"=",
"1",
",",
"sharex",
"=",
"False",
",",
"sharey",
"=",
"False",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"vars",
")",
":",
"ax",
"=",
"axes",
"[",
"i",
"]",
"P",
"=",
"posterior",
".",
"oneD",
"(",
"dataset",
"+",
"'.h5'",
",",
"x",
",",
"limits",
"=",
"limits",
"(",
"x",
")",
",",
"bins",
"=",
"bins",
")",
"P",
".",
"plot",
"(",
"ax",
")",
"ax",
".",
"set_xlabel",
"(",
"labels",
"(",
"x",
")",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"fig",
".",
"set_size_inches",
"(",
"4",
",",
"4",
"*",
"n",
")",
"fig",
".",
"savefig",
"(",
"filename",
",",
"dpi",
"=",
"200",
",",
"bbox_inches",
"=",
"'tight'",
")",
"plt",
".",
"close",
"(",
"fig",
")"
] | Plot 1D marginalised posteriors for the 'vars' of interest. | [
"Plot",
"1D",
"marginalised",
"posteriors",
"for",
"the",
"vars",
"of",
"interest",
"."
] | train | https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/example/plot.py#L113-L131 | 0.001613 |
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.get_all_operators | def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result | python | def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result | [
"def",
"get_all_operators",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"operators",
")",
">=",
"len",
"(",
"OperatorStatisticNames",
")",
":",
"return",
"self",
".",
"operators",
"result",
"=",
"yield",
"from",
"self",
".",
"load_all_operators",
"(",
")",
"return",
"result"
] | |coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found | [
"|coro|"
] | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1198-L1212 | 0.007561 |
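A hedged usage sketch: the method is a coroutine, so it has to be driven by an asyncio event loop. The authentication flow follows the pattern shown in the package README; the credentials and player name are placeholders:

import asyncio
import r6sapi as api

@asyncio.coroutine
def run():
    auth = api.Auth("email@example.com", "password")
    player = yield from auth.get_player("Some.Player", api.Platforms.UPLAY)
    operators = yield from player.get_all_operators()
    print(sorted(operators.keys()))

asyncio.get_event_loop().run_until_complete(run())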
citruz/beacontools | beacontools/scanner.py | Monitor.toggle_scan | def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command) | python | def toggle_scan(self, enable, filter_duplicates=False):
"""Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets"""
command = struct.pack(">BB", enable, filter_duplicates)
self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command) | [
"def",
"toggle_scan",
"(",
"self",
",",
"enable",
",",
"filter_duplicates",
"=",
"False",
")",
":",
"command",
"=",
"struct",
".",
"pack",
"(",
"\">BB\"",
",",
"enable",
",",
"filter_duplicates",
")",
"self",
".",
"bluez",
".",
"hci_send_cmd",
"(",
"self",
".",
"socket",
",",
"OGF_LE_CTL",
",",
"OCF_LE_SET_SCAN_ENABLE",
",",
"command",
")"
] | Enables or disables BLE scanning
Args:
enable: boolean value to enable (True) or disable (False) scanner
filter_duplicates: boolean value to enable/disable filter, that
omits duplicated packets | [
"Enables",
"or",
"disables",
"BLE",
"scanning"
] | train | https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L153-L161 | 0.006438 |
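A small worked example of the payload constructed above — two unsigned bytes, one for the enable flag and one for duplicate filtering:

import struct

command = struct.pack(">BB", True, False)    # enable scanning, keep duplicate packets
assert command == b"\x01\x00"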
mitsei/dlkit | dlkit/json_/repository/sessions.py | AssetRepositoryAssignmentSession.get_assignable_repository_ids | def get_assignable_repository_ids(self, repository_id):
"""Gets a list of repositories including and under the given repository node in which any asset can be assigned.
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
return: (osid.id.IdList) - list of assignable repository ``Ids``
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
# This will likely be overridden by an authorization adapter
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
repositories = lookup_session.get_repositories()
id_list = []
for repository in repositories:
id_list.append(repository.get_id())
return IdList(id_list) | python | def get_assignable_repository_ids(self, repository_id):
"""Gets a list of repositories including and under the given repository node in which any asset can be assigned.
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
return: (osid.id.IdList) - list of assignable repository ``Ids``
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
# This will likely be overridden by an authorization adapter
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
repositories = lookup_session.get_repositories()
id_list = []
for repository in repositories:
id_list.append(repository.get_id())
return IdList(id_list) | [
"def",
"get_assignable_repository_ids",
"(",
"self",
",",
"repository_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids",
"# This will likely be overridden by an authorization adapter",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'REPOSITORY'",
",",
"local",
"=",
"True",
")",
"lookup_session",
"=",
"mgr",
".",
"get_repository_lookup_session",
"(",
"proxy",
"=",
"self",
".",
"_proxy",
")",
"repositories",
"=",
"lookup_session",
".",
"get_repositories",
"(",
")",
"id_list",
"=",
"[",
"]",
"for",
"repository",
"in",
"repositories",
":",
"id_list",
".",
"append",
"(",
"repository",
".",
"get_id",
"(",
")",
")",
"return",
"IdList",
"(",
"id_list",
")"
] | Gets a list of repositories including and under the given repository node in which any asset can be assigned.
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
return: (osid.id.IdList) - list of assignable repository ``Ids``
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"a",
"list",
"of",
"repositories",
"including",
"and",
"under",
"the",
"given",
"repository",
"node",
"in",
"which",
"any",
"asset",
"can",
"be",
"assigned",
"."
] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L2503-L2523 | 0.002796 |
annayqho/TheCannon | TheCannon/normalization.py | _find_cont_fitfunc | def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
""" Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes
"""
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
if n_proc == 1:
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npixels)
y = flux[contmask]
x = pix[contmask]
yivar = ivar[contmask]
yivar[yivar == 0] = SMALL**2
if ffunc=="sinusoid":
p0 = np.ones(deg*2) # one for cos, one for sin
L = max(x)-min(x)
pcont_func = _partial_func(_sinusoid, L=L, y=flux)
popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
sigma=1./np.sqrt(yivar))
elif ffunc=="chebyshev":
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg)
for element in pix:
if ffunc=="sinusoid":
cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
elif ffunc=="chebyshev":
cont[jj,element] = fit(element)
else:
# start mp.Pool
pool = mp.Pool(processes=n_proc)
mp_results = []
for i in xrange(nstars):
mp_results.append(pool.apply_async(\
_find_cont_fitfunc,
(fluxes[i, :].reshape((1, -1)),
ivars[i, :].reshape((1, -1)),
contmask[:]),
{'deg':deg, 'ffunc':ffunc}))
# close mp.Pool
pool.close()
pool.join()
cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)])
return cont | python | def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
""" Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes
"""
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
if n_proc == 1:
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npixels)
y = flux[contmask]
x = pix[contmask]
yivar = ivar[contmask]
yivar[yivar == 0] = SMALL**2
if ffunc=="sinusoid":
p0 = np.ones(deg*2) # one for cos, one for sin
L = max(x)-min(x)
pcont_func = _partial_func(_sinusoid, L=L, y=flux)
popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
sigma=1./np.sqrt(yivar))
elif ffunc=="chebyshev":
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg)
for element in pix:
if ffunc=="sinusoid":
cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
elif ffunc=="chebyshev":
cont[jj,element] = fit(element)
else:
# start mp.Pool
pool = mp.Pool(processes=n_proc)
mp_results = []
for i in xrange(nstars):
mp_results.append(pool.apply_async(\
_find_cont_fitfunc,
(fluxes[i, :].reshape((1, -1)),
ivars[i, :].reshape((1, -1)),
contmask[:]),
{'deg':deg, 'ffunc':ffunc}))
# close mp.Pool
pool.close()
pool.join()
cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)])
return cont | [
"def",
"_find_cont_fitfunc",
"(",
"fluxes",
",",
"ivars",
",",
"contmask",
",",
"deg",
",",
"ffunc",
",",
"n_proc",
"=",
"1",
")",
":",
"nstars",
"=",
"fluxes",
".",
"shape",
"[",
"0",
"]",
"npixels",
"=",
"fluxes",
".",
"shape",
"[",
"1",
"]",
"cont",
"=",
"np",
".",
"zeros",
"(",
"fluxes",
".",
"shape",
")",
"if",
"n_proc",
"==",
"1",
":",
"for",
"jj",
"in",
"range",
"(",
"nstars",
")",
":",
"flux",
"=",
"fluxes",
"[",
"jj",
",",
":",
"]",
"ivar",
"=",
"ivars",
"[",
"jj",
",",
":",
"]",
"pix",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"npixels",
")",
"y",
"=",
"flux",
"[",
"contmask",
"]",
"x",
"=",
"pix",
"[",
"contmask",
"]",
"yivar",
"=",
"ivar",
"[",
"contmask",
"]",
"yivar",
"[",
"yivar",
"==",
"0",
"]",
"=",
"SMALL",
"**",
"2",
"if",
"ffunc",
"==",
"\"sinusoid\"",
":",
"p0",
"=",
"np",
".",
"ones",
"(",
"deg",
"*",
"2",
")",
"# one for cos, one for sin",
"L",
"=",
"max",
"(",
"x",
")",
"-",
"min",
"(",
"x",
")",
"pcont_func",
"=",
"_partial_func",
"(",
"_sinusoid",
",",
"L",
"=",
"L",
",",
"y",
"=",
"flux",
")",
"popt",
",",
"pcov",
"=",
"opt",
".",
"curve_fit",
"(",
"pcont_func",
",",
"x",
",",
"y",
",",
"p0",
"=",
"p0",
",",
"sigma",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"yivar",
")",
")",
"elif",
"ffunc",
"==",
"\"chebyshev\"",
":",
"fit",
"=",
"np",
".",
"polynomial",
".",
"chebyshev",
".",
"Chebyshev",
".",
"fit",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"w",
"=",
"yivar",
",",
"deg",
"=",
"deg",
")",
"for",
"element",
"in",
"pix",
":",
"if",
"ffunc",
"==",
"\"sinusoid\"",
":",
"cont",
"[",
"jj",
",",
"element",
"]",
"=",
"_sinusoid",
"(",
"element",
",",
"popt",
",",
"L",
"=",
"L",
",",
"y",
"=",
"flux",
")",
"elif",
"ffunc",
"==",
"\"chebyshev\"",
":",
"cont",
"[",
"jj",
",",
"element",
"]",
"=",
"fit",
"(",
"element",
")",
"else",
":",
"# start mp.Pool",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"processes",
"=",
"n_proc",
")",
"mp_results",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"nstars",
")",
":",
"mp_results",
".",
"append",
"(",
"pool",
".",
"apply_async",
"(",
"_find_cont_fitfunc",
",",
"(",
"fluxes",
"[",
"i",
",",
":",
"]",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"ivars",
"[",
"i",
",",
":",
"]",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"contmask",
"[",
":",
"]",
")",
",",
"{",
"'deg'",
":",
"deg",
",",
"'ffunc'",
":",
"ffunc",
"}",
")",
")",
"# close mp.Pool",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"cont",
"=",
"np",
".",
"array",
"(",
"[",
"mp_results",
"[",
"i",
"]",
".",
"get",
"(",
")",
".",
"flatten",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"nstars",
")",
"]",
")",
"return",
"cont"
] | Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes | [
"Fit",
"a",
"continuum",
"to",
"a",
"continuum",
"pixels",
"in",
"a",
"segment",
"of",
"spectra"
] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L150-L220 | 0.007746 |
jmgilman/Neolib | neolib/pyamf/util/__init__.py | get_properties | def get_properties(obj):
"""
Returns a list of properties for L{obj}
@since: 0.5
"""
if hasattr(obj, 'keys'):
return obj.keys()
elif hasattr(obj, '__dict__'):
return obj.__dict__.keys()
    return [] | python | Returns a list of properties for L{obj}
    @since: 0.5 | train | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/util/__init__.py#L61-L72 | 0.004149 |
senaite/senaite.core | bika/lims/adapters/referencewidgetvocabulary.py | DefaultReferenceWidgetVocabulary.search_term | def search_term(self):
"""Returns the search term
"""
search_term = _c(self.request.get("searchTerm", ""))
    return search_term.lower().strip() | python | Returns the search term | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/adapters/referencewidgetvocabulary.py#L59-L63 | 0.011561 |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/identity/identity_client.py | IdentityClient.delete_group | def delete_group(self, group_id):
"""DeleteGroup.
:param str group_id:
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
self._send(http_method='DELETE',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='5.0',
                   route_values=route_values) | python | DeleteGroup.
:param str group_id: | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/identity/identity_client.py#L73-L83 | 0.006881 |
cthorey/pdsimage | pdsimage/PDS_Extractor.py | WacMap._cas_4 | def _cas_4(self):
''' Longitude/Lagitude overlap (4 images) '''
lonc_left = self._format_lon(self.lonm)
lonc_right = self._format_lon(self.lonM)
latc_top = self._format_lat(self.latM)
latc_bot = self._format_lat(self.latm)
img_name_00 = self._format_name_map(lonc_left, latc_top)
img_00 = BinaryTable(img_name_00, self.path_pdsfiles)
X_00, Y_00, Z_00 = img_00.extract_grid(self.lonm,
float(
img_00.EASTERNMOST_LONGITUDE),
float(img_00.MINIMUM_LATITUDE),
self.latM)
img_name_01 = self._format_name_map(lonc_right, latc_top)
img_01 = BinaryTable(img_name_01, self.path_pdsfiles)
X_01, Y_01, Z_01 = img_01.extract_grid(float(img_01.WESTERNMOST_LONGITUDE),
self.lonM,
float(img_01.MINIMUM_LATITUDE),
self.latM)
img_name_10 = self._format_name_map(lonc_left, latc_bot)
img_10 = BinaryTable(img_name_10, self.path_pdsfiles)
X_10, Y_10, Z_10 = img_10.extract_grid(self.lonm,
float(
img_10.EASTERNMOST_LONGITUDE),
self.latm,
float(img_10.MAXIMUM_LATITUDE))
img_name_11 = self._format_name_map(lonc_right, latc_bot)
img_11 = BinaryTable(img_name_11, self.path_pdsfiles)
X_11, Y_11, Z_11 = img_11.extract_grid(float(img_11.WESTERNMOST_LONGITUDE),
self.lonM,
self.latm,
float(img_11.MAXIMUM_LATITUDE))
X_new_top = np.hstack((X_00, X_01))
X_new_bot = np.hstack((X_10, X_11))
X_new = np.vstack((X_new_top, X_new_bot))
Y_new_top = np.hstack((Y_00, Y_01))
Y_new_bot = np.hstack((Y_10, Y_11))
Y_new = np.vstack((Y_new_top, Y_new_bot))
Z_new_top = np.hstack((Z_00, Z_01))
Z_new_bot = np.hstack((Z_10, Z_11))
Z_new = np.vstack((Z_new_top, Z_new_bot))
        return X_new, Y_new, Z_new | python | Longitude/Lagitude overlap (4 images) | train | https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L807-L857 | 0.002449 |
tanghaibao/jcvi | jcvi/formats/bed.py | clr | def clr(args):
"""
%prog clr [bamfile|bedpefile] ref.fasta
Use mates from BEDPE to extract ranges where the ref is covered by mates.
This is useful in detection of chimeric contigs.
"""
p = OptionParser(clr.__doc__)
p.set_bedpe()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedpe, ref = args
if bedpe.endswith(".bam"):
bedpefile = bedpe.replace(".bam", ".bedpe")
if need_update(bedpe, bedpefile):
cmd = "bamToBed -bedpe -i {0}".format(bedpe)
sh(cmd, outfile=bedpefile)
bedpe = bedpefile
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
minlen=opts.minlen, maxlen=opts.maxlen)
rmdup = filtered + ".sorted.rmdup"
if need_update(filtered, rmdup):
rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)
converted = rmdup + ".converted"
if need_update(rmdup, converted):
fp = open(rmdup)
fw = open(converted, "w")
for row in fp:
r = BedpeLine(row)
print(r.bedline, file=fw)
fw.close()
merged = converted + ".merge.bed"
if need_update(converted, merged):
        mergeBed(converted) | python | %prog clr [bamfile|bedpefile] ref.fasta
Use mates from BEDPE to extract ranges where the ref is covered by mates.
    This is useful in detection of chimeric contigs. | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L605-L647 | 0.000769 |
krukas/Trionyx | trionyx/trionyx/views/accounts.py | UpdateUserAccountView.post | def post(self, request, *args, **kwargs):
"""Add user id to kwargs"""
kwargs['pk'] = request.user.id
self.kwargs['pk'] = request.user.id
        return super().post(request, *args, **kwargs) | python | Add user id to kwargs | train | https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/accounts.py#L44-L48 | 0.009346 |
rbuffat/pyepw | pyepw/epw.py | WeatherData.horizontal_infrared_radiation_intensity | def horizontal_infrared_radiation_intensity(self, value=9999.0):
"""Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `horizontal_infrared_radiation_intensity`'.format(value))
if value < 0.0:
raise ValueError(
'value need to be greater or equal 0.0 '
'for field `horizontal_infrared_radiation_intensity`')
        self._horizontal_infrared_radiation_intensity = value | python | Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
            ValueError: if `value` is not a valid value | train | https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L6175-L6202 | 0.003571 |
watson-developer-cloud/python-sdk | ibm_watson/natural_language_understanding_v1.py | Author._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
        return _dict | python | Return a json dictionary representing this model. | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L633-L638 | 0.00905 |
datajoint/datajoint-python | datajoint/expression.py | QueryExpression.preview | def preview(self, limit=None, width=None):
"""
returns a preview of the contents of the query.
"""
heading = self.heading
rel = self.proj(*heading.non_blobs)
if limit is None:
limit = config['display.limit']
if width is None:
width = config['display.width']
tuples = rel.fetch(limit=limit+1, format="array")
has_more = len(tuples) > limit
tuples = tuples[:limit]
columns = heading.names
widths = {f: min(max([len(f)] +
[len(str(e)) for e in tuples[f]] if f in tuples.dtype.names else [len('=BLOB=')]) + 4, width) for f in columns}
templates = {f: '%%-%d.%ds' % (widths[f], widths[f]) for f in columns}
return (
' '.join([templates[f] % ('*' + f if f in rel.primary_key else f) for f in columns]) + '\n' +
' '.join(['+' + '-' * (widths[column] - 2) + '+' for column in columns]) + '\n' +
'\n'.join(' '.join(templates[f] % (tup[f] if f in tup.dtype.names else '=BLOB=')
for f in columns) for tup in tuples) +
('\n ...\n' if has_more else '\n') +
            (' (Total: %d)\n' % len(rel) if config['display.show_tuple_count'] else '')) | python | returns a preview of the contents of the query. | train | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L382-L405 | 0.007223 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler._compile_batch_fluents | def _compile_batch_fluents(self,
fluents: List[Tuple[str, TensorFluent]],
batch_size: int) -> Sequence[tf.Tensor]:
'''Compiles `fluents` into tensors with given `batch_size`.
Returns:
Sequence[tf.Tensor]: A tuple of tensors with first dimension
corresponding to the batch size.
'''
batch_fluents = []
with self.graph.as_default():
for name, fluent in fluents:
name_scope = utils.identifier(name)
with tf.name_scope(name_scope):
t = tf.stack([fluent.tensor] * batch_size)
batch_fluents.append(t)
        return tuple(batch_fluents) | python | Compiles `fluents` into tensors with given `batch_size`.
Returns:
Sequence[tf.Tensor]: A tuple of tensors with first dimension
            corresponding to the batch size. | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L571-L587 | 0.004292 |
tornadoweb/tornado | tornado/web.py | RequestHandler.compute_etag | def compute_etag(self) -> Optional[str]:
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
        return '"%s"' % hasher.hexdigest() | python | Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
        or may return None to disable tornado's default etag support. | train | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L1591-L1602 | 0.00431 |
tensorflow/cleverhans | examples/nips17_adversarial_competition/eval_infra/code/master.py | main | def main(args):
"""Main function which runs master."""
if args.blacklisted_submissions:
logging.warning('BLACKLISTED SUBMISSIONS: %s',
args.blacklisted_submissions)
if args.limited_dataset:
logging.info('Using limited dataset: 3 batches * 10 images')
max_dataset_num_images = 30
batch_size = 10
else:
logging.info('Using full dataset. Batch size: %d', DEFAULT_BATCH_SIZE)
max_dataset_num_images = None
batch_size = DEFAULT_BATCH_SIZE
random.seed()
print('\nRound: {0}\n'.format(args.round_name))
eval_master = EvaluationMaster(
storage_client=eval_lib.CompetitionStorageClient(
args.project_id, args.storage_bucket),
datastore_client=eval_lib.CompetitionDatastoreClient(
args.project_id, args.round_name),
round_name=args.round_name,
dataset_name=args.dataset_name,
blacklisted_submissions=args.blacklisted_submissions,
results_dir=args.results_dir,
num_defense_shards=args.num_defense_shards,
verbose=args.verbose,
batch_size=batch_size,
max_dataset_num_images=max_dataset_num_images)
if args.command == 'attack':
eval_master.prepare_attacks()
elif args.command == 'defense':
eval_master.prepare_defenses()
elif args.command == 'cleanup_defenses':
eval_master.cleanup_defenses()
elif args.command == 'results':
eval_master.compute_results()
elif args.command == 'status':
eval_master.show_status()
elif args.command == 'cleanup_datastore':
eval_master.cleanup_datastore()
elif args.command == 'cleanup_failed_attacks':
eval_master.cleanup_failed_attacks()
elif args.command == 'cleanup_attacks_with_zero_images':
eval_master.cleanup_attacks_with_zero_images()
else:
print('Invalid command: ', args.command)
print('')
    print(USAGE) | python | Main function which runs master. | train | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/master.py#L688-L735 | 0.009269 |
CityOfZion/neo-python | neo/Implementations/Notifications/LevelDB/NotificationDB.py | NotificationDB.start | def start(self):
"""
Handle EventHub events for SmartContract decorators
"""
self._events_to_write = []
self._new_contracts_to_write = []
@events.on(SmartContractEvent.CONTRACT_CREATED)
@events.on(SmartContractEvent.CONTRACT_MIGRATED)
def call_on_success_event(sc_event: SmartContractEvent):
self.on_smart_contract_created(sc_event)
@events.on(SmartContractEvent.RUNTIME_NOTIFY)
def call_on_event(sc_event: NotifyEvent):
self.on_smart_contract_event(sc_event)
        Blockchain.Default().PersistCompleted.on_change += self.on_persist_completed | python | Handle EventHub events for SmartContract decorators | train | https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Implementations/Notifications/LevelDB/NotificationDB.py#L78-L94 | 0.004608 |
apache/incubator-mxnet | python/mxnet/executor_manager.py | _check_arguments | def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
        aux_set.add(name) | python | Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
        The network configuration. | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L68-L96 | 0.003546 |
aiortc/aioice | aioice/ice.py | Connection._find_pair | def _find_pair(self, protocol, remote_candidate):
"""
Find a candidate pair in the check list.
"""
for pair in self._check_list:
if (pair.protocol == protocol and pair.remote_candidate == remote_candidate):
return pair
        return None | python | Find a candidate pair in the check list. | train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L663-L670 | 0.010067 |
aiogram/aiogram | aiogram/bot/bot.py | Bot.set_chat_sticker_set | async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],
sticker_set_name: base.String) -> base.Boolean:
"""
Use this method to set a new group sticker set for a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Use the field can_set_sticker_set optionally returned in getChat requests to check
if the bot can use this method.
Source: https://core.telegram.org/bots/api#setchatstickerset
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param sticker_set_name: Name of the sticker set to be set as the group sticker set
:type sticker_set_name: :obj:`base.String`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)
        return result | python | Use this method to set a new group sticker set for a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Use the field can_set_sticker_set optionally returned in getChat requests to check
if the bot can use this method.
Source: https://core.telegram.org/bots/api#setchatstickerset
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param sticker_set_name: Name of the sticker set to be set as the group sticker set
:type sticker_set_name: :obj:`base.String`
:return: Returns True on success
        :rtype: :obj:`base.Boolean` | train | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L1325-L1346 | 0.008029 |
jaijuneja/PyTLDR | pytldr/summarize/textrank.py | TextRankSummarizer.summarize | def summarize(self, text, length=5, weighting='frequency', norm=None):
"""
Implements the TextRank summarization algorithm, which follows closely to the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
# Compute the word frequency matrix. If norm is set to 'l1' or 'l2' then words are normalized
# by the length of their associated sentences (such that each vector of sentence terms sums to 1).
word_matrix = self._compute_matrix(sentences, weighting=weighting, norm=norm)
# Build the similarity graph by calculating the number of overlapping words between all
# combinations of sentences.
similarity_matrix = (word_matrix * word_matrix.T)
similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
scores = networkx.pagerank(similarity_graph)
ranked_sentences = sorted(
((score, ndx) for ndx, score in scores.items()), reverse=True
)
top_sentences = [ranked_sentences[i][1] for i in range(length)]
top_sentences.sort()
        return [unprocessed_sentences[i] for i in top_sentences] | python | Implements the TextRank summarization algorithm, which follows closely to the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
        :return: list of sentences for the summary | train | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/textrank.py#L9-L49 | 0.00597 |
cocaine/cocaine-tools | cocaine/tools/dispatch.py | unicorn_edit | def unicorn_edit(path, **kwargs):
"""Edit Unicorn node interactively.
"""
ctx = Context(**kwargs)
ctx.timeout = None
ctx.execute_action('unicorn:edit', **{
'unicorn': ctx.repo.create_secure_service('unicorn'),
'path': path,
    }) | python | Edit Unicorn node interactively. | train | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1472-L1480 | 0.003759 |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/feed/feed_client.py | FeedClient.set_global_permissions | def set_global_permissions(self, global_permissions):
"""SetGlobalPermissions.
[Preview API] Set service-wide permissions that govern feed creation.
:param [GlobalPermission] global_permissions: New permissions for the organization.
:rtype: [GlobalPermission]
"""
content = self._serialize.body(global_permissions, '[GlobalPermission]')
response = self._send(http_method='PATCH',
location_id='a74419ef-b477-43df-8758-3cd1cd5f56c6',
version='5.0-preview.1',
content=content)
        return self._deserialize('[GlobalPermission]', self._unwrap_collection(response)) | python | SetGlobalPermissions.
[Preview API] Set service-wide permissions that govern feed creation.
:param [GlobalPermission] global_permissions: New permissions for the organization.
        :rtype: [GlobalPermission] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/feed/feed_client.py#L174-L185 | 0.008463 |
Chilipp/psy-simple | psy_simple/widgets/texts.py | FontPropertiesWidget.refresh | def refresh(self):
"""Refresh the widgets from the current font"""
font = self.current_font
# refresh btn_bold
self.btn_bold.blockSignals(True)
self.btn_bold.setChecked(font.weight() > 50)
self.btn_bold.blockSignals(False)
# refresh btn_italic
self.btn_italic.blockSignals(True)
self.btn_italic.setChecked(font.italic())
self.btn_italic.blockSignals(False)
# refresh font size
self.spin_box.blockSignals(True)
self.spin_box.setValue(font.pointSize())
        self.spin_box.blockSignals(False) | python | Refresh the widgets from the current font | train | https://github.com/Chilipp/psy-simple/blob/7d916406a6d3c3c27c0b7102f98fef07a4da0a61/psy_simple/widgets/texts.py#L420-L437 | 0.003339 |
SiLab-Bonn/pyBAR | pybar/analysis/analysis.py | histogram_cluster_table | def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000):
'''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter)
'''
with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
with tb.open_file(output_file, mode="w") as out_file_h5:
histogram = PyDataHistograming()
histogram.create_occupancy_hist(True)
scan_parameters = None
event_number_indices = None
scan_parameter_indices = None
try:
meta_data = in_file_h5.root.meta_data[:]
scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data)
if scan_parameters is not None:
scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4')
event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64)
histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number']))
histogram.add_scan_parameter(scan_parameter_indices)
logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters))
else:
logging.info("No scan parameter data provided")
histogram.set_no_scan_parameter()
except tb.exceptions.NoSuchNodeError:
logging.info("No meta data provided, use no scan parameter")
histogram.set_no_scan_parameter()
logging.info('Histogram cluster seeds...')
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
progress_bar.start()
total_cluster = 0 # to check analysis
for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size):
total_cluster += len(cluster)
histogram.add_cluster_seed_hits(cluster, len(cluster))
progress_bar.update(index)
progress_bar.finish()
filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data
occupancy_array = histogram.get_occupancy().T
occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table)
occupancy_array_table[:] = occupancy_array
if total_cluster != np.sum(occupancy_array):
logging.warning('Analysis shows inconsistent number of cluster used. Check needed!')
in_file_h5.root.meta_data.copy(out_file_h5.root) | python | def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000):
'''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter)
'''
with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
with tb.open_file(output_file, mode="w") as out_file_h5:
histogram = PyDataHistograming()
histogram.create_occupancy_hist(True)
scan_parameters = None
event_number_indices = None
scan_parameter_indices = None
try:
meta_data = in_file_h5.root.meta_data[:]
scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data)
if scan_parameters is not None:
scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4')
event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64)
histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number']))
histogram.add_scan_parameter(scan_parameter_indices)
logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters))
else:
logging.info("No scan parameter data provided")
histogram.set_no_scan_parameter()
except tb.exceptions.NoSuchNodeError:
logging.info("No meta data provided, use no scan parameter")
histogram.set_no_scan_parameter()
logging.info('Histogram cluster seeds...')
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
progress_bar.start()
total_cluster = 0 # to check analysis
for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size):
total_cluster += len(cluster)
histogram.add_cluster_seed_hits(cluster, len(cluster))
progress_bar.update(index)
progress_bar.finish()
filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data
occupancy_array = histogram.get_occupancy().T
occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table)
occupancy_array_table[:] = occupancy_array
if total_cluster != np.sum(occupancy_array):
logging.warning('Analysis shows inconsistent number of cluster used. Check needed!')
in_file_h5.root.meta_data.copy(out_file_h5.root) | [
"def",
"histogram_cluster_table",
"(",
"analyzed_data_file",
",",
"output_file",
",",
"chunk_size",
"=",
"10000000",
")",
":",
"with",
"tb",
".",
"open_file",
"(",
"analyzed_data_file",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"in_file_h5",
":",
"with",
"tb",
".",
"open_file",
"(",
"output_file",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"out_file_h5",
":",
"histogram",
"=",
"PyDataHistograming",
"(",
")",
"histogram",
".",
"create_occupancy_hist",
"(",
"True",
")",
"scan_parameters",
"=",
"None",
"event_number_indices",
"=",
"None",
"scan_parameter_indices",
"=",
"None",
"try",
":",
"meta_data",
"=",
"in_file_h5",
".",
"root",
".",
"meta_data",
"[",
":",
"]",
"scan_parameters",
"=",
"analysis_utils",
".",
"get_unique_scan_parameter_combinations",
"(",
"meta_data",
")",
"if",
"scan_parameters",
"is",
"not",
"None",
":",
"scan_parameter_indices",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"scan_parameters",
")",
")",
",",
"dtype",
"=",
"'u4'",
")",
"event_number_indices",
"=",
"np",
".",
"ascontiguousarray",
"(",
"scan_parameters",
"[",
"'event_number'",
"]",
")",
".",
"astype",
"(",
"np",
".",
"uint64",
")",
"histogram",
".",
"add_meta_event_index",
"(",
"event_number_indices",
",",
"array_length",
"=",
"len",
"(",
"scan_parameters",
"[",
"'event_number'",
"]",
")",
")",
"histogram",
".",
"add_scan_parameter",
"(",
"scan_parameter_indices",
")",
"logging",
".",
"info",
"(",
"\"Add %d different scan parameter(s) for analysis\"",
",",
"len",
"(",
"scan_parameters",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"No scan parameter data provided\"",
")",
"histogram",
".",
"set_no_scan_parameter",
"(",
")",
"except",
"tb",
".",
"exceptions",
".",
"NoSuchNodeError",
":",
"logging",
".",
"info",
"(",
"\"No meta data provided, use no scan parameter\"",
")",
"histogram",
".",
"set_no_scan_parameter",
"(",
")",
"logging",
".",
"info",
"(",
"'Histogram cluster seeds...'",
")",
"progress_bar",
"=",
"progressbar",
".",
"ProgressBar",
"(",
"widgets",
"=",
"[",
"''",
",",
"progressbar",
".",
"Percentage",
"(",
")",
",",
"' '",
",",
"progressbar",
".",
"Bar",
"(",
"marker",
"=",
"'*'",
",",
"left",
"=",
"'|'",
",",
"right",
"=",
"'|'",
")",
",",
"' '",
",",
"progressbar",
".",
"AdaptiveETA",
"(",
")",
"]",
",",
"maxval",
"=",
"in_file_h5",
".",
"root",
".",
"Cluster",
".",
"shape",
"[",
"0",
"]",
",",
"term_width",
"=",
"80",
")",
"progress_bar",
".",
"start",
"(",
")",
"total_cluster",
"=",
"0",
"# to check analysis",
"for",
"cluster",
",",
"index",
"in",
"analysis_utils",
".",
"data_aligned_at_events",
"(",
"in_file_h5",
".",
"root",
".",
"Cluster",
",",
"chunk_size",
"=",
"chunk_size",
")",
":",
"total_cluster",
"+=",
"len",
"(",
"cluster",
")",
"histogram",
".",
"add_cluster_seed_hits",
"(",
"cluster",
",",
"len",
"(",
"cluster",
")",
")",
"progress_bar",
".",
"update",
"(",
"index",
")",
"progress_bar",
".",
"finish",
"(",
")",
"filter_table",
"=",
"tb",
".",
"Filters",
"(",
"complib",
"=",
"'blosc'",
",",
"complevel",
"=",
"5",
",",
"fletcher32",
"=",
"False",
")",
"# compression of the written data",
"occupancy_array",
"=",
"histogram",
".",
"get_occupancy",
"(",
")",
".",
"T",
"occupancy_array_table",
"=",
"out_file_h5",
".",
"create_carray",
"(",
"out_file_h5",
".",
"root",
",",
"name",
"=",
"'HistOcc'",
",",
"title",
"=",
"'Occupancy Histogram'",
",",
"atom",
"=",
"tb",
".",
"Atom",
".",
"from_dtype",
"(",
"occupancy_array",
".",
"dtype",
")",
",",
"shape",
"=",
"occupancy_array",
".",
"shape",
",",
"filters",
"=",
"filter_table",
")",
"occupancy_array_table",
"[",
":",
"]",
"=",
"occupancy_array",
"if",
"total_cluster",
"!=",
"np",
".",
"sum",
"(",
"occupancy_array",
")",
":",
"logging",
".",
"warning",
"(",
"'Analysis shows inconsistent number of cluster used. Check needed!'",
")",
"in_file_h5",
".",
"root",
".",
"meta_data",
".",
"copy",
"(",
"out_file_h5",
".",
"root",
")"
] | Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter) | [
"Reads",
"in",
"the",
"cluster",
"info",
"table",
"in",
"chunks",
"and",
"histograms",
"the",
"seed",
"pixels",
"into",
"one",
"occupancy",
"array",
".",
"The",
"3rd",
"dimension",
"of",
"the",
"occupancy",
"array",
"is",
"the",
"number",
"of",
"different",
"scan",
"parameters",
"used"
] | train | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L428-L482 | 0.004416 |
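A hypothetical call of the function above; the file names are placeholders and it assumes pyBAR (with its compiled histogramming module) is installed and the input HDF5 file already contains a Cluster table and meta_data node:

from pybar.analysis.analysis import histogram_cluster_table

histogram_cluster_table(analyzed_data_file='module_analyzed.h5',
                        output_file='module_cluster_occupancy.h5',
                        chunk_size=5000000)
# the output file then holds a 'HistOcc' carray with shape (col, row, n_scan_parameters)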
tensorflow/tensor2tensor | tensor2tensor/layers/common_layers.py | shift_right | def shift_right(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
return shifted_targets | python | def shift_right(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
return shifted_targets | [
"def",
"shift_right",
"(",
"x",
",",
"pad_value",
"=",
"None",
")",
":",
"if",
"pad_value",
"is",
"None",
":",
"shifted_targets",
"=",
"tf",
".",
"pad",
"(",
"x",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
"[",
":",
",",
":",
"-",
"1",
",",
":",
",",
":",
"]",
"else",
":",
"shifted_targets",
"=",
"tf",
".",
"concat",
"(",
"[",
"pad_value",
",",
"x",
"]",
",",
"axis",
"=",
"1",
")",
"[",
":",
",",
":",
"-",
"1",
",",
":",
",",
":",
"]",
"return",
"shifted_targets"
] | Shift the second dimension of x right by one. | [
"Shift",
"the",
"second",
"dimension",
"of",
"x",
"right",
"by",
"one",
"."
] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L390-L396 | 0.016892 |
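The default branch of shift_right reduces to a pad-and-slice along axis 1. A small self-contained sketch of the same operation on a tiny (batch, length, height, channels) tensor, without importing tensor2tensor:

import tensorflow as tf

x = tf.reshape(tf.range(2 * 3, dtype=tf.float32), [2, 3, 1, 1])
shifted = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
# position 0 is now zeros and every original step moved one slot to the right:
# shifted[:, t + 1] == x[:, t] for t in range(2)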
veripress/veripress | veripress/model/storages.py | FileStorage.fix_page_relative_url | def fix_page_relative_url(rel_url):
"""
Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not)
"""
rel_url = rel_url.lstrip('/') # trim all heading '/'
endswith_slash = rel_url.endswith('/')
rel_url = rel_url.rstrip('/') + (
'/' if endswith_slash else '') # preserve only one trailing '/'
if not rel_url or rel_url == '/':
return None, False
file_path = os.path.join(current_app.instance_path, 'pages',
rel_url.replace('/', os.path.sep))
if rel_url.endswith('/'):
index_html_file_path = os.path.join(file_path, 'index.html')
if os.path.isfile(index_html_file_path):
# index.html exists
return index_html_file_path, True
return rel_url, False
elif os.path.isfile(file_path):
ext = os.path.splitext(file_path)[1][1:]
if get_standard_format_name(ext) is not None:
# is source of custom page
if current_app.config['PAGE_SOURCE_ACCESSIBLE']:
return file_path, True
else:
# is other direct files
return file_path, True
elif os.path.isdir(file_path):
return rel_url + '/', False
sp = rel_url.rsplit('/', 1)
m = re.match(r'(.+)\.html?', sp[-1])
if m:
sp[-1] = m.group(1) + '.html'
else:
sp[-1] += '.html'
return '/'.join(sp), False | python | def fix_page_relative_url(rel_url):
"""
Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not)
"""
rel_url = rel_url.lstrip('/') # trim all heading '/'
endswith_slash = rel_url.endswith('/')
rel_url = rel_url.rstrip('/') + (
'/' if endswith_slash else '') # preserve only one trailing '/'
if not rel_url or rel_url == '/':
return None, False
file_path = os.path.join(current_app.instance_path, 'pages',
rel_url.replace('/', os.path.sep))
if rel_url.endswith('/'):
index_html_file_path = os.path.join(file_path, 'index.html')
if os.path.isfile(index_html_file_path):
# index.html exists
return index_html_file_path, True
return rel_url, False
elif os.path.isfile(file_path):
ext = os.path.splitext(file_path)[1][1:]
if get_standard_format_name(ext) is not None:
# is source of custom page
if current_app.config['PAGE_SOURCE_ACCESSIBLE']:
return file_path, True
else:
# is other direct files
return file_path, True
elif os.path.isdir(file_path):
return rel_url + '/', False
sp = rel_url.rsplit('/', 1)
m = re.match(r'(.+)\.html?', sp[-1])
if m:
sp[-1] = m.group(1) + '.html'
else:
sp[-1] += '.html'
return '/'.join(sp), False | [
"def",
"fix_page_relative_url",
"(",
"rel_url",
")",
":",
"rel_url",
"=",
"rel_url",
".",
"lstrip",
"(",
"'/'",
")",
"# trim all heading '/'",
"endswith_slash",
"=",
"rel_url",
".",
"endswith",
"(",
"'/'",
")",
"rel_url",
"=",
"rel_url",
".",
"rstrip",
"(",
"'/'",
")",
"+",
"(",
"'/'",
"if",
"endswith_slash",
"else",
"''",
")",
"# preserve only one trailing '/'",
"if",
"not",
"rel_url",
"or",
"rel_url",
"==",
"'/'",
":",
"return",
"None",
",",
"False",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"instance_path",
",",
"'pages'",
",",
"rel_url",
".",
"replace",
"(",
"'/'",
",",
"os",
".",
"path",
".",
"sep",
")",
")",
"if",
"rel_url",
".",
"endswith",
"(",
"'/'",
")",
":",
"index_html_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"file_path",
",",
"'index.html'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"index_html_file_path",
")",
":",
"# index.html exists",
"return",
"index_html_file_path",
",",
"True",
"return",
"rel_url",
",",
"False",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_path",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"if",
"get_standard_format_name",
"(",
"ext",
")",
"is",
"not",
"None",
":",
"# is source of custom page",
"if",
"current_app",
".",
"config",
"[",
"'PAGE_SOURCE_ACCESSIBLE'",
"]",
":",
"return",
"file_path",
",",
"True",
"else",
":",
"# is other direct files",
"return",
"file_path",
",",
"True",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"file_path",
")",
":",
"return",
"rel_url",
"+",
"'/'",
",",
"False",
"sp",
"=",
"rel_url",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"m",
"=",
"re",
".",
"match",
"(",
"r'(.+)\\.html?'",
",",
"sp",
"[",
"-",
"1",
"]",
")",
"if",
"m",
":",
"sp",
"[",
"-",
"1",
"]",
"=",
"m",
".",
"group",
"(",
"1",
")",
"+",
"'.html'",
"else",
":",
"sp",
"[",
"-",
"1",
"]",
"+=",
"'.html'",
"return",
"'/'",
".",
"join",
"(",
"sp",
")",
",",
"False"
] | Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not) | [
"Fix",
"page",
"relative",
"url",
"to",
"a",
"standard",
"uniform",
"format",
"."
] | train | https://github.com/veripress/veripress/blob/9e3df3a10eb1db32da596bf52118fe6acbe4b14a/veripress/model/storages.py#L250-L299 | 0.001068 |
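A full call of fix_page_relative_url needs a VeriPress app context, because it probes the instance's pages directory on disk. The pure suffix-normalisation tail of the logic can, however, be sketched on its own; the helper name below is illustrative:

import re

def normalise_suffix(rel_url):
    # mirrors the final rsplit / regex step of fix_page_relative_url
    sp = rel_url.rsplit('/', 1)
    m = re.match(r'(.+)\.html?', sp[-1])
    sp[-1] = (m.group(1) + '.html') if m else (sp[-1] + '.html')
    return '/'.join(sp)

assert normalise_suffix('docs/intro') == 'docs/intro.html'
assert normalise_suffix('docs/intro.htm') == 'docs/intro.html'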
JarryShaw/PyPCAPKit | src/protocols/link/l2tp.py | L2TP.read_l2tp | def read_l2tp(self, length):
"""Read Layer Two Tunnelling Protocol.
Structure of L2TP header [RFC 2661]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tunnel ID | Session ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Ns (opt) | Nr (opt) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Offset Size (opt) | Offset pad... (opt)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 l2tp.flags Flags and Version Info
0 0 l2tp.flags.type Type (0/1)
0 1 l2tp.flags.len Length
0 2 - Reserved (must be zero)
0 4 l2tp.flags.seq Sequence
0 5 - Reserved (must be zero)
0 6 l2tp.flags.offset Offset
0 7 l2tp.flags.prio Priority
1 8 - Reserved (must be zero)
1 12 l2tp.ver Version (2)
2 16 l2tp.length Length (optional by len)
4 32 l2tp.tunnelid Tunnel ID
6 48 l2tp.sessionid Session ID
8 64 l2tp.ns Sequence Number (optional by seq)
10 80 l2tp.nr Next Sequence Number (optional by seq)
12 96 l2tp.offset Offset Size (optional by offset)
"""
if length is None:
length = len(self)
_flag = self._read_binary(1)
_vers = self._read_fileng(1).hex()[1]
_hlen = self._read_unpack(2) if int(_flag[1]) else None
_tnnl = self._read_unpack(2)
_sssn = self._read_unpack(2)
_nseq = self._read_unpack(2) if int(_flag[4]) else None
_nrec = self._read_unpack(2) if int(_flag[4]) else None
_size = self._read_unpack(2) if int(_flag[6]) else 0
l2tp = dict(
flags=dict(
type='Control' if int(_flag[0]) else 'Data',
len=True if int(_flag[1]) else False,
seq=True if int(_flag[4]) else False,
offset=True if int(_flag[6]) else False,
prio=True if int(_flag[7]) else False,
),
ver=int(_vers, base=16),
length=_hlen,
tunnelid=_tnnl,
sessionid=_sssn,
ns=_nseq,
nr=_nrec,
offset=8*_size or None,
)
hdr_len = _hlen or (6 + 2*(int(_flag[1]) + 2*int(_flag[4]) + int(_flag[6])))
l2tp['hdr_len'] = hdr_len + _size * 8
# if _size:
# l2tp['padding'] = self._read_fileng(_size * 8)
length -= l2tp['hdr_len']
l2tp['packet'] = self._read_packet(header=l2tp['hdr_len'], payload=length)
return self._decode_next_layer(l2tp, length) | python | def read_l2tp(self, length):
"""Read Layer Two Tunnelling Protocol.
Structure of L2TP header [RFC 2661]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tunnel ID | Session ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Ns (opt) | Nr (opt) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Offset Size (opt) | Offset pad... (opt)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 l2tp.flags Flags and Version Info
0 0 l2tp.flags.type Type (0/1)
0 1 l2tp.flags.len Length
0 2 - Reserved (must be zero)
0 4 l2tp.flags.seq Sequence
0 5 - Reserved (must be zero)
0 6 l2tp.flags.offset Offset
0 7 l2tp.flags.prio Priority
1 8 - Reserved (must be zero)
1 12 l2tp.ver Version (2)
2 16 l2tp.length Length (optional by len)
4 32 l2tp.tunnelid Tunnel ID
6 48 l2tp.sessionid Session ID
8 64 l2tp.ns Sequence Number (optional by seq)
10 80 l2tp.nr Next Sequence Number (optional by seq)
12 96 l2tp.offset Offset Size (optional by offset)
"""
if length is None:
length = len(self)
_flag = self._read_binary(1)
_vers = self._read_fileng(1).hex()[1]
_hlen = self._read_unpack(2) if int(_flag[1]) else None
_tnnl = self._read_unpack(2)
_sssn = self._read_unpack(2)
_nseq = self._read_unpack(2) if int(_flag[4]) else None
_nrec = self._read_unpack(2) if int(_flag[4]) else None
_size = self._read_unpack(2) if int(_flag[6]) else 0
l2tp = dict(
flags=dict(
type='Control' if int(_flag[0]) else 'Data',
len=True if int(_flag[1]) else False,
seq=True if int(_flag[4]) else False,
offset=True if int(_flag[6]) else False,
prio=True if int(_flag[7]) else False,
),
ver=int(_vers, base=16),
length=_hlen,
tunnelid=_tnnl,
sessionid=_sssn,
ns=_nseq,
nr=_nrec,
offset=8*_size or None,
)
hdr_len = _hlen or (6 + 2*(int(_flag[1]) + 2*int(_flag[4]) + int(_flag[6])))
l2tp['hdr_len'] = hdr_len + _size * 8
# if _size:
# l2tp['padding'] = self._read_fileng(_size * 8)
length -= l2tp['hdr_len']
l2tp['packet'] = self._read_packet(header=l2tp['hdr_len'], payload=length)
return self._decode_next_layer(l2tp, length) | [
"def",
"read_l2tp",
"(",
"self",
",",
"length",
")",
":",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"len",
"(",
"self",
")",
"_flag",
"=",
"self",
".",
"_read_binary",
"(",
"1",
")",
"_vers",
"=",
"self",
".",
"_read_fileng",
"(",
"1",
")",
".",
"hex",
"(",
")",
"[",
"1",
"]",
"_hlen",
"=",
"self",
".",
"_read_unpack",
"(",
"2",
")",
"if",
"int",
"(",
"_flag",
"[",
"1",
"]",
")",
"else",
"None",
"_tnnl",
"=",
"self",
".",
"_read_unpack",
"(",
"2",
")",
"_sssn",
"=",
"self",
".",
"_read_unpack",
"(",
"2",
")",
"_nseq",
"=",
"self",
".",
"_read_unpack",
"(",
"2",
")",
"if",
"int",
"(",
"_flag",
"[",
"4",
"]",
")",
"else",
"None",
"_nrec",
"=",
"self",
".",
"_read_unpack",
"(",
"2",
")",
"if",
"int",
"(",
"_flag",
"[",
"4",
"]",
")",
"else",
"None",
"_size",
"=",
"self",
".",
"_read_unpack",
"(",
"2",
")",
"if",
"int",
"(",
"_flag",
"[",
"6",
"]",
")",
"else",
"0",
"l2tp",
"=",
"dict",
"(",
"flags",
"=",
"dict",
"(",
"type",
"=",
"'Control'",
"if",
"int",
"(",
"_flag",
"[",
"0",
"]",
")",
"else",
"'Data'",
",",
"len",
"=",
"True",
"if",
"int",
"(",
"_flag",
"[",
"1",
"]",
")",
"else",
"False",
",",
"seq",
"=",
"True",
"if",
"int",
"(",
"_flag",
"[",
"4",
"]",
")",
"else",
"False",
",",
"offset",
"=",
"True",
"if",
"int",
"(",
"_flag",
"[",
"6",
"]",
")",
"else",
"False",
",",
"prio",
"=",
"True",
"if",
"int",
"(",
"_flag",
"[",
"7",
"]",
")",
"else",
"False",
",",
")",
",",
"ver",
"=",
"int",
"(",
"_vers",
",",
"base",
"=",
"16",
")",
",",
"length",
"=",
"_hlen",
",",
"tunnelid",
"=",
"_tnnl",
",",
"sessionid",
"=",
"_sssn",
",",
"ns",
"=",
"_nseq",
",",
"nr",
"=",
"_nrec",
",",
"offset",
"=",
"8",
"*",
"_size",
"or",
"None",
",",
")",
"hdr_len",
"=",
"_hlen",
"or",
"(",
"6",
"+",
"2",
"*",
"(",
"int",
"(",
"_flag",
"[",
"1",
"]",
")",
"+",
"2",
"*",
"int",
"(",
"_flag",
"[",
"4",
"]",
")",
"+",
"int",
"(",
"_flag",
"[",
"6",
"]",
")",
")",
")",
"l2tp",
"[",
"'hdr_len'",
"]",
"=",
"hdr_len",
"+",
"_size",
"*",
"8",
"# if _size:",
"# l2tp['padding'] = self._read_fileng(_size * 8)",
"length",
"-=",
"l2tp",
"[",
"'hdr_len'",
"]",
"l2tp",
"[",
"'packet'",
"]",
"=",
"self",
".",
"_read_packet",
"(",
"header",
"=",
"l2tp",
"[",
"'hdr_len'",
"]",
",",
"payload",
"=",
"length",
")",
"return",
"self",
".",
"_decode_next_layer",
"(",
"l2tp",
",",
"length",
")"
] | Read Layer Two Tunnelling Protocol.
Structure of L2TP header [RFC 2661]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Tunnel ID | Session ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Ns (opt) | Nr (opt) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Offset Size (opt) | Offset pad... (opt)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 l2tp.flags Flags and Version Info
0 0 l2tp.flags.type Type (0/1)
0 1 l2tp.flags.len Length
0 2 - Reserved (must be zero)
0 4 l2tp.flags.seq Sequence
0 5 - Reserved (must be zero)
0 6 l2tp.flags.offset Offset
0 7 l2tp.flags.prio Priority
1 8 - Reserved (must be zero)
1 12 l2tp.ver Version (2)
2 16 l2tp.length Length (optional by len)
4 32 l2tp.tunnelid Tunnel ID
6 48 l2tp.sessionid Session ID
8 64 l2tp.ns Sequence Number (optional by seq)
10 80 l2tp.nr Next Sequence Number (optional by seq)
12 96 l2tp.offset Offset Size (optional by offset) | [
"Read",
"Layer",
"Two",
"Tunnelling",
"Protocol",
"."
] | train | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/link/l2tp.py#L84-L156 | 0.002188 |
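The flag and version handling at the top of read_l2tp can be illustrated on two raw header bytes; the sample values below are made up:

raw = bytes([0b01000000, 0x02])              # T=0 (data message), L=1 (length present), Ver=2
flag_bits = format(raw[0], '08b')            # same information as self._read_binary(1)
print('Control' if flag_bits[0] == '1' else 'Data')   # -> Data
print('length field present:', flag_bits[1] == '1')   # -> True
print('version:', raw[1] & 0x0F)                       # -> 2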
markovmodel/PyEMMA | pyemma/util/_config.py | Config.default_config_file | def default_config_file(self):
""" default config file living in PyEMMA package """
import os.path as p
import pyemma
return p.join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME) | python | def default_config_file(self):
""" default config file living in PyEMMA package """
import os.path as p
import pyemma
return p.join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME) | [
"def",
"default_config_file",
"(",
"self",
")",
":",
"import",
"os",
".",
"path",
"as",
"p",
"import",
"pyemma",
"return",
"p",
".",
"join",
"(",
"pyemma",
".",
"__path__",
"[",
"0",
"]",
",",
"Config",
".",
"DEFAULT_CONFIG_FILE_NAME",
")"
] | default config file living in PyEMMA package | [
"default",
"config",
"file",
"living",
"in",
"PyEMMA",
"package"
] | train | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/_config.py#L176-L180 | 0.009259 |
mila-iqia/fuel | fuel/utils/lock.py | release_readlock | def release_readlock(lockdir_name):
"""Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
"""
# Make sure the lock still exists before deleting it
if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
os.rmdir(lockdir_name) | python | def release_readlock(lockdir_name):
"""Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
"""
# Make sure the lock still exists before deleting it
if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
os.rmdir(lockdir_name) | [
"def",
"release_readlock",
"(",
"lockdir_name",
")",
":",
"# Make sure the lock still exists before deleting it",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"lockdir_name",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"lockdir_name",
")",
":",
"os",
".",
"rmdir",
"(",
"lockdir_name",
")"
] | Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock | [
"Release",
"a",
"previously",
"obtained",
"readlock",
"."
] | train | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L392-L403 | 0.002849 |
night-crawler/django-docker-helpers | django_docker_helpers/config/backends/base.py | BaseParser.client | def client(self):
"""
Helper property to lazy initialize and cache client. Runs
:meth:`~django_docker_helpers.config.backends.base.BaseParser.get_client`.
:return: an instance of backend-specific client
"""
if self._client is not None:
return self._client
self._client = self.get_client()
return self._client | python | def client(self):
"""
Helper property to lazy initialize and cache client. Runs
:meth:`~django_docker_helpers.config.backends.base.BaseParser.get_client`.
:return: an instance of backend-specific client
"""
if self._client is not None:
return self._client
self._client = self.get_client()
return self._client | [
"def",
"client",
"(",
"self",
")",
":",
"if",
"self",
".",
"_client",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_client",
"self",
".",
"_client",
"=",
"self",
".",
"get_client",
"(",
")",
"return",
"self",
".",
"_client"
] | Helper property to lazy initialize and cache client. Runs
:meth:`~django_docker_helpers.config.backends.base.BaseParser.get_client`.
:return: an instance of backend-specific client | [
"Helper",
"property",
"to",
"lazy",
"initialize",
"and",
"cache",
"client",
".",
"Runs",
":",
"meth",
":",
"~django_docker_helpers",
".",
"config",
".",
"backends",
".",
"base",
".",
"BaseParser",
".",
"get_client",
"."
] | train | https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/base.py#L95-L106 | 0.005181 |
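The property implements a plain lazy-initialisation cache. A generic, self-contained sketch of the same pattern; the class and names are illustrative, not part of django-docker-helpers:

class ExampleParser:
    def __init__(self):
        self._client = None

    def get_client(self):
        print('creating the expensive client exactly once')
        return object()

    @property
    def client(self):
        if self._client is None:
            self._client = self.get_client()
        return self._client

p = ExampleParser()
assert p.client is p.client   # the second access reuses the cached instance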
Hackerfleet/hfos | hfos/tool/__init__.py | _ask | def _ask(question, default=None, data_type='str', show_hint=False):
"""Interactively ask the user for data"""
data = default
if data_type == 'bool':
data = None
default_string = "Y" if default else "N"
while data not in ('Y', 'J', 'N', '1', '0'):
data = input("%s? [%s]: " % (question, default_string)).upper()
if data == '':
return default
return data in ('Y', 'J', '1')
elif data_type in ('str', 'unicode'):
if show_hint:
msg = "%s? [%s] (%s): " % (question, default, data_type)
else:
msg = question
data = input(msg)
if len(data) == 0:
data = default
elif data_type == 'int':
if show_hint:
msg = "%s? [%s] (%s): " % (question, default, data_type)
else:
msg = question
data = input(msg)
if len(data) == 0:
data = int(default)
else:
data = int(data)
return data | python | def _ask(question, default=None, data_type='str', show_hint=False):
"""Interactively ask the user for data"""
data = default
if data_type == 'bool':
data = None
default_string = "Y" if default else "N"
while data not in ('Y', 'J', 'N', '1', '0'):
data = input("%s? [%s]: " % (question, default_string)).upper()
if data == '':
return default
return data in ('Y', 'J', '1')
elif data_type in ('str', 'unicode'):
if show_hint:
msg = "%s? [%s] (%s): " % (question, default, data_type)
else:
msg = question
data = input(msg)
if len(data) == 0:
data = default
elif data_type == 'int':
if show_hint:
msg = "%s? [%s] (%s): " % (question, default, data_type)
else:
msg = question
data = input(msg)
if len(data) == 0:
data = int(default)
else:
data = int(data)
return data | [
"def",
"_ask",
"(",
"question",
",",
"default",
"=",
"None",
",",
"data_type",
"=",
"'str'",
",",
"show_hint",
"=",
"False",
")",
":",
"data",
"=",
"default",
"if",
"data_type",
"==",
"'bool'",
":",
"data",
"=",
"None",
"default_string",
"=",
"\"Y\"",
"if",
"default",
"else",
"\"N\"",
"while",
"data",
"not",
"in",
"(",
"'Y'",
",",
"'J'",
",",
"'N'",
",",
"'1'",
",",
"'0'",
")",
":",
"data",
"=",
"input",
"(",
"\"%s? [%s]: \"",
"%",
"(",
"question",
",",
"default_string",
")",
")",
".",
"upper",
"(",
")",
"if",
"data",
"==",
"''",
":",
"return",
"default",
"return",
"data",
"in",
"(",
"'Y'",
",",
"'J'",
",",
"'1'",
")",
"elif",
"data_type",
"in",
"(",
"'str'",
",",
"'unicode'",
")",
":",
"if",
"show_hint",
":",
"msg",
"=",
"\"%s? [%s] (%s): \"",
"%",
"(",
"question",
",",
"default",
",",
"data_type",
")",
"else",
":",
"msg",
"=",
"question",
"data",
"=",
"input",
"(",
"msg",
")",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"data",
"=",
"default",
"elif",
"data_type",
"==",
"'int'",
":",
"if",
"show_hint",
":",
"msg",
"=",
"\"%s? [%s] (%s): \"",
"%",
"(",
"question",
",",
"default",
",",
"data_type",
")",
"else",
":",
"msg",
"=",
"question",
"data",
"=",
"input",
"(",
"msg",
")",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"data",
"=",
"int",
"(",
"default",
")",
"else",
":",
"data",
"=",
"int",
"(",
"data",
")",
"return",
"data"
] | Interactively ask the user for data | [
"Interactively",
"ask",
"the",
"user",
"for",
"data"
] | train | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/__init__.py#L140-L180 | 0.000976 |
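A non-interactive way to exercise _ask is to stub out input(); this is purely illustrative and assumes hfos is importable in the current environment:

import builtins
from hfos.tool import _ask

builtins.input = lambda prompt='': 'n'      # pretend the operator typed 'n'
print(_ask("Install default modules", default=True, data_type='bool'))   # -> False

builtins.input = lambda prompt='': ''       # an empty answer falls back to the default
print(_ask("Port", default=8055, data_type='int', show_hint=True))       # -> 8055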
PyHDI/Pyverilog | pyverilog/vparser/parser.py | VerilogParser.p_concat | def p_concat(self, p):
'concat : LBRACE concatlist RBRACE'
p[0] = Concat(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | python | def p_concat(self, p):
'concat : LBRACE concatlist RBRACE'
p[0] = Concat(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | [
"def",
"p_concat",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"Concat",
"(",
"p",
"[",
"2",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | concat : LBRACE concatlist RBRACE | [
"concat",
":",
"LBRACE",
"concatlist",
"RBRACE"
] | train | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1161-L1164 | 0.013245 |
bitesofcode/projexui | projexui/widgets/xcalendarwidget/xcalendarscene.py | XCalendarScene.drawBackground | def drawBackground( self, painter, rect ):
"""
Draws the background of the scene using painter.
:param painter | <QPainter>
rect | <QRectF>
"""
if ( self._rebuildRequired ):
self.rebuild()
super(XCalendarScene, self).drawBackground(painter, rect)
palette = self.palette()
# draw custom options
if ( 'curr_date' in self._buildData ):
clr = palette.color(QPalette.Highlight)
clr.setAlpha(40)
painter.setBrush(clr)
painter.setPen(Qt.NoPen)
painter.drawRect(self._buildData['curr_date'])
painter.setBrush(Qt.NoBrush)
if ( 'today' in self._buildData ):
painter.setPen(Qt.NoPen)
clr = palette.color(QPalette.AlternateBase)
clr.setAlpha(120)
painter.setBrush(clr)
painter.drawRect(self._buildData['today'])
painter.setBrush(Qt.NoBrush)
# draw the grid
painter.setPen(palette.color(QPalette.Mid))
painter.drawLines(self._buildData.get('grid', []))
# draw text fields
painter.setPen(palette.color(QPalette.Text))
for data in self._buildData.get('regular_text', []):
painter.drawText(*data)
# draw mid text fields
painter.setPen(palette.color(QPalette.Mid))
for data in self._buildData.get('mid_text', []):
painter.drawText(*data) | python | def drawBackground( self, painter, rect ):
"""
Draws the background of the scene using painter.
:param painter | <QPainter>
rect | <QRectF>
"""
if ( self._rebuildRequired ):
self.rebuild()
super(XCalendarScene, self).drawBackground(painter, rect)
palette = self.palette()
# draw custom options
if ( 'curr_date' in self._buildData ):
clr = palette.color(QPalette.Highlight)
clr.setAlpha(40)
painter.setBrush(clr)
painter.setPen(Qt.NoPen)
painter.drawRect(self._buildData['curr_date'])
painter.setBrush(Qt.NoBrush)
if ( 'today' in self._buildData ):
painter.setPen(Qt.NoPen)
clr = palette.color(QPalette.AlternateBase)
clr.setAlpha(120)
painter.setBrush(clr)
painter.drawRect(self._buildData['today'])
painter.setBrush(Qt.NoBrush)
# draw the grid
painter.setPen(palette.color(QPalette.Mid))
painter.drawLines(self._buildData.get('grid', []))
# draw text fields
painter.setPen(palette.color(QPalette.Text))
for data in self._buildData.get('regular_text', []):
painter.drawText(*data)
# draw mid text fields
painter.setPen(palette.color(QPalette.Mid))
for data in self._buildData.get('mid_text', []):
painter.drawText(*data) | [
"def",
"drawBackground",
"(",
"self",
",",
"painter",
",",
"rect",
")",
":",
"if",
"(",
"self",
".",
"_rebuildRequired",
")",
":",
"self",
".",
"rebuild",
"(",
")",
"super",
"(",
"XCalendarScene",
",",
"self",
")",
".",
"drawBackground",
"(",
"painter",
",",
"rect",
")",
"palette",
"=",
"self",
".",
"palette",
"(",
")",
"# draw custom options\r",
"if",
"(",
"'curr_date'",
"in",
"self",
".",
"_buildData",
")",
":",
"clr",
"=",
"palette",
".",
"color",
"(",
"QPalette",
".",
"Highlight",
")",
"clr",
".",
"setAlpha",
"(",
"40",
")",
"painter",
".",
"setBrush",
"(",
"clr",
")",
"painter",
".",
"setPen",
"(",
"Qt",
".",
"NoPen",
")",
"painter",
".",
"drawRect",
"(",
"self",
".",
"_buildData",
"[",
"'curr_date'",
"]",
")",
"painter",
".",
"setBrush",
"(",
"Qt",
".",
"NoBrush",
")",
"if",
"(",
"'today'",
"in",
"self",
".",
"_buildData",
")",
":",
"painter",
".",
"setPen",
"(",
"Qt",
".",
"NoPen",
")",
"clr",
"=",
"palette",
".",
"color",
"(",
"QPalette",
".",
"AlternateBase",
")",
"clr",
".",
"setAlpha",
"(",
"120",
")",
"painter",
".",
"setBrush",
"(",
"clr",
")",
"painter",
".",
"drawRect",
"(",
"self",
".",
"_buildData",
"[",
"'today'",
"]",
")",
"painter",
".",
"setBrush",
"(",
"Qt",
".",
"NoBrush",
")",
"# draw the grid\r",
"painter",
".",
"setPen",
"(",
"palette",
".",
"color",
"(",
"QPalette",
".",
"Mid",
")",
")",
"painter",
".",
"drawLines",
"(",
"self",
".",
"_buildData",
".",
"get",
"(",
"'grid'",
",",
"[",
"]",
")",
")",
"# draw text fields\r",
"painter",
".",
"setPen",
"(",
"palette",
".",
"color",
"(",
"QPalette",
".",
"Text",
")",
")",
"for",
"data",
"in",
"self",
".",
"_buildData",
".",
"get",
"(",
"'regular_text'",
",",
"[",
"]",
")",
":",
"painter",
".",
"drawText",
"(",
"*",
"data",
")",
"# draw mid text fields\r",
"painter",
".",
"setPen",
"(",
"palette",
".",
"color",
"(",
"QPalette",
".",
"Mid",
")",
")",
"for",
"data",
"in",
"self",
".",
"_buildData",
".",
"get",
"(",
"'mid_text'",
",",
"[",
"]",
")",
":",
"painter",
".",
"drawText",
"(",
"*",
"data",
")"
] | Draws the background of the scene using painter.
:param painter | <QPainter>
rect | <QRectF> | [
"Draws",
"the",
"background",
"of",
"the",
"scene",
"using",
"painter",
".",
":",
"param",
"painter",
"|",
"<QPainter",
">",
"rect",
"|",
"<QRectF",
">"
] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarscene.py#L143-L186 | 0.011335 |
googlefonts/fontbakery | Lib/fontbakery/profiles/googlefonts.py | com_google_fonts_check_metadata_valid_full_name_values | def com_google_fonts_check_metadata_valid_full_name_values(style,
font_metadata,
font_familynames,
typographic_familynames):
"""METADATA.pb font.full_name field contains font name in right format?"""
from fontbakery.constants import RIBBI_STYLE_NAMES
if style in RIBBI_STYLE_NAMES:
familynames = font_familynames
if familynames == []:
yield SKIP, "No FONT_FAMILYNAME"
else:
familynames = typographic_familynames
if familynames == []:
yield SKIP, "No TYPOGRAPHIC_FAMILYNAME"
for font_familyname in familynames:
if font_familyname in font_metadata.full_name:
yield PASS, ("METADATA.pb font.full_name field contains"
" font name in right format."
" ('{}' in '{}')").format(font_familyname,
font_metadata.full_name)
else:
yield FAIL, ("METADATA.pb font.full_name field (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.full_name,
font_familyname) | python | def com_google_fonts_check_metadata_valid_full_name_values(style,
font_metadata,
font_familynames,
typographic_familynames):
"""METADATA.pb font.full_name field contains font name in right format?"""
from fontbakery.constants import RIBBI_STYLE_NAMES
if style in RIBBI_STYLE_NAMES:
familynames = font_familynames
if familynames == []:
yield SKIP, "No FONT_FAMILYNAME"
else:
familynames = typographic_familynames
if familynames == []:
yield SKIP, "No TYPOGRAPHIC_FAMILYNAME"
for font_familyname in familynames:
if font_familyname in font_metadata.full_name:
yield PASS, ("METADATA.pb font.full_name field contains"
" font name in right format."
" ('{}' in '{}')").format(font_familyname,
font_metadata.full_name)
else:
yield FAIL, ("METADATA.pb font.full_name field (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.full_name,
font_familyname) | [
"def",
"com_google_fonts_check_metadata_valid_full_name_values",
"(",
"style",
",",
"font_metadata",
",",
"font_familynames",
",",
"typographic_familynames",
")",
":",
"from",
"fontbakery",
".",
"constants",
"import",
"RIBBI_STYLE_NAMES",
"if",
"style",
"in",
"RIBBI_STYLE_NAMES",
":",
"familynames",
"=",
"font_familynames",
"if",
"familynames",
"==",
"[",
"]",
":",
"yield",
"SKIP",
",",
"\"No FONT_FAMILYNAME\"",
"else",
":",
"familynames",
"=",
"typographic_familynames",
"if",
"familynames",
"==",
"[",
"]",
":",
"yield",
"SKIP",
",",
"\"No TYPOGRAPHIC_FAMILYNAME\"",
"for",
"font_familyname",
"in",
"familynames",
":",
"if",
"font_familyname",
"in",
"font_metadata",
".",
"full_name",
":",
"yield",
"PASS",
",",
"(",
"\"METADATA.pb font.full_name field contains\"",
"\" font name in right format.\"",
"\" ('{}' in '{}')\"",
")",
".",
"format",
"(",
"font_familyname",
",",
"font_metadata",
".",
"full_name",
")",
"else",
":",
"yield",
"FAIL",
",",
"(",
"\"METADATA.pb font.full_name field (\\\"{}\\\")\"",
"\" does not match correct font name format (\\\"{}\\\").\"",
"\"\"",
")",
".",
"format",
"(",
"font_metadata",
".",
"full_name",
",",
"font_familyname",
")"
] | METADATA.pb font.full_name field contains font name in right format? | [
"METADATA",
".",
"pb",
"font",
".",
"full_name",
"field",
"contains",
"font",
"name",
"in",
"right",
"format?"
] | train | https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1821-L1846 | 0.008682 |
wonambi-python/wonambi | wonambi/widgets/analysis.py | AnalysisDialog.plot_freq | def plot_freq(self, x, y, title='', ylabel=None, scale='semilogy'):
"""Plot mean frequency spectrum and display in dialog.
Parameters
----------
x : list
vector with frequencies
y : ndarray
vector with amplitudes
title : str
plot title
ylabel : str
plot y label
scale : str
semilogy, loglog or linear
"""
freq = self.frequency
scaling = freq['scaling'].get_value()
if ylabel is None:
if freq['complex'].get_value():
ylabel = 'Amplitude (uV)'
elif 'power' == scaling:
ylabel = 'Power spectral density (uV ** 2 / Hz)'
elif 'energy' == scaling:
ylabel = 'Energy spectral density (uV ** 2)'
self.parent.plot_dialog = PlotDialog(self.parent)
self.parent.plot_dialog.canvas.plot(x, y, title, ylabel, scale=scale)
self.parent.show_plot_dialog() | python | def plot_freq(self, x, y, title='', ylabel=None, scale='semilogy'):
"""Plot mean frequency spectrum and display in dialog.
Parameters
----------
x : list
vector with frequencies
y : ndarray
vector with amplitudes
title : str
plot title
ylabel : str
plot y label
scale : str
semilogy, loglog or linear
"""
freq = self.frequency
scaling = freq['scaling'].get_value()
if ylabel is None:
if freq['complex'].get_value():
ylabel = 'Amplitude (uV)'
elif 'power' == scaling:
ylabel = 'Power spectral density (uV ** 2 / Hz)'
elif 'energy' == scaling:
ylabel = 'Energy spectral density (uV ** 2)'
self.parent.plot_dialog = PlotDialog(self.parent)
self.parent.plot_dialog.canvas.plot(x, y, title, ylabel, scale=scale)
self.parent.show_plot_dialog() | [
"def",
"plot_freq",
"(",
"self",
",",
"x",
",",
"y",
",",
"title",
"=",
"''",
",",
"ylabel",
"=",
"None",
",",
"scale",
"=",
"'semilogy'",
")",
":",
"freq",
"=",
"self",
".",
"frequency",
"scaling",
"=",
"freq",
"[",
"'scaling'",
"]",
".",
"get_value",
"(",
")",
"if",
"ylabel",
"is",
"None",
":",
"if",
"freq",
"[",
"'complex'",
"]",
".",
"get_value",
"(",
")",
":",
"ylabel",
"=",
"'Amplitude (uV)'",
"elif",
"'power'",
"==",
"scaling",
":",
"ylabel",
"=",
"'Power spectral density (uV ** 2 / Hz)'",
"elif",
"'energy'",
"==",
"scaling",
":",
"ylabel",
"=",
"'Energy spectral density (uV ** 2)'",
"self",
".",
"parent",
".",
"plot_dialog",
"=",
"PlotDialog",
"(",
"self",
".",
"parent",
")",
"self",
".",
"parent",
".",
"plot_dialog",
".",
"canvas",
".",
"plot",
"(",
"x",
",",
"y",
",",
"title",
",",
"ylabel",
",",
"scale",
"=",
"scale",
")",
"self",
".",
"parent",
".",
"show_plot_dialog",
"(",
")"
] | Plot mean frequency spectrum and display in dialog.
Parameters
----------
x : list
vector with frequencies
y : ndarray
vector with amplitudes
title : str
plot title
ylabel : str
plot y label
scale : str
semilogy, loglog or linear | [
"Plot",
"mean",
"frequency",
"spectrum",
"and",
"display",
"in",
"dialog",
"."
] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/analysis.py#L1880-L1909 | 0.001992 |
bjmorgan/vasppy | vasppy/cell.py | Cell.dr | def dr( self, r1, r2, cutoff=None ):
"""
Calculate the distance between two fractional coordinates in the cell.
Args:
r1 (np.array): fractional coordinates for position 1.
r2 (np.array): fractional coordinates for position 2.
cutoff (optional:Bool): If set, returns None for distances greater than the cutoff. Default None (unset).
Returns:
(float): the distance between r1 and r2.
"""
delta_r_cartesian = ( r1 - r2 ).dot( self.matrix )
delta_r_squared = sum( delta_r_cartesian**2 )
if cutoff != None:
cutoff_squared = cutoff ** 2
if delta_r_squared > cutoff_squared:
return None
return( math.sqrt( delta_r_squared ) ) | python | def dr( self, r1, r2, cutoff=None ):
"""
Calculate the distance between two fractional coordinates in the cell.
Args:
r1 (np.array): fractional coordinates for position 1.
r2 (np.array): fractional coordinates for position 2.
cutoff (optional:Bool): If set, returns None for distances greater than the cutoff. Default None (unset).
Returns:
(float): the distance between r1 and r2.
"""
delta_r_cartesian = ( r1 - r2 ).dot( self.matrix )
delta_r_squared = sum( delta_r_cartesian**2 )
if cutoff != None:
cutoff_squared = cutoff ** 2
if delta_r_squared > cutoff_squared:
return None
return( math.sqrt( delta_r_squared ) ) | [
"def",
"dr",
"(",
"self",
",",
"r1",
",",
"r2",
",",
"cutoff",
"=",
"None",
")",
":",
"delta_r_cartesian",
"=",
"(",
"r1",
"-",
"r2",
")",
".",
"dot",
"(",
"self",
".",
"matrix",
")",
"delta_r_squared",
"=",
"sum",
"(",
"delta_r_cartesian",
"**",
"2",
")",
"if",
"cutoff",
"!=",
"None",
":",
"cutoff_squared",
"=",
"cutoff",
"**",
"2",
"if",
"delta_r_squared",
">",
"cutoff_squared",
":",
"return",
"None",
"return",
"(",
"math",
".",
"sqrt",
"(",
"delta_r_squared",
")",
")"
] | Calculate the distance between two fractional coordinates in the cell.
Args:
r1 (np.array): fractional coordinates for position 1.
r2 (np.array): fractional coordinates for position 2.
cutoff (optional:Bool): If set, returns None for distances greater than the cutoff. Default None (unset).
Returns:
(float): the distance between r1 and r2. | [
"Calculate",
"the",
"distance",
"between",
"two",
"fractional",
"coordinates",
"in",
"the",
"cell",
".",
"Args",
":",
"r1",
"(",
"np",
".",
"array",
")",
":",
"fractional",
"coordinates",
"for",
"position",
"1",
".",
"r2",
"(",
"np",
".",
"array",
")",
":",
"fractional",
"coordinates",
"for",
"position",
"2",
".",
"cutoff",
"(",
"optional",
":",
"Bool",
")",
":",
"If",
"set",
"returns",
"None",
"for",
"distances",
"greater",
"than",
"the",
"cutoff",
".",
"Default",
"None",
"(",
"unset",
")",
"."
] | train | https://github.com/bjmorgan/vasppy/blob/cc2d1449697b17ee1c43715a02cddcb1139a6834/vasppy/cell.py#L61-L79 | 0.022843 |
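The same calculation written out with plain NumPy for a cubic 5 Å cell; the values are illustrative, and like the no-cutoff path of dr itself this applies no periodic wrapping:

import numpy as np

matrix = np.eye(3) * 5.0                  # rows are the lattice vectors
r1 = np.array([0.1, 0.1, 0.1])            # fractional coordinates
r2 = np.array([0.3, 0.1, 0.1])
delta_cart = (r1 - r2).dot(matrix)
print(np.sqrt((delta_cart ** 2).sum()))   # -> 1.0 (Å)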
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/device_directory/models/device_data.py | DeviceData.mechanism | def mechanism(self, mechanism):
"""
Sets the mechanism of this DeviceData.
The ID of the channel used to communicate with the device.
:param mechanism: The mechanism of this DeviceData.
:type: str
"""
allowed_values = ["connector", "direct"]
if mechanism not in allowed_values:
raise ValueError(
"Invalid value for `mechanism` ({0}), must be one of {1}"
.format(mechanism, allowed_values)
)
self._mechanism = mechanism | python | def mechanism(self, mechanism):
"""
Sets the mechanism of this DeviceData.
The ID of the channel used to communicate with the device.
:param mechanism: The mechanism of this DeviceData.
:type: str
"""
allowed_values = ["connector", "direct"]
if mechanism not in allowed_values:
raise ValueError(
"Invalid value for `mechanism` ({0}), must be one of {1}"
.format(mechanism, allowed_values)
)
self._mechanism = mechanism | [
"def",
"mechanism",
"(",
"self",
",",
"mechanism",
")",
":",
"allowed_values",
"=",
"[",
"\"connector\"",
",",
"\"direct\"",
"]",
"if",
"mechanism",
"not",
"in",
"allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `mechanism` ({0}), must be one of {1}\"",
".",
"format",
"(",
"mechanism",
",",
"allowed_values",
")",
")",
"self",
".",
"_mechanism",
"=",
"mechanism"
] | Sets the mechanism of this DeviceData.
The ID of the channel used to communicate with the device.
:param mechanism: The mechanism of this DeviceData.
:type: str | [
"Sets",
"the",
"mechanism",
"of",
"this",
"DeviceData",
".",
"The",
"ID",
"of",
"the",
"channel",
"used",
"to",
"communicate",
"with",
"the",
"device",
"."
] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/device_directory/models/device_data.py#L722-L737 | 0.00365 |
rackerlabs/python-lunrclient | lunrclient/tools.py | Tools.read | def read(self, device=None, offset=0, bs=None, count=1):
"""
Using DIRECT_O read from the block device specified to stdout
(Without any optional arguments will read the first 4k from the device)
"""
volume = self.get_volume(device)
block_size = bs or BLOCK_SIZE
offset = int(offset) * block_size
count = int(count)
print("Offset: ", offset)
total = 0
with directio.open(volume['path'], buffered=block_size) as file:
file.seek(offset)
for i in range(0, count):
total += os.write(sys.stdout.fileno(), file.read(block_size))
os.write(sys.stdout.fileno(), "\nRead: %d Bytes\n" % total) | python | def read(self, device=None, offset=0, bs=None, count=1):
"""
Using DIRECT_O read from the block device specified to stdout
(Without any optional arguments will read the first 4k from the device)
"""
volume = self.get_volume(device)
block_size = bs or BLOCK_SIZE
offset = int(offset) * block_size
count = int(count)
print("Offset: ", offset)
total = 0
with directio.open(volume['path'], buffered=block_size) as file:
file.seek(offset)
for i in range(0, count):
total += os.write(sys.stdout.fileno(), file.read(block_size))
os.write(sys.stdout.fileno(), "\nRead: %d Bytes\n" % total) | [
"def",
"read",
"(",
"self",
",",
"device",
"=",
"None",
",",
"offset",
"=",
"0",
",",
"bs",
"=",
"None",
",",
"count",
"=",
"1",
")",
":",
"volume",
"=",
"self",
".",
"get_volume",
"(",
"device",
")",
"block_size",
"=",
"bs",
"or",
"BLOCK_SIZE",
"offset",
"=",
"int",
"(",
"offset",
")",
"*",
"block_size",
"count",
"=",
"int",
"(",
"count",
")",
"print",
"(",
"\"Offset: \"",
",",
"offset",
")",
"total",
"=",
"0",
"with",
"directio",
".",
"open",
"(",
"volume",
"[",
"'path'",
"]",
",",
"buffered",
"=",
"block_size",
")",
"as",
"file",
":",
"file",
".",
"seek",
"(",
"offset",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"count",
")",
":",
"total",
"+=",
"os",
".",
"write",
"(",
"sys",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"file",
".",
"read",
"(",
"block_size",
")",
")",
"os",
".",
"write",
"(",
"sys",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"\"\\nRead: %d Bytes\\n\"",
"%",
"total",
")"
] | Using DIRECT_O read from the block device specified to stdout
(Without any optional arguments will read the first 4k from the device) | [
"Using",
"DIRECT_O",
"read",
"from",
"the",
"block",
"device",
"specified",
"to",
"stdout",
"(",
"Without",
"any",
"optional",
"arguments",
"will",
"read",
"the",
"first",
"4k",
"from",
"the",
"device",
")"
] | train | https://github.com/rackerlabs/python-lunrclient/blob/f26a450a422600f492480bfa42cbee50a5c7016f/lunrclient/tools.py#L117-L134 | 0.002782 |
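The core of the method is an O_DIRECT read via the directio package. A stripped-down sketch of that pattern; the device path is a placeholder, and root access plus a real block device are required:

import os
import sys
import directio

block_size = 4096
with directio.open('/dev/lunr/volume-00000001', buffered=block_size) as f:
    f.seek(0)                                           # offset in bytes
    os.write(sys.stdout.fileno(), f.read(block_size))   # dump one block to stdout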
pysal/esda | esda/tabular.py | _bivariate_handler | def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
outvals=None, **kwargs):
"""
Compute a descriptive bivariate statistic over two sets of columns, `x` and
`y`, contained in `df`.
Parameters
----------
df : pandas.DataFrame
dataframe in which columns `x` and `y` are contained
x : string or list of strings
one or more column names to use as variates in the bivariate
statistics
y : string or list of strings
one or more column names to use as variates in the bivariate
statistics
w : pysal.weights.W
spatial weights object corresponding to the dataframe `df`
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
real_swapname = kwargs.pop('swapname', '')
if isinstance(y, str):
y = [y]
if isinstance(x, str):
x = [x]
if not inplace:
new_df = df.copy()
_bivariate_handler(new_df, x, y=y, w=w, inplace=True,
swapname=real_swapname,
pvalue=pvalue, outvals=outvals, **kwargs)
return new_df
if y is None:
y = x
for xi,yi in _it.product(x,y):
if xi == yi:
continue
_univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
pvalue=pvalue, outvals=outvals, swapname='', **kwargs)
if real_swapname is not '':
df.columns = [_swap_ending(col, real_swapname)
if col.endswith('_statistic')
else col for col in df.columns] | python | def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
outvals=None, **kwargs):
"""
Compute a descriptive bivariate statistic over two sets of columns, `x` and
`y`, contained in `df`.
Parameters
----------
df : pandas.DataFrame
dataframe in which columns `x` and `y` are contained
x : string or list of strings
one or more column names to use as variates in the bivariate
statistics
y : string or list of strings
one or more column names to use as variates in the bivariate
statistics
w : pysal.weights.W
spatial weights object corresponding to the dataframe `df`
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
real_swapname = kwargs.pop('swapname', '')
if isinstance(y, str):
y = [y]
if isinstance(x, str):
x = [x]
if not inplace:
new_df = df.copy()
_bivariate_handler(new_df, x, y=y, w=w, inplace=True,
swapname=real_swapname,
pvalue=pvalue, outvals=outvals, **kwargs)
return new_df
if y is None:
y = x
for xi,yi in _it.product(x,y):
if xi == yi:
continue
_univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
pvalue=pvalue, outvals=outvals, swapname='', **kwargs)
if real_swapname is not '':
df.columns = [_swap_ending(col, real_swapname)
if col.endswith('_statistic')
else col for col in df.columns] | [
"def",
"_bivariate_handler",
"(",
"df",
",",
"x",
",",
"y",
"=",
"None",
",",
"w",
"=",
"None",
",",
"inplace",
"=",
"True",
",",
"pvalue",
"=",
"'sim'",
",",
"outvals",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"real_swapname",
"=",
"kwargs",
".",
"pop",
"(",
"'swapname'",
",",
"''",
")",
"if",
"isinstance",
"(",
"y",
",",
"str",
")",
":",
"y",
"=",
"[",
"y",
"]",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"x",
"=",
"[",
"x",
"]",
"if",
"not",
"inplace",
":",
"new_df",
"=",
"df",
".",
"copy",
"(",
")",
"_bivariate_handler",
"(",
"new_df",
",",
"x",
",",
"y",
"=",
"y",
",",
"w",
"=",
"w",
",",
"inplace",
"=",
"True",
",",
"swapname",
"=",
"real_swapname",
",",
"pvalue",
"=",
"pvalue",
",",
"outvals",
"=",
"outvals",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_df",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"x",
"for",
"xi",
",",
"yi",
"in",
"_it",
".",
"product",
"(",
"x",
",",
"y",
")",
":",
"if",
"xi",
"==",
"yi",
":",
"continue",
"_univariate_handler",
"(",
"df",
",",
"cols",
"=",
"xi",
",",
"w",
"=",
"w",
",",
"y",
"=",
"df",
"[",
"yi",
"]",
",",
"inplace",
"=",
"True",
",",
"pvalue",
"=",
"pvalue",
",",
"outvals",
"=",
"outvals",
",",
"swapname",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
"if",
"real_swapname",
"is",
"not",
"''",
":",
"df",
".",
"columns",
"=",
"[",
"_swap_ending",
"(",
"col",
",",
"real_swapname",
")",
"if",
"col",
".",
"endswith",
"(",
"'_statistic'",
")",
"else",
"col",
"for",
"col",
"in",
"df",
".",
"columns",
"]"
] | Compute a descriptive bivariate statistic over two sets of columns, `x` and
`y`, contained in `df`.
Parameters
----------
df : pandas.DataFrame
dataframe in which columns `x` and `y` are contained
x : string or list of strings
one or more column names to use as variates in the bivariate
statistics
y : string or list of strings
one or more column names to use as variates in the bivariate
statistics
w : pysal.weights.W
spatial weights object corresponding to the dataframe `df`
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic | [
"Compute",
"a",
"descriptive",
"bivariate",
"statistic",
"over",
"two",
"sets",
"of",
"columns",
"x",
"and",
"y",
"contained",
"in",
"df",
"."
] | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L100-L154 | 0.002095 |
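
For readers skimming this record, the pairing behaviour documented above reduces to iterating over the Cartesian product of the x and y column lists and skipping identical pairs. A minimal, self-contained sketch with hypothetical column names (no esda import required):

    import itertools as _it

    x = ['income', 'density']      # hypothetical variate columns
    y = ['crime_rate']             # hypothetical variate columns

    # mirror the x/y pairing loop: every (xi, yi) combination except xi == yi
    pairs = [(xi, yi) for xi, yi in _it.product(x, y) if xi != yi]
    print(pairs)                   # [('income', 'crime_rate'), ('density', 'crime_rate')]
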
apache/incubator-mxnet | example/rcnn/symdata/anchor.py | AnchorGenerator._generate_base_anchors | def _generate_base_anchors(base_size, scales, ratios):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = AnchorGenerator._ratio_enum(base_anchor, ratios)
anchors = np.vstack([AnchorGenerator._scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | python | def _generate_base_anchors(base_size, scales, ratios):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = AnchorGenerator._ratio_enum(base_anchor, ratios)
anchors = np.vstack([AnchorGenerator._scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | [
"def",
"_generate_base_anchors",
"(",
"base_size",
",",
"scales",
",",
"ratios",
")",
":",
"base_anchor",
"=",
"np",
".",
"array",
"(",
"[",
"1",
",",
"1",
",",
"base_size",
",",
"base_size",
"]",
")",
"-",
"1",
"ratio_anchors",
"=",
"AnchorGenerator",
".",
"_ratio_enum",
"(",
"base_anchor",
",",
"ratios",
")",
"anchors",
"=",
"np",
".",
"vstack",
"(",
"[",
"AnchorGenerator",
".",
"_scale_enum",
"(",
"ratio_anchors",
"[",
"i",
",",
":",
"]",
",",
"scales",
")",
"for",
"i",
"in",
"range",
"(",
"ratio_anchors",
".",
"shape",
"[",
"0",
"]",
")",
"]",
")",
"return",
"anchors"
] | Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window. | [
"Generate",
"anchor",
"(",
"reference",
")",
"windows",
"by",
"enumerating",
"aspect",
"ratios",
"X",
"scales",
"wrt",
"a",
"reference",
"(",
"0",
"0",
"15",
"15",
")",
"window",
"."
] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rcnn/symdata/anchor.py#L44-L53 | 0.005725 |
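
As a rough illustration of the enumeration described in the docstring, the sketch below builds anchors for aspect ratio 1.0 only (the real implementation also enumerates ratios via _ratio_enum); the arithmetic is reconstructed here and only numpy is assumed:

    import numpy as np

    base_size, scales = 16, np.array([8, 16, 32])
    ctr = (base_size - 1) / 2.0                # centre of the (0, 0, 15, 15) reference window
    ws = base_size * scales                    # window sides after scaling (ratio 1.0)
    anchors = np.stack([ctr - (ws - 1) / 2, ctr - (ws - 1) / 2,
                        ctr + (ws - 1) / 2, ctr + (ws - 1) / 2], axis=1)
    print(anchors)                             # one (x1, y1, x2, y2) box per scale, e.g. (-56, -56, 71, 71)
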
AnalogJ/lexicon | lexicon/providers/nsone.py | Provider._find_record | def _find_record(self, domain, _type=None):
"""search for a record on NS1 across zones. returns None if not found."""
def _is_matching(record):
"""filter function for records"""
if domain and record.get('domain', None) != domain:
return False
if _type and record.get('type', None) != _type:
return False
return True
payload = self._get('/search?q={0}&type=record'.format(domain))
for record in payload:
if _is_matching(record):
match = record
break
else:
# no such domain on ns1
return None
record = self._get(
'/zones/{0}/{1}/{2}'.format(match['zone'], match['domain'], match['type']))
if record.get('message', None):
return None # {"message":"record not found"}
short_answers = [x['answer'][0] for x in record['answers']]
# ensure a compatibility level with self._list_records
record['short_answers'] = short_answers
return record | python | def _find_record(self, domain, _type=None):
"""search for a record on NS1 across zones. returns None if not found."""
def _is_matching(record):
"""filter function for records"""
if domain and record.get('domain', None) != domain:
return False
if _type and record.get('type', None) != _type:
return False
return True
payload = self._get('/search?q={0}&type=record'.format(domain))
for record in payload:
if _is_matching(record):
match = record
break
else:
# no such domain on ns1
return None
record = self._get(
'/zones/{0}/{1}/{2}'.format(match['zone'], match['domain'], match['type']))
if record.get('message', None):
return None # {"message":"record not found"}
short_answers = [x['answer'][0] for x in record['answers']]
# ensure a compatibility level with self._list_records
record['short_answers'] = short_answers
return record | [
"def",
"_find_record",
"(",
"self",
",",
"domain",
",",
"_type",
"=",
"None",
")",
":",
"def",
"_is_matching",
"(",
"record",
")",
":",
"\"\"\"filter function for records\"\"\"",
"if",
"domain",
"and",
"record",
".",
"get",
"(",
"'domain'",
",",
"None",
")",
"!=",
"domain",
":",
"return",
"False",
"if",
"_type",
"and",
"record",
".",
"get",
"(",
"'type'",
",",
"None",
")",
"!=",
"_type",
":",
"return",
"False",
"return",
"True",
"payload",
"=",
"self",
".",
"_get",
"(",
"'/search?q={0}&type=record'",
".",
"format",
"(",
"domain",
")",
")",
"for",
"record",
"in",
"payload",
":",
"if",
"_is_matching",
"(",
"record",
")",
":",
"match",
"=",
"record",
"break",
"else",
":",
"# no such domain on ns1",
"return",
"None",
"record",
"=",
"self",
".",
"_get",
"(",
"'/zones/{0}/{1}/{2}'",
".",
"format",
"(",
"match",
"[",
"'zone'",
"]",
",",
"match",
"[",
"'domain'",
"]",
",",
"match",
"[",
"'type'",
"]",
")",
")",
"if",
"record",
".",
"get",
"(",
"'message'",
",",
"None",
")",
":",
"return",
"None",
"# {\"message\":\"record not found\"}",
"short_answers",
"=",
"[",
"x",
"[",
"'answer'",
"]",
"[",
"0",
"]",
"for",
"x",
"in",
"record",
"[",
"'answers'",
"]",
"]",
"# ensure a compatibility level with self._list_records",
"record",
"[",
"'short_answers'",
"]",
"=",
"short_answers",
"return",
"record"
] | search for a record on NS1 across zones. returns None if not found. | [
"search",
"for",
"a",
"record",
"on",
"NS1",
"across",
"zones",
".",
"returns",
"None",
"if",
"not",
"found",
"."
] | train | https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/nsone.py#L93-L122 | 0.003643 |
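
The search-then-filter behaviour can be sketched without a live NS1 account by stubbing the /search payload; the record dicts below are made up and only the filtering step is shown:

    payload = [  # stand-in for self._get('/search?q=...&type=record')
        {'domain': 'www.example.com', 'type': 'A',   'zone': 'example.com'},
        {'domain': 'www.example.com', 'type': 'TXT', 'zone': 'example.com'},
    ]

    def _is_matching(record, domain='www.example.com', _type='TXT'):
        if domain and record.get('domain') != domain:
            return False
        if _type and record.get('type') != _type:
            return False
        return True

    match = next((r for r in payload if _is_matching(r)), None)
    print(match)   # the TXT record, or None when the domain is not hosted on NS1
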
impact27/registrator | registrator/image.py | find_shift_dft | def find_shift_dft(im0, im1, isccs=False, subpix=True):
"""Find the shift between two images using the DFT method
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
isccs: Boolean, default false
        Set to True if the images are already DFT and in CCS representation
subpix: boolean, default True
Set to True (default) if you want subpixel precision
Returns
-------
[y, x]: 2 numbers
The offset
Notes
-----
    This algorithm detects a shift using the global phase difference of the DFTs
If the images are already DFT and in the CCS format, set isccs to true.
In that case the images should have the same size.
If subpix is True, a gaussian fit is used for subpix precision
"""
# sanitize input
im0 = np.asarray(im0, dtype=np.float32)
im1 = np.asarray(im1, dtype=np.float32)
# check input
if not isccs:
im0, im1 = dft_optsize_same(im0, im1)
else:
# Work only if the shapes are the same
assert(im0.shape == im1.shape)
# f0*conj(f1)
mulSpec = cv2.mulSpectrums(im0, im1, flags=0, conjB=True)
# norm(f0)*norm(f1)
normccs = cv2.sqrt(cv2.mulSpectrums(im0, im0, flags=0, conjB=True) *
cv2.mulSpectrums(im1, im1, flags=0, conjB=True))
# compute the inverse DFT
xc = cv2.dft(ccs_normalize(mulSpec, normccs),
flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_INVERSE)
# Blur xc to remove some noise and improve the subpixel detection
# workaround as GaussianBlur doesn't work with BORDER_WRAP
blurRadii = 2
xc = cv2.copyMakeBorder(xc, blurRadii, blurRadii, blurRadii, blurRadii,
borderType=cv2.BORDER_WRAP)
xc = cv2.GaussianBlur(xc, (2 * blurRadii + 1, 2 * blurRadii + 1), 1.5)
xc = xc[blurRadii:-blurRadii, blurRadii:-blurRadii]
# save shape
shape = np.asarray(xc.shape)
# find max
idx = np.asarray(np.unravel_index(np.argmax(xc), shape))
"""
from matplotlib import pyplot as plt
from numpy.fft import fftshift
plt.figure()
plt.imshow(np.log(np.abs(fftshift(im0))))
plt.figure()
plt.imshow(np.log(np.abs(fftshift(im1))))
plt.figure()
plt.imshow(fftshift(ccs_normalize(mulSpec,normccs)))
plt.figure()
extent= (-np.shape(xc)[1]/2, np.shape(xc)[1]/2, -np.shape(xc)[0]/2, np.shape(xc)[0]/2 )
plt.imshow(np.log(np.abs(fftshift(xc))),extent = extent)
#"""
# plt.imshow(fftshift(xc))
# print(idx)
# plt.figure()
# if toremove:
# plt.figure(1)
# l=len(xc[:,0])
# plt.plot(np.arange(l)/l,xc[:,0])
# print(l,xc[-1,0])
# plt.figure(2)
#"""
if subpix:
# update idx
idx = np.asarray([get_peak_pos(xc[:, idx[1]], wrap=True),
get_peak_pos(xc[idx[0], :], wrap=True)])
else:
        # restricts to reasonable values
idx[idx > shape // 2] -= shape[idx > shape // 2]
return idx | python | def find_shift_dft(im0, im1, isccs=False, subpix=True):
"""Find the shift between two images using the DFT method
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
isccs: Boolean, default false
        Set to True if the images are already DFT and in CCS representation
subpix: boolean, default True
Set to True (default) if you want subpixel precision
Returns
-------
[y, x]: 2 numbers
The offset
Notes
-----
    This algorithm detects a shift using the global phase difference of the DFTs
If the images are already DFT and in the CCS format, set isccs to true.
In that case the images should have the same size.
If subpix is True, a gaussian fit is used for subpix precision
"""
# sanitize input
im0 = np.asarray(im0, dtype=np.float32)
im1 = np.asarray(im1, dtype=np.float32)
# check input
if not isccs:
im0, im1 = dft_optsize_same(im0, im1)
else:
# Work only if the shapes are the same
assert(im0.shape == im1.shape)
# f0*conj(f1)
mulSpec = cv2.mulSpectrums(im0, im1, flags=0, conjB=True)
# norm(f0)*norm(f1)
normccs = cv2.sqrt(cv2.mulSpectrums(im0, im0, flags=0, conjB=True) *
cv2.mulSpectrums(im1, im1, flags=0, conjB=True))
# compute the inverse DFT
xc = cv2.dft(ccs_normalize(mulSpec, normccs),
flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_INVERSE)
# Blur xc to remove some noise and improve the subpixel detection
# workaround as GaussianBlur doesn't work with BORDER_WRAP
blurRadii = 2
xc = cv2.copyMakeBorder(xc, blurRadii, blurRadii, blurRadii, blurRadii,
borderType=cv2.BORDER_WRAP)
xc = cv2.GaussianBlur(xc, (2 * blurRadii + 1, 2 * blurRadii + 1), 1.5)
xc = xc[blurRadii:-blurRadii, blurRadii:-blurRadii]
# save shape
shape = np.asarray(xc.shape)
# find max
idx = np.asarray(np.unravel_index(np.argmax(xc), shape))
"""
from matplotlib import pyplot as plt
from numpy.fft import fftshift
plt.figure()
plt.imshow(np.log(np.abs(fftshift(im0))))
plt.figure()
plt.imshow(np.log(np.abs(fftshift(im1))))
plt.figure()
plt.imshow(fftshift(ccs_normalize(mulSpec,normccs)))
plt.figure()
extent= (-np.shape(xc)[1]/2, np.shape(xc)[1]/2, -np.shape(xc)[0]/2, np.shape(xc)[0]/2 )
plt.imshow(np.log(np.abs(fftshift(xc))),extent = extent)
#"""
# plt.imshow(fftshift(xc))
# print(idx)
# plt.figure()
# if toremove:
# plt.figure(1)
# l=len(xc[:,0])
# plt.plot(np.arange(l)/l,xc[:,0])
# print(l,xc[-1,0])
# plt.figure(2)
#"""
if subpix:
# update idx
idx = np.asarray([get_peak_pos(xc[:, idx[1]], wrap=True),
get_peak_pos(xc[idx[0], :], wrap=True)])
else:
# restrics to reasonable values
idx[idx > shape // 2] -= shape[idx > shape // 2]
return idx | [
"def",
"find_shift_dft",
"(",
"im0",
",",
"im1",
",",
"isccs",
"=",
"False",
",",
"subpix",
"=",
"True",
")",
":",
"# sanitize input",
"im0",
"=",
"np",
".",
"asarray",
"(",
"im0",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"im1",
"=",
"np",
".",
"asarray",
"(",
"im1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# check input",
"if",
"not",
"isccs",
":",
"im0",
",",
"im1",
"=",
"dft_optsize_same",
"(",
"im0",
",",
"im1",
")",
"else",
":",
"# Work only if the shapes are the same",
"assert",
"(",
"im0",
".",
"shape",
"==",
"im1",
".",
"shape",
")",
"# f0*conj(f1)",
"mulSpec",
"=",
"cv2",
".",
"mulSpectrums",
"(",
"im0",
",",
"im1",
",",
"flags",
"=",
"0",
",",
"conjB",
"=",
"True",
")",
"# norm(f0)*norm(f1)",
"normccs",
"=",
"cv2",
".",
"sqrt",
"(",
"cv2",
".",
"mulSpectrums",
"(",
"im0",
",",
"im0",
",",
"flags",
"=",
"0",
",",
"conjB",
"=",
"True",
")",
"*",
"cv2",
".",
"mulSpectrums",
"(",
"im1",
",",
"im1",
",",
"flags",
"=",
"0",
",",
"conjB",
"=",
"True",
")",
")",
"# compute the inverse DFT",
"xc",
"=",
"cv2",
".",
"dft",
"(",
"ccs_normalize",
"(",
"mulSpec",
",",
"normccs",
")",
",",
"flags",
"=",
"cv2",
".",
"DFT_REAL_OUTPUT",
"|",
"cv2",
".",
"DFT_INVERSE",
")",
"# Blur xc to remove some noise and improve the subpixel detection",
"# workaround as GaussianBlur doesn't work with BORDER_WRAP",
"blurRadii",
"=",
"2",
"xc",
"=",
"cv2",
".",
"copyMakeBorder",
"(",
"xc",
",",
"blurRadii",
",",
"blurRadii",
",",
"blurRadii",
",",
"blurRadii",
",",
"borderType",
"=",
"cv2",
".",
"BORDER_WRAP",
")",
"xc",
"=",
"cv2",
".",
"GaussianBlur",
"(",
"xc",
",",
"(",
"2",
"*",
"blurRadii",
"+",
"1",
",",
"2",
"*",
"blurRadii",
"+",
"1",
")",
",",
"1.5",
")",
"xc",
"=",
"xc",
"[",
"blurRadii",
":",
"-",
"blurRadii",
",",
"blurRadii",
":",
"-",
"blurRadii",
"]",
"# save shape",
"shape",
"=",
"np",
".",
"asarray",
"(",
"xc",
".",
"shape",
")",
"# find max",
"idx",
"=",
"np",
".",
"asarray",
"(",
"np",
".",
"unravel_index",
"(",
"np",
".",
"argmax",
"(",
"xc",
")",
",",
"shape",
")",
")",
"\"\"\"\n from matplotlib import pyplot as plt\n from numpy.fft import fftshift\n plt.figure()\n plt.imshow(np.log(np.abs(fftshift(im0))))\n plt.figure()\n plt.imshow(np.log(np.abs(fftshift(im1))))\n plt.figure()\n plt.imshow(fftshift(ccs_normalize(mulSpec,normccs)))\n plt.figure()\n extent= (-np.shape(xc)[1]/2, np.shape(xc)[1]/2, -np.shape(xc)[0]/2, np.shape(xc)[0]/2 )\n plt.imshow(np.log(np.abs(fftshift(xc))),extent = extent)\n\n #\"\"\"",
"# plt.imshow(fftshift(xc))",
"# print(idx)",
"# plt.figure()",
"# if toremove:",
"# plt.figure(1)",
"# l=len(xc[:,0])",
"# plt.plot(np.arange(l)/l,xc[:,0])",
"# print(l,xc[-1,0])",
"# plt.figure(2)",
"#\"\"\"",
"if",
"subpix",
":",
"# update idx",
"idx",
"=",
"np",
".",
"asarray",
"(",
"[",
"get_peak_pos",
"(",
"xc",
"[",
":",
",",
"idx",
"[",
"1",
"]",
"]",
",",
"wrap",
"=",
"True",
")",
",",
"get_peak_pos",
"(",
"xc",
"[",
"idx",
"[",
"0",
"]",
",",
":",
"]",
",",
"wrap",
"=",
"True",
")",
"]",
")",
"else",
":",
"# restrics to reasonable values",
"idx",
"[",
"idx",
">",
"shape",
"//",
"2",
"]",
"-=",
"shape",
"[",
"idx",
">",
"shape",
"//",
"2",
"]",
"return",
"idx"
] | Find the shift between two images using the DFT method
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
isccs: Boolean, default false
        Set to True if the images are already DFT and in CCS representation
subpix: boolean, default True
Set to True (default) if you want subpixel precision
Returns
-------
[y, x]: 2 numbers
The offset
Notes
-----
    This algorithm detects a shift using the global phase difference of the DFTs
If the images are already DFT and in the CCS format, set isccs to true.
In that case the images should have the same size.
If subpix is True, a gaussian fit is used for subpix precision | [
"Find",
"the",
"shift",
"between",
"two",
"images",
"using",
"the",
"DFT",
"method"
] | train | https://github.com/impact27/registrator/blob/04c099d83e0466207dc5b2e40d9b03db020d4dad/registrator/image.py#L158-L250 | 0.001326 |
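
A hypothetical usage sketch: it assumes the registrator package (and its OpenCV dependency) is importable as registrator.image, and that the recovered offset matches the applied shift up to the routine's sign convention:

    import numpy as np
    from registrator import image as reg      # assumed import path for registrator/image.py

    rng = np.random.default_rng(0)
    im0 = rng.random((128, 128)).astype(np.float32)
    im1 = np.roll(im0, shift=(5, -3), axis=(0, 1))   # translate by +5 rows, -3 columns

    offset = reg.find_shift_dft(im0, im1)      # subpixel estimate; roughly [5, -3] up to sign convention
    print(offset)
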
fabioz/PyDev.Debugger | _pydevd_bundle/pydevd_comm.py | InternalLoadFullValue.do_it | def do_it(self, dbg):
'''Starts a thread that will load values asynchronously'''
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd) | python | def do_it(self, dbg):
'''Starts a thread that will load values asynchronously'''
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd) | [
"def",
"do_it",
"(",
"self",
",",
"dbg",
")",
":",
"try",
":",
"var_objects",
"=",
"[",
"]",
"for",
"variable",
"in",
"self",
".",
"vars",
":",
"variable",
"=",
"variable",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"variable",
")",
">",
"0",
":",
"if",
"'\\t'",
"in",
"variable",
":",
"# there are attributes beyond scope",
"scope",
",",
"attrs",
"=",
"variable",
".",
"split",
"(",
"'\\t'",
",",
"1",
")",
"name",
"=",
"attrs",
"[",
"0",
"]",
"else",
":",
"scope",
",",
"attrs",
"=",
"(",
"variable",
",",
"None",
")",
"name",
"=",
"scope",
"var_obj",
"=",
"pydevd_vars",
".",
"getVariable",
"(",
"dbg",
",",
"self",
".",
"thread_id",
",",
"self",
".",
"frame_id",
",",
"scope",
",",
"attrs",
")",
"var_objects",
".",
"append",
"(",
"(",
"var_obj",
",",
"name",
")",
")",
"t",
"=",
"GetValueAsyncThreadDebug",
"(",
"dbg",
",",
"self",
".",
"sequence",
",",
"var_objects",
")",
"t",
".",
"start",
"(",
")",
"except",
":",
"exc",
"=",
"get_exception_traceback_str",
"(",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'%s\\n'",
"%",
"(",
"exc",
",",
")",
")",
"cmd",
"=",
"dbg",
".",
"cmd_factory",
".",
"make_error_message",
"(",
"self",
".",
"sequence",
",",
"\"Error evaluating variable %s \"",
"%",
"exc",
")",
"dbg",
".",
"writer",
".",
"add_command",
"(",
"cmd",
")"
] | Starts a thread that will load values asynchronously | [
"Starts",
"a",
"thread",
"that",
"will",
"load",
"values",
"asynchronously"
] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_comm.py#L1385-L1407 | 0.004695 |
fhs/pyhdf | pyhdf/SD.py | SD.setfillmode | def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if not fill_mode in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode | python | def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if not fill_mode in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode | [
"def",
"setfillmode",
"(",
"self",
",",
"fill_mode",
")",
":",
"if",
"not",
"fill_mode",
"in",
"[",
"SDC",
".",
"FILL",
",",
"SDC",
".",
"NOFILL",
"]",
":",
"raise",
"HDF4Error",
"(",
"\"bad fill mode\"",
")",
"old_mode",
"=",
"_C",
".",
"SDsetfillmode",
"(",
"self",
".",
"_id",
",",
"fill_mode",
")",
"_checkErr",
"(",
"'setfillmode'",
",",
"old_mode",
",",
"'cannot execute'",
")",
"return",
"old_mode"
] | Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode | [
"Set",
"the",
"fill",
"mode",
"for",
"all",
"the",
"datasets",
"in",
"the",
"file",
"."
] | train | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1536-L1558 | 0.003636 |
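
A hedged usage sketch, assuming pyhdf is installed and the working directory is writable; HDF4's default fill mode is SDC.FILL, so the first call is expected to return it:

    from pyhdf.SD import SD, SDC

    sd = SD('out.hdf', SDC.WRITE | SDC.CREATE)   # create/open an HDF4 file for writing
    previous = sd.setfillmode(SDC.NOFILL)        # stop pre-filling newly written datasets
    print(previous == SDC.FILL)                  # True with the library default
    sd.end()
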
adamhajari/spyre | spyre/server.py | App.getDownload | def getDownload(self, params):
"""Override this function
arguments: params (dict)
returns: path to file or buffer to be downloaded (string or buffer)
"""
df = self.getData(params)
buffer = io.StringIO()
df.to_csv(buffer, index=False, encoding='utf-8')
filepath = buffer
return filepath | python | def getDownload(self, params):
"""Override this function
arguments: params (dict)
returns: path to file or buffer to be downloaded (string or buffer)
"""
df = self.getData(params)
buffer = io.StringIO()
df.to_csv(buffer, index=False, encoding='utf-8')
filepath = buffer
return filepath | [
"def",
"getDownload",
"(",
"self",
",",
"params",
")",
":",
"df",
"=",
"self",
".",
"getData",
"(",
"params",
")",
"buffer",
"=",
"io",
".",
"StringIO",
"(",
")",
"df",
".",
"to_csv",
"(",
"buffer",
",",
"index",
"=",
"False",
",",
"encoding",
"=",
"'utf-8'",
")",
"filepath",
"=",
"buffer",
"return",
"filepath"
] | Override this function
arguments: params (dict)
returns: path to file or buffer to be downloaded (string or buffer) | [
"Override",
"this",
"function"
] | train | https://github.com/adamhajari/spyre/blob/5dd9f6de072e99af636ab7e7393d249761c56e69/spyre/server.py#L367-L377 | 0.005587 |
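
A minimal sketch of overriding getDownload in a user app; it assumes the usual `from spyre import server` entry point and pandas, and the data returned by getData here is made up:

    import io
    import pandas as pd
    from spyre import server

    class MyApp(server.App):
        title = "Example"

        def getData(self, params):
            # hypothetical data source
            return pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})

        def getDownload(self, params):
            # same pattern as the default implementation above
            buffer = io.StringIO()
            self.getData(params).to_csv(buffer, index=False)
            return buffer
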
mitsei/dlkit | dlkit/json_/assessment/objects.py | AssessmentTaken.get_taker_id | def get_taker_id(self):
"""Gets the ``Id`` of the resource who took or is taking this assessment.
return: (osid.id.Id) - the resource ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
if self._my_map['takerId']:
return Id(self._my_map['takerId'])
else:
return Id(self._my_map['takingAgentId']) | python | def get_taker_id(self):
"""Gets the ``Id`` of the resource who took or is taking this assessment.
return: (osid.id.Id) - the resource ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
if self._my_map['takerId']:
return Id(self._my_map['takerId'])
else:
return Id(self._my_map['takingAgentId']) | [
"def",
"get_taker_id",
"(",
"self",
")",
":",
"if",
"self",
".",
"_my_map",
"[",
"'takerId'",
"]",
":",
"return",
"Id",
"(",
"self",
".",
"_my_map",
"[",
"'takerId'",
"]",
")",
"else",
":",
"return",
"Id",
"(",
"self",
".",
"_my_map",
"[",
"'takingAgentId'",
"]",
")"
] | Gets the ``Id`` of the resource who took or is taking this assessment.
return: (osid.id.Id) - the resource ``Id``
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"Id",
"of",
"the",
"resource",
"who",
"took",
"or",
"is",
"taking",
"this",
"assessment",
"."
] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L2197-L2207 | 0.007732 |
pymc-devs/pymc | pymc/NormalApproximation.py | MAP.fit | def fit(self, method='fmin_powell', iterlim=1000, tol=.0001, verbose=0,
no_callback=False, **kwargs):
"""
N.fit(method='fmin_powell', iterlim=1000, tol=.001):
Causes the normal approximation object to fit itself.
method: May be one of the following, from the scipy.optimize package:
-fmin_l_bfgs_b
-fmin_ncg
-fmin_cg
-fmin_powell
-fmin
no_callback: Boolean indicating whether or not to use a callback
function. If True and a callback keyword is provided in kwargs, then
the user-supplied callback will be used. Otherwise, if False,
and verbose > 0, a default callback will print iteration progress.
The kwargs are passed to the scipy.optimize functions. See there
for more information.
"""
self.tol = tol
self.method = method
self.verbose = verbose
p = zeros(self.len, dtype=float)
for stochastic in self.stochastics:
p[self._slices[stochastic]] = ravel(stochastic.value)
if not self.method == 'newton':
if not scipy_imported:
raise ImportError('Scipy is required to use EM and NormApprox')
default_callback = (verbose > 0 and not no_callback)
if default_callback and 'callback' in kwargs:
raise ValueError("For user-provided callback and verbose output"
" set use_callback to True")
if default_callback:
def callback(p):
try:
print_('Current log-probability : %f' % self.logp)
except ZeroProbability:
print_('Current log-probability : %f' % -Inf)
elif 'callback' in kwargs:
callback = kwargs.pop('callback')
else:
def callback(p):
pass
if self.method == 'fmin_ncg':
p = fmin_ncg(f=self.func,
x0=p,
fprime=self.gradfunc,
fhess=self.hessfunc,
epsilon=self.eps,
maxiter=iterlim,
callback=callback,
avextol=tol,
disp=verbose,
**kwargs)
elif self.method == 'fmin':
p = fmin(func=self.func,
x0=p,
callback=callback,
maxiter=iterlim,
ftol=tol,
disp=verbose, **kwargs)
elif self.method == 'fmin_powell':
p = fmin_powell(func=self.func,
x0=p,
callback=callback,
maxiter=iterlim,
ftol=tol,
disp=verbose, **kwargs)
elif self.method == 'fmin_cg':
p = fmin_cg(f=self.func, x0=p,
fprime=self.gradfunc,
epsilon=self.eps,
callback=callback,
maxiter=iterlim,
gtol=tol,
disp=verbose, **kwargs)
elif self.method == 'fmin_l_bfgs_b':
from scipy import __version__ as sp_version
from distutils.version import LooseVersion
if LooseVersion(sp_version) >= LooseVersion('0.12.0'):
p = fmin_l_bfgs_b(func=self.func,
x0=p,
fprime=self.gradfunc,
epsilon=self.eps,
callback=callback,
pgtol=tol,
iprint=verbose - 1,
**kwargs)[0]
else:
if verbose > 0:
from warnings import warn
warn("Callbacks are not available for fmin_l_bfgs_b in "
"SciPy < 0.12.0. Optimization progress will not be"
"displayed.", UserWarning)
p = fmin_l_bfgs_b(func=self.func,
x0=p,
fprime=self.gradfunc,
epsilon=self.eps,
pgtol=tol,
iprint=verbose - 1,
**kwargs)[0]
else:
raise ValueError('Method unknown.')
self._set_stochastics(p)
self._mu = p
try:
self.logp_at_max = self.logp
except:
raise RuntimeError(
'Posterior probability optimization converged to value with zero probability.')
lnL = sum([x.logp for x in self.observed_stochastics]
) # log-likelihood of observed stochastics
self.lnL = lnL
try:
self.AIC = 2. * (self.len - lnL) # 2k - 2 ln(L)
self.AICc = self.AIC + ((2 * self.len * (self.len + 1)) / float(self.data_len - self.len - 1))
except Exception as e:
print('Cannot calculate AIC:', e)
self.AICc = self.AIC = -Inf
try:
self.BIC = self.len * log(
self.data_len) - 2. * lnL # k ln(n) - 2 ln(L)
except FloatingPointError as e:
print('Cannot calculate BIC:', e)
self.BIC = -Inf
self.fitted = True | python | def fit(self, method='fmin_powell', iterlim=1000, tol=.0001, verbose=0,
no_callback=False, **kwargs):
"""
N.fit(method='fmin_powell', iterlim=1000, tol=.001):
Causes the normal approximation object to fit itself.
method: May be one of the following, from the scipy.optimize package:
-fmin_l_bfgs_b
-fmin_ncg
-fmin_cg
-fmin_powell
-fmin
no_callback: Boolean indicating whether or not to use a callback
function. If True and a callback keyword is provided in kwargs, then
the user-supplied callback will be used. Otherwise, if False,
and verbose > 0, a default callback will print iteration progress.
The kwargs are passed to the scipy.optimize functions. See there
for more information.
"""
self.tol = tol
self.method = method
self.verbose = verbose
p = zeros(self.len, dtype=float)
for stochastic in self.stochastics:
p[self._slices[stochastic]] = ravel(stochastic.value)
if not self.method == 'newton':
if not scipy_imported:
raise ImportError('Scipy is required to use EM and NormApprox')
default_callback = (verbose > 0 and not no_callback)
if default_callback and 'callback' in kwargs:
raise ValueError("For user-provided callback and verbose output"
" set use_callback to True")
if default_callback:
def callback(p):
try:
print_('Current log-probability : %f' % self.logp)
except ZeroProbability:
print_('Current log-probability : %f' % -Inf)
elif 'callback' in kwargs:
callback = kwargs.pop('callback')
else:
def callback(p):
pass
if self.method == 'fmin_ncg':
p = fmin_ncg(f=self.func,
x0=p,
fprime=self.gradfunc,
fhess=self.hessfunc,
epsilon=self.eps,
maxiter=iterlim,
callback=callback,
avextol=tol,
disp=verbose,
**kwargs)
elif self.method == 'fmin':
p = fmin(func=self.func,
x0=p,
callback=callback,
maxiter=iterlim,
ftol=tol,
disp=verbose, **kwargs)
elif self.method == 'fmin_powell':
p = fmin_powell(func=self.func,
x0=p,
callback=callback,
maxiter=iterlim,
ftol=tol,
disp=verbose, **kwargs)
elif self.method == 'fmin_cg':
p = fmin_cg(f=self.func, x0=p,
fprime=self.gradfunc,
epsilon=self.eps,
callback=callback,
maxiter=iterlim,
gtol=tol,
disp=verbose, **kwargs)
elif self.method == 'fmin_l_bfgs_b':
from scipy import __version__ as sp_version
from distutils.version import LooseVersion
if LooseVersion(sp_version) >= LooseVersion('0.12.0'):
p = fmin_l_bfgs_b(func=self.func,
x0=p,
fprime=self.gradfunc,
epsilon=self.eps,
callback=callback,
pgtol=tol,
iprint=verbose - 1,
**kwargs)[0]
else:
if verbose > 0:
from warnings import warn
warn("Callbacks are not available for fmin_l_bfgs_b in "
"SciPy < 0.12.0. Optimization progress will not be"
"displayed.", UserWarning)
p = fmin_l_bfgs_b(func=self.func,
x0=p,
fprime=self.gradfunc,
epsilon=self.eps,
pgtol=tol,
iprint=verbose - 1,
**kwargs)[0]
else:
raise ValueError('Method unknown.')
self._set_stochastics(p)
self._mu = p
try:
self.logp_at_max = self.logp
except:
raise RuntimeError(
'Posterior probability optimization converged to value with zero probability.')
lnL = sum([x.logp for x in self.observed_stochastics]
) # log-likelihood of observed stochastics
self.lnL = lnL
try:
self.AIC = 2. * (self.len - lnL) # 2k - 2 ln(L)
self.AICc = self.AIC + ((2 * self.len * (self.len + 1)) / float(self.data_len - self.len - 1))
except Exception as e:
print('Cannot calculate AIC:', e)
self.AICc = self.AIC = -Inf
try:
self.BIC = self.len * log(
self.data_len) - 2. * lnL # k ln(n) - 2 ln(L)
except FloatingPointError as e:
print('Cannot calculate BIC:', e)
self.BIC = -Inf
self.fitted = True | [
"def",
"fit",
"(",
"self",
",",
"method",
"=",
"'fmin_powell'",
",",
"iterlim",
"=",
"1000",
",",
"tol",
"=",
".0001",
",",
"verbose",
"=",
"0",
",",
"no_callback",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"tol",
"=",
"tol",
"self",
".",
"method",
"=",
"method",
"self",
".",
"verbose",
"=",
"verbose",
"p",
"=",
"zeros",
"(",
"self",
".",
"len",
",",
"dtype",
"=",
"float",
")",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"p",
"[",
"self",
".",
"_slices",
"[",
"stochastic",
"]",
"]",
"=",
"ravel",
"(",
"stochastic",
".",
"value",
")",
"if",
"not",
"self",
".",
"method",
"==",
"'newton'",
":",
"if",
"not",
"scipy_imported",
":",
"raise",
"ImportError",
"(",
"'Scipy is required to use EM and NormApprox'",
")",
"default_callback",
"=",
"(",
"verbose",
">",
"0",
"and",
"not",
"no_callback",
")",
"if",
"default_callback",
"and",
"'callback'",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"For user-provided callback and verbose output\"",
"\" set use_callback to True\"",
")",
"if",
"default_callback",
":",
"def",
"callback",
"(",
"p",
")",
":",
"try",
":",
"print_",
"(",
"'Current log-probability : %f'",
"%",
"self",
".",
"logp",
")",
"except",
"ZeroProbability",
":",
"print_",
"(",
"'Current log-probability : %f'",
"%",
"-",
"Inf",
")",
"elif",
"'callback'",
"in",
"kwargs",
":",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
")",
"else",
":",
"def",
"callback",
"(",
"p",
")",
":",
"pass",
"if",
"self",
".",
"method",
"==",
"'fmin_ncg'",
":",
"p",
"=",
"fmin_ncg",
"(",
"f",
"=",
"self",
".",
"func",
",",
"x0",
"=",
"p",
",",
"fprime",
"=",
"self",
".",
"gradfunc",
",",
"fhess",
"=",
"self",
".",
"hessfunc",
",",
"epsilon",
"=",
"self",
".",
"eps",
",",
"maxiter",
"=",
"iterlim",
",",
"callback",
"=",
"callback",
",",
"avextol",
"=",
"tol",
",",
"disp",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"method",
"==",
"'fmin'",
":",
"p",
"=",
"fmin",
"(",
"func",
"=",
"self",
".",
"func",
",",
"x0",
"=",
"p",
",",
"callback",
"=",
"callback",
",",
"maxiter",
"=",
"iterlim",
",",
"ftol",
"=",
"tol",
",",
"disp",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"method",
"==",
"'fmin_powell'",
":",
"p",
"=",
"fmin_powell",
"(",
"func",
"=",
"self",
".",
"func",
",",
"x0",
"=",
"p",
",",
"callback",
"=",
"callback",
",",
"maxiter",
"=",
"iterlim",
",",
"ftol",
"=",
"tol",
",",
"disp",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"method",
"==",
"'fmin_cg'",
":",
"p",
"=",
"fmin_cg",
"(",
"f",
"=",
"self",
".",
"func",
",",
"x0",
"=",
"p",
",",
"fprime",
"=",
"self",
".",
"gradfunc",
",",
"epsilon",
"=",
"self",
".",
"eps",
",",
"callback",
"=",
"callback",
",",
"maxiter",
"=",
"iterlim",
",",
"gtol",
"=",
"tol",
",",
"disp",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"method",
"==",
"'fmin_l_bfgs_b'",
":",
"from",
"scipy",
"import",
"__version__",
"as",
"sp_version",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"if",
"LooseVersion",
"(",
"sp_version",
")",
">=",
"LooseVersion",
"(",
"'0.12.0'",
")",
":",
"p",
"=",
"fmin_l_bfgs_b",
"(",
"func",
"=",
"self",
".",
"func",
",",
"x0",
"=",
"p",
",",
"fprime",
"=",
"self",
".",
"gradfunc",
",",
"epsilon",
"=",
"self",
".",
"eps",
",",
"callback",
"=",
"callback",
",",
"pgtol",
"=",
"tol",
",",
"iprint",
"=",
"verbose",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"else",
":",
"if",
"verbose",
">",
"0",
":",
"from",
"warnings",
"import",
"warn",
"warn",
"(",
"\"Callbacks are not available for fmin_l_bfgs_b in \"",
"\"SciPy < 0.12.0. Optimization progress will not be\"",
"\"displayed.\"",
",",
"UserWarning",
")",
"p",
"=",
"fmin_l_bfgs_b",
"(",
"func",
"=",
"self",
".",
"func",
",",
"x0",
"=",
"p",
",",
"fprime",
"=",
"self",
".",
"gradfunc",
",",
"epsilon",
"=",
"self",
".",
"eps",
",",
"pgtol",
"=",
"tol",
",",
"iprint",
"=",
"verbose",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Method unknown.'",
")",
"self",
".",
"_set_stochastics",
"(",
"p",
")",
"self",
".",
"_mu",
"=",
"p",
"try",
":",
"self",
".",
"logp_at_max",
"=",
"self",
".",
"logp",
"except",
":",
"raise",
"RuntimeError",
"(",
"'Posterior probability optimization converged to value with zero probability.'",
")",
"lnL",
"=",
"sum",
"(",
"[",
"x",
".",
"logp",
"for",
"x",
"in",
"self",
".",
"observed_stochastics",
"]",
")",
"# log-likelihood of observed stochastics",
"self",
".",
"lnL",
"=",
"lnL",
"try",
":",
"self",
".",
"AIC",
"=",
"2.",
"*",
"(",
"self",
".",
"len",
"-",
"lnL",
")",
"# 2k - 2 ln(L)",
"self",
".",
"AICc",
"=",
"self",
".",
"AIC",
"+",
"(",
"(",
"2",
"*",
"self",
".",
"len",
"*",
"(",
"self",
".",
"len",
"+",
"1",
")",
")",
"/",
"float",
"(",
"self",
".",
"data_len",
"-",
"self",
".",
"len",
"-",
"1",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Cannot calculate AIC:'",
",",
"e",
")",
"self",
".",
"AICc",
"=",
"self",
".",
"AIC",
"=",
"-",
"Inf",
"try",
":",
"self",
".",
"BIC",
"=",
"self",
".",
"len",
"*",
"log",
"(",
"self",
".",
"data_len",
")",
"-",
"2.",
"*",
"lnL",
"# k ln(n) - 2 ln(L)",
"except",
"FloatingPointError",
"as",
"e",
":",
"print",
"(",
"'Cannot calculate BIC:'",
",",
"e",
")",
"self",
".",
"BIC",
"=",
"-",
"Inf",
"self",
".",
"fitted",
"=",
"True"
] | N.fit(method='fmin_powell', iterlim=1000, tol=.001):
Causes the normal approximation object to fit itself.
method: May be one of the following, from the scipy.optimize package:
-fmin_l_bfgs_b
-fmin_ncg
-fmin_cg
-fmin_powell
-fmin
no_callback: Boolean indicating whether or not to use a callback
function. If True and a callback keyword is provided in kwargs, then
the user-supplied callback will be used. Otherwise, if False,
and verbose > 0, a default callback will print iteration progress.
The kwargs are passed to the scipy.optimize functions. See there
for more information. | [
"N",
".",
"fit",
"(",
"method",
"=",
"fmin_powell",
"iterlim",
"=",
"1000",
"tol",
"=",
".",
"001",
")",
":"
] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L243-L385 | 0.001087 |
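
A hedged usage sketch for the PyMC 2.x API with a tiny made-up model; it assumes pymc.Normal and pymc.MAP behave as in the 2.3 series:

    import pymc

    x = pymc.Normal('x', mu=0.0, tau=1.0)                           # prior
    obs = pymc.Normal('obs', mu=x, tau=1.0,
                      value=[0.2, -0.1, 0.4], observed=True)        # made-up data

    M = pymc.MAP({'x': x, 'obs': obs})
    M.fit(method='fmin_powell', iterlim=1000, tol=1e-4, verbose=1)  # any of the scipy.optimize-backed methods
    print(x.value, M.AIC, M.BIC)                                    # MAP estimate and information criteria
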
numenta/htmresearch | htmresearch/frameworks/location/path_integration_union_narrowing.py | PIUNCorticalColumn.reset | def reset(self):
"""
Clear all cell activity.
"""
self.L4.reset()
for module in self.L6aModules:
module.reset() | python | def reset(self):
"""
Clear all cell activity.
"""
self.L4.reset()
for module in self.L6aModules:
module.reset() | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"L4",
".",
"reset",
"(",
")",
"for",
"module",
"in",
"self",
".",
"L6aModules",
":",
"module",
".",
"reset",
"(",
")"
] | Clear all cell activity. | [
"Clear",
"all",
"cell",
"activity",
"."
] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/location/path_integration_union_narrowing.py#L255-L261 | 0.014599 |
Azure/azure-event-hubs-python | azure/eventhub/common.py | EventData.enqueued_time | def enqueued_time(self):
"""
The enqueued timestamp of the event data object.
:rtype: datetime.datetime
"""
timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None)
if timestamp:
return datetime.datetime.utcfromtimestamp(float(timestamp)/1000)
return None | python | def enqueued_time(self):
"""
The enqueued timestamp of the event data object.
:rtype: datetime.datetime
"""
timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None)
if timestamp:
return datetime.datetime.utcfromtimestamp(float(timestamp)/1000)
return None | [
"def",
"enqueued_time",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_annotations",
".",
"get",
"(",
"EventData",
".",
"PROP_TIMESTAMP",
",",
"None",
")",
"if",
"timestamp",
":",
"return",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"timestamp",
")",
"/",
"1000",
")",
"return",
"None"
] | The enqueued timestamp of the event data object.
:rtype: datetime.datetime | [
"The",
"enqueued",
"timestamp",
"of",
"the",
"event",
"data",
"object",
"."
] | train | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/common.py#L135-L144 | 0.006006 |
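
The only non-trivial step in the property is the unit conversion: the message annotation stores milliseconds since the epoch. A small sketch with a made-up annotation value:

    import datetime

    annotation_ms = 1546300800000                                   # hypothetical enqueued-time annotation, in milliseconds
    enqueued = datetime.datetime.utcfromtimestamp(annotation_ms / 1000)
    print(enqueued)                                                 # 2019-01-01 00:00:00
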
cloudsmith-io/cloudsmith-cli | cloudsmith_cli/core/api/init.py | get_api_client | def get_api_client(cls):
"""Get an API client (with configuration)."""
config = cloudsmith_api.Configuration()
client = cls()
client.config = config
client.api_client.rest_client = RestClient()
user_agent = getattr(config, "user_agent", None)
if user_agent:
client.api_client.user_agent = user_agent
headers = getattr(config, "headers", None)
if headers:
for k, v in six.iteritems(headers):
client.api_client.set_default_header(k, v)
return client | python | def get_api_client(cls):
"""Get an API client (with configuration)."""
config = cloudsmith_api.Configuration()
client = cls()
client.config = config
client.api_client.rest_client = RestClient()
user_agent = getattr(config, "user_agent", None)
if user_agent:
client.api_client.user_agent = user_agent
headers = getattr(config, "headers", None)
if headers:
for k, v in six.iteritems(headers):
client.api_client.set_default_header(k, v)
return client | [
"def",
"get_api_client",
"(",
"cls",
")",
":",
"config",
"=",
"cloudsmith_api",
".",
"Configuration",
"(",
")",
"client",
"=",
"cls",
"(",
")",
"client",
".",
"config",
"=",
"config",
"client",
".",
"api_client",
".",
"rest_client",
"=",
"RestClient",
"(",
")",
"user_agent",
"=",
"getattr",
"(",
"config",
",",
"\"user_agent\"",
",",
"None",
")",
"if",
"user_agent",
":",
"client",
".",
"api_client",
".",
"user_agent",
"=",
"user_agent",
"headers",
"=",
"getattr",
"(",
"config",
",",
"\"headers\"",
",",
"None",
")",
"if",
"headers",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"headers",
")",
":",
"client",
".",
"api_client",
".",
"set_default_header",
"(",
"k",
",",
"v",
")",
"return",
"client"
] | Get an API client (with configuration). | [
"Get",
"an",
"API",
"client",
"(",
"with",
"configuration",
")",
"."
] | train | https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/core/api/init.py#L50-L66 | 0.001931 |
cbclab/MOT | mot/optimize/__init__.py | minimize | def minimize(func, x0, data=None, method=None, lower_bounds=None, upper_bounds=None, constraints_func=None,
nmr_observations=None, cl_runtime_info=None, options=None):
"""Minimization of one or more variables.
For an easy wrapper of function maximization, see :func:`maximize`.
All boundary conditions are enforced using the penalty method. That is, we optimize the objective function:
.. math::
        F(x) = f(x) + \mu \sum \max(0, g_i(x))^2
where :math:`F(x)` is the new objective function, :math:`f(x)` is the old objective function, :math:`g_i` are
the boundary functions defined as :math:`g_i(x) \leq 0` and :math:`\mu` is the penalty weight.
The penalty weight is by default :math:`\mu = 1e20` and can be set
using the ``options`` dictionary as ``penalty_weight``.
Args:
func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
The objective list needs to be filled when the provided pointer is not null. It should contain
the function values for each observation. This list is used by non-linear least-squares routines,
and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine.
x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p'
independent variables.
data (mot.lib.kernel_data.KernelData): the kernel data we will load. This is returned to the likelihood function
as the ``void* data`` pointer.
method (str): Type of solver. Should be one of:
- 'Levenberg-Marquardt'
- 'Nelder-Mead'
- 'Powell'
- 'Subplex'
If not given, defaults to 'Powell'.
lower_bounds (tuple): per parameter a lower bound, if given, the optimizer ensures ``a <= x`` with
a the lower bound and x the parameter. If not given, -infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
upper_bounds (tuple): per parameter an upper bound, if given, the optimizer ensures ``x >= b`` with
b the upper bound and x the parameter. If not given, +infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraints);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraints[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
nmr_observations (int): the number of observations returned by the optimization function.
This is only needed for the ``Levenberg-Marquardt`` method.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the CL runtime information
options (dict): A dictionary of solver options. All methods accept the following generic options:
- patience (int): Maximum number of iterations to perform.
- penalty_weight (float): the weight of the penalty term for the boundary conditions
Returns:
mot.optimize.base.OptimizeResults:
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array.
"""
if not method:
method = 'Powell'
cl_runtime_info = cl_runtime_info or CLRuntimeInfo()
if len(x0.shape) < 2:
x0 = x0[..., None]
lower_bounds = _bounds_to_array(lower_bounds or np.ones(x0.shape[1]) * -np.inf)
upper_bounds = _bounds_to_array(upper_bounds or np.ones(x0.shape[1]) * np.inf)
if method == 'Powell':
return _minimize_powell(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Nelder-Mead':
return _minimize_nmsimplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Levenberg-Marquardt':
return _minimize_levenberg_marquardt(func, x0, nmr_observations, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Subplex':
return _minimize_subplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
raise ValueError('Could not find the specified method "{}".'.format(method)) | python | def minimize(func, x0, data=None, method=None, lower_bounds=None, upper_bounds=None, constraints_func=None,
nmr_observations=None, cl_runtime_info=None, options=None):
"""Minimization of one or more variables.
For an easy wrapper of function maximization, see :func:`maximize`.
All boundary conditions are enforced using the penalty method. That is, we optimize the objective function:
.. math::
        F(x) = f(x) + \mu \sum \max(0, g_i(x))^2
where :math:`F(x)` is the new objective function, :math:`f(x)` is the old objective function, :math:`g_i` are
the boundary functions defined as :math:`g_i(x) \leq 0` and :math:`\mu` is the penalty weight.
The penalty weight is by default :math:`\mu = 1e20` and can be set
using the ``options`` dictionary as ``penalty_weight``.
Args:
func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
The objective list needs to be filled when the provided pointer is not null. It should contain
the function values for each observation. This list is used by non-linear least-squares routines,
and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine.
x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p'
independent variables.
data (mot.lib.kernel_data.KernelData): the kernel data we will load. This is returned to the likelihood function
as the ``void* data`` pointer.
method (str): Type of solver. Should be one of:
- 'Levenberg-Marquardt'
- 'Nelder-Mead'
- 'Powell'
- 'Subplex'
If not given, defaults to 'Powell'.
lower_bounds (tuple): per parameter a lower bound, if given, the optimizer ensures ``a <= x`` with
a the lower bound and x the parameter. If not given, -infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
upper_bounds (tuple): per parameter an upper bound, if given, the optimizer ensures ``x >= b`` with
b the upper bound and x the parameter. If not given, +infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraints);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraints[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
nmr_observations (int): the number of observations returned by the optimization function.
This is only needed for the ``Levenberg-Marquardt`` method.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the CL runtime information
options (dict): A dictionary of solver options. All methods accept the following generic options:
- patience (int): Maximum number of iterations to perform.
- penalty_weight (float): the weight of the penalty term for the boundary conditions
Returns:
mot.optimize.base.OptimizeResults:
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array.
"""
if not method:
method = 'Powell'
cl_runtime_info = cl_runtime_info or CLRuntimeInfo()
if len(x0.shape) < 2:
x0 = x0[..., None]
lower_bounds = _bounds_to_array(lower_bounds or np.ones(x0.shape[1]) * -np.inf)
upper_bounds = _bounds_to_array(upper_bounds or np.ones(x0.shape[1]) * np.inf)
if method == 'Powell':
return _minimize_powell(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Nelder-Mead':
return _minimize_nmsimplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Levenberg-Marquardt':
return _minimize_levenberg_marquardt(func, x0, nmr_observations, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Subplex':
return _minimize_subplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
raise ValueError('Could not find the specified method "{}".'.format(method)) | [
"def",
"minimize",
"(",
"func",
",",
"x0",
",",
"data",
"=",
"None",
",",
"method",
"=",
"None",
",",
"lower_bounds",
"=",
"None",
",",
"upper_bounds",
"=",
"None",
",",
"constraints_func",
"=",
"None",
",",
"nmr_observations",
"=",
"None",
",",
"cl_runtime_info",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"if",
"not",
"method",
":",
"method",
"=",
"'Powell'",
"cl_runtime_info",
"=",
"cl_runtime_info",
"or",
"CLRuntimeInfo",
"(",
")",
"if",
"len",
"(",
"x0",
".",
"shape",
")",
"<",
"2",
":",
"x0",
"=",
"x0",
"[",
"...",
",",
"None",
"]",
"lower_bounds",
"=",
"_bounds_to_array",
"(",
"lower_bounds",
"or",
"np",
".",
"ones",
"(",
"x0",
".",
"shape",
"[",
"1",
"]",
")",
"*",
"-",
"np",
".",
"inf",
")",
"upper_bounds",
"=",
"_bounds_to_array",
"(",
"upper_bounds",
"or",
"np",
".",
"ones",
"(",
"x0",
".",
"shape",
"[",
"1",
"]",
")",
"*",
"np",
".",
"inf",
")",
"if",
"method",
"==",
"'Powell'",
":",
"return",
"_minimize_powell",
"(",
"func",
",",
"x0",
",",
"cl_runtime_info",
",",
"lower_bounds",
",",
"upper_bounds",
",",
"constraints_func",
"=",
"constraints_func",
",",
"data",
"=",
"data",
",",
"options",
"=",
"options",
")",
"elif",
"method",
"==",
"'Nelder-Mead'",
":",
"return",
"_minimize_nmsimplex",
"(",
"func",
",",
"x0",
",",
"cl_runtime_info",
",",
"lower_bounds",
",",
"upper_bounds",
",",
"constraints_func",
"=",
"constraints_func",
",",
"data",
"=",
"data",
",",
"options",
"=",
"options",
")",
"elif",
"method",
"==",
"'Levenberg-Marquardt'",
":",
"return",
"_minimize_levenberg_marquardt",
"(",
"func",
",",
"x0",
",",
"nmr_observations",
",",
"cl_runtime_info",
",",
"lower_bounds",
",",
"upper_bounds",
",",
"constraints_func",
"=",
"constraints_func",
",",
"data",
"=",
"data",
",",
"options",
"=",
"options",
")",
"elif",
"method",
"==",
"'Subplex'",
":",
"return",
"_minimize_subplex",
"(",
"func",
",",
"x0",
",",
"cl_runtime_info",
",",
"lower_bounds",
",",
"upper_bounds",
",",
"constraints_func",
"=",
"constraints_func",
",",
"data",
"=",
"data",
",",
"options",
"=",
"options",
")",
"raise",
"ValueError",
"(",
"'Could not find the specified method \"{}\".'",
".",
"format",
"(",
"method",
")",
")"
] | Minimization of one or more variables.
For an easy wrapper of function maximization, see :func:`maximize`.
All boundary conditions are enforced using the penalty method. That is, we optimize the objective function:
.. math::
        F(x) = f(x) + \mu \sum \max(0, g_i(x))^2
where :math:`F(x)` is the new objective function, :math:`f(x)` is the old objective function, :math:`g_i` are
the boundary functions defined as :math:`g_i(x) \leq 0` and :math:`\mu` is the penalty weight.
The penalty weight is by default :math:`\mu = 1e20` and can be set
using the ``options`` dictionary as ``penalty_weight``.
Args:
func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
The objective list needs to be filled when the provided pointer is not null. It should contain
the function values for each observation. This list is used by non-linear least-squares routines,
and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine.
x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p'
independent variables.
data (mot.lib.kernel_data.KernelData): the kernel data we will load. This is returned to the likelihood function
as the ``void* data`` pointer.
method (str): Type of solver. Should be one of:
- 'Levenberg-Marquardt'
- 'Nelder-Mead'
- 'Powell'
- 'Subplex'
If not given, defaults to 'Powell'.
lower_bounds (tuple): per parameter a lower bound, if given, the optimizer ensures ``a <= x`` with
a the lower bound and x the parameter. If not given, -infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
upper_bounds (tuple): per parameter an upper bound, if given, the optimizer ensures ``x >= b`` with
b the upper bound and x the parameter. If not given, +infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraints);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraints[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
nmr_observations (int): the number of observations returned by the optimization function.
This is only needed for the ``Levenberg-Marquardt`` method.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the CL runtime information
options (dict): A dictionary of solver options. All methods accept the following generic options:
- patience (int): Maximum number of iterations to perform.
- penalty_weight (float): the weight of the penalty term for the boundary conditions
Returns:
mot.optimize.base.OptimizeResults:
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array. | [
"Minimization",
"of",
"one",
"or",
"more",
"variables",
"."
] | train | https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/optimize/__init__.py#L16-L119 | 0.007291 |
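
A hedged usage sketch: it assumes an OpenCL runtime is available and that SimpleCLFunction.from_string can be used to build the CLFunction (as in other MOT examples); the objective and starting points are made up:

    import numpy as np
    from mot.lib.cl_function import SimpleCLFunction
    from mot.optimize import minimize

    objective = SimpleCLFunction.from_string('''
        double sphere(local const mot_float_type* const x, void* data,
                      local mot_float_type* objective_list){
            return x[0] * x[0] + x[1] * x[1];    // 2-parameter sphere; objective_list unused for Powell
        }
    ''')

    x0 = np.random.uniform(-5, 5, size=(100, 2))     # 100 independent problems, 2 parameters each
    result = minimize(objective, x0, method='Powell')
    print(result.x[:3])                              # per-problem solutions, all close to (0, 0)
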
raiden-network/raiden | raiden/network/proxies/token_network.py | TokenNetwork.can_transfer | def can_transfer(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
""" Returns True if the channel is opened and the node has deposit in
it.
Note: Having a deposit does not imply having a balance for off-chain
transfers. """
opened = self.channel_is_opened(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
if opened is False:
return False
deposit = self._detail_participant(
channel_identifier=channel_identifier,
participant=participant1,
partner=participant2,
block_identifier=block_identifier,
).deposit
return deposit > 0 | python | def can_transfer(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID,
) -> bool:
""" Returns True if the channel is opened and the node has deposit in
it.
Note: Having a deposit does not imply having a balance for off-chain
transfers. """
opened = self.channel_is_opened(
participant1=participant1,
participant2=participant2,
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
if opened is False:
return False
deposit = self._detail_participant(
channel_identifier=channel_identifier,
participant=participant1,
partner=participant2,
block_identifier=block_identifier,
).deposit
return deposit > 0 | [
"def",
"can_transfer",
"(",
"self",
",",
"participant1",
":",
"Address",
",",
"participant2",
":",
"Address",
",",
"block_identifier",
":",
"BlockSpecification",
",",
"channel_identifier",
":",
"ChannelID",
",",
")",
"->",
"bool",
":",
"opened",
"=",
"self",
".",
"channel_is_opened",
"(",
"participant1",
"=",
"participant1",
",",
"participant2",
"=",
"participant2",
",",
"block_identifier",
"=",
"block_identifier",
",",
"channel_identifier",
"=",
"channel_identifier",
",",
")",
"if",
"opened",
"is",
"False",
":",
"return",
"False",
"deposit",
"=",
"self",
".",
"_detail_participant",
"(",
"channel_identifier",
"=",
"channel_identifier",
",",
"participant",
"=",
"participant1",
",",
"partner",
"=",
"participant2",
",",
"block_identifier",
"=",
"block_identifier",
",",
")",
".",
"deposit",
"return",
"deposit",
">",
"0"
] | Returns True if the channel is opened and the node has deposit in
it.
Note: Having a deposit does not imply having a balance for off-chain
transfers. | [
"Returns",
"True",
"if",
"the",
"channel",
"is",
"opened",
"and",
"the",
"node",
"has",
"deposit",
"in",
"it",
"."
] | train | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/token_network.py#L599-L627 | 0.003175 |
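A hedged usage sketch for the can_transfer method above; the proxy construction is omitted, and the addresses, block identifier and channel identifier are placeholder assumptions rather than values taken from the record.

our_address = b'\x11' * 20        # placeholder 20-byte address
partner_address = b'\x22' * 20    # placeholder 20-byte address

can_pay = token_network.can_transfer(
    participant1=our_address,
    participant2=partner_address,
    block_identifier='latest',
    channel_identifier=1,         # placeholder ChannelID
)
print('can transfer' if can_pay else 'channel closed or no deposit')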
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.tagger | def tagger(self):
"""
A property to link into IntentEngine's intent_parsers.
        Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
        Return: the default domain's tagger from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].tagger | python | def tagger(self):
"""
A property to link into IntentEngine's intent_parsers.
        Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
        Return: the default domain's tagger from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].tagger | [
"def",
"tagger",
"(",
"self",
")",
":",
"domain",
"=",
"0",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"return",
"self",
".",
"domains",
"[",
"domain",
"]",
".",
"tagger"
] | A property to link into IntentEngine's intent_parsers.
        Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
        Return: the default domain's tagger from its IntentEngine | [
"A",
"property",
"to",
"link",
"into",
"IntentEngine",
"s",
"intent_parsers",
"."
] | train | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L228-L240 | 0.006787 |
nicolargo/glances | glances/plugins/glances_sensors.py | Plugin.msg_curse | def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist and display plugin enable...
if not self.stats or self.is_disable():
return ret
# Max size for the interface name
name_max_width = max_width - 12
# Header
msg = '{:{width}}'.format('SENSORS', width=name_max_width)
ret.append(self.curse_add_line(msg, "TITLE"))
# Stats
for i in self.stats:
# Do not display anything if no battery are detected
if i['type'] == 'battery' and i['value'] == []:
continue
# New line
ret.append(self.curse_new_line())
msg = '{:{width}}'.format(i["label"][:name_max_width],
width=name_max_width)
ret.append(self.curse_add_line(msg))
if i['value'] in (b'ERR', b'SLP', b'UNK', b'NOS'):
msg = '{:>13}'.format(i['value'])
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key='value',
option='decoration')))
else:
if (args.fahrenheit and i['type'] != 'battery' and
i['type'] != 'fan_speed'):
value = to_fahrenheit(i['value'])
unit = 'F'
else:
value = i['value']
unit = i['unit']
try:
msg = '{:>13.0f}{}'.format(value, unit)
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key='value',
option='decoration')))
except (TypeError, ValueError):
pass
return ret | python | def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist and display plugin enable...
if not self.stats or self.is_disable():
return ret
# Max size for the interface name
name_max_width = max_width - 12
# Header
msg = '{:{width}}'.format('SENSORS', width=name_max_width)
ret.append(self.curse_add_line(msg, "TITLE"))
# Stats
for i in self.stats:
# Do not display anything if no battery are detected
if i['type'] == 'battery' and i['value'] == []:
continue
# New line
ret.append(self.curse_new_line())
msg = '{:{width}}'.format(i["label"][:name_max_width],
width=name_max_width)
ret.append(self.curse_add_line(msg))
if i['value'] in (b'ERR', b'SLP', b'UNK', b'NOS'):
msg = '{:>13}'.format(i['value'])
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key='value',
option='decoration')))
else:
if (args.fahrenheit and i['type'] != 'battery' and
i['type'] != 'fan_speed'):
value = to_fahrenheit(i['value'])
unit = 'F'
else:
value = i['value']
unit = i['unit']
try:
msg = '{:>13.0f}{}'.format(value, unit)
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key='value',
option='decoration')))
except (TypeError, ValueError):
pass
return ret | [
"def",
"msg_curse",
"(",
"self",
",",
"args",
"=",
"None",
",",
"max_width",
"=",
"None",
")",
":",
"# Init the return message",
"ret",
"=",
"[",
"]",
"# Only process if stats exist and display plugin enable...",
"if",
"not",
"self",
".",
"stats",
"or",
"self",
".",
"is_disable",
"(",
")",
":",
"return",
"ret",
"# Max size for the interface name",
"name_max_width",
"=",
"max_width",
"-",
"12",
"# Header",
"msg",
"=",
"'{:{width}}'",
".",
"format",
"(",
"'SENSORS'",
",",
"width",
"=",
"name_max_width",
")",
"ret",
".",
"append",
"(",
"self",
".",
"curse_add_line",
"(",
"msg",
",",
"\"TITLE\"",
")",
")",
"# Stats",
"for",
"i",
"in",
"self",
".",
"stats",
":",
"# Do not display anything if no battery are detected",
"if",
"i",
"[",
"'type'",
"]",
"==",
"'battery'",
"and",
"i",
"[",
"'value'",
"]",
"==",
"[",
"]",
":",
"continue",
"# New line",
"ret",
".",
"append",
"(",
"self",
".",
"curse_new_line",
"(",
")",
")",
"msg",
"=",
"'{:{width}}'",
".",
"format",
"(",
"i",
"[",
"\"label\"",
"]",
"[",
":",
"name_max_width",
"]",
",",
"width",
"=",
"name_max_width",
")",
"ret",
".",
"append",
"(",
"self",
".",
"curse_add_line",
"(",
"msg",
")",
")",
"if",
"i",
"[",
"'value'",
"]",
"in",
"(",
"b'ERR'",
",",
"b'SLP'",
",",
"b'UNK'",
",",
"b'NOS'",
")",
":",
"msg",
"=",
"'{:>13}'",
".",
"format",
"(",
"i",
"[",
"'value'",
"]",
")",
"ret",
".",
"append",
"(",
"self",
".",
"curse_add_line",
"(",
"msg",
",",
"self",
".",
"get_views",
"(",
"item",
"=",
"i",
"[",
"self",
".",
"get_key",
"(",
")",
"]",
",",
"key",
"=",
"'value'",
",",
"option",
"=",
"'decoration'",
")",
")",
")",
"else",
":",
"if",
"(",
"args",
".",
"fahrenheit",
"and",
"i",
"[",
"'type'",
"]",
"!=",
"'battery'",
"and",
"i",
"[",
"'type'",
"]",
"!=",
"'fan_speed'",
")",
":",
"value",
"=",
"to_fahrenheit",
"(",
"i",
"[",
"'value'",
"]",
")",
"unit",
"=",
"'F'",
"else",
":",
"value",
"=",
"i",
"[",
"'value'",
"]",
"unit",
"=",
"i",
"[",
"'unit'",
"]",
"try",
":",
"msg",
"=",
"'{:>13.0f}{}'",
".",
"format",
"(",
"value",
",",
"unit",
")",
"ret",
".",
"append",
"(",
"self",
".",
"curse_add_line",
"(",
"msg",
",",
"self",
".",
"get_views",
"(",
"item",
"=",
"i",
"[",
"self",
".",
"get_key",
"(",
")",
"]",
",",
"key",
"=",
"'value'",
",",
"option",
"=",
"'decoration'",
")",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"return",
"ret"
] | Return the dict to display in the curse interface. | [
"Return",
"the",
"dict",
"to",
"display",
"in",
"the",
"curse",
"interface",
"."
] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_sensors.py#L168-L217 | 0.000979 |
GPflow/GPflow | gpflow/training/monitor.py | GrowingIntervalCondition._growing_step_sequence | def _growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None):
"""
Returns an iterator that constructs a sequence of trigger levels with growing intervals.
The interval is growing exponentially until it reaches the maximum value. Then the interval
stays the same and the sequence becomes linear.
An optional starting level `start_level` defaults to the initial interval. The interval
starts out as `init_interval`, multiplied by `interval_growth` in each step until it
reaches the `max_interval`.
"""
interval = init_interval
next_level = start_level or init_interval
while True:
yield next_level
interval = min(interval * interval_growth, max_interval)
next_level += interval | python | def _growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None):
"""
Returns an iterator that constructs a sequence of trigger levels with growing intervals.
The interval is growing exponentially until it reaches the maximum value. Then the interval
stays the same and the sequence becomes linear.
An optional starting level `start_level` defaults to the initial interval. The interval
starts out as `init_interval`, multiplied by `interval_growth` in each step until it
reaches the `max_interval`.
"""
interval = init_interval
next_level = start_level or init_interval
while True:
yield next_level
interval = min(interval * interval_growth, max_interval)
next_level += interval | [
"def",
"_growing_step_sequence",
"(",
"interval_growth",
",",
"max_interval",
",",
"init_interval",
",",
"start_level",
"=",
"None",
")",
":",
"interval",
"=",
"init_interval",
"next_level",
"=",
"start_level",
"or",
"init_interval",
"while",
"True",
":",
"yield",
"next_level",
"interval",
"=",
"min",
"(",
"interval",
"*",
"interval_growth",
",",
"max_interval",
")",
"next_level",
"+=",
"interval"
] | Returns an iterator that constructs a sequence of trigger levels with growing intervals.
The interval is growing exponentially until it reaches the maximum value. Then the interval
stays the same and the sequence becomes linear.
An optional starting level `start_level` defaults to the initial interval. The interval
starts out as `init_interval`, multiplied by `interval_growth` in each step until it
reaches the `max_interval`. | [
"Returns",
"an",
"iterator",
"that",
"constructs",
"a",
"sequence",
"of",
"trigger",
"levels",
"with",
"growing",
"intervals",
".",
"The",
"interval",
"is",
"growing",
"exponentially",
"until",
"it",
"reaches",
"the",
"maximum",
"value",
".",
"Then",
"the",
"interval",
"stays",
"the",
"same",
"and",
"the",
"sequence",
"becomes",
"linear",
"."
] | train | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/monitor.py#L550-L565 | 0.008434 |
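To make the behaviour concrete, with interval_growth=2, max_interval=10 and init_interval=1 the trigger levels grow exponentially (1, 3, 7, 15) and then advance by the capped interval of 10. The call below assumes the generator is reachable directly (it takes no self; within the class it would be GrowingIntervalCondition._growing_step_sequence).

from itertools import islice

levels = _growing_step_sequence(interval_growth=2, max_interval=10, init_interval=1)
print(list(islice(levels, 6)))  # [1, 3, 7, 15, 25, 35]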
saltstack/salt | salt/cloud/clouds/qingcloud.py | avail_images | def avail_images(kwargs=None, call=None):
'''
Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
params = {
'action': 'DescribeImages',
'provider': 'system',
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result | python | def avail_images(kwargs=None, call=None):
'''
Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
params = {
'action': 'DescribeImages',
'provider': 'system',
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result | [
"def",
"avail_images",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_images function must be called with '",
"'-f or --function, or with the --list-images option'",
")",
"if",
"not",
"isinstance",
"(",
"kwargs",
",",
"dict",
")",
":",
"kwargs",
"=",
"{",
"}",
"params",
"=",
"{",
"'action'",
":",
"'DescribeImages'",
",",
"'provider'",
":",
"'system'",
",",
"'zone'",
":",
"_get_specified_zone",
"(",
"kwargs",
",",
"get_configured_provider",
"(",
")",
")",
",",
"}",
"items",
"=",
"query",
"(",
"params",
"=",
"params",
")",
"result",
"=",
"{",
"}",
"for",
"image",
"in",
"items",
"[",
"'image_set'",
"]",
":",
"result",
"[",
"image",
"[",
"'image_id'",
"]",
"]",
"=",
"{",
"}",
"for",
"key",
"in",
"image",
":",
"result",
"[",
"image",
"[",
"'image_id'",
"]",
"]",
"[",
"key",
"]",
"=",
"image",
"[",
"key",
"]",
"return",
"result"
] | Return a list of the images that are on the provider.
CLI Examples:
.. code-block:: bash
salt-cloud --list-images my-qingcloud
salt-cloud -f avail_images my-qingcloud zone=gd1 | [
"Return",
"a",
"list",
"of",
"the",
"images",
"that",
"are",
"on",
"the",
"provider",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L271-L304 | 0.001105 |
se-esss-litterbox/Pynac | Pynac/Elements.py | Steerer.scaleField | def scaleField(self, scalingFactor):
"""
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
"""
self.field_strength = self.field_strength._replace(
val=self.field_strength.val * scalingFactor
) | python | def scaleField(self, scalingFactor):
"""
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
"""
self.field_strength = self.field_strength._replace(
val=self.field_strength.val * scalingFactor
) | [
"def",
"scaleField",
"(",
"self",
",",
"scalingFactor",
")",
":",
"self",
".",
"field_strength",
"=",
"self",
".",
"field_strength",
".",
"_replace",
"(",
"val",
"=",
"self",
".",
"field_strength",
".",
"val",
"*",
"scalingFactor",
")"
] | Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field. | [
"Adjust",
"the",
"field",
"of",
"the",
"magnet",
"by",
"the",
"value",
"of",
"scalingFactor",
".",
"The",
"adjustment",
"is",
"multiplicative",
"so",
"a",
"value",
"of",
"scalingFactor",
"=",
"1",
".",
"0",
"will",
"result",
"in",
"no",
"change",
"of",
"the",
"field",
"."
] | train | https://github.com/se-esss-litterbox/Pynac/blob/97e20aa85d20112cd114faa54a8197c5d0f61209/Pynac/Elements.py#L408-L416 | 0.010309 |
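A tiny usage illustration; the steerer object is assumed to have been constructed elsewhere.

steerer.scaleField(1.05)  # raise the field strength by 5%
steerer.scaleField(1.0)   # the adjustment is multiplicative, so this leaves the field unchanged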
sprockets/sprockets-dynamodb | sprockets_dynamodb/client.py | Client.execute | def execute(self, action, parameters):
"""
Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
        of calling the specified DynamoDB function. It does its best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException`
"""
measurements = collections.deque([], self._max_retries)
for attempt in range(1, self._max_retries + 1):
try:
result = yield self._execute(
action, parameters, attempt, measurements)
except (exceptions.InternalServerError,
exceptions.RequestException,
exceptions.ThrottlingException,
exceptions.ThroughputExceeded,
exceptions.ServiceUnavailable) as error:
if attempt == self._max_retries:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
duration = self._sleep_duration(attempt)
self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
error, attempt, duration)
yield gen.sleep(duration)
except exceptions.DynamoDBException as error:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
else:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self.logger.debug('%s result: %r', action, result)
raise gen.Return(_unwrap_result(action, result)) | python | def execute(self, action, parameters):
"""
Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
        of calling the specified DynamoDB function. It does its best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException`
"""
measurements = collections.deque([], self._max_retries)
for attempt in range(1, self._max_retries + 1):
try:
result = yield self._execute(
action, parameters, attempt, measurements)
except (exceptions.InternalServerError,
exceptions.RequestException,
exceptions.ThrottlingException,
exceptions.ThroughputExceeded,
exceptions.ServiceUnavailable) as error:
if attempt == self._max_retries:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
duration = self._sleep_duration(attempt)
self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
error, attempt, duration)
yield gen.sleep(duration)
except exceptions.DynamoDBException as error:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
else:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self.logger.debug('%s result: %r', action, result)
raise gen.Return(_unwrap_result(action, result)) | [
"def",
"execute",
"(",
"self",
",",
"action",
",",
"parameters",
")",
":",
"measurements",
"=",
"collections",
".",
"deque",
"(",
"[",
"]",
",",
"self",
".",
"_max_retries",
")",
"for",
"attempt",
"in",
"range",
"(",
"1",
",",
"self",
".",
"_max_retries",
"+",
"1",
")",
":",
"try",
":",
"result",
"=",
"yield",
"self",
".",
"_execute",
"(",
"action",
",",
"parameters",
",",
"attempt",
",",
"measurements",
")",
"except",
"(",
"exceptions",
".",
"InternalServerError",
",",
"exceptions",
".",
"RequestException",
",",
"exceptions",
".",
"ThrottlingException",
",",
"exceptions",
".",
"ThroughputExceeded",
",",
"exceptions",
".",
"ServiceUnavailable",
")",
"as",
"error",
":",
"if",
"attempt",
"==",
"self",
".",
"_max_retries",
":",
"if",
"self",
".",
"_instrumentation_callback",
":",
"self",
".",
"_instrumentation_callback",
"(",
"measurements",
")",
"self",
".",
"_on_exception",
"(",
"error",
")",
"duration",
"=",
"self",
".",
"_sleep_duration",
"(",
"attempt",
")",
"self",
".",
"logger",
".",
"warning",
"(",
"'%r on attempt %i, sleeping %.2f seconds'",
",",
"error",
",",
"attempt",
",",
"duration",
")",
"yield",
"gen",
".",
"sleep",
"(",
"duration",
")",
"except",
"exceptions",
".",
"DynamoDBException",
"as",
"error",
":",
"if",
"self",
".",
"_instrumentation_callback",
":",
"self",
".",
"_instrumentation_callback",
"(",
"measurements",
")",
"self",
".",
"_on_exception",
"(",
"error",
")",
"else",
":",
"if",
"self",
".",
"_instrumentation_callback",
":",
"self",
".",
"_instrumentation_callback",
"(",
"measurements",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'%s result: %r'",
",",
"action",
",",
"result",
")",
"raise",
"gen",
".",
"Return",
"(",
"_unwrap_result",
"(",
"action",
",",
"result",
")",
")"
] | Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
        of calling the specified DynamoDB function. It does its best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException` | [
"Execute",
"a",
"DynamoDB",
"action",
"with",
"the",
"given",
"parameters",
".",
"The",
"method",
"will",
"retry",
"requests",
"that",
"failed",
"due",
"to",
"OS",
"level",
"errors",
"or",
"when",
"being",
"throttled",
"by",
"DynamoDB",
"."
] | train | https://github.com/sprockets/sprockets-dynamodb/blob/2e202bcb01f23f828f91299599311007054de4aa/sprockets_dynamodb/client.py#L729-L790 | 0.000614 |
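A sketch of driving the coroutine above from Tornado code. The client construction is omitted and is assumed to be an instantiated sprockets_dynamodb Client; the table name and key shape are made-up, and the parameter dict follows the low-level DynamoDB wire format that execute passes through.

from tornado import gen

@gen.coroutine
def fetch_user(client, user_id):
    item = yield client.execute('GetItem', {
        'TableName': 'users',
        'Key': {'id': {'S': user_id}},
    })
    raise gen.Return(item)  # already unwrapped for GetItem, per the docstring above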
bukun/TorCMS | torcms/model/post_model.py | MPost.query_most | def query_most(num=8, kind='1'):
'''
Query most viewed.
'''
return TabPost.select().where(
(TabPost.kind == kind) &
(TabPost.valid == 1)
).order_by(
TabPost.view_count.desc()
).limit(num) | python | def query_most(num=8, kind='1'):
'''
Query most viewed.
'''
return TabPost.select().where(
(TabPost.kind == kind) &
(TabPost.valid == 1)
).order_by(
TabPost.view_count.desc()
).limit(num) | [
"def",
"query_most",
"(",
"num",
"=",
"8",
",",
"kind",
"=",
"'1'",
")",
":",
"return",
"TabPost",
".",
"select",
"(",
")",
".",
"where",
"(",
"(",
"TabPost",
".",
"kind",
"==",
"kind",
")",
"&",
"(",
"TabPost",
".",
"valid",
"==",
"1",
")",
")",
".",
"order_by",
"(",
"TabPost",
".",
"view_count",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"num",
")"
] | Query most viewed. | [
"Query",
"most",
"viewed",
"."
] | train | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L487-L496 | 0.00738 |
Robpol86/colorclass | colorclass/core.py | ColorStr.count | def count(self, sub, start=0, end=-1):
"""Return the number of non-overlapping occurrences of substring sub in string[start:end].
Optional arguments start and end are interpreted as in slice notation.
:param str sub: Substring to search.
:param int start: Beginning position.
:param int end: Stop comparison at this position.
"""
return self.value_no_colors.count(sub, start, end) | python | def count(self, sub, start=0, end=-1):
"""Return the number of non-overlapping occurrences of substring sub in string[start:end].
Optional arguments start and end are interpreted as in slice notation.
:param str sub: Substring to search.
:param int start: Beginning position.
:param int end: Stop comparison at this position.
"""
return self.value_no_colors.count(sub, start, end) | [
"def",
"count",
"(",
"self",
",",
"sub",
",",
"start",
"=",
"0",
",",
"end",
"=",
"-",
"1",
")",
":",
"return",
"self",
".",
"value_no_colors",
".",
"count",
"(",
"sub",
",",
"start",
",",
"end",
")"
] | Return the number of non-overlapping occurrences of substring sub in string[start:end].
Optional arguments start and end are interpreted as in slice notation.
:param str sub: Substring to search.
:param int start: Beginning position.
:param int end: Stop comparison at this position. | [
"Return",
"the",
"number",
"of",
"non",
"-",
"overlapping",
"occurrences",
"of",
"substring",
"sub",
"in",
"string",
"[",
"start",
":",
"end",
"]",
"."
] | train | https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/core.py#L123-L132 | 0.006849 |
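Note that start and end are slice positions, so the default end=-1 above stops one character before the end of the colour-stripped value; plain str.count shows the same semantics:

print('hello'.count('o'))         # 1
print('hello'.count('o', 0, -1))  # 0 -- the trailing 'o' lies outside the [0:-1] slice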
twneale/visitors | visitors/ext/etree.py | from_etree | def from_etree(
el, node=None, node_cls=None,
tagsub=functools.partial(re.sub, r'\{.+?\}', ''),
Node=Node):
'''Convert the element tree to a tater tree.
'''
node_cls = node_cls or Node
if node is None:
node = node_cls()
tag = tagsub(el.tag)
attrib = dict((tagsub(k), v) for (k, v) in el.attrib.items())
node.update(attrib, tag=tag)
if el.text:
node['text'] = el.text
for child in el:
child = from_etree(child, node_cls=node_cls)
node.append(child)
if el.tail:
node['tail'] = el.tail
return node | python | def from_etree(
el, node=None, node_cls=None,
tagsub=functools.partial(re.sub, r'\{.+?\}', ''),
Node=Node):
'''Convert the element tree to a tater tree.
'''
node_cls = node_cls or Node
if node is None:
node = node_cls()
tag = tagsub(el.tag)
attrib = dict((tagsub(k), v) for (k, v) in el.attrib.items())
node.update(attrib, tag=tag)
if el.text:
node['text'] = el.text
for child in el:
child = from_etree(child, node_cls=node_cls)
node.append(child)
if el.tail:
node['tail'] = el.tail
return node | [
"def",
"from_etree",
"(",
"el",
",",
"node",
"=",
"None",
",",
"node_cls",
"=",
"None",
",",
"tagsub",
"=",
"functools",
".",
"partial",
"(",
"re",
".",
"sub",
",",
"r'\\{.+?\\}'",
",",
"''",
")",
",",
"Node",
"=",
"Node",
")",
":",
"node_cls",
"=",
"node_cls",
"or",
"Node",
"if",
"node",
"is",
"None",
":",
"node",
"=",
"node_cls",
"(",
")",
"tag",
"=",
"tagsub",
"(",
"el",
".",
"tag",
")",
"attrib",
"=",
"dict",
"(",
"(",
"tagsub",
"(",
"k",
")",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"el",
".",
"attrib",
".",
"items",
"(",
")",
")",
"node",
".",
"update",
"(",
"attrib",
",",
"tag",
"=",
"tag",
")",
"if",
"el",
".",
"text",
":",
"node",
"[",
"'text'",
"]",
"=",
"el",
".",
"text",
"for",
"child",
"in",
"el",
":",
"child",
"=",
"from_etree",
"(",
"child",
",",
"node_cls",
"=",
"node_cls",
")",
"node",
".",
"append",
"(",
"child",
")",
"if",
"el",
".",
"tail",
":",
"node",
"[",
"'tail'",
"]",
"=",
"el",
".",
"tail",
"return",
"node"
] | Convert the element tree to a tater tree. | [
"Convert",
"the",
"element",
"tree",
"to",
"a",
"tater",
"tree",
"."
] | train | https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/ext/etree.py#L69-L89 | 0.003384 |
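A minimal usage sketch, assuming from_etree has been imported from the module above; the XML snippet is made up and Node is assumed to be the mapping-like node class the module imports.

import xml.etree.ElementTree as ET

el = ET.fromstring('<book id="42"><title>Dune</title></book>')
root = from_etree(el)
print(root['tag'], root['id'])  # book 42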
miquelo/resort | packages/resort/component/glassfish.py | Domain.jdbc_connection_pool | def jdbc_connection_pool(self, name, res_type, ds_classname, props):
"""
Domain JDBC connection pool.
:param str name:
Resource name.
:param str res_type:
Resource type.
:param str ds_classname:
Data source class name.
:param dict props:
Connection pool properties.
:rtype:
JDBCConnectionPool
"""
return JDBCConnectionPool(self.__endpoint, name, res_type,
ds_classname, props) | python | def jdbc_connection_pool(self, name, res_type, ds_classname, props):
"""
Domain JDBC connection pool.
:param str name:
Resource name.
:param str res_type:
Resource type.
:param str ds_classname:
Data source class name.
:param dict props:
Connection pool properties.
:rtype:
JDBCConnectionPool
"""
return JDBCConnectionPool(self.__endpoint, name, res_type,
ds_classname, props) | [
"def",
"jdbc_connection_pool",
"(",
"self",
",",
"name",
",",
"res_type",
",",
"ds_classname",
",",
"props",
")",
":",
"return",
"JDBCConnectionPool",
"(",
"self",
".",
"__endpoint",
",",
"name",
",",
"res_type",
",",
"ds_classname",
",",
"props",
")"
] | Domain JDBC connection pool.
:param str name:
Resource name.
:param str res_type:
Resource type.
:param str ds_classname:
Data source class name.
:param dict props:
Connection pool properties.
:rtype:
JDBCConnectionPool | [
"Domain",
"JDBC",
"connection",
"pool",
".",
":",
"param",
"str",
"name",
":",
"Resource",
"name",
".",
":",
"param",
"str",
"res_type",
":",
"Resource",
"type",
".",
":",
"param",
"str",
"ds_classname",
":",
"Data",
"source",
"class",
"name",
".",
":",
"param",
"dict",
"props",
":",
"Connection",
"pool",
"properties",
".",
":",
"rtype",
":",
"JDBCConnectionPool"
] | train | https://github.com/miquelo/resort/blob/097a25d3257c91a75c194fd44c2797ab356f85dd/packages/resort/component/glassfish.py#L295-L313 | 0.066975 |
jeffrimko/Auxly | lib/auxly/__init__.py | open | def open(target):
"""Opens the target file or URL in the default application.
**Attribution**:
Written by user4815162342 and originally posted on
`Stack Overflow <http://stackoverflow.com/a/17317468>`_.
**Examples**:
::
auxly.open("myfile.txt")
auxly.open("https://www.github.com/")
"""
if sys.platform == "win32":
os.startfile(target)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, target]) | python | def open(target):
"""Opens the target file or URL in the default application.
**Attribution**:
Written by user4815162342 and originally posted on
`Stack Overflow <http://stackoverflow.com/a/17317468>`_.
**Examples**:
::
auxly.open("myfile.txt")
auxly.open("https://www.github.com/")
"""
if sys.platform == "win32":
os.startfile(target)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, target]) | [
"def",
"open",
"(",
"target",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
":",
"os",
".",
"startfile",
"(",
"target",
")",
"else",
":",
"opener",
"=",
"\"open\"",
"if",
"sys",
".",
"platform",
"==",
"\"darwin\"",
"else",
"\"xdg-open\"",
"subprocess",
".",
"call",
"(",
"[",
"opener",
",",
"target",
"]",
")"
] | Opens the target file or URL in the default application.
**Attribution**:
Written by user4815162342 and originally posted on
`Stack Overflow <http://stackoverflow.com/a/17317468>`_.
**Examples**:
::
auxly.open("myfile.txt")
auxly.open("https://www.github.com/") | [
"Opens",
"the",
"target",
"file",
"or",
"URL",
"in",
"the",
"default",
"application",
"."
] | train | https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/__init__.py#L25-L41 | 0.001949 |
huggingface/pytorch-pretrained-BERT | pytorch_pretrained_bert/tokenization.py | _is_punctuation | def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | python | def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | [
"def",
"_is_punctuation",
"(",
"char",
")",
":",
"cp",
"=",
"ord",
"(",
"char",
")",
"# We treat all non-letter/number ASCII as punctuation.",
"# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode",
"# Punctuation class but we treat them as punctuation anyways, for",
"# consistency.",
"if",
"(",
"(",
"cp",
">=",
"33",
"and",
"cp",
"<=",
"47",
")",
"or",
"(",
"cp",
">=",
"58",
"and",
"cp",
"<=",
"64",
")",
"or",
"(",
"cp",
">=",
"91",
"and",
"cp",
"<=",
"96",
")",
"or",
"(",
"cp",
">=",
"123",
"and",
"cp",
"<=",
"126",
")",
")",
":",
"return",
"True",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
".",
"startswith",
"(",
"\"P\"",
")",
":",
"return",
"True",
"return",
"False"
] | Checks whether `chars` is a punctuation character. | [
"Checks",
"whether",
"chars",
"is",
"a",
"punctuation",
"character",
"."
] | train | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L402-L415 | 0.001751 |
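A few illustrative checks of the helper above, assuming it has been imported from the tokenization module:

print(_is_punctuation('^'))  # True  -- not a Unicode P* category, but inside the ASCII ranges above
print(_is_punctuation('$'))  # True
print(_is_punctuation('a'))  # False
print(_is_punctuation(' '))  # False -- whitespace is not punctuation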
xoolive/traffic | traffic/core/aero.py | mach2tas | def mach2tas(M, h):
""" True airspeed (tas) to mach number conversion """
a = vsound(h)
tas = M * a
return tas | python | def mach2tas(M, h):
""" True airspeed (tas) to mach number conversion """
a = vsound(h)
tas = M * a
return tas | [
"def",
"mach2tas",
"(",
"M",
",",
"h",
")",
":",
"a",
"=",
"vsound",
"(",
"h",
")",
"tas",
"=",
"M",
"*",
"a",
"return",
"tas"
] | Mach number to true airspeed (tas) conversion | [
"True",
"airspeed",
"(",
"tas",
")",
"to",
"mach",
"number",
"conversion"
] | train | https://github.com/xoolive/traffic/blob/d1a8878098f16759f6b6e0e8d8b8f32e34a680a8/traffic/core/aero.py#L306-L310 | 0.007937 |
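A rough worked example, assuming mach2tas is imported from the module above, that vsound follows the standard atmosphere, and that the module works in SI units (altitude in metres, speeds in m/s):

tas = mach2tas(0.8, 0.0)  # speed of sound at sea level is about 340.3 m/s
print(round(tas, 1))      # ~272.2 m/s, roughly 529 kt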
yyuu/botornado | boto/mturk/connection.py | MTurkConnection.grant_bonus | def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
"""
Issues a payment of money from your account to a Worker. To
be eligible for a bonus, the Worker must have submitted
results for one of your HITs, and have had those results
approved or rejected. This payment happens separately from the
reward you pay to the Worker when you approve the Worker's
assignment. The Bonus must be passed in as an instance of the
Price object.
"""
params = bonus_price.get_as_params('BonusAmount', 1)
params['WorkerId'] = worker_id
params['AssignmentId'] = assignment_id
params['Reason'] = reason
return self._process_request('GrantBonus', params) | python | def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
"""
Issues a payment of money from your account to a Worker. To
be eligible for a bonus, the Worker must have submitted
results for one of your HITs, and have had those results
approved or rejected. This payment happens separately from the
reward you pay to the Worker when you approve the Worker's
assignment. The Bonus must be passed in as an instance of the
Price object.
"""
params = bonus_price.get_as_params('BonusAmount', 1)
params['WorkerId'] = worker_id
params['AssignmentId'] = assignment_id
params['Reason'] = reason
return self._process_request('GrantBonus', params) | [
"def",
"grant_bonus",
"(",
"self",
",",
"worker_id",
",",
"assignment_id",
",",
"bonus_price",
",",
"reason",
")",
":",
"params",
"=",
"bonus_price",
".",
"get_as_params",
"(",
"'BonusAmount'",
",",
"1",
")",
"params",
"[",
"'WorkerId'",
"]",
"=",
"worker_id",
"params",
"[",
"'AssignmentId'",
"]",
"=",
"assignment_id",
"params",
"[",
"'Reason'",
"]",
"=",
"reason",
"return",
"self",
".",
"_process_request",
"(",
"'GrantBonus'",
",",
"params",
")"
] | Issues a payment of money from your account to a Worker. To
be eligible for a bonus, the Worker must have submitted
results for one of your HITs, and have had those results
approved or rejected. This payment happens separately from the
reward you pay to the Worker when you approve the Worker's
assignment. The Bonus must be passed in as an instance of the
Price object. | [
"Issues",
"a",
"payment",
"of",
"money",
"from",
"your",
"account",
"to",
"a",
"Worker",
".",
"To",
"be",
"eligible",
"for",
"a",
"bonus",
"the",
"Worker",
"must",
"have",
"submitted",
"results",
"for",
"one",
"of",
"your",
"HITs",
"and",
"have",
"had",
"those",
"results",
"approved",
"or",
"rejected",
".",
"This",
"payment",
"happens",
"separately",
"from",
"the",
"reward",
"you",
"pay",
"to",
"the",
"Worker",
"when",
"you",
"approve",
"the",
"Worker",
"s",
"assignment",
".",
"The",
"Bonus",
"must",
"be",
"passed",
"in",
"as",
"an",
"instance",
"of",
"the",
"Price",
"object",
"."
] | train | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L482-L497 | 0.002621 |
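A hedged usage sketch; mtc is assumed to be an existing MTurkConnection, Price is assumed to be boto's boto.mturk.price.Price helper, and the worker and assignment identifiers are placeholders.

from boto.mturk.price import Price

mtc.grant_bonus(
    worker_id='A1EXAMPLEWORKERID',
    assignment_id='3EXAMPLEASSIGNMENTID',
    bonus_price=Price(amount=0.50),
    reason='Thanks for the careful work on this HIT.',
)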
ddorn/GUI | GUI/font.py | Font.set_size | def set_size(self, pt=None, px=None):
"""
Set the size of the font, in px or pt.
        The px method is a bit inaccurate: the result can be one or two px smaller (up to 4 for big numbers like 503),
        but the size is never over-estimated. It comes very close to the requested value.
"""
assert (pt, px) != (None, None)
if pt is not None:
self.__init__(pt, self.font_name)
else:
self.__init__(self.px_to_pt(px), self.font_name) | python | def set_size(self, pt=None, px=None):
"""
Set the size of the font, in px or pt.
        The px method is a bit inaccurate: the result can be one or two px smaller (up to 4 for big numbers like 503),
        but the size is never over-estimated. It comes very close to the requested value.
"""
assert (pt, px) != (None, None)
if pt is not None:
self.__init__(pt, self.font_name)
else:
self.__init__(self.px_to_pt(px), self.font_name) | [
"def",
"set_size",
"(",
"self",
",",
"pt",
"=",
"None",
",",
"px",
"=",
"None",
")",
":",
"assert",
"(",
"pt",
",",
"px",
")",
"!=",
"(",
"None",
",",
"None",
")",
"if",
"pt",
"is",
"not",
"None",
":",
"self",
".",
"__init__",
"(",
"pt",
",",
"self",
".",
"font_name",
")",
"else",
":",
"self",
".",
"__init__",
"(",
"self",
".",
"px_to_pt",
"(",
"px",
")",
",",
"self",
".",
"font_name",
")"
] | Set the size of the font, in px or pt.
        The px method is a bit inaccurate: the result can be one or two px smaller (up to 4 for big numbers like 503),
        but the size is never over-estimated. It comes very close to the requested value. | [
"Set",
"the",
"size",
"of",
"the",
"font",
"in",
"px",
"or",
"pt",
"."
] | train | https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/font.py#L54-L67 | 0.006135 |
fermiPy/fermipy | fermipy/jobs/job_archive.py | JobArchive.update_job_status | def update_job_status(self, checker_func):
"""Update the status of all the jobs in the archive"""
njobs = len(self.cache.keys())
status_vect = np.zeros((8), int)
sys.stdout.write("Updating status of %i jobs: " % njobs)
sys.stdout.flush()
for i, key in enumerate(self.cache.keys()):
if i % 200 == 0:
sys.stdout.write('.')
sys.stdout.flush()
job_details = self.cache[key]
if job_details.status in [JobStatus.pending, JobStatus.running]:
if checker_func:
job_details.check_status_logfile(checker_func)
job_details.update_table_row(self._table, job_details.dbkey - 1)
status_vect[job_details.status] += 1
sys.stdout.write("!\n")
sys.stdout.flush()
sys.stdout.write("Summary:\n")
sys.stdout.write(" Unknown: %i\n" % status_vect[JobStatus.unknown])
sys.stdout.write(" Not Ready: %i\n" %
status_vect[JobStatus.not_ready])
sys.stdout.write(" Ready: %i\n" % status_vect[JobStatus.ready])
sys.stdout.write(" Pending: %i\n" % status_vect[JobStatus.pending])
sys.stdout.write(" Running: %i\n" % status_vect[JobStatus.running])
sys.stdout.write(" Done: %i\n" % status_vect[JobStatus.done])
sys.stdout.write(" Failed: %i\n" % status_vect[JobStatus.failed])
sys.stdout.write(" Partial: %i\n" %
status_vect[JobStatus.partial_failed]) | python | def update_job_status(self, checker_func):
"""Update the status of all the jobs in the archive"""
njobs = len(self.cache.keys())
status_vect = np.zeros((8), int)
sys.stdout.write("Updating status of %i jobs: " % njobs)
sys.stdout.flush()
for i, key in enumerate(self.cache.keys()):
if i % 200 == 0:
sys.stdout.write('.')
sys.stdout.flush()
job_details = self.cache[key]
if job_details.status in [JobStatus.pending, JobStatus.running]:
if checker_func:
job_details.check_status_logfile(checker_func)
job_details.update_table_row(self._table, job_details.dbkey - 1)
status_vect[job_details.status] += 1
sys.stdout.write("!\n")
sys.stdout.flush()
sys.stdout.write("Summary:\n")
sys.stdout.write(" Unknown: %i\n" % status_vect[JobStatus.unknown])
sys.stdout.write(" Not Ready: %i\n" %
status_vect[JobStatus.not_ready])
sys.stdout.write(" Ready: %i\n" % status_vect[JobStatus.ready])
sys.stdout.write(" Pending: %i\n" % status_vect[JobStatus.pending])
sys.stdout.write(" Running: %i\n" % status_vect[JobStatus.running])
sys.stdout.write(" Done: %i\n" % status_vect[JobStatus.done])
sys.stdout.write(" Failed: %i\n" % status_vect[JobStatus.failed])
sys.stdout.write(" Partial: %i\n" %
status_vect[JobStatus.partial_failed]) | [
"def",
"update_job_status",
"(",
"self",
",",
"checker_func",
")",
":",
"njobs",
"=",
"len",
"(",
"self",
".",
"cache",
".",
"keys",
"(",
")",
")",
"status_vect",
"=",
"np",
".",
"zeros",
"(",
"(",
"8",
")",
",",
"int",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Updating status of %i jobs: \"",
"%",
"njobs",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"for",
"i",
",",
"key",
"in",
"enumerate",
"(",
"self",
".",
"cache",
".",
"keys",
"(",
")",
")",
":",
"if",
"i",
"%",
"200",
"==",
"0",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'.'",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"job_details",
"=",
"self",
".",
"cache",
"[",
"key",
"]",
"if",
"job_details",
".",
"status",
"in",
"[",
"JobStatus",
".",
"pending",
",",
"JobStatus",
".",
"running",
"]",
":",
"if",
"checker_func",
":",
"job_details",
".",
"check_status_logfile",
"(",
"checker_func",
")",
"job_details",
".",
"update_table_row",
"(",
"self",
".",
"_table",
",",
"job_details",
".",
"dbkey",
"-",
"1",
")",
"status_vect",
"[",
"job_details",
".",
"status",
"]",
"+=",
"1",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"!\\n\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Summary:\\n\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Unknown: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"unknown",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Not Ready: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"not_ready",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Ready: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"ready",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Pending: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"pending",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Running: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"running",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Done: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"done",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Failed: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"failed",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" Partial: %i\\n\"",
"%",
"status_vect",
"[",
"JobStatus",
".",
"partial_failed",
"]",
")"
] | Update the status of all the jobs in the archive | [
"Update",
"the",
"status",
"of",
"all",
"the",
"jobs",
"in",
"the",
"archive"
] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L646-L675 | 0.001282 |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py | brocade_xstp_ext.get_stp_mst_detail_output_msti_port_port_hello_time | def get_stp_mst_detail_output_msti_port_port_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
port_hello_time = ET.SubElement(port, "port-hello-time")
port_hello_time.text = kwargs.pop('port_hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_stp_mst_detail_output_msti_port_port_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
port_hello_time = ET.SubElement(port, "port-hello-time")
port_hello_time.text = kwargs.pop('port_hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_stp_mst_detail_output_msti_port_port_hello_time",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_stp_mst_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_stp_mst_detail\"",
")",
"config",
"=",
"get_stp_mst_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_stp_mst_detail",
",",
"\"output\"",
")",
"msti",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"msti\"",
")",
"instance_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"msti",
",",
"\"instance-id\"",
")",
"instance_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'instance_id'",
")",
"port",
"=",
"ET",
".",
"SubElement",
"(",
"msti",
",",
"\"port\"",
")",
"port_hello_time",
"=",
"ET",
".",
"SubElement",
"(",
"port",
",",
"\"port-hello-time\"",
")",
"port_hello_time",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'port_hello_time'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4759-L4774 | 0.002721 |
bitesofcode/projexui | projexui/widgets/xnodewidget/xnode.py | XNode.setHighlightColor | def setHighlightColor(self, color):
"""
Sets the color to be used when highlighting a node.
:param color <QColor> || None
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.NodeHighlight, color)
self.setDirty() | python | def setHighlightColor(self, color):
"""
Sets the color to be used when highlighting a node.
:param color <QColor> || None
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.NodeHighlight, color)
self.setDirty() | [
"def",
"setHighlightColor",
"(",
"self",
",",
"color",
")",
":",
"color",
"=",
"QColor",
"(",
"color",
")",
"if",
"self",
".",
"_palette",
"is",
"None",
":",
"self",
".",
"_palette",
"=",
"XNodePalette",
"(",
"self",
".",
"_scenePalette",
")",
"self",
".",
"_palette",
".",
"setColor",
"(",
"self",
".",
"_palette",
".",
"NodeHighlight",
",",
"color",
")",
"self",
".",
"setDirty",
"(",
")"
] | Sets the color to be used when highlighting a node.
:param color <QColor> || None | [
"Sets",
"the",
"color",
"to",
"be",
"used",
"when",
"highlighting",
"a",
"node",
".",
":",
"param",
"color",
"<QColor",
">",
"||",
"None"
] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L1768-L1779 | 0.00995 |
gtnx/pandas-highcharts | pandas_highcharts/display.py | _series_data_filter | def _series_data_filter(data):
"""Replace each 'data' key in the list stored under 'series' by "[...]".
    Use this to avoid storing and displaying the series data when you just want to display and
modify the Highcharts parameters.
data: dict
Serialized DataFrame in a dict for Highcharts
Returns: a dict with filtered values
See also `core.serialize`
"""
data = copy.deepcopy(data)
if "series" in data:
for series in data["series"]:
series["data"] = "[...]"
return data | python | def _series_data_filter(data):
"""Replace each 'data' key in the list stored under 'series' by "[...]".
    Use this to avoid storing and displaying the series data when you just want to display and
modify the Highcharts parameters.
data: dict
Serialized DataFrame in a dict for Highcharts
Returns: a dict with filtered values
See also `core.serialize`
"""
data = copy.deepcopy(data)
if "series" in data:
for series in data["series"]:
series["data"] = "[...]"
return data | [
"def",
"_series_data_filter",
"(",
"data",
")",
":",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"if",
"\"series\"",
"in",
"data",
":",
"for",
"series",
"in",
"data",
"[",
"\"series\"",
"]",
":",
"series",
"[",
"\"data\"",
"]",
"=",
"\"[...]\"",
"return",
"data"
] | Replace each 'data' key in the list stored under 'series' by "[...]".
    Use this to avoid storing and displaying the series data when you just want to display and
modify the Highcharts parameters.
data: dict
Serialized DataFrame in a dict for Highcharts
Returns: a dict with filtered values
See also `core.serialize` | [
"Replace",
"each",
"data",
"key",
"in",
"the",
"list",
"stored",
"under",
"series",
"by",
"[",
"...",
"]",
"."
] | train | https://github.com/gtnx/pandas-highcharts/blob/bf449b7db8b6966bcf95a0280bf2e4518f3e2419/pandas_highcharts/display.py#L63-L80 | 0.001908 |
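A small illustration of the filtering behaviour described above, assuming _series_data_filter is imported from the module; the chart dict is a made-up example.

chart = {'chart': {'type': 'line'},
         'series': [{'name': 'a', 'data': [1, 2, 3]}]}

filtered = _series_data_filter(chart)
print(filtered['series'][0]['data'])  # '[...]'
print(chart['series'][0]['data'])     # [1, 2, 3] -- the input is deep-copied, not mutated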
pandas-dev/pandas | pandas/core/internals/managers.py | BlockManager.quantile | def quantile(self, axis=0, consolidate=True, transposed=False,
interpolation='linear', qs=None, numeric_only=None):
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object)
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
elif block.ndim == 1:
ax = Float64Index([qs])
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate(
[ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [b.make_block(b.values.T,
placement=np.arange(b.shape[1])
) for b in blocks]
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
values = _concat._concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values,
ndim=1,
placement=np.arange(len(values)))],
axes[0]) | python | def quantile(self, axis=0, consolidate=True, transposed=False,
interpolation='linear', qs=None, numeric_only=None):
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object)
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
elif block.ndim == 1:
ax = Float64Index([qs])
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate(
[ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [b.make_block(b.values.T,
placement=np.arange(b.shape[1])
) for b in blocks]
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
values = _concat._concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values,
ndim=1,
placement=np.arange(len(values)))],
axes[0]) | [
"def",
"quantile",
"(",
"self",
",",
"axis",
"=",
"0",
",",
"consolidate",
"=",
"True",
",",
"transposed",
"=",
"False",
",",
"interpolation",
"=",
"'linear'",
",",
"qs",
"=",
"None",
",",
"numeric_only",
"=",
"None",
")",
":",
"# Series dispatches to DataFrame for quantile, which allows us to",
"# simplify some of the code here and in the blocks",
"assert",
"self",
".",
"ndim",
">=",
"2",
"if",
"consolidate",
":",
"self",
".",
"_consolidate_inplace",
"(",
")",
"def",
"get_axe",
"(",
"block",
",",
"qs",
",",
"axes",
")",
":",
"from",
"pandas",
"import",
"Float64Index",
"if",
"is_list_like",
"(",
"qs",
")",
":",
"ax",
"=",
"Float64Index",
"(",
"qs",
")",
"elif",
"block",
".",
"ndim",
"==",
"1",
":",
"ax",
"=",
"Float64Index",
"(",
"[",
"qs",
"]",
")",
"else",
":",
"ax",
"=",
"axes",
"[",
"0",
"]",
"return",
"ax",
"axes",
",",
"blocks",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"b",
"in",
"self",
".",
"blocks",
":",
"block",
"=",
"b",
".",
"quantile",
"(",
"axis",
"=",
"axis",
",",
"qs",
"=",
"qs",
",",
"interpolation",
"=",
"interpolation",
")",
"axe",
"=",
"get_axe",
"(",
"b",
",",
"qs",
",",
"axes",
"=",
"self",
".",
"axes",
")",
"axes",
".",
"append",
"(",
"axe",
")",
"blocks",
".",
"append",
"(",
"block",
")",
"# note that some DatetimeTZ, Categorical are always ndim==1",
"ndim",
"=",
"{",
"b",
".",
"ndim",
"for",
"b",
"in",
"blocks",
"}",
"assert",
"0",
"not",
"in",
"ndim",
",",
"ndim",
"if",
"2",
"in",
"ndim",
":",
"new_axes",
"=",
"list",
"(",
"self",
".",
"axes",
")",
"# multiple blocks that are reduced",
"if",
"len",
"(",
"blocks",
")",
">",
"1",
":",
"new_axes",
"[",
"1",
"]",
"=",
"axes",
"[",
"0",
"]",
"# reset the placement to the original",
"for",
"b",
",",
"sb",
"in",
"zip",
"(",
"blocks",
",",
"self",
".",
"blocks",
")",
":",
"b",
".",
"mgr_locs",
"=",
"sb",
".",
"mgr_locs",
"else",
":",
"new_axes",
"[",
"axis",
"]",
"=",
"Index",
"(",
"np",
".",
"concatenate",
"(",
"[",
"ax",
".",
"values",
"for",
"ax",
"in",
"axes",
"]",
")",
")",
"if",
"transposed",
":",
"new_axes",
"=",
"new_axes",
"[",
":",
":",
"-",
"1",
"]",
"blocks",
"=",
"[",
"b",
".",
"make_block",
"(",
"b",
".",
"values",
".",
"T",
",",
"placement",
"=",
"np",
".",
"arange",
"(",
"b",
".",
"shape",
"[",
"1",
"]",
")",
")",
"for",
"b",
"in",
"blocks",
"]",
"return",
"self",
".",
"__class__",
"(",
"blocks",
",",
"new_axes",
")",
"# single block, i.e. ndim == {1}",
"values",
"=",
"_concat",
".",
"_concat_compat",
"(",
"[",
"b",
".",
"values",
"for",
"b",
"in",
"blocks",
"]",
")",
"# compute the orderings of our original data",
"if",
"len",
"(",
"self",
".",
"blocks",
")",
">",
"1",
":",
"indexer",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"self",
".",
"axes",
"[",
"0",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"i",
"=",
"0",
"for",
"b",
"in",
"self",
".",
"blocks",
":",
"for",
"j",
"in",
"b",
".",
"mgr_locs",
":",
"indexer",
"[",
"j",
"]",
"=",
"i",
"i",
"=",
"i",
"+",
"1",
"values",
"=",
"values",
".",
"take",
"(",
"indexer",
")",
"return",
"SingleBlockManager",
"(",
"[",
"make_block",
"(",
"values",
",",
"ndim",
"=",
"1",
",",
"placement",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"values",
")",
")",
")",
"]",
",",
"axes",
"[",
"0",
"]",
")"
] | Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object) | [
"Iterate",
"over",
"blocks",
"applying",
"quantile",
"reduction",
".",
"This",
"routine",
"is",
"intended",
"for",
"reduction",
"type",
"operations",
"and",
"will",
"do",
"inference",
"on",
"the",
"generated",
"blocks",
"."
] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L405-L501 | 0.000955 |
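A minimal usage sketch for the record above (illustrative only, not part of the dataset): the public DataFrame.quantile call that, in the linked pandas version, is served by this internal block-manager quantile method. The example frame and its values are hypothetical; only the quantile list and interpolation parameter come from the record.

import numpy as np
import pandas as pd

# Hypothetical numeric frame; any numeric DataFrame would do.
df = pd.DataFrame({"a": np.arange(5.0), "b": np.arange(5.0) * 10})

# A scalar q returns a Series indexed by column; a list of quantiles returns a
# DataFrame whose index holds the requested quantiles, mirroring the
# Float64Index built by get_axe() in the code above.
median = df.quantile(0.5, interpolation="linear")
spread = df.quantile([0.25, 0.75], interpolation="linear")
print(median)
print(spread)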
linkedin/luminol | src/luminol/correlator.py | Correlator._get_algorithm_and_params | def _get_algorithm_and_params(self, algorithm_name, algorithm_params):
"""
Get the specific algorithm and merge the algorithm params.
:param str algorithm: name of the algorithm to use.
:param dict algorithm_params: additional params for the specific algorithm.
"""
algorithm_name = algorithm_name or CORRELATOR_ALGORITHM
try:
self.algorithm = correlator_algorithms[algorithm_name]
except KeyError:
raise exceptions.AlgorithmNotFound('luminol.Correlator: ' + str(algorithm_name) + ' not found.')
# Merge parameters.
if algorithm_params:
if not isinstance(algorithm_params, dict):
raise exceptions.InvalidDataFormat('luminol.Correlator: algorithm_params passed is not a dictionary.')
else:
# self.algorithm_params = dict(algorithm_params.items() + self.algorithm_params.items())
self.algorithm_params = self.algorithm_params.copy()
self.algorithm_params.update(algorithm_params) | python | def _get_algorithm_and_params(self, algorithm_name, algorithm_params):
"""
Get the specific algorithm and merge the algorithm params.
:param str algorithm: name of the algorithm to use.
:param dict algorithm_params: additional params for the specific algorithm.
"""
algorithm_name = algorithm_name or CORRELATOR_ALGORITHM
try:
self.algorithm = correlator_algorithms[algorithm_name]
except KeyError:
raise exceptions.AlgorithmNotFound('luminol.Correlator: ' + str(algorithm_name) + ' not found.')
# Merge parameters.
if algorithm_params:
if not isinstance(algorithm_params, dict):
raise exceptions.InvalidDataFormat('luminol.Correlator: algorithm_params passed is not a dictionary.')
else:
# self.algorithm_params = dict(algorithm_params.items() + self.algorithm_params.items())
self.algorithm_params = self.algorithm_params.copy()
self.algorithm_params.update(algorithm_params) | [
"def",
"_get_algorithm_and_params",
"(",
"self",
",",
"algorithm_name",
",",
"algorithm_params",
")",
":",
"algorithm_name",
"=",
"algorithm_name",
"or",
"CORRELATOR_ALGORITHM",
"try",
":",
"self",
".",
"algorithm",
"=",
"correlator_algorithms",
"[",
"algorithm_name",
"]",
"except",
"KeyError",
":",
"raise",
"exceptions",
".",
"AlgorithmNotFound",
"(",
"'luminol.Correlator: '",
"+",
"str",
"(",
"algorithm_name",
")",
"+",
"' not found.'",
")",
"# Merge parameters.",
"if",
"algorithm_params",
":",
"if",
"not",
"isinstance",
"(",
"algorithm_params",
",",
"dict",
")",
":",
"raise",
"exceptions",
".",
"InvalidDataFormat",
"(",
"'luminol.Correlator: algorithm_params passed is not a dictionary.'",
")",
"else",
":",
"# self.algorithm_params = dict(algorithm_params.items() + self.algorithm_params.items())",
"self",
".",
"algorithm_params",
"=",
"self",
".",
"algorithm_params",
".",
"copy",
"(",
")",
"self",
".",
"algorithm_params",
".",
"update",
"(",
"algorithm_params",
")"
] | Get the specific algorithm and merge the algorithm params.
:param str algorithm: name of the algorithm to use.
:param dict algorithm_params: additional params for the specific algorithm. | [
"Get",
"the",
"specific",
"algorithm",
"and",
"merge",
"the",
"algorithm",
"params",
".",
":",
"param",
"str",
"algorithm",
":",
"name",
"of",
"the",
"algorithm",
"to",
"use",
".",
":",
"param",
"dict",
"algorithm_params",
":",
"additional",
"params",
"for",
"the",
"specific",
"algorithm",
"."
] | train | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L72-L90 | 0.005613 |
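A standalone sketch of the same defaults-plus-overrides merge performed by _get_algorithm_and_params above (illustrative only; the default keys and values here are hypothetical placeholders, not taken from luminol):

# Hypothetical defaults standing in for the correlator's algorithm_params.
DEFAULT_ALGORITHM_PARAMS = {"max_shift_milliseconds": 60000, "shift_impact": 0.05}

def merge_algorithm_params(overrides=None):
    # Reject non-dict overrides early, mirroring the InvalidDataFormat check.
    if overrides is not None and not isinstance(overrides, dict):
        raise TypeError("algorithm_params must be a dictionary.")
    # Copy the defaults first so callers never mutate the shared dict,
    # then layer the caller-supplied overrides on top.
    merged = DEFAULT_ALGORITHM_PARAMS.copy()
    if overrides:
        merged.update(overrides)
    return merged

# Overridden keys win; untouched defaults are preserved.
print(merge_algorithm_params({"shift_impact": 0.1}))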