repository_name (stringlengths 7–55) | func_path_in_repository (stringlengths 4–223) | func_name (stringlengths 1–134) | whole_func_string (stringlengths 75–104k) | language (stringclasses 1: python) | func_code_string (stringlengths 75–104k) | func_code_tokens (sequencelengths 19–28.4k) | func_documentation_string (stringlengths 1–46.9k) | func_documentation_tokens (sequencelengths 1–1.97k) | split_name (stringclasses 1: train) | func_code_url (stringlengths 87–315) |
---|---|---|---|---|---|---|---|---|---|---|
rapidpro/expressions | python/temba_expressions/functions/__init__.py | FunctionManager._get_arg_spec | def _get_arg_spec(func):
"""
Gets the argument spec of the given function, returning defaults as a dict of param names to values
"""
args, varargs, keywords, defaults = inspect.getargspec(func)
# build a mapping from argument names to their default values, if any:
if defaults is None:
defaults = {}
else:
defaulted_args = args[-len(defaults):]
defaults = {name: val for name, val in zip(defaulted_args, defaults)}
return args, varargs, defaults | python | def _get_arg_spec(func):
"""
Gets the argument spec of the given function, returning defaults as a dict of param names to values
"""
args, varargs, keywords, defaults = inspect.getargspec(func)
# build a mapping from argument names to their default values, if any:
if defaults is None:
defaults = {}
else:
defaulted_args = args[-len(defaults):]
defaults = {name: val for name, val in zip(defaulted_args, defaults)}
return args, varargs, defaults | [
"def",
"_get_arg_spec",
"(",
"func",
")",
":",
"args",
",",
"varargs",
",",
"keywords",
",",
"defaults",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"# build a mapping from argument names to their default values, if any:",
"if",
"defaults",
"is",
"None",
":",
"defaults",
"=",
"{",
"}",
"else",
":",
"defaulted_args",
"=",
"args",
"[",
"-",
"len",
"(",
"defaults",
")",
":",
"]",
"defaults",
"=",
"{",
"name",
":",
"val",
"for",
"name",
",",
"val",
"in",
"zip",
"(",
"defaulted_args",
",",
"defaults",
")",
"}",
"return",
"args",
",",
"varargs",
",",
"defaults"
] | Gets the argument spec of the given function, returning defaults as a dict of param names to values | [
"Gets",
"the",
"argument",
"spec",
"of",
"the",
"given",
"function",
"returning",
"defaults",
"as",
"a",
"dict",
"of",
"param",
"names",
"to",
"values"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L105-L118 |
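Note: `inspect.getargspec`, used in the row above, was deprecated throughout Python 3 and removed in 3.11. A minimal sketch of the same defaults-mapping idea on the modern `inspect.signature` API (the name `get_arg_spec_modern` is illustrative, not part of the dataset):

```python
import inspect

def get_arg_spec_modern(func):
    """Map trailing parameter names to their default values."""
    params = inspect.signature(func).parameters
    args = [n for n, p in params.items()
            if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
    varargs = next((n for n, p in params.items()
                    if p.kind == p.VAR_POSITIONAL), None)
    defaults = {n: p.default for n, p in params.items()
                if p.default is not inspect.Parameter.empty}
    return args, varargs, defaults

def example(a, b, c=1, *rest, d=2):
    pass

print(get_arg_spec_modern(example))
# (['a', 'b', 'c'], 'rest', {'c': 1, 'd': 2})
```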
BlueBrain/nat | nat/runOCR.py | check_ocrmypdf | def check_ocrmypdf(input_file, output_file, *args, env=None):
"Run ocrmypdf and confirmed that a valid file was created"
p, out, err = run_ocrmypdf(input_file, output_file, *args, env=env)
if p.returncode != 0:
print('stdout\n======')
print(out)
print('stderr\n======')
print(err)
#assert p.returncode == 0
#assert os.path.exists(output_file), "Output file not created"
#assert os.stat(output_file).st_size > 100, "PDF too small or empty"
return output_file | python | def check_ocrmypdf(input_file, output_file, *args, env=None):
"Run ocrmypdf and confirmed that a valid file was created"
p, out, err = run_ocrmypdf(input_file, output_file, *args, env=env)
if p.returncode != 0:
print('stdout\n======')
print(out)
print('stderr\n======')
print(err)
#assert p.returncode == 0
#assert os.path.exists(output_file), "Output file not created"
#assert os.stat(output_file).st_size > 100, "PDF too small or empty"
return output_file | [
"def",
"check_ocrmypdf",
"(",
"input_file",
",",
"output_file",
",",
"*",
"args",
",",
"env",
"=",
"None",
")",
":",
"p",
",",
"out",
",",
"err",
"=",
"run_ocrmypdf",
"(",
"input_file",
",",
"output_file",
",",
"*",
"args",
",",
"env",
"=",
"env",
")",
"if",
"p",
".",
"returncode",
"!=",
"0",
":",
"print",
"(",
"'stdout\\n======'",
")",
"print",
"(",
"out",
")",
"print",
"(",
"'stderr\\n======'",
")",
"print",
"(",
"err",
")",
"#assert p.returncode == 0",
"#assert os.path.exists(output_file), \"Output file not created\"",
"#assert os.stat(output_file).st_size > 100, \"PDF too small or empty\"",
"return",
"output_file"
] | Run ocrmypdf and confirm that a valid file was created | [
"Run",
"ocrmypdf",
"and",
"confirmed",
"that",
"a",
"valid",
"file",
"was",
"created"
] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/runOCR.py#L24-L36 |
BlueBrain/nat | nat/runOCR.py | run_ocrmypdf | def run_ocrmypdf(input_file, output_file, *args, env=None):
"Run ocrmypdf and let caller deal with results"
if env is None:
env = os.environ
p_args = OCRMYPDF + list(args) + [input_file, output_file]
p = Popen(
p_args, close_fds=True, stdout=PIPE, stderr=PIPE,
universal_newlines=True, env=env)
out, err = p.communicate()
return p, out, err | python | def run_ocrmypdf(input_file, output_file, *args, env=None):
"Run ocrmypdf and let caller deal with results"
if env is None:
env = os.environ
p_args = OCRMYPDF + list(args) + [input_file, output_file]
p = Popen(
p_args, close_fds=True, stdout=PIPE, stderr=PIPE,
universal_newlines=True, env=env)
out, err = p.communicate()
return p, out, err | [
"def",
"run_ocrmypdf",
"(",
"input_file",
",",
"output_file",
",",
"*",
"args",
",",
"env",
"=",
"None",
")",
":",
"if",
"env",
"is",
"None",
":",
"env",
"=",
"os",
".",
"environ",
"p_args",
"=",
"OCRMYPDF",
"+",
"list",
"(",
"args",
")",
"+",
"[",
"input_file",
",",
"output_file",
"]",
"p",
"=",
"Popen",
"(",
"p_args",
",",
"close_fds",
"=",
"True",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"universal_newlines",
"=",
"True",
",",
"env",
"=",
"env",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"return",
"p",
",",
"out",
",",
"err"
] | Run ocrmypdf and let caller deal with results | [
"Run",
"ocrmypdf",
"and",
"let",
"caller",
"deal",
"with",
"results"
] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/runOCR.py#L39-L50 |
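The two BlueBrain/nat rows above wrap an external CLI with `subprocess.Popen`. A self-contained sketch of that pattern, assuming an `ocrmypdf` executable on PATH (`OCRMYPDF` mirrors the module-level constant the rows reference but do not define):

```python
import os
from subprocess import Popen, PIPE

OCRMYPDF = ['ocrmypdf']  # assumed value of the constant used above

def run_cli(argv, env=None):
    # Text-mode pipes and close_fds, as in run_ocrmypdf()
    p = Popen(argv, close_fds=True, stdout=PIPE, stderr=PIPE,
              universal_newlines=True, env=env or os.environ)
    out, err = p.communicate()
    return p.returncode, out, err

rc, out, err = run_cli(OCRMYPDF + ['--version'])
print(rc, out.strip())
```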
ttinies/sc2gameMapRepo | sc2maptool/mapRecord.py | standardizeMapName | def standardizeMapName(mapName):
"""pretty-fy the name for pysc2 map lookup"""
#print("foreignName: %s (%s)"%(mapName, mapName in c.mapNameTranslations))
#if mapName in c.mapNameTranslations:
# return c.mapNameTranslations[mapName]
newName = os.path.basename(mapName)
newName = newName.split(".")[0]
newName = newName.split("(")[0]
newName = re.sub("[LT]E+$", "", newName)
newName = re.sub("-", "", newName)
newName = re.sub(' ', '', newName, flags=re.UNICODE)
foreignName = newName#bytes(mapName, 'utf-16')
#print("foreignName: %s (%s)"%(foreignName, foreignName in c.mapNameTranslations))
if foreignName in c.mapNameTranslations:
return c.mapNameTranslations[foreignName]
return newName | python | def standardizeMapName(mapName):
"""pretty-fy the name for pysc2 map lookup"""
#print("foreignName: %s (%s)"%(mapName, mapName in c.mapNameTranslations))
#if mapName in c.mapNameTranslations:
# return c.mapNameTranslations[mapName]
newName = os.path.basename(mapName)
newName = newName.split(".")[0]
newName = newName.split("(")[0]
newName = re.sub("[LT]E+$", "", newName)
newName = re.sub("-", "", newName)
newName = re.sub(' ', '', newName, flags=re.UNICODE)
foreignName = newName#bytes(mapName, 'utf-16')
#print("foreignName: %s (%s)"%(foreignName, foreignName in c.mapNameTranslations))
if foreignName in c.mapNameTranslations:
return c.mapNameTranslations[foreignName]
return newName | [
"def",
"standardizeMapName",
"(",
"mapName",
")",
":",
"#print(\"foreignName: %s (%s)\"%(mapName, mapName in c.mapNameTranslations))",
"#if mapName in c.mapNameTranslations:",
"# return c.mapNameTranslations[mapName]",
"newName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"mapName",
")",
"newName",
"=",
"newName",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"newName",
"=",
"newName",
".",
"split",
"(",
"\"(\"",
")",
"[",
"0",
"]",
"newName",
"=",
"re",
".",
"sub",
"(",
"\"[LT]E+$\"",
",",
"\"\"",
",",
"newName",
")",
"newName",
"=",
"re",
".",
"sub",
"(",
"\"-\"",
",",
"\"\"",
",",
"newName",
")",
"newName",
"=",
"re",
".",
"sub",
"(",
"' '",
",",
"''",
",",
"newName",
",",
"flags",
"=",
"re",
".",
"UNICODE",
")",
"foreignName",
"=",
"newName",
"#bytes(mapName, 'utf-16')",
"#print(\"foreignName: %s (%s)\"%(foreignName, foreignName in c.mapNameTranslations))",
"if",
"foreignName",
"in",
"c",
".",
"mapNameTranslations",
":",
"return",
"c",
".",
"mapNameTranslations",
"[",
"foreignName",
"]",
"return",
"newName"
] | pretty-fy the name for pysc2 map lookup | [
"pretty",
"-",
"fy",
"the",
"name",
"for",
"pysc2",
"map",
"lookup"
] | train | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/mapRecord.py#L9-L24 |
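The normalization above strips the file extension, any parenthesised suffix, a trailing ladder-edition tag (`LE`/`TE`), hyphens and spaces before the translation-table lookup. The same steps in isolation (the `c.mapNameTranslations` lookup is omitted):

```python
import os
import re

def clean(map_name):
    name = os.path.basename(map_name)
    name = name.split('.')[0].split('(')[0]
    name = re.sub('[LT]E+$', '', name)  # drop trailing LE/TE edition tag
    name = re.sub('-', '', name)
    return re.sub(' ', '', name, flags=re.UNICODE)

print(clean('/maps/Abyssal Reef LE.SC2Map'))  # AbyssalReef
```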
lablup/backend.ai-common | src/ai/backend/common/utils.py | env_info | def env_info():
'''
Returns a string that contains the Python version and runtime path.
'''
v = sys.version_info
pyver = f'Python {v.major}.{v.minor}.{v.micro}'
if v.releaselevel == 'alpha':
pyver += 'a'
if v.releaselevel == 'beta':
pyver += 'b'
if v.releaselevel == 'candidate':
pyver += 'rc'
if v.releaselevel != 'final':
pyver += str(v.serial)
return f'{pyver} (env: {sys.prefix})' | python | def env_info():
'''
Returns a string that contains the Python version and runtime path.
'''
v = sys.version_info
pyver = f'Python {v.major}.{v.minor}.{v.micro}'
if v.releaselevel == 'alpha':
pyver += 'a'
if v.releaselevel == 'beta':
pyver += 'b'
if v.releaselevel == 'candidate':
pyver += 'rc'
if v.releaselevel != 'final':
pyver += str(v.serial)
return f'{pyver} (env: {sys.prefix})' | [
"def",
"env_info",
"(",
")",
":",
"v",
"=",
"sys",
".",
"version_info",
"pyver",
"=",
"f'Python {v.major}.{v.minor}.{v.micro}'",
"if",
"v",
".",
"releaselevel",
"==",
"'alpha'",
":",
"pyver",
"+=",
"'a'",
"if",
"v",
".",
"releaselevel",
"==",
"'beta'",
":",
"pyver",
"+=",
"'b'",
"if",
"v",
".",
"releaselevel",
"==",
"'candidate'",
":",
"pyver",
"+=",
"'rc'",
"if",
"v",
".",
"releaselevel",
"!=",
"'final'",
":",
"pyver",
"+=",
"str",
"(",
"v",
".",
"serial",
")",
"return",
"f'{pyver} (env: {sys.prefix})'"
] | Returns a string that contains the Python version and runtime path. | [
"Returns",
"a",
"string",
"that",
"contains",
"the",
"Python",
"version",
"and",
"runtime",
"path",
"."
] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/utils.py#L17-L31 |
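A condensed equivalent of `env_info()` that folds the chain of `if` statements into a lookup table; behaviour is the same:

```python
import sys

v = sys.version_info
pyver = f'Python {v.major}.{v.minor}.{v.micro}'
if v.releaselevel != 'final':
    letter = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}.get(v.releaselevel, '')
    pyver += letter + str(v.serial)
print(f'{pyver} (env: {sys.prefix})')  # e.g. "Python 3.11.4 (env: /usr/local)"
```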
lablup/backend.ai-common | src/ai/backend/common/utils.py | dict2kvlist | def dict2kvlist(o):
'''
Serializes a dict-like object into a generator of the flatten list of
repeating key-value pairs. It is useful when using HMSET method in Redis.
Example:
>>> list(dict2kvlist({'a': 1, 'b': 2}))
['a', 1, 'b', 2]
'''
return chain.from_iterable((k, v) for k, v in o.items()) | python | def dict2kvlist(o):
'''
Serializes a dict-like object into a generator of the flatten list of
repeating key-value pairs. It is useful when using HMSET method in Redis.
Example:
>>> list(dict2kvlist({'a': 1, 'b': 2}))
['a', 1, 'b', 2]
'''
return chain.from_iterable((k, v) for k, v in o.items()) | [
"def",
"dict2kvlist",
"(",
"o",
")",
":",
"return",
"chain",
".",
"from_iterable",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"o",
".",
"items",
"(",
")",
")"
] | Serializes a dict-like object into a generator of the flatten list of
repeating key-value pairs. It is useful when using HMSET method in Redis.
Example:
>>> list(dict2kvlist({'a': 1, 'b': 2}))
['a', 1, 'b', 2] | [
"Serializes",
"a",
"dict",
"-",
"like",
"object",
"into",
"a",
"generator",
"of",
"the",
"flatten",
"list",
"of",
"repeating",
"key",
"-",
"value",
"pairs",
".",
"It",
"is",
"useful",
"when",
"using",
"HMSET",
"method",
"in",
"Redis",
"."
] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/utils.py#L43-L52 |
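A runnable usage sketch; the flattened pairs are exactly the argument shape the legacy Redis `HMSET` command takes (the redis-cli line is illustrative only):

```python
from itertools import chain

def dict2kvlist(o):
    return chain.from_iterable((k, v) for k, v in o.items())

print(list(dict2kvlist({'a': 1, 'b': 2})))  # ['a', 1, 'b', 2]
# redis-cli equivalent: HMSET mykey a 1 b 2
```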
lablup/backend.ai-common | src/ai/backend/common/utils.py | nmget | def nmget(o, key_path, def_val=None, path_delimiter='.', null_as_default=True):
'''
A short-hand for retrieving a value from nested mappings
("nested-mapping-get"). At each level it checks if the given "path"
component in the given key exists and return the default value whenever
fails.
Example:
>>> o = {'a':{'b':1}, 'x': None}
>>> nmget(o, 'a', 0)
{'b': 1}
>>> nmget(o, 'a.b', 0)
1
>>> nmget(o, 'a/b', 0, '/')
1
>>> nmget(o, 'a.c', 0)
0
>>> nmget(o, 'x', 0)
0
>>> nmget(o, 'x', 0, null_as_default=False)
None
'''
pieces = key_path.split(path_delimiter)
while pieces:
p = pieces.pop(0)
if o is None or p not in o:
return def_val
o = o[p]
if o is None and null_as_default:
return def_val
return o | python | def nmget(o, key_path, def_val=None, path_delimiter='.', null_as_default=True):
'''
A short-hand for retrieving a value from nested mappings
("nested-mapping-get"). At each level it checks if the given "path"
component in the given key exists and return the default value whenever
fails.
Example:
>>> o = {'a':{'b':1}, 'x': None}
>>> nmget(o, 'a', 0)
{'b': 1}
>>> nmget(o, 'a.b', 0)
1
>>> nmget(o, 'a/b', 0, '/')
1
>>> nmget(o, 'a.c', 0)
0
>>> nmget(o, 'x', 0)
0
>>> nmget(o, 'x', 0, null_as_default=False)
None
'''
pieces = key_path.split(path_delimiter)
while pieces:
p = pieces.pop(0)
if o is None or p not in o:
return def_val
o = o[p]
if o is None and null_as_default:
return def_val
return o | [
"def",
"nmget",
"(",
"o",
",",
"key_path",
",",
"def_val",
"=",
"None",
",",
"path_delimiter",
"=",
"'.'",
",",
"null_as_default",
"=",
"True",
")",
":",
"pieces",
"=",
"key_path",
".",
"split",
"(",
"path_delimiter",
")",
"while",
"pieces",
":",
"p",
"=",
"pieces",
".",
"pop",
"(",
"0",
")",
"if",
"o",
"is",
"None",
"or",
"p",
"not",
"in",
"o",
":",
"return",
"def_val",
"o",
"=",
"o",
"[",
"p",
"]",
"if",
"o",
"is",
"None",
"and",
"null_as_default",
":",
"return",
"def_val",
"return",
"o"
] | A short-hand for retrieving a value from nested mappings
("nested-mapping-get"). At each level it checks if the given "path"
component in the given key exists and return the default value whenever
fails.
Example:
>>> o = {'a':{'b':1}, 'x': None}
>>> nmget(o, 'a', 0)
{'b': 1}
>>> nmget(o, 'a.b', 0)
1
>>> nmget(o, 'a/b', 0, '/')
1
>>> nmget(o, 'a.c', 0)
0
>>> nmget(o, 'x', 0)
0
>>> nmget(o, 'x', 0, null_as_default=False)
None | [
"A",
"short",
"-",
"hand",
"for",
"retrieving",
"a",
"value",
"from",
"nested",
"mappings",
"(",
"nested",
"-",
"mapping",
"-",
"get",
")",
".",
"At",
"each",
"level",
"it",
"checks",
"if",
"the",
"given",
"path",
"component",
"in",
"the",
"given",
"key",
"exists",
"and",
"return",
"the",
"default",
"value",
"whenever",
"fails",
"."
] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/utils.py#L61-L91 |
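A usage sketch on a nested config, importing `nmget` from the module this row's func_code_url points at (assumes the backend.ai-common package is installed); note the `null_as_default` behaviour on the `port` lookup:

```python
from ai.backend.common.utils import nmget  # module path from func_code_url above

config = {'redis': {'addr': {'host': 'localhost', 'port': None}}}
print(nmget(config, 'redis.addr.host', '0.0.0.0'))       # localhost
print(nmget(config, 'redis.addr.port', 6379))            # 6379 (None -> default)
print(nmget(config, 'redis/addr/host', '0.0.0.0', '/'))  # localhost
```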
ska-sa/katversion | katversion/build.py | patch_init_py | def patch_init_py(base_dir, name, version):
"""Patch __init__.py to remove version check and append hard-coded version."""
# Ensure main package dir is there (may be absent in script-only packages)
package_dir = os.path.join(base_dir, name)
if not os.path.isdir(package_dir):
os.makedirs(package_dir)
# Open top-level __init__.py and read whole file
init_py = os.path.join(package_dir, '__init__.py')
log.info("patching %s to bake in version '%s'" % (init_py, version))
with open(init_py, 'r+') as init_file:
lines = init_file.readlines()
# Search for sentinels indicating version checking block
try:
begin = lines.index("# BEGIN VERSION CHECK\n")
end = lines.index("# END VERSION CHECK\n")
except ValueError:
begin = end = len(lines)
# Delete existing repo version checking block in file
init_file.seek(0)
init_file.writelines(lines[:begin] + lines[end+1:])
# Append new version attribute to ensure it is authoritative, but only
# if it is not already there (this happens in pip sdist installs)
version_cmd = "__version__ = '{0}'\n".format(version)
if not lines or lines[-1] != version_cmd:
init_file.write("\n# Automatically added by katversion\n")
init_file.write(version_cmd)
init_file.truncate() | python | def patch_init_py(base_dir, name, version):
"""Patch __init__.py to remove version check and append hard-coded version."""
# Ensure main package dir is there (may be absent in script-only packages)
package_dir = os.path.join(base_dir, name)
if not os.path.isdir(package_dir):
os.makedirs(package_dir)
# Open top-level __init__.py and read whole file
init_py = os.path.join(package_dir, '__init__.py')
log.info("patching %s to bake in version '%s'" % (init_py, version))
with open(init_py, 'r+') as init_file:
lines = init_file.readlines()
# Search for sentinels indicating version checking block
try:
begin = lines.index("# BEGIN VERSION CHECK\n")
end = lines.index("# END VERSION CHECK\n")
except ValueError:
begin = end = len(lines)
# Delete existing repo version checking block in file
init_file.seek(0)
init_file.writelines(lines[:begin] + lines[end+1:])
# Append new version attribute to ensure it is authoritative, but only
# if it is not already there (this happens in pip sdist installs)
version_cmd = "__version__ = '{0}'\n".format(version)
if not lines or lines[-1] != version_cmd:
init_file.write("\n# Automatically added by katversion\n")
init_file.write(version_cmd)
init_file.truncate() | [
"def",
"patch_init_py",
"(",
"base_dir",
",",
"name",
",",
"version",
")",
":",
"# Ensure main package dir is there (may be absent in script-only packages)",
"package_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"name",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"package_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"package_dir",
")",
"# Open top-level __init__.py and read whole file",
"init_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package_dir",
",",
"'__init__.py'",
")",
"log",
".",
"info",
"(",
"\"patching %s to bake in version '%s'\"",
"%",
"(",
"init_py",
",",
"version",
")",
")",
"with",
"open",
"(",
"init_py",
",",
"'r+'",
")",
"as",
"init_file",
":",
"lines",
"=",
"init_file",
".",
"readlines",
"(",
")",
"# Search for sentinels indicating version checking block",
"try",
":",
"begin",
"=",
"lines",
".",
"index",
"(",
"\"# BEGIN VERSION CHECK\\n\"",
")",
"end",
"=",
"lines",
".",
"index",
"(",
"\"# END VERSION CHECK\\n\"",
")",
"except",
"ValueError",
":",
"begin",
"=",
"end",
"=",
"len",
"(",
"lines",
")",
"# Delete existing repo version checking block in file",
"init_file",
".",
"seek",
"(",
"0",
")",
"init_file",
".",
"writelines",
"(",
"lines",
"[",
":",
"begin",
"]",
"+",
"lines",
"[",
"end",
"+",
"1",
":",
"]",
")",
"# Append new version attribute to ensure it is authoritative, but only",
"# if it is not already there (this happens in pip sdist installs)",
"version_cmd",
"=",
"\"__version__ = '{0}'\\n\"",
".",
"format",
"(",
"version",
")",
"if",
"not",
"lines",
"or",
"lines",
"[",
"-",
"1",
"]",
"!=",
"version_cmd",
":",
"init_file",
".",
"write",
"(",
"\"\\n# Automatically added by katversion\\n\"",
")",
"init_file",
".",
"write",
"(",
"version_cmd",
")",
"init_file",
".",
"truncate",
"(",
")"
] | Patch __init__.py to remove version check and append hard-coded version. | [
"Patch",
"__init__",
".",
"py",
"to",
"remove",
"version",
"check",
"and",
"append",
"hard",
"-",
"coded",
"version",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/build.py#L32-L58 |
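The sentinel comments `patch_init_py()` searches for bracket a version-detection block in the package's `__init__.py`. A reconstruction of that block from katversion's documented usage; only the sentinel lines are taken verbatim from the code above, the body between them is illustrative:

```python
# mypackage/__init__.py
# BEGIN VERSION CHECK
try:
    import katversion as _katversion
except ImportError:
    import time as _time
    __version__ = "0.0+unknown.{}".format(_time.strftime('%Y%m%d%H%M'))
else:
    __version__ = _katversion.get_version(__path__[0])
# END VERSION CHECK
```

After `patch_init_py()` runs, this whole block is replaced by a baked-in `__version__ = '...'` line.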
ska-sa/katversion | katversion/build.py | setuptools_entry | def setuptools_entry(dist, keyword, value):
"""Setuptools entry point for setting version and baking it into package."""
# If 'use_katversion' is False, ignore the rest
if not value:
return
# Enforce the version obtained by katversion, overriding user setting
version = get_version()
if dist.metadata.version is not None:
s = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead"
warnings.warn(s.format(dist.metadata.version, version))
dist.metadata.version = version
# Extend build_py command to bake version string into installed package
ExistingCustomBuildPy = dist.cmdclass.get('build_py', object)
class KatVersionBuildPy(AddVersionToInitBuildPy, ExistingCustomBuildPy):
"""First perform existing build_py and then bake in version string."""
dist.cmdclass['build_py'] = KatVersionBuildPy
# Extend sdist command to bake version string into source package
ExistingCustomSdist = dist.cmdclass.get('sdist', object)
class KatVersionSdist(AddVersionToInitSdist, ExistingCustomSdist):
"""First perform existing sdist and then bake in version string."""
dist.cmdclass['sdist'] = KatVersionSdist | python | def setuptools_entry(dist, keyword, value):
"""Setuptools entry point for setting version and baking it into package."""
# If 'use_katversion' is False, ignore the rest
if not value:
return
# Enforce the version obtained by katversion, overriding user setting
version = get_version()
if dist.metadata.version is not None:
s = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead"
warnings.warn(s.format(dist.metadata.version, version))
dist.metadata.version = version
# Extend build_py command to bake version string into installed package
ExistingCustomBuildPy = dist.cmdclass.get('build_py', object)
class KatVersionBuildPy(AddVersionToInitBuildPy, ExistingCustomBuildPy):
"""First perform existing build_py and then bake in version string."""
dist.cmdclass['build_py'] = KatVersionBuildPy
# Extend sdist command to bake version string into source package
ExistingCustomSdist = dist.cmdclass.get('sdist', object)
class KatVersionSdist(AddVersionToInitSdist, ExistingCustomSdist):
"""First perform existing sdist and then bake in version string."""
dist.cmdclass['sdist'] = KatVersionSdist | [
"def",
"setuptools_entry",
"(",
"dist",
",",
"keyword",
",",
"value",
")",
":",
"# If 'use_katversion' is False, ignore the rest",
"if",
"not",
"value",
":",
"return",
"# Enforce the version obtained by katversion, overriding user setting",
"version",
"=",
"get_version",
"(",
")",
"if",
"dist",
".",
"metadata",
".",
"version",
"is",
"not",
"None",
":",
"s",
"=",
"\"Ignoring explicit version='{0}' in setup.py, using '{1}' instead\"",
"warnings",
".",
"warn",
"(",
"s",
".",
"format",
"(",
"dist",
".",
"metadata",
".",
"version",
",",
"version",
")",
")",
"dist",
".",
"metadata",
".",
"version",
"=",
"version",
"# Extend build_py command to bake version string into installed package",
"ExistingCustomBuildPy",
"=",
"dist",
".",
"cmdclass",
".",
"get",
"(",
"'build_py'",
",",
"object",
")",
"class",
"KatVersionBuildPy",
"(",
"AddVersionToInitBuildPy",
",",
"ExistingCustomBuildPy",
")",
":",
"\"\"\"First perform existing build_py and then bake in version string.\"\"\"",
"dist",
".",
"cmdclass",
"[",
"'build_py'",
"]",
"=",
"KatVersionBuildPy",
"# Extend sdist command to bake version string into source package",
"ExistingCustomSdist",
"=",
"dist",
".",
"cmdclass",
".",
"get",
"(",
"'sdist'",
",",
"object",
")",
"class",
"KatVersionSdist",
"(",
"AddVersionToInitSdist",
",",
"ExistingCustomSdist",
")",
":",
"\"\"\"First perform existing sdist and then bake in version string.\"\"\"",
"dist",
".",
"cmdclass",
"[",
"'sdist'",
"]",
"=",
"KatVersionSdist"
] | Setuptools entry point for setting version and baking it into package. | [
"Setuptools",
"entry",
"point",
"for",
"setting",
"version",
"and",
"baking",
"it",
"into",
"package",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/build.py#L102-L122 |
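The entry point is driven from `setup.py` via the `use_katversion` keyword mentioned in the comment above; a minimal sketch (package name illustrative):

```python
# setup.py
from setuptools import setup

setup(
    name='mypackage',
    packages=['mypackage'],
    setup_requires=['katversion'],
    use_katversion=True,  # triggers setuptools_entry() above
)
```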
beregond/super_state_machine | super_state_machine/utils.py | is_ | def is_(self, state):
"""Check if machine is in given state."""
translator = self._meta['translator']
state = translator.translate(state)
return self.actual_state == state | python | def is_(self, state):
"""Check if machine is in given state."""
translator = self._meta['translator']
state = translator.translate(state)
return self.actual_state == state | [
"def",
"is_",
"(",
"self",
",",
"state",
")",
":",
"translator",
"=",
"self",
".",
"_meta",
"[",
"'translator'",
"]",
"state",
"=",
"translator",
".",
"translate",
"(",
"state",
")",
"return",
"self",
".",
"actual_state",
"==",
"state"
] | Check if machine is in given state. | [
"Check",
"if",
"machine",
"is",
"in",
"given",
"state",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L9-L13 |
beregond/super_state_machine | super_state_machine/utils.py | can_be_ | def can_be_(self, state):
"""Check if machine can transit to given state."""
translator = self._meta['translator']
state = translator.translate(state)
if self._meta['complete']:
return True
if self.actual_state is None:
return True
transitions = self._meta['transitions'][self.actual_state]
return state in transitions | python | def can_be_(self, state):
"""Check if machine can transit to given state."""
translator = self._meta['translator']
state = translator.translate(state)
if self._meta['complete']:
return True
if self.actual_state is None:
return True
transitions = self._meta['transitions'][self.actual_state]
return state in transitions | [
"def",
"can_be_",
"(",
"self",
",",
"state",
")",
":",
"translator",
"=",
"self",
".",
"_meta",
"[",
"'translator'",
"]",
"state",
"=",
"translator",
".",
"translate",
"(",
"state",
")",
"if",
"self",
".",
"_meta",
"[",
"'complete'",
"]",
":",
"return",
"True",
"if",
"self",
".",
"actual_state",
"is",
"None",
":",
"return",
"True",
"transitions",
"=",
"self",
".",
"_meta",
"[",
"'transitions'",
"]",
"[",
"self",
".",
"actual_state",
"]",
"return",
"state",
"in",
"transitions"
] | Check if machine can transit to given state. | [
"Check",
"if",
"machine",
"can",
"transit",
"to",
"given",
"state",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L16-L28 |
beregond/super_state_machine | super_state_machine/utils.py | force_set | def force_set(self, state):
"""Set new state without checking if transition is allowed."""
translator = self._meta['translator']
state = translator.translate(state)
attr = self._meta['state_attribute_name']
setattr(self, attr, state) | python | def force_set(self, state):
"""Set new state without checking if transition is allowed."""
translator = self._meta['translator']
state = translator.translate(state)
attr = self._meta['state_attribute_name']
setattr(self, attr, state) | [
"def",
"force_set",
"(",
"self",
",",
"state",
")",
":",
"translator",
"=",
"self",
".",
"_meta",
"[",
"'translator'",
"]",
"state",
"=",
"translator",
".",
"translate",
"(",
"state",
")",
"attr",
"=",
"self",
".",
"_meta",
"[",
"'state_attribute_name'",
"]",
"setattr",
"(",
"self",
",",
"attr",
",",
"state",
")"
] | Set new state without checking if transition is allowed. | [
"Set",
"new",
"state",
"without",
"checking",
"if",
"transition",
"is",
"allowed",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L31-L36 |
beregond/super_state_machine | super_state_machine/utils.py | set_ | def set_(self, state):
"""Set new state for machine."""
if not self.can_be_(state):
state = self._meta['translator'].translate(state)
raise TransitionError(
"Cannot transit from '{actual_value}' to '{value}'."
.format(actual_value=self.actual_state.value, value=state.value)
)
self.force_set(state) | python | def set_(self, state):
"""Set new state for machine."""
if not self.can_be_(state):
state = self._meta['translator'].translate(state)
raise TransitionError(
"Cannot transit from '{actual_value}' to '{value}'."
.format(actual_value=self.actual_state.value, value=state.value)
)
self.force_set(state) | [
"def",
"set_",
"(",
"self",
",",
"state",
")",
":",
"if",
"not",
"self",
".",
"can_be_",
"(",
"state",
")",
":",
"state",
"=",
"self",
".",
"_meta",
"[",
"'translator'",
"]",
".",
"translate",
"(",
"state",
")",
"raise",
"TransitionError",
"(",
"\"Cannot transit from '{actual_value}' to '{value}'.\"",
".",
"format",
"(",
"actual_value",
"=",
"self",
".",
"actual_state",
".",
"value",
",",
"value",
"=",
"state",
".",
"value",
")",
")",
"self",
".",
"force_set",
"(",
"state",
")"
] | Set new state for machine. | [
"Set",
"new",
"state",
"for",
"machine",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L39-L48 |
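`is_()`, `can_be_()`, `force_set()` and `set_()` above are bound methods of super_state_machine's `StateMachine`. A toy machine showing the transition check; the `Meta` option names follow the library's README and should be treated as assumptions:

```python
from enum import Enum
from super_state_machine import machines

class Lock(machines.StateMachine):
    class States(Enum):
        OPEN = 'open'
        LOCKED = 'locked'

    class Meta:
        initial_state = 'locked'
        transitions = {'locked': ['open'], 'open': ['locked']}

lock = Lock()
print(lock.is_('locked'))    # True  (is_ above)
print(lock.can_be_('open'))  # True  (can_be_ above)
lock.set_('open')            # checked transition (set_ above)
```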
beregond/super_state_machine | super_state_machine/utils.py | generate_getter | def generate_getter(value):
"""Generate getter for given value."""
@property
@wraps(is_)
def getter(self):
return self.is_(value)
return getter | python | def generate_getter(value):
"""Generate getter for given value."""
@property
@wraps(is_)
def getter(self):
return self.is_(value)
return getter | [
"def",
"generate_getter",
"(",
"value",
")",
":",
"@",
"property",
"@",
"wraps",
"(",
"is_",
")",
"def",
"getter",
"(",
"self",
")",
":",
"return",
"self",
".",
"is_",
"(",
"value",
")",
"return",
"getter"
] | Generate getter for given value. | [
"Generate",
"getter",
"for",
"given",
"value",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L64-L71 |
beregond/super_state_machine | super_state_machine/utils.py | generate_checker | def generate_checker(value):
"""Generate state checker for given value."""
@property
@wraps(can_be_)
def checker(self):
return self.can_be_(value)
return checker | python | def generate_checker(value):
"""Generate state checker for given value."""
@property
@wraps(can_be_)
def checker(self):
return self.can_be_(value)
return checker | [
"def",
"generate_checker",
"(",
"value",
")",
":",
"@",
"property",
"@",
"wraps",
"(",
"can_be_",
")",
"def",
"checker",
"(",
"self",
")",
":",
"return",
"self",
".",
"can_be_",
"(",
"value",
")",
"return",
"checker"
] | Generate state checker for given value. | [
"Generate",
"state",
"checker",
"for",
"given",
"value",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L74-L81 |
beregond/super_state_machine | super_state_machine/utils.py | generate_setter | def generate_setter(value):
"""Generate setter for given value."""
@wraps(set_)
def setter(self):
self.set_(value)
return setter | python | def generate_setter(value):
"""Generate setter for given value."""
@wraps(set_)
def setter(self):
self.set_(value)
return setter | [
"def",
"generate_setter",
"(",
"value",
")",
":",
"@",
"wraps",
"(",
"set_",
")",
"def",
"setter",
"(",
"self",
")",
":",
"self",
".",
"set_",
"(",
"value",
")",
"return",
"setter"
] | Generate setter for given value. | [
"Generate",
"setter",
"for",
"given",
"value",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L84-L90 |
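The three generators above exist to attach per-state convenience attributes at class-creation time. A self-contained miniature of that wiring (class and attribute names illustrative):

```python
from functools import wraps

def is_(self, state):
    return self._state == state

def generate_getter(value):
    @property
    @wraps(is_)
    def getter(self):
        return self.is_(value)
    return getter

class Toy:
    _state = 'open'
    is_ = is_
    is_open = generate_getter('open')      # property: reads like toy.is_open
    is_locked = generate_getter('locked')

toy = Toy()
print(toy.is_open, toy.is_locked)  # True False
```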
beregond/super_state_machine | super_state_machine/utils.py | EnumValueTranslator.translate | def translate(self, value):
"""Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum.
"""
if self._check_if_already_proper(value):
return value
try:
return self.search_table[value]
except KeyError:
raise ValueError("Value {value} doesn't match any state.".format(
value=value
)) | python | def translate(self, value):
"""Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum.
"""
if self._check_if_already_proper(value):
return value
try:
return self.search_table[value]
except KeyError:
raise ValueError("Value {value} doesn't match any state.".format(
value=value
)) | [
"def",
"translate",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_check_if_already_proper",
"(",
"value",
")",
":",
"return",
"value",
"try",
":",
"return",
"self",
".",
"search_table",
"[",
"value",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Value {value} doesn't match any state.\"",
".",
"format",
"(",
"value",
"=",
"value",
")",
")"
] | Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum. | [
"Translate",
"value",
"to",
"enum",
"instance",
"."
] | train | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L127-L142 |
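A minimal stand-in for the translator's `search_table`, mapping raw values to enum members; the real class also short-circuits on values that are already enum instances, and may index unambiguous value prefixes (an assumption about its internals, omitted here):

```python
from enum import Enum

class States(Enum):
    OPEN = 'open'
    LOCKED = 'locked'

search_table = {m.value: m for m in States}

def translate(value):
    if isinstance(value, States):  # the "already proper" short-circuit
        return value
    try:
        return search_table[value]
    except KeyError:
        raise ValueError(f"Value {value} doesn't match any state.")

print(translate('open'))         # States.OPEN
print(translate(States.LOCKED))  # States.LOCKED
```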
vkosuri/dialogflow-lite | dialogflow_lite/dialogflow.py | Dialogflow._query | def _query(self, text):
"""
Takes natural language text and information as query parameters and returns information as JSON.
"""
params = (
('v', self.api_version),
('query', text),
('lang', self.language),
('sessionId', self.session_id),
('timezone', self.timezone),
)
# store query_response if required
if self.query_response:
self.previous_query_response = self.query_response
self.query_response = result = self.session.get(url=self.query_url, params=params).json()
return result | python | def _query(self, text):
"""
Takes natural language text and information as query parameters and returns information as JSON.
"""
params = (
('v', self.api_version),
('query', text),
('lang', self.language),
('sessionId', self.session_id),
('timezone', self.timezone),
)
# store query_response if required
if self.query_response:
self.previous_query_response = self.query_response
self.query_response = result = self.session.get(url=self.query_url, params=params).json()
return result | [
"def",
"_query",
"(",
"self",
",",
"text",
")",
":",
"params",
"=",
"(",
"(",
"'v'",
",",
"self",
".",
"api_version",
")",
",",
"(",
"'query'",
",",
"text",
")",
",",
"(",
"'lang'",
",",
"self",
".",
"language",
")",
",",
"(",
"'sessionId'",
",",
"self",
".",
"session_id",
")",
",",
"(",
"'timezone'",
",",
"self",
".",
"timezone",
")",
",",
")",
"# store query_response if required",
"if",
"self",
".",
"query_response",
":",
"self",
".",
"previous_query_response",
"=",
"self",
".",
"query_response",
"self",
".",
"query_response",
"=",
"result",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
"=",
"self",
".",
"query_url",
",",
"params",
"=",
"params",
")",
".",
"json",
"(",
")",
"return",
"result"
] | Takes natural language text and information as query parameters and returns information as JSON. | [
"Takes",
"natural",
"language",
"text",
"and",
"information",
"as",
"query",
"parameters",
"and",
"returns",
"information",
"as",
"JSON",
"."
] | train | https://github.com/vkosuri/dialogflow-lite/blob/488d6ffb4128471e672c8304995514a3c8982edc/dialogflow_lite/dialogflow.py#L88-L105 |
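The request shape of the (since-retired) Dialogflow v1 `/query` endpoint this client targets, written with plain `requests`; the base URL, protocol date and token header are assumptions based on the v1 API, not taken from this row:

```python
import uuid
import requests

API_BASE = 'https://api.dialogflow.com/v1/query'                # assumed v1 endpoint
headers = {'Authorization': 'Bearer YOUR_CLIENT_ACCESS_TOKEN'}  # placeholder token
params = {
    'v': '20150910',  # assumed protocol version
    'query': 'hello',
    'lang': 'en',
    'sessionId': uuid.uuid4().hex,
    'timezone': 'UTC',
}
resp = requests.get(API_BASE, headers=headers, params=params)
print(resp.json().get('result', {}).get('fulfillment'))
```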
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | is_venv | def is_venv():
"""Check whether if this workspace is a virtualenv.
"""
dir_path = os.path.dirname(SRC)
is_venv_flag = True
if SYS_NAME == "Windows":
executable_list = ["activate", "pip.exe", "python.exe"]
elif SYS_NAME in ["Darwin", "Linux"]:
executable_list = ["activate", "pip", "python"]
for executable in executable_list:
path = os.path.join(dir_path, BIN_SCRIPTS, executable)
if not os.path.exists(path):
is_venv_flag = False
return is_venv_flag | python | def is_venv():
"""Check whether if this workspace is a virtualenv.
"""
dir_path = os.path.dirname(SRC)
is_venv_flag = True
if SYS_NAME == "Windows":
executable_list = ["activate", "pip.exe", "python.exe"]
elif SYS_NAME in ["Darwin", "Linux"]:
executable_list = ["activate", "pip", "python"]
for executable in executable_list:
path = os.path.join(dir_path, BIN_SCRIPTS, executable)
if not os.path.exists(path):
is_venv_flag = False
return is_venv_flag | [
"def",
"is_venv",
"(",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"SRC",
")",
"is_venv_flag",
"=",
"True",
"if",
"SYS_NAME",
"==",
"\"Windows\"",
":",
"executable_list",
"=",
"[",
"\"activate\"",
",",
"\"pip.exe\"",
",",
"\"python.exe\"",
"]",
"elif",
"SYS_NAME",
"in",
"[",
"\"Darwin\"",
",",
"\"Linux\"",
"]",
":",
"executable_list",
"=",
"[",
"\"activate\"",
",",
"\"pip\"",
",",
"\"python\"",
"]",
"for",
"executable",
"in",
"executable_list",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"BIN_SCRIPTS",
",",
"executable",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"is_venv_flag",
"=",
"False",
"return",
"is_venv_flag"
] | Check whether if this workspace is a virtualenv. | [
"Check",
"whether",
"if",
"this",
"workspace",
"is",
"a",
"virtualenv",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L47-L63 |
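The helper above infers a virtualenv from the presence of activation scripts; on any supported CPython the same question can be answered from interpreter state alone:

```python
import sys

def in_virtualenv():
    # venv/virtualenv repoint sys.prefix at the environment, while
    # sys.base_prefix keeps the base interpreter's location.
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)

print(in_virtualenv())
```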
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | find_linux_venv_py_version | def find_linux_venv_py_version():
"""Find python version name used in this virtualenv.
For example: ``python2.7``, ``python3.4``
"""
available_python_version = [
"python2.6",
"python2.7",
"python3.3",
"python3.4",
"python3.5",
"python3.6",
]
dir_path = os.path.dirname(SRC)
for basename in os.listdir(os.path.join(dir_path, BIN_SCRIPTS)):
for python_version in available_python_version:
if python_version in basename:
return python_version
raise Exception("Can't find virtualenv python version!") | python | def find_linux_venv_py_version():
"""Find python version name used in this virtualenv.
For example: ``python2.7``, ``python3.4``
"""
available_python_version = [
"python2.6",
"python2.7",
"python3.3",
"python3.4",
"python3.5",
"python3.6",
]
dir_path = os.path.dirname(SRC)
for basename in os.listdir(os.path.join(dir_path, BIN_SCRIPTS)):
for python_version in available_python_version:
if python_version in basename:
return python_version
raise Exception("Can't find virtualenv python version!") | [
"def",
"find_linux_venv_py_version",
"(",
")",
":",
"available_python_version",
"=",
"[",
"\"python2.6\"",
",",
"\"python2.7\"",
",",
"\"python3.3\"",
",",
"\"python3.4\"",
",",
"\"python3.5\"",
",",
"\"python3.6\"",
",",
"]",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"SRC",
")",
"for",
"basename",
"in",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"BIN_SCRIPTS",
")",
")",
":",
"for",
"python_version",
"in",
"available_python_version",
":",
"if",
"python_version",
"in",
"basename",
":",
"return",
"python_version",
"raise",
"Exception",
"(",
"\"Can't find virtualenv python version!\"",
")"
] | Find python version name used in this virtualenv.
For example: ``python2.7``, ``python3.4`` | [
"Find",
"python",
"version",
"name",
"used",
"in",
"this",
"virtualenv",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L66-L84 |
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | find_venv_DST | def find_venv_DST():
"""Find where this package should be installed to in this virtualenv.
For example: ``/path-to-venv/lib/python2.7/site-packages/package-name``
"""
dir_path = os.path.dirname(SRC)
if SYS_NAME == "Windows":
DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME)
elif SYS_NAME in ["Darwin", "Linux"]:
python_version = find_linux_venv_py_version()
DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME)
return DST | python | def find_venv_DST():
"""Find where this package should be installed to in this virtualenv.
For example: ``/path-to-venv/lib/python2.7/site-packages/package-name``
"""
dir_path = os.path.dirname(SRC)
if SYS_NAME == "Windows":
DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME)
elif SYS_NAME in ["Darwin", "Linux"]:
python_version = find_linux_venv_py_version()
DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME)
return DST | [
"def",
"find_venv_DST",
"(",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"SRC",
")",
"if",
"SYS_NAME",
"==",
"\"Windows\"",
":",
"DST",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"\"Lib\"",
",",
"\"site-packages\"",
",",
"PKG_NAME",
")",
"elif",
"SYS_NAME",
"in",
"[",
"\"Darwin\"",
",",
"\"Linux\"",
"]",
":",
"python_version",
"=",
"find_linux_venv_py_version",
"(",
")",
"DST",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"\"lib\"",
",",
"python_version",
",",
"\"site-packages\"",
",",
"PKG_NAME",
")",
"return",
"DST"
] | Find where this package should be installed to in this virtualenv.
For example: ``/path-to-venv/lib/python2.7/site-packages/package-name`` | [
"Find",
"where",
"this",
"package",
"should",
"be",
"installed",
"to",
"in",
"this",
"virtualenv",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L87-L100 |
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | find_DST | def find_DST():
"""Find where this package should be installed to.
"""
if SYS_NAME == "Windows":
return os.path.join(site.getsitepackages()[1], PKG_NAME)
elif SYS_NAME in ["Darwin", "Linux"]:
return os.path.join(site.getsitepackages()[0], PKG_NAME) | python | def find_DST():
"""Find where this package should be installed to.
"""
if SYS_NAME == "Windows":
return os.path.join(site.getsitepackages()[1], PKG_NAME)
elif SYS_NAME in ["Darwin", "Linux"]:
return os.path.join(site.getsitepackages()[0], PKG_NAME) | [
"def",
"find_DST",
"(",
")",
":",
"if",
"SYS_NAME",
"==",
"\"Windows\"",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"site",
".",
"getsitepackages",
"(",
")",
"[",
"1",
"]",
",",
"PKG_NAME",
")",
"elif",
"SYS_NAME",
"in",
"[",
"\"Darwin\"",
",",
"\"Linux\"",
"]",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"site",
".",
"getsitepackages",
"(",
")",
"[",
"0",
"]",
",",
"PKG_NAME",
")"
] | Find where this package should be installed to. | [
"Find",
"where",
"this",
"package",
"should",
"be",
"installed",
"to",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L103-L109 |
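The `[1]`-on-Windows / `[0]`-on-POSIX indexing above mirrors what `site.getsitepackages()` returns on each platform; a quick probe (sample outputs are illustrative):

```python
import site
import sys

print(sys.platform)
print(site.getsitepackages())
# POSIX (index 0):   ['/usr/local/lib/python3.11/site-packages']
# Windows (index 1): ['C:\\Python311', 'C:\\Python311\\Lib\\site-packages']
```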
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | md5_of_file | def md5_of_file(abspath):
"""Md5 value of a file.
"""
chunk_size = 1024 * 1024
m = hashlib.md5()
with open(abspath, "rb") as f:
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest() | python | def md5_of_file(abspath):
"""Md5 value of a file.
"""
chunk_size = 1024 * 1024
m = hashlib.md5()
with open(abspath, "rb") as f:
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest() | [
"def",
"md5_of_file",
"(",
"abspath",
")",
":",
"chunk_size",
"=",
"1024",
"*",
"1024",
"m",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"abspath",
",",
"\"rb\"",
")",
"as",
"f",
":",
"while",
"True",
":",
"data",
"=",
"f",
".",
"read",
"(",
"chunk_size",
")",
"if",
"not",
"data",
":",
"break",
"m",
".",
"update",
"(",
"data",
")",
"return",
"m",
".",
"hexdigest",
"(",
")"
] | Md5 value of a file. | [
"Md5",
"value",
"of",
"a",
"file",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L118-L129 |
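The chunked loop keeps memory flat on large files; Python 3.11+ ships the same pattern as `hashlib.file_digest` (file path illustrative):

```python
import hashlib

with open('somefile.bin', 'rb') as f:  # any binary file
    digest = hashlib.file_digest(f, 'md5').hexdigest()  # Python 3.11+
print(digest)
```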
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | check_need_install | def check_need_install():
"""Check if installed package are exactly the same to this one.
By checking md5 value of all files.
"""
need_install_flag = False
for root, _, basename_list in os.walk(SRC):
if os.path.basename(root) != "__pycache__":
for basename in basename_list:
src = os.path.join(root, basename)
dst = os.path.join(root.replace(SRC, DST), basename)
if os.path.exists(dst):
if md5_of_file(src) != md5_of_file(dst):
return True
else:
return True
return need_install_flag | python | def check_need_install():
"""Check if installed package are exactly the same to this one.
By checking md5 value of all files.
"""
need_install_flag = False
for root, _, basename_list in os.walk(SRC):
if os.path.basename(root) != "__pycache__":
for basename in basename_list:
src = os.path.join(root, basename)
dst = os.path.join(root.replace(SRC, DST), basename)
if os.path.exists(dst):
if md5_of_file(src) != md5_of_file(dst):
return True
else:
return True
return need_install_flag | [
"def",
"check_need_install",
"(",
")",
":",
"need_install_flag",
"=",
"False",
"for",
"root",
",",
"_",
",",
"basename_list",
"in",
"os",
".",
"walk",
"(",
"SRC",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"!=",
"\"__pycache__\"",
":",
"for",
"basename",
"in",
"basename_list",
":",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"basename",
")",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
".",
"replace",
"(",
"SRC",
",",
"DST",
")",
",",
"basename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dst",
")",
":",
"if",
"md5_of_file",
"(",
"src",
")",
"!=",
"md5_of_file",
"(",
"dst",
")",
":",
"return",
"True",
"else",
":",
"return",
"True",
"return",
"need_install_flag"
] | Check if installed package are exactly the same to this one.
By checking md5 value of all files. | [
"Check",
"if",
"installed",
"package",
"are",
"exactly",
"the",
"same",
"to",
"this",
"one",
".",
"By",
"checking",
"md5",
"value",
"of",
"all",
"files",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L132-L147 |
MacHu-GWU/dataIO-project | dataIO/zzz_manual_install.py | install | def install():
"""Manual install main script.
"""
# check installed package
print("Compare to '%s' ..." % DST)
need_install_flag = check_need_install()
if not need_install_flag:
print(" package is up-to-date, no need to install.")
return
print("Difference been found, start installing ...")
# remove __pycache__ folder and *.pyc file
print("Remove *.pyc file ...")
pyc_folder_list = list()
for root, _, basename_list in os.walk(SRC):
if os.path.basename(root) == "__pycache__":
pyc_folder_list.append(root)
for folder in pyc_folder_list:
shutil.rmtree(folder)
print(" all *.pyc file has been removed.")
# install this package to all python version
print("Uninstall %s from %s ..." % (PKG_NAME, DST))
try:
shutil.rmtree(DST)
print(" Successfully uninstall %s" % PKG_NAME)
except Exception as e:
print(" %s" % e)
print("Install %s to %s ..." % (PKG_NAME, DST))
shutil.copytree(SRC, DST)
print(" Complete!") | python | def install():
"""Manual install main script.
"""
# check installed package
print("Compare to '%s' ..." % DST)
need_install_flag = check_need_install()
if not need_install_flag:
print(" package is up-to-date, no need to install.")
return
print("Difference been found, start installing ...")
# remove __pycache__ folder and *.pyc file
print("Remove *.pyc file ...")
pyc_folder_list = list()
for root, _, basename_list in os.walk(SRC):
if os.path.basename(root) == "__pycache__":
pyc_folder_list.append(root)
for folder in pyc_folder_list:
shutil.rmtree(folder)
print(" all *.pyc file has been removed.")
# install this package to all python version
print("Uninstall %s from %s ..." % (PKG_NAME, DST))
try:
shutil.rmtree(DST)
print(" Successfully uninstall %s" % PKG_NAME)
except Exception as e:
print(" %s" % e)
print("Install %s to %s ..." % (PKG_NAME, DST))
shutil.copytree(SRC, DST)
print(" Complete!") | [
"def",
"install",
"(",
")",
":",
"# check installed package",
"print",
"(",
"\"Compare to '%s' ...\"",
"%",
"DST",
")",
"need_install_flag",
"=",
"check_need_install",
"(",
")",
"if",
"not",
"need_install_flag",
":",
"print",
"(",
"\" package is up-to-date, no need to install.\"",
")",
"return",
"print",
"(",
"\"Difference been found, start installing ...\"",
")",
"# remove __pycache__ folder and *.pyc file",
"print",
"(",
"\"Remove *.pyc file ...\"",
")",
"pyc_folder_list",
"=",
"list",
"(",
")",
"for",
"root",
",",
"_",
",",
"basename_list",
"in",
"os",
".",
"walk",
"(",
"SRC",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"==",
"\"__pycache__\"",
":",
"pyc_folder_list",
".",
"append",
"(",
"root",
")",
"for",
"folder",
"in",
"pyc_folder_list",
":",
"shutil",
".",
"rmtree",
"(",
"folder",
")",
"print",
"(",
"\" all *.pyc file has been removed.\"",
")",
"# install this package to all python version",
"print",
"(",
"\"Uninstall %s from %s ...\"",
"%",
"(",
"PKG_NAME",
",",
"DST",
")",
")",
"try",
":",
"shutil",
".",
"rmtree",
"(",
"DST",
")",
"print",
"(",
"\" Successfully uninstall %s\"",
"%",
"PKG_NAME",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\" %s\"",
"%",
"e",
")",
"print",
"(",
"\"Install %s to %s ...\"",
"%",
"(",
"PKG_NAME",
",",
"DST",
")",
")",
"shutil",
".",
"copytree",
"(",
"SRC",
",",
"DST",
")",
"print",
"(",
"\" Complete!\"",
")"
] | Manual install main script. | [
"Manual",
"install",
"main",
"script",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L150-L182 |
lablup/backend.ai-common | src/ai/backend/common/docker.py | login | async def login(
sess: aiohttp.ClientSession,
registry_url: yarl.URL,
credentials: dict,
scope: str) -> dict:
'''
Authorize to the docker registry using the given credentials and token scope, and returns a set
of required aiohttp.ClientSession.request() keyword arguments for further API requests.
Some registry servers only rely on HTTP Basic Authentication without token-based access controls
(usually via nginx proxy). We do support them also. :)
'''
if credentials.get('username') and credentials.get('password'):
basic_auth = aiohttp.BasicAuth(
credentials['username'], credentials['password'],
)
else:
basic_auth = None
realm = registry_url / 'token' # fallback
service = 'registry' # fallback
async with sess.get(registry_url / 'v2/', auth=basic_auth) as resp:
ping_status = resp.status
www_auth_header = resp.headers.get('WWW-Authenticate')
if www_auth_header:
match = re.search(r'realm="([^"]+)"', www_auth_header)
if match:
realm = match.group(1)
match = re.search(r'service="([^"]+)"', www_auth_header)
if match:
service = match.group(1)
if ping_status == 200:
log.debug('docker-registry: {0} -> basic-auth', registry_url)
return {'auth': basic_auth, 'headers': {}}
elif ping_status == 404:
raise RuntimeError(f'Unsupported docker registry: {registry_url}! '
'(API v2 not implemented)')
elif ping_status == 401:
params = {
'scope': scope,
'offline_token': 'true',
'client_id': 'docker',
'service': service,
}
async with sess.get(realm, params=params, auth=basic_auth) as resp:
log.debug('docker-registry: {0} -> {1}', registry_url, realm)
if resp.status == 200:
data = json.loads(await resp.read())
token = data.get('token', None)
return {'auth': None, 'headers': {
'Authorization': f'Bearer {token}'
}}
raise RuntimeError(f'authentication for docker registry '
f'{registry_url} failed') | python | async def login(
sess: aiohttp.ClientSession,
registry_url: yarl.URL,
credentials: dict,
scope: str) -> dict:
'''
Authorize to the docker registry using the given credentials and token scope, and returns a set
of required aiohttp.ClientSession.request() keyword arguments for further API requests.
Some registry servers only rely on HTTP Basic Authentication without token-based access controls
(usually via nginx proxy). We do support them also. :)
'''
if credentials.get('username') and credentials.get('password'):
basic_auth = aiohttp.BasicAuth(
credentials['username'], credentials['password'],
)
else:
basic_auth = None
realm = registry_url / 'token' # fallback
service = 'registry' # fallback
async with sess.get(registry_url / 'v2/', auth=basic_auth) as resp:
ping_status = resp.status
www_auth_header = resp.headers.get('WWW-Authenticate')
if www_auth_header:
match = re.search(r'realm="([^"]+)"', www_auth_header)
if match:
realm = match.group(1)
match = re.search(r'service="([^"]+)"', www_auth_header)
if match:
service = match.group(1)
if ping_status == 200:
log.debug('docker-registry: {0} -> basic-auth', registry_url)
return {'auth': basic_auth, 'headers': {}}
elif ping_status == 404:
raise RuntimeError(f'Unsupported docker registry: {registry_url}! '
'(API v2 not implemented)')
elif ping_status == 401:
params = {
'scope': scope,
'offline_token': 'true',
'client_id': 'docker',
'service': service,
}
async with sess.get(realm, params=params, auth=basic_auth) as resp:
log.debug('docker-registry: {0} -> {1}', registry_url, realm)
if resp.status == 200:
data = json.loads(await resp.read())
token = data.get('token', None)
return {'auth': None, 'headers': {
'Authorization': f'Bearer {token}'
}}
raise RuntimeError('authentication for docker registry '
'f{registry_url} failed') | [
"async",
"def",
"login",
"(",
"sess",
":",
"aiohttp",
".",
"ClientSession",
",",
"registry_url",
":",
"yarl",
".",
"URL",
",",
"credentials",
":",
"dict",
",",
"scope",
":",
"str",
")",
"->",
"dict",
":",
"if",
"credentials",
".",
"get",
"(",
"'username'",
")",
"and",
"credentials",
".",
"get",
"(",
"'password'",
")",
":",
"basic_auth",
"=",
"aiohttp",
".",
"BasicAuth",
"(",
"credentials",
"[",
"'username'",
"]",
",",
"credentials",
"[",
"'password'",
"]",
",",
")",
"else",
":",
"basic_auth",
"=",
"None",
"realm",
"=",
"registry_url",
"/",
"'token'",
"# fallback",
"service",
"=",
"'registry'",
"# fallback",
"async",
"with",
"sess",
".",
"get",
"(",
"registry_url",
"/",
"'v2/'",
",",
"auth",
"=",
"basic_auth",
")",
"as",
"resp",
":",
"ping_status",
"=",
"resp",
".",
"status",
"www_auth_header",
"=",
"resp",
".",
"headers",
".",
"get",
"(",
"'WWW-Authenticate'",
")",
"if",
"www_auth_header",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'realm=\"([^\"]+)\"'",
",",
"www_auth_header",
")",
"if",
"match",
":",
"realm",
"=",
"match",
".",
"group",
"(",
"1",
")",
"match",
"=",
"re",
".",
"search",
"(",
"r'service=\"([^\"]+)\"'",
",",
"www_auth_header",
")",
"if",
"match",
":",
"service",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"ping_status",
"==",
"200",
":",
"log",
".",
"debug",
"(",
"'docker-registry: {0} -> basic-auth'",
",",
"registry_url",
")",
"return",
"{",
"'auth'",
":",
"basic_auth",
",",
"'headers'",
":",
"{",
"}",
"}",
"elif",
"ping_status",
"==",
"404",
":",
"raise",
"RuntimeError",
"(",
"f'Unsupported docker registry: {registry_url}! '",
"'(API v2 not implemented)'",
")",
"elif",
"ping_status",
"==",
"401",
":",
"params",
"=",
"{",
"'scope'",
":",
"scope",
",",
"'offline_token'",
":",
"'true'",
",",
"'client_id'",
":",
"'docker'",
",",
"'service'",
":",
"service",
",",
"}",
"async",
"with",
"sess",
".",
"get",
"(",
"realm",
",",
"params",
"=",
"params",
",",
"auth",
"=",
"basic_auth",
")",
"as",
"resp",
":",
"log",
".",
"debug",
"(",
"'docker-registry: {0} -> {1}'",
",",
"registry_url",
",",
"realm",
")",
"if",
"resp",
".",
"status",
"==",
"200",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"await",
"resp",
".",
"read",
"(",
")",
")",
"token",
"=",
"data",
".",
"get",
"(",
"'token'",
",",
"None",
")",
"return",
"{",
"'auth'",
":",
"None",
",",
"'headers'",
":",
"{",
"'Authorization'",
":",
"f'Bearer {token}'",
"}",
"}",
"raise",
"RuntimeError",
"(",
"'authentication for docker registry '",
"'f{registry_url} failed'",
")"
] | Authorize to the docker registry using the given credentials and token scope, and returns a set
of required aiohttp.ClientSession.request() keyword arguments for further API requests.
Some registry servers only rely on HTTP Basic Authentication without token-based access controls
(usually via nginx proxy). We do support them also. :) | [
"Authorize",
"to",
"the",
"docker",
"registry",
"using",
"the",
"given",
"credentials",
"and",
"token",
"scope",
"and",
"returns",
"a",
"set",
"of",
"required",
"aiohttp",
".",
"ClientSession",
".",
"request",
"()",
"keyword",
"arguments",
"for",
"further",
"API",
"requests",
"."
] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/docker.py#L33-L85 |
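The regexes in `login()` parse a Docker-registry challenge header; the header below is the well-known Docker Hub shape (sample values, not taken from this row):

```python
import re

www_auth_header = ('Bearer realm="https://auth.docker.io/token",'
                   'service="registry.docker.io"')

realm = re.search(r'realm="([^"]+)"', www_auth_header).group(1)
service = re.search(r'service="([^"]+)"', www_auth_header).group(1)
print(realm)    # https://auth.docker.io/token
print(service)  # registry.docker.io
```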
ska-sa/katversion | katversion/version.py | is_git | def is_git(path):
"""Return True if this is a git repo."""
try:
repo_dir = run_cmd(path, 'git', 'rev-parse', '--git-dir')
return True if repo_dir else False
except (OSError, RuntimeError):
return False | python | def is_git(path):
"""Return True if this is a git repo."""
try:
repo_dir = run_cmd(path, 'git', 'rev-parse', '--git-dir')
return True if repo_dir else False
except (OSError, RuntimeError):
return False | [
"def",
"is_git",
"(",
"path",
")",
":",
"try",
":",
"repo_dir",
"=",
"run_cmd",
"(",
"path",
",",
"'git'",
",",
"'rev-parse'",
",",
"'--git-dir'",
")",
"return",
"True",
"if",
"repo_dir",
"else",
"False",
"except",
"(",
"OSError",
",",
"RuntimeError",
")",
":",
"return",
"False"
] | Return True if this is a git repo. | [
"Return",
"True",
"if",
"this",
"is",
"a",
"git",
"repo",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L45-L51 |
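A standalone version of the same repo check using only the standard library (`run_cmd` above is katversion's own wrapper, whose definition is not part of this row):

```python
import subprocess

def is_git_repo(path):
    try:
        subprocess.run(['git', 'rev-parse', '--git-dir'], cwd=path,
                       check=True, capture_output=True)
        return True
    except (OSError, subprocess.CalledProcessError):
        return False

print(is_git_repo('.'))
```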
ska-sa/katversion | katversion/version.py | date_version | def date_version(scm=None):
"""Generate a version string based on the SCM type and the date."""
dt = str(time.strftime('%Y%m%d%H%M'))
if scm:
version = "0.0+unknown.{0}.{1}".format(scm, dt)
else:
version = "0.0+unknown." + dt
return version | python | def date_version(scm=None):
"""Generate a version string based on the SCM type and the date."""
dt = str(time.strftime('%Y%m%d%H%M'))
if scm:
version = "0.0+unknown.{0}.{1}".format(scm, dt)
else:
version = "0.0+unknown." + dt
return version | [
"def",
"date_version",
"(",
"scm",
"=",
"None",
")",
":",
"dt",
"=",
"str",
"(",
"time",
".",
"strftime",
"(",
"'%Y%m%d%H%M'",
")",
")",
"if",
"scm",
":",
"version",
"=",
"\"0.0+unknown.{0}.{1}\"",
".",
"format",
"(",
"scm",
",",
"dt",
")",
"else",
":",
"version",
"=",
"\"0.0+unknown.\"",
"+",
"dt",
"return",
"version"
] | Generate a version string based on the SCM type and the date. | [
"Generate",
"a",
"version",
"string",
"based",
"on",
"the",
"SCM",
"type",
"and",
"the",
"date",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L63-L70 |
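An illustration of the fallback strings date_version produces; the timestamps shown are hypothetical:

from katversion.version import date_version  # import path assumed

print(date_version())           # e.g. '0.0+unknown.201602081715'
print(date_version(scm='svn'))  # e.g. '0.0+unknown.svn.201402031023'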
ska-sa/katversion | katversion/version.py | get_git_cleaned_branch_name | def get_git_cleaned_branch_name(path):
"""Get the git branch name of the current HEAD in path. The branch name is
scrubbed to conform to PEP-440.
PEP-440 Local version identifiers shall only consist out of:
- ASCII letters ( [a-zA-Z] )
- ASCII digits ( [0-9] )
- periods ( . )
https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
Parameters
----------
path: str
The path to run git commands in.
"""
# Get name of current branch (or 'HEAD' for a detached HEAD)
branch_name = run_cmd(path, 'git', 'rev-parse', '--abbrev-ref', 'HEAD')
branch_name = re.sub(r"[^A-Za-z0-9]+", ".", branch_name.strip())
return branch_name | python | def get_git_cleaned_branch_name(path):
"""Get the git branch name of the current HEAD in path. The branch name is
scrubbed to conform to PEP-440.
PEP-440 Local version identifiers shall only consist out of:
- ASCII letters ( [a-zA-Z] )
- ASCII digits ( [0-9] )
- periods ( . )
https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
Parameters
----------
path: str
The path to run git commands in.
"""
# Get name of current branch (or 'HEAD' for a detached HEAD)
branch_name = run_cmd(path, 'git', 'rev-parse', '--abbrev-ref', 'HEAD')
branch_name = re.sub(r"[^A-Za-z0-9]+", ".", branch_name.strip())
return branch_name | [
"def",
"get_git_cleaned_branch_name",
"(",
"path",
")",
":",
"# Get name of current branch (or 'HEAD' for a detached HEAD)",
"branch_name",
"=",
"run_cmd",
"(",
"path",
",",
"'git'",
",",
"'rev-parse'",
",",
"'--abbrev-ref'",
",",
"'HEAD'",
")",
"branch_name",
"=",
"re",
".",
"sub",
"(",
"r\"[^A-Za-z0-9]+\"",
",",
"\".\"",
",",
"branch_name",
".",
"strip",
"(",
")",
")",
"return",
"branch_name"
] | Get the git branch name of the current HEAD in path. The branch name is
scrubbed to conform to PEP-440.
PEP-440 Local version identifiers shall only consist out of:
- ASCII letters ( [a-zA-Z] )
- ASCII digits ( [0-9] )
- periods ( . )
https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
Parameters
----------
path: str
The path to run git commands in. | [
"Get",
"the",
"git",
"branch",
"name",
"of",
"the",
"current",
"HEAD",
"in",
"path",
".",
"The",
"branch",
"name",
"is",
"scrubbed",
"to",
"conform",
"to",
"PEP",
"-",
"440",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L73-L91 |
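The PEP 440 scrub inside get_git_cleaned_branch_name reduces to one re.sub call; a standalone sketch with a made-up branch name:

import re

branch = 'feature/JIRA-123_fix'  # hypothetical branch name
# Collapse every run of non-alphanumeric characters into a single period.
print(re.sub(r"[^A-Za-z0-9]+", ".", branch.strip()))  # -> 'feature.JIRA.123.fix'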
ska-sa/katversion | katversion/version.py | get_git_version | def get_git_version(path):
"""Get the GIT version."""
branch_name = get_git_cleaned_branch_name(path)
# Determine whether working copy is dirty (i.e. contains modified files)
mods = run_cmd(path, 'git', 'status', '--porcelain', '--untracked-files=no')
dirty = '.dirty' if mods else ''
# Get a list of all commits on branch, with corresponding branch/tag refs
# Each line looks something like: "d3e4d42 (HEAD, master, tag: v0.1)"
git_output = run_cmd(path, 'git', 'log', '--pretty="%h%d"')
commits = git_output.strip().replace('"', '').split('\n')
num_commits_since_branch = len(commits)
# Short hash of the latest commit
short_commit_name = commits[0].partition(' ')[0]
    # A valid version is a sequence of dotted numbers optionally prefixed by 'v'
valid_version = re.compile(r'^v?([\.\d]+)$')
def tagged_version(commit):
"""First tag on commit that is valid version, as a list of numbers."""
refs = commit.partition(' ')[2]
for ref in refs.lstrip('(').rstrip(')').split(', '):
if ref.startswith('tag: '):
tag = ref[5:].lower()
found = valid_version.match(tag)
if found:
return [int(v) for v in found.group(1).split('.') if v]
return []
# Walk back along branch and find first valid tagged version (or use 0.0)
for commit in commits:
version_numbers = tagged_version(commit)
if version_numbers:
break
else:
version_numbers = [0, 0]
# It is a release if current commit has a version tag (and dir is clean)
release = (commit == commits[0]) and not dirty
if not release:
# We are working towards the next (minor) release according to PEP 440
version_numbers[-1] += 1
version = '.'.join([str(v) for v in version_numbers])
if not release:
# Development version contains extra embellishments
version = ("%s.dev%d+%s.%s%s" % (version, num_commits_since_branch,
branch_name, short_commit_name, dirty))
return version | python | def get_git_version(path):
"""Get the GIT version."""
branch_name = get_git_cleaned_branch_name(path)
# Determine whether working copy is dirty (i.e. contains modified files)
mods = run_cmd(path, 'git', 'status', '--porcelain', '--untracked-files=no')
dirty = '.dirty' if mods else ''
# Get a list of all commits on branch, with corresponding branch/tag refs
# Each line looks something like: "d3e4d42 (HEAD, master, tag: v0.1)"
git_output = run_cmd(path, 'git', 'log', '--pretty="%h%d"')
commits = git_output.strip().replace('"', '').split('\n')
num_commits_since_branch = len(commits)
# Short hash of the latest commit
short_commit_name = commits[0].partition(' ')[0]
    # A valid version is a sequence of dotted numbers optionally prefixed by 'v'
valid_version = re.compile(r'^v?([\.\d]+)$')
def tagged_version(commit):
"""First tag on commit that is valid version, as a list of numbers."""
refs = commit.partition(' ')[2]
for ref in refs.lstrip('(').rstrip(')').split(', '):
if ref.startswith('tag: '):
tag = ref[5:].lower()
found = valid_version.match(tag)
if found:
return [int(v) for v in found.group(1).split('.') if v]
return []
# Walk back along branch and find first valid tagged version (or use 0.0)
for commit in commits:
version_numbers = tagged_version(commit)
if version_numbers:
break
else:
version_numbers = [0, 0]
# It is a release if current commit has a version tag (and dir is clean)
release = (commit == commits[0]) and not dirty
if not release:
# We are working towards the next (minor) release according to PEP 440
version_numbers[-1] += 1
version = '.'.join([str(v) for v in version_numbers])
if not release:
# Development version contains extra embellishments
version = ("%s.dev%d+%s.%s%s" % (version, num_commits_since_branch,
branch_name, short_commit_name, dirty))
return version | [
"def",
"get_git_version",
"(",
"path",
")",
":",
"branch_name",
"=",
"get_git_cleaned_branch_name",
"(",
"path",
")",
"# Determine whether working copy is dirty (i.e. contains modified files)",
"mods",
"=",
"run_cmd",
"(",
"path",
",",
"'git'",
",",
"'status'",
",",
"'--porcelain'",
",",
"'--untracked-files=no'",
")",
"dirty",
"=",
"'.dirty'",
"if",
"mods",
"else",
"''",
"# Get a list of all commits on branch, with corresponding branch/tag refs",
"# Each line looks something like: \"d3e4d42 (HEAD, master, tag: v0.1)\"",
"git_output",
"=",
"run_cmd",
"(",
"path",
",",
"'git'",
",",
"'log'",
",",
"'--pretty=\"%h%d\"'",
")",
"commits",
"=",
"git_output",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"split",
"(",
"'\\n'",
")",
"num_commits_since_branch",
"=",
"len",
"(",
"commits",
")",
"# Short hash of the latest commit",
"short_commit_name",
"=",
"commits",
"[",
"0",
"]",
".",
"partition",
"(",
"' '",
")",
"[",
"0",
"]",
"# A valid version is sequence of dotted numbers optionally prefixed by 'v'",
"valid_version",
"=",
"re",
".",
"compile",
"(",
"r'^v?([\\.\\d]+)$'",
")",
"def",
"tagged_version",
"(",
"commit",
")",
":",
"\"\"\"First tag on commit that is valid version, as a list of numbers.\"\"\"",
"refs",
"=",
"commit",
".",
"partition",
"(",
"' '",
")",
"[",
"2",
"]",
"for",
"ref",
"in",
"refs",
".",
"lstrip",
"(",
"'('",
")",
".",
"rstrip",
"(",
"')'",
")",
".",
"split",
"(",
"', '",
")",
":",
"if",
"ref",
".",
"startswith",
"(",
"'tag: '",
")",
":",
"tag",
"=",
"ref",
"[",
"5",
":",
"]",
".",
"lower",
"(",
")",
"found",
"=",
"valid_version",
".",
"match",
"(",
"tag",
")",
"if",
"found",
":",
"return",
"[",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"found",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"'.'",
")",
"if",
"v",
"]",
"return",
"[",
"]",
"# Walk back along branch and find first valid tagged version (or use 0.0)",
"for",
"commit",
"in",
"commits",
":",
"version_numbers",
"=",
"tagged_version",
"(",
"commit",
")",
"if",
"version_numbers",
":",
"break",
"else",
":",
"version_numbers",
"=",
"[",
"0",
",",
"0",
"]",
"# It is a release if current commit has a version tag (and dir is clean)",
"release",
"=",
"(",
"commit",
"==",
"commits",
"[",
"0",
"]",
")",
"and",
"not",
"dirty",
"if",
"not",
"release",
":",
"# We are working towards the next (minor) release according to PEP 440",
"version_numbers",
"[",
"-",
"1",
"]",
"+=",
"1",
"version",
"=",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"version_numbers",
"]",
")",
"if",
"not",
"release",
":",
"# Development version contains extra embellishments",
"version",
"=",
"(",
"\"%s.dev%d+%s.%s%s\"",
"%",
"(",
"version",
",",
"num_commits_since_branch",
",",
"branch_name",
",",
"short_commit_name",
",",
"dirty",
")",
")",
"return",
"version"
] | Get the GIT version. | [
"Get",
"the",
"GIT",
"version",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L94-L137 |
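The tag-recognition rule used by get_git_version can be exercised in isolation; the tags below are fabricated:

import re

valid_version = re.compile(r'^v?([\.\d]+)$')
for tag in ('v0.1', '1.2.3', 'v2beta'):
    found = valid_version.match(tag.lower())
    # 'v2beta' fails the match and would be skipped as a version tag.
    numbers = [int(v) for v in found.group(1).split('.') if v] if found else None
    print(tag, numbers)  # v0.1 -> [0, 1], 1.2.3 -> [1, 2, 3], v2beta -> None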
ska-sa/katversion | katversion/version.py | get_version_from_scm | def get_version_from_scm(path=None):
"""Get the current version string of this package using SCM tool.
Parameters
----------
path : None or string, optional
The SCM checkout path (default is current directory)
Returns
-------
version : string
The version string for this package
"""
if is_git(path):
return 'git', get_git_version(path)
elif is_svn(path):
return 'svn', get_svn_version(path)
return None, None | python | def get_version_from_scm(path=None):
"""Get the current version string of this package using SCM tool.
Parameters
----------
path : None or string, optional
The SCM checkout path (default is current directory)
Returns
-------
version : string
The version string for this package
"""
if is_git(path):
return 'git', get_git_version(path)
elif is_svn(path):
return 'svn', get_svn_version(path)
return None, None | [
"def",
"get_version_from_scm",
"(",
"path",
"=",
"None",
")",
":",
"if",
"is_git",
"(",
"path",
")",
":",
"return",
"'git'",
",",
"get_git_version",
"(",
"path",
")",
"elif",
"is_svn",
"(",
"path",
")",
":",
"return",
"'svn'",
",",
"get_svn_version",
"(",
"path",
")",
"return",
"None",
",",
"None"
] | Get the current version string of this package using SCM tool.
Parameters
----------
path : None or string, optional
The SCM checkout path (default is current directory)
Returns
-------
version : string
The version string for this package | [
"Get",
"the",
"current",
"version",
"string",
"of",
"this",
"package",
"using",
"SCM",
"tool",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L147-L165 |
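A usage sketch for get_version_from_scm; the returned version string is illustrative:

from katversion.version import get_version_from_scm  # import path assumed

scm, version = get_version_from_scm('.')
# ('git', '0.1.dev7+master.gb91ffa6') inside a git tree, (None, None) otherwise.
print(scm, version)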
ska-sa/katversion | katversion/version.py | get_version_from_module | def get_version_from_module(module):
"""Use pkg_resources to get version of installed module by name."""
if module is not None:
# Setup.py will not pass in a module, but creating __version__ from
# __init__ will.
module = str(module).split('.', 1)[0]
try:
package = pkg_resources.get_distribution(module)
return package.version
except pkg_resources.DistributionNotFound:
# So there you have it the module is not installed.
pass | python | def get_version_from_module(module):
"""Use pkg_resources to get version of installed module by name."""
if module is not None:
# Setup.py will not pass in a module, but creating __version__ from
# __init__ will.
module = str(module).split('.', 1)[0]
try:
package = pkg_resources.get_distribution(module)
return package.version
except pkg_resources.DistributionNotFound:
# So there you have it the module is not installed.
pass | [
"def",
"get_version_from_module",
"(",
"module",
")",
":",
"if",
"module",
"is",
"not",
"None",
":",
"# Setup.py will not pass in a module, but creating __version__ from",
"# __init__ will.",
"module",
"=",
"str",
"(",
"module",
")",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"try",
":",
"package",
"=",
"pkg_resources",
".",
"get_distribution",
"(",
"module",
")",
"return",
"package",
".",
"version",
"except",
"pkg_resources",
".",
"DistributionNotFound",
":",
"# So there you have it the module is not installed.",
"pass"
] | Use pkg_resources to get version of installed module by name. | [
"Use",
"pkg_resources",
"to",
"get",
"version",
"of",
"installed",
"module",
"by",
"name",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L168-L179 |
ska-sa/katversion | katversion/version.py | _must_decode | def _must_decode(value):
"""Copied from pkginfo 1.4.1, _compat module."""
if type(value) is bytes:
try:
return value.decode('utf-8')
except UnicodeDecodeError:
return value.decode('latin1')
return value | python | def _must_decode(value):
"""Copied from pkginfo 1.4.1, _compat module."""
if type(value) is bytes:
try:
return value.decode('utf-8')
except UnicodeDecodeError:
return value.decode('latin1')
return value | [
"def",
"_must_decode",
"(",
"value",
")",
":",
"if",
"type",
"(",
"value",
")",
"is",
"bytes",
":",
"try",
":",
"return",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"return",
"value",
".",
"decode",
"(",
"'latin1'",
")",
"return",
"value"
] | Copied from pkginfo 1.4.1, _compat module. | [
"Copied",
"from",
"pkginfo",
"1",
".",
"4",
".",
"1",
"_compat",
"module",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L182-L189 |
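A quick check of all three paths through _must_decode; the byte strings are chosen so one is valid UTF-8 and the other only valid latin1:

from katversion.version import _must_decode  # import path assumed

assert _must_decode(b'caf\xc3\xa9') == u'café'  # valid UTF-8
assert _must_decode(b'caf\xe9') == u'café'      # UTF-8 fails, latin1 fallback
assert _must_decode(u'café') == u'café'         # non-bytes pass through unchanged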
ska-sa/katversion | katversion/version.py | get_version_from_unpacked_sdist | def get_version_from_unpacked_sdist(path):
"""Assume path points to unpacked source distribution and get version."""
# This is a condensed version of the relevant code in pkginfo 1.4.1
try:
with open(os.path.join(path, 'PKG-INFO')) as f:
data = f.read()
except IOError:
# Could not load path as an unpacked sdist as it had no PKG-INFO file
return
fp = StringIO(_must_decode(data))
msg = Parser().parse(fp)
value = msg.get('Version')
if value != 'UNKNOWN':
return value | python | def get_version_from_unpacked_sdist(path):
"""Assume path points to unpacked source distribution and get version."""
# This is a condensed version of the relevant code in pkginfo 1.4.1
try:
with open(os.path.join(path, 'PKG-INFO')) as f:
data = f.read()
except IOError:
# Could not load path as an unpacked sdist as it had no PKG-INFO file
return
fp = StringIO(_must_decode(data))
msg = Parser().parse(fp)
value = msg.get('Version')
if value != 'UNKNOWN':
return value | [
"def",
"get_version_from_unpacked_sdist",
"(",
"path",
")",
":",
"# This is a condensed version of the relevant code in pkginfo 1.4.1",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'PKG-INFO'",
")",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"except",
"IOError",
":",
"# Could not load path as an unpacked sdist as it had no PKG-INFO file",
"return",
"fp",
"=",
"StringIO",
"(",
"_must_decode",
"(",
"data",
")",
")",
"msg",
"=",
"Parser",
"(",
")",
".",
"parse",
"(",
"fp",
")",
"value",
"=",
"msg",
".",
"get",
"(",
"'Version'",
")",
"if",
"value",
"!=",
"'UNKNOWN'",
":",
"return",
"value"
] | Assume path points to unpacked source distribution and get version. | [
"Assume",
"path",
"points",
"to",
"unpacked",
"source",
"distribution",
"and",
"get",
"version",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L192-L205 |
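The PKG-INFO parsing inside get_version_from_unpacked_sdist is plain stdlib email parsing; a self-contained sketch with a fabricated metadata body:

from email.parser import Parser
from io import StringIO

data = "Metadata-Version: 1.1\nName: example\nVersion: 1.2.3\n"  # fabricated PKG-INFO
msg = Parser().parse(StringIO(data))
print(msg.get('Version'))  # -> '1.2.3'; the function returns None when this is 'UNKNOWN'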
ska-sa/katversion | katversion/version.py | get_version_from_file | def get_version_from_file(path):
"""Find the VERSION_FILE and return its contents.
Returns
-------
version : string or None
"""
filename = os.path.join(path, VERSION_FILE)
if not os.path.isfile(filename):
# Look in the parent directory of path instead.
filename = os.path.join(os.path.dirname(path), VERSION_FILE)
if not os.path.isfile(filename):
filename = ''
if filename:
with open(filename) as fh:
version = fh.readline().strip()
if version:
return version | python | def get_version_from_file(path):
"""Find the VERSION_FILE and return its contents.
Returns
-------
version : string or None
"""
filename = os.path.join(path, VERSION_FILE)
if not os.path.isfile(filename):
# Look in the parent directory of path instead.
filename = os.path.join(os.path.dirname(path), VERSION_FILE)
if not os.path.isfile(filename):
filename = ''
if filename:
with open(filename) as fh:
version = fh.readline().strip()
if version:
return version | [
"def",
"get_version_from_file",
"(",
"path",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"VERSION_FILE",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"# Look in the parent directory of path instead.",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"VERSION_FILE",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"filename",
"=",
"''",
"if",
"filename",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"fh",
":",
"version",
"=",
"fh",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"if",
"version",
":",
"return",
"version"
] | Find the VERSION_FILE and return its contents.
Returns
-------
version : string or None | [
"Find",
"the",
"VERSION_FILE",
"and",
"return",
"its",
"contents",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L208-L227 |
ska-sa/katversion | katversion/version.py | normalised | def normalised(version):
"""Normalise a version string according to PEP 440, if possible."""
norm_version = pkg_resources.parse_version(version)
if not isinstance(norm_version, tuple):
# Let setuptools (>= 8) do the normalisation
return str(norm_version)
else:
# Homegrown normalisation for older setuptools (< 8)
public, sep, local = version.lower().partition('+')
# Remove leading 'v' from public version
if len(public) >= 2:
if public[0] == 'v' and public[1] in '0123456789':
public = public[1:]
# Turn all chars except alphanumerics into periods in local version
local = NON_ALPHANUMERIC.sub('.', local)
return public + sep + local | python | def normalised(version):
"""Normalise a version string according to PEP 440, if possible."""
norm_version = pkg_resources.parse_version(version)
if not isinstance(norm_version, tuple):
# Let setuptools (>= 8) do the normalisation
return str(norm_version)
else:
# Homegrown normalisation for older setuptools (< 8)
public, sep, local = version.lower().partition('+')
# Remove leading 'v' from public version
if len(public) >= 2:
if public[0] == 'v' and public[1] in '0123456789':
public = public[1:]
# Turn all chars except alphanumerics into periods in local version
local = NON_ALPHANUMERIC.sub('.', local)
return public + sep + local | [
"def",
"normalised",
"(",
"version",
")",
":",
"norm_version",
"=",
"pkg_resources",
".",
"parse_version",
"(",
"version",
")",
"if",
"not",
"isinstance",
"(",
"norm_version",
",",
"tuple",
")",
":",
"# Let setuptools (>= 8) do the normalisation",
"return",
"str",
"(",
"norm_version",
")",
"else",
":",
"# Homegrown normalisation for older setuptools (< 8)",
"public",
",",
"sep",
",",
"local",
"=",
"version",
".",
"lower",
"(",
")",
".",
"partition",
"(",
"'+'",
")",
"# Remove leading 'v' from public version",
"if",
"len",
"(",
"public",
")",
">=",
"2",
":",
"if",
"public",
"[",
"0",
"]",
"==",
"'v'",
"and",
"public",
"[",
"1",
"]",
"in",
"'0123456789'",
":",
"public",
"=",
"public",
"[",
"1",
":",
"]",
"# Turn all chars except alphanumerics into periods in local version",
"local",
"=",
"NON_ALPHANUMERIC",
".",
"sub",
"(",
"'.'",
",",
"local",
")",
"return",
"public",
"+",
"sep",
"+",
"local"
] | Normalise a version string according to PEP 440, if possible. | [
"Normalise",
"a",
"version",
"string",
"according",
"to",
"PEP",
"440",
"if",
"possible",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L230-L245 |
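On setuptools >= 8 the normalisation above is delegated to parse_version; an illustrative round trip (older setuptools would return a tuple instead, triggering the homegrown branch):

import pkg_resources

# Leading 'v' is dropped and the local segment is lower-cased with
# separators turned into periods: 'V1.2+Foo-Bar' -> '1.2+foo.bar'.
print(str(pkg_resources.parse_version('V1.2+Foo-Bar')))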
ska-sa/katversion | katversion/version.py | get_version | def get_version(path=None, module=None):
"""Return the version string.
This function ensures that the version string complies with PEP 440.
The format of our version string is:
- for RELEASE builds:
<major>.<minor>
e.g.
0.1
2.4
- for DEVELOPMENT builds:
<major>.<minor>.dev<num_branch_commits> \
+<branch_name>.g<short_git_sha>[.dirty]
e.g.
1.1.dev34+new.shiny.feature.gfa973da
0.1.dev7+master.gb91ffa6.dirty
- for UNKNOWN builds:
0.0+unknown.[<scm_type>.]<date>
e.g.
0.0+unknown.svn.201402031023
0.0+unknown.201602081715
The <major>.<minor> substring for development builds will be that of the
NEXT (minor) release, in order to allow proper Python version ordering.
Parameters
----------
path : None or string, optional
A file or directory to use to find the SCM or sdist checkout path
(default is the current working directory)
module : None or string, optional
Get version via module name (e.g. __name__ variable), which takes
precedence over path if provided (ignore otherwise)
Returns
-------
version: string
A string representation of the package version
"""
# Check the module option first.
version = get_version_from_module(module)
if version:
return normalised(version)
# Turn path into a valid directory (default is current directory)
if path is None:
path = os.getcwd()
path = os.path.abspath(path)
if os.path.exists(path) and not os.path.isdir(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
raise ValueError('No such package source directory: %r' % (path,))
# Check for an sdist in the process of being installed by pip.
version = get_version_from_unpacked_sdist(path)
if version:
return normalised(version)
# Check the SCM.
scm, version = get_version_from_scm(path)
if version:
return normalised(version)
# Check if there is a katversion file in the given path.
version = get_version_from_file(path)
if version:
return normalised(version)
# None of the above got a version so we will make one up based on the date.
return normalised(date_version(scm)) | python | def get_version(path=None, module=None):
"""Return the version string.
This function ensures that the version string complies with PEP 440.
The format of our version string is:
- for RELEASE builds:
<major>.<minor>
e.g.
0.1
2.4
- for DEVELOPMENT builds:
<major>.<minor>.dev<num_branch_commits> \
+<branch_name>.g<short_git_sha>[.dirty]
e.g.
1.1.dev34+new.shiny.feature.gfa973da
0.1.dev7+master.gb91ffa6.dirty
- for UNKNOWN builds:
0.0+unknown.[<scm_type>.]<date>
e.g.
0.0+unknown.svn.201402031023
0.0+unknown.201602081715
The <major>.<minor> substring for development builds will be that of the
NEXT (minor) release, in order to allow proper Python version ordering.
Parameters
----------
path : None or string, optional
A file or directory to use to find the SCM or sdist checkout path
(default is the current working directory)
module : None or string, optional
Get version via module name (e.g. __name__ variable), which takes
precedence over path if provided (ignore otherwise)
Returns
-------
version: string
A string representation of the package version
"""
# Check the module option first.
version = get_version_from_module(module)
if version:
return normalised(version)
# Turn path into a valid directory (default is current directory)
if path is None:
path = os.getcwd()
path = os.path.abspath(path)
if os.path.exists(path) and not os.path.isdir(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
raise ValueError('No such package source directory: %r' % (path,))
# Check for an sdist in the process of being installed by pip.
version = get_version_from_unpacked_sdist(path)
if version:
return normalised(version)
# Check the SCM.
scm, version = get_version_from_scm(path)
if version:
return normalised(version)
# Check if there is a katversion file in the given path.
version = get_version_from_file(path)
if version:
return normalised(version)
# None of the above got a version so we will make one up based on the date.
return normalised(date_version(scm)) | [
"def",
"get_version",
"(",
"path",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"# Check the module option first.",
"version",
"=",
"get_version_from_module",
"(",
"module",
")",
"if",
"version",
":",
"return",
"normalised",
"(",
"version",
")",
"# Turn path into a valid directory (default is current directory)",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"getcwd",
"(",
")",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'No such package source directory: %r'",
"%",
"(",
"path",
",",
")",
")",
"# Check for an sdist in the process of being installed by pip.",
"version",
"=",
"get_version_from_unpacked_sdist",
"(",
"path",
")",
"if",
"version",
":",
"return",
"normalised",
"(",
"version",
")",
"# Check the SCM.",
"scm",
",",
"version",
"=",
"get_version_from_scm",
"(",
"path",
")",
"if",
"version",
":",
"return",
"normalised",
"(",
"version",
")",
"# Check if there is a katversion file in the given path.",
"version",
"=",
"get_version_from_file",
"(",
"path",
")",
"if",
"version",
":",
"return",
"normalised",
"(",
"version",
")",
"# None of the above got a version so we will make one up based on the date.",
"return",
"normalised",
"(",
"date_version",
"(",
"scm",
")",
")"
] | Return the version string.
This function ensures that the version string complies with PEP 440.
The format of our version string is:
- for RELEASE builds:
<major>.<minor>
e.g.
0.1
2.4
- for DEVELOPMENT builds:
<major>.<minor>.dev<num_branch_commits> \
+<branch_name>.g<short_git_sha>[.dirty]
e.g.
1.1.dev34+new.shiny.feature.gfa973da
0.1.dev7+master.gb91ffa6.dirty
- for UNKNOWN builds:
0.0+unknown.[<scm_type>.]<date>
e.g.
0.0+unknown.svn.201402031023
0.0+unknown.201602081715
The <major>.<minor> substring for development builds will be that of the
NEXT (minor) release, in order to allow proper Python version ordering.
Parameters
----------
path : None or string, optional
A file or directory to use to find the SCM or sdist checkout path
(default is the current working directory)
module : None or string, optional
Get version via module name (e.g. __name__ variable), which takes
precedence over path if provided (ignore otherwise)
Returns
-------
version: string
A string representation of the package version | [
"Return",
"the",
"version",
"string",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L248-L321 |
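Typical top-level usage of get_version; both outputs are illustrative:

from katversion.version import get_version  # import path assumed

print(get_version('.'))
# '0.1' in a clean checkout sitting on tag v0.1;
# '0.2.dev7+master.gb91ffa6.dirty' in a modified development tree.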
ska-sa/katversion | katversion/version.py | _sane_version_list | def _sane_version_list(version):
"""Ensure the major and minor are int.
Parameters
----------
version: list
Version components
Returns
-------
version: list
List of components where the first two components have been sanitised
"""
v0 = str(version[0])
if v0:
# Test if the major is a number.
try:
v0 = v0.lstrip("v").lstrip("V")
# Handle the common case where tags have v before major.
v0 = int(v0)
except ValueError:
v0 = None
if v0 is None:
version = [0, 0] + version
else:
version[0] = v0
try:
# Test if the minor is a number.
version[1] = int(version[1])
except ValueError:
# Insert Minor 0.
version = [version[0], 0] + version[1:]
return version | python | def _sane_version_list(version):
"""Ensure the major and minor are int.
Parameters
----------
version: list
Version components
Returns
-------
version: list
List of components where the first two components have been sanitised
"""
v0 = str(version[0])
if v0:
# Test if the major is a number.
try:
v0 = v0.lstrip("v").lstrip("V")
# Handle the common case where tags have v before major.
v0 = int(v0)
except ValueError:
v0 = None
if v0 is None:
version = [0, 0] + version
else:
version[0] = v0
try:
# Test if the minor is a number.
version[1] = int(version[1])
except ValueError:
# Insert Minor 0.
version = [version[0], 0] + version[1:]
return version | [
"def",
"_sane_version_list",
"(",
"version",
")",
":",
"v0",
"=",
"str",
"(",
"version",
"[",
"0",
"]",
")",
"if",
"v0",
":",
"# Test if the major is a number.",
"try",
":",
"v0",
"=",
"v0",
".",
"lstrip",
"(",
"\"v\"",
")",
".",
"lstrip",
"(",
"\"V\"",
")",
"# Handle the common case where tags have v before major.",
"v0",
"=",
"int",
"(",
"v0",
")",
"except",
"ValueError",
":",
"v0",
"=",
"None",
"if",
"v0",
"is",
"None",
":",
"version",
"=",
"[",
"0",
",",
"0",
"]",
"+",
"version",
"else",
":",
"version",
"[",
"0",
"]",
"=",
"v0",
"try",
":",
"# Test if the minor is a number.",
"version",
"[",
"1",
"]",
"=",
"int",
"(",
"version",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"# Insert Minor 0.",
"version",
"=",
"[",
"version",
"[",
"0",
"]",
",",
"0",
"]",
"+",
"version",
"[",
"1",
":",
"]",
"return",
"version"
] | Ensure the major and minor are int.
Parameters
----------
version: list
Version components
Returns
-------
version: list
List of components where the first two components have been sanitised | [
"Ensure",
"the",
"major",
"and",
"minor",
"are",
"int",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L324-L360 |
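Illustrative inputs and outputs for _sane_version_list, one per branch:

from katversion.version import _sane_version_list  # import path assumed

print(_sane_version_list(['v1', '2', 'dev3']))  # -> [1, 2, 'dev3'] (leading 'v' stripped)
print(_sane_version_list(['1', 'rc1']))         # -> [1, 0, 'rc1'] (minor 0 inserted)
print(_sane_version_list(['beta']))             # -> [0, 0, 'beta'] (0.0 prepended)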
ska-sa/katversion | katversion/version.py | get_version_list | def get_version_list(path=None, module=None):
"""Return the version information as a tuple.
This uses get_version and breaks the string up. Would make more sense if
the version was a tuple throughout katversion.
"""
major = 0
minor = 0
patch = '' # PEP440 calls this prerelease, postrelease or devrelease
ver = get_version(path, module)
if ver is not None:
ver_segments = _sane_version_list(ver.split(".", 2))
major = ver_segments[0]
minor = ver_segments[1]
patch = ".".join(ver_segments[2:]) # Rejoin the .
# Return None as first field, makes substitution easier in next step.
return [None, major, minor, patch] | python | def get_version_list(path=None, module=None):
"""Return the version information as a tuple.
This uses get_version and breaks the string up. Would make more sense if
the version was a tuple throughout katversion.
"""
major = 0
minor = 0
patch = '' # PEP440 calls this prerelease, postrelease or devrelease
ver = get_version(path, module)
if ver is not None:
ver_segments = _sane_version_list(ver.split(".", 2))
major = ver_segments[0]
minor = ver_segments[1]
patch = ".".join(ver_segments[2:]) # Rejoin the .
# Return None as first field, makes substitution easier in next step.
return [None, major, minor, patch] | [
"def",
"get_version_list",
"(",
"path",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"major",
"=",
"0",
"minor",
"=",
"0",
"patch",
"=",
"''",
"# PEP440 calls this prerelease, postrelease or devrelease",
"ver",
"=",
"get_version",
"(",
"path",
",",
"module",
")",
"if",
"ver",
"is",
"not",
"None",
":",
"ver_segments",
"=",
"_sane_version_list",
"(",
"ver",
".",
"split",
"(",
"\".\"",
",",
"2",
")",
")",
"major",
"=",
"ver_segments",
"[",
"0",
"]",
"minor",
"=",
"ver_segments",
"[",
"1",
"]",
"patch",
"=",
"\".\"",
".",
"join",
"(",
"ver_segments",
"[",
"2",
":",
"]",
")",
"# Rejoin the .",
"# Return None as first field, makes substitution easier in next step.",
"return",
"[",
"None",
",",
"major",
",",
"minor",
",",
"patch",
"]"
] | Return the version information as a tuple.
This uses get_version and breaks the string up. Would make more sense if
the version was a tuple throughout katversion. | [
"Return",
"the",
"version",
"information",
"as",
"a",
"tuple",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L363-L381 |
ska-sa/katversion | katversion/version.py | build_info | def build_info(name, path=None, module=None):
"""Return the build info tuple."""
verlist = get_version_list(path, module)
verlist[0] = name
return tuple(verlist) | python | def build_info(name, path=None, module=None):
"""Return the build info tuple."""
verlist = get_version_list(path, module)
verlist[0] = name
return tuple(verlist) | [
"def",
"build_info",
"(",
"name",
",",
"path",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"verlist",
"=",
"get_version_list",
"(",
"path",
",",
"module",
")",
"verlist",
"[",
"0",
"]",
"=",
"name",
"return",
"tuple",
"(",
"verlist",
")"
] | Return the build info tuple. | [
"Return",
"the",
"build",
"info",
"tuple",
"."
] | train | https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L384-L388 |
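build_info only substitutes the package name for the leading None of get_version_list; a hypothetical call:

from katversion.version import build_info  # import path assumed

name, major, minor, patch = build_info('mypkg', path='.')
print(name, major, minor, patch)  # e.g. mypkg 0 2 dev7+master.gb91ffa6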
rainwoodman/kdcount | kdcount/cluster.py | fof.find | def find(self, groupid):
""" return all of the indices of particles of groupid """
return self.indices[self.offset[groupid]
:self.offset[groupid]+ self.length[groupid]] | python | def find(self, groupid):
""" return all of the indices of particles of groupid """
return self.indices[self.offset[groupid]
:self.offset[groupid]+ self.length[groupid]] | [
"def",
"find",
"(",
"self",
",",
"groupid",
")",
":",
"return",
"self",
".",
"indices",
"[",
"self",
".",
"offset",
"[",
"groupid",
"]",
":",
"self",
".",
"offset",
"[",
"groupid",
"]",
"+",
"self",
".",
"length",
"[",
"groupid",
"]",
"]"
] | return all of the indices of particles of groupid | [
"return",
"all",
"of",
"the",
"indices",
"of",
"particles",
"of",
"groupid"
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L79-L82 |
rainwoodman/kdcount | kdcount/cluster.py | fof.sum | def sum(self, weights=None):
""" return the sum of weights of each object """
if weights is None:
weights = self.data.weights
return utils.bincount(self.labels, weights, self.N) | python | def sum(self, weights=None):
""" return the sum of weights of each object """
if weights is None:
weights = self.data.weights
return utils.bincount(self.labels, weights, self.N) | [
"def",
"sum",
"(",
"self",
",",
"weights",
"=",
"None",
")",
":",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"self",
".",
"data",
".",
"weights",
"return",
"utils",
".",
"bincount",
"(",
"self",
".",
"labels",
",",
"weights",
",",
"self",
".",
"N",
")"
] | return the sum of weights of each object | [
"return",
"the",
"sum",
"of",
"weights",
"of",
"each",
"object"
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L84-L88 |
rainwoodman/kdcount | kdcount/cluster.py | fof.center | def center(self, weights=None):
""" return the center of each object """
if weights is None:
weights = self.data.weights
mass = utils.bincount(self.labels, weights, self.N)
cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8')
for d in range(self.data.pos.shape[-1]):
cp[..., d] = utils.bincount(self.labels, weights *
self.data.pos[..., d], self.N)
cp[..., d] /= mass
return cp | python | def center(self, weights=None):
""" return the center of each object """
if weights is None:
weights = self.data.weights
mass = utils.bincount(self.labels, weights, self.N)
cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8')
for d in range(self.data.pos.shape[-1]):
cp[..., d] = utils.bincount(self.labels, weights *
self.data.pos[..., d], self.N)
cp[..., d] /= mass
return cp | [
"def",
"center",
"(",
"self",
",",
"weights",
"=",
"None",
")",
":",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"self",
".",
"data",
".",
"weights",
"mass",
"=",
"utils",
".",
"bincount",
"(",
"self",
".",
"labels",
",",
"weights",
",",
"self",
".",
"N",
")",
"cp",
"=",
"numpy",
".",
"empty",
"(",
"(",
"len",
"(",
"mass",
")",
",",
"self",
".",
"data",
".",
"pos",
".",
"shape",
"[",
"-",
"1",
"]",
")",
",",
"'f8'",
")",
"for",
"d",
"in",
"range",
"(",
"self",
".",
"data",
".",
"pos",
".",
"shape",
"[",
"-",
"1",
"]",
")",
":",
"cp",
"[",
"...",
",",
"d",
"]",
"=",
"utils",
".",
"bincount",
"(",
"self",
".",
"labels",
",",
"weights",
"*",
"self",
".",
"data",
".",
"pos",
"[",
"...",
",",
"d",
"]",
",",
"self",
".",
"N",
")",
"cp",
"[",
"...",
",",
"d",
"]",
"/=",
"mass",
"return",
"cp"
] | return the center of each object | [
"return",
"the",
"center",
"of",
"each",
"object"
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L90-L100 |
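The weighted-centroid computation in fof.center is a per-dimension bincount; an equivalent standalone sketch using numpy.bincount in place of the package's utils.bincount (assumed to share its semantics):

import numpy

labels = numpy.array([0, 0, 1, 1])                           # group id per particle
pos = numpy.array([[0., 0.], [2., 0.], [1., 1.], [1., 3.]])
weights = numpy.ones(len(labels))

mass = numpy.bincount(labels, weights)
centers = numpy.empty((len(mass), pos.shape[-1]))
for d in range(pos.shape[-1]):
    centers[:, d] = numpy.bincount(labels, weights * pos[:, d]) / mass
print(centers)  # [[1. 0.], [1. 2.]]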
BlueBrain/nat | nat/paramSample.py | ParamSample.getParamValues | def getParamValues(self, paramName=None, paramId=None, useOnlyValids=True):
"""
Return the rows of sampleDF that are associated with the parameter
specified in paramName.
"""
if not paramName is None:
if not paramId is None:
if getParameterTypeNameFromID(paramId) != paramName:
raise ValueError("Parameters paramId and paramName "
+ "passed to ParamSample.getParamValues() are incompatible.")
else:
if paramId is None:
raise ValueError("At least one of the attribute paramName and paramId "
+ "passed to ParamSample.getParamValues() most not be None.")
paramName = getParameterTypeNameFromID(paramId)
df = self.sampleDF
if useOnlyValids:
df = df[df["isValid"] == True]
df.loc[:, "paramNames"] = [getParameterTypeNameFromID(param.typeId) for param in df["obj_parameter"]]
return df[df["paramNames"] == paramName] | python | def getParamValues(self, paramName=None, paramId=None, useOnlyValids=True):
"""
Return the rows of sampleDF that are associated with the parameter
specified in paramName.
"""
if not paramName is None:
if not paramId is None:
if getParameterTypeNameFromID(paramId) != paramName:
raise ValueError("Parameters paramId and paramName "
+ "passed to ParamSample.getParamValues() are incompatible.")
else:
if paramId is None:
raise ValueError("At least one of the attribute paramName and paramId "
+ "passed to ParamSample.getParamValues() most not be None.")
paramName = getParameterTypeNameFromID(paramId)
df = self.sampleDF
if useOnlyValids:
df = df[df["isValid"] == True]
df.loc[:, "paramNames"] = [getParameterTypeNameFromID(param.typeId) for param in df["obj_parameter"]]
return df[df["paramNames"] == paramName] | [
"def",
"getParamValues",
"(",
"self",
",",
"paramName",
"=",
"None",
",",
"paramId",
"=",
"None",
",",
"useOnlyValids",
"=",
"True",
")",
":",
"if",
"not",
"paramName",
"is",
"None",
":",
"if",
"not",
"paramId",
"is",
"None",
":",
"if",
"getParameterTypeNameFromID",
"(",
"paramId",
")",
"!=",
"paramName",
":",
"raise",
"ValueError",
"(",
"\"Parameters paramId and paramName \"",
"+",
"\"passed to ParamSample.getParamValues() are incompatible.\"",
")",
"else",
":",
"if",
"paramId",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"At least one of the attribute paramName and paramId \"",
"+",
"\"passed to ParamSample.getParamValues() most not be None.\"",
")",
"paramName",
"=",
"getParameterTypeNameFromID",
"(",
"paramId",
")",
"df",
"=",
"self",
".",
"sampleDF",
"if",
"useOnlyValids",
":",
"df",
"=",
"df",
"[",
"df",
"[",
"\"isValid\"",
"]",
"==",
"True",
"]",
"df",
".",
"loc",
"[",
":",
",",
"\"paramNames\"",
"]",
"=",
"[",
"getParameterTypeNameFromID",
"(",
"param",
".",
"typeId",
")",
"for",
"param",
"in",
"df",
"[",
"\"obj_parameter\"",
"]",
"]",
"return",
"df",
"[",
"df",
"[",
"\"paramNames\"",
"]",
"==",
"paramName",
"]"
] | Return the rows of sampleDF that are associated with the parameter
specified in paramName. | [
"Return",
"the",
"rows",
"of",
"sampleDF",
"that",
"are",
"associated",
"to",
"the",
"parameter",
"specified",
"in",
"paramName",
"."
] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/paramSample.py#L365-L388 |
BlueBrain/nat | nat/paramSample.py | ParamSample.interpolate | def interpolate(self, interpValues):
"""
interpValues should be a dictionary where the keys are the parameter names
for the independent variable for which interpolation should be run and
the values are the value to which the parameter should be interpolated.
"""
self.__operations.append(["interpolate", interpValues])
df = self.sampleDF
self.interpValues = interpValues
for interParamName, value in interpValues.items():
self.__report += "Interpolation of the parameters for independent variables '" \
+ interParamName + "' at value " + str(value) + ".\n"
for ind, (paramTrace, resType) in enumerate(zip(df["obj_parameter"], df["Result type"])):
if resType == "numericalTrace" and interParamName in paramTrace.indepNames:
val = paramTrace.getInterp1dValues(value, interParamName, statsToReturn=["mean"])
if isinstance(val, list):
if len(val) == 1:
val = val[0]
else:
raise ValueError("This case has not been implemented yet.")
df.loc[ind, "Values"] = float(val) | python | def interpolate(self, interpValues):
"""
interpValues should be a dictionary where the keys are the parameter names
for the independent variable for which interpolation should be run and
the values are the value to which the parameter should be interpolated.
"""
self.__operations.append(["interpolate", interpValues])
df = self.sampleDF
self.interpValues = interpValues
for interParamName, value in interpValues.items():
self.__report += "Interpolation of the parameters for independent variables '" \
+ interParamName + "' at value " + str(value) + ".\n"
for ind, (paramTrace, resType) in enumerate(zip(df["obj_parameter"], df["Result type"])):
if resType == "numericalTrace" and interParamName in paramTrace.indepNames:
val = paramTrace.getInterp1dValues(value, interParamName, statsToReturn=["mean"])
if isinstance(val, list):
if len(val) == 1:
val = val[0]
else:
raise ValueError("This case has not been implemented yet.")
df.loc[ind, "Values"] = float(val) | [
"def",
"interpolate",
"(",
"self",
",",
"interpValues",
")",
":",
"self",
".",
"__operations",
".",
"append",
"(",
"[",
"\"interpolate\"",
",",
"interpValues",
"]",
")",
"df",
"=",
"self",
".",
"sampleDF",
"self",
".",
"interpValues",
"=",
"interpValues",
"for",
"interParamName",
",",
"value",
"in",
"interpValues",
".",
"items",
"(",
")",
":",
"self",
".",
"__report",
"+=",
"\"Interpolation of the parameters for independent variables '\"",
"+",
"interParamName",
"+",
"\"' at value \"",
"+",
"str",
"(",
"value",
")",
"+",
"\".\\n\"",
"for",
"ind",
",",
"(",
"paramTrace",
",",
"resType",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"df",
"[",
"\"obj_parameter\"",
"]",
",",
"df",
"[",
"\"Result type\"",
"]",
")",
")",
":",
"if",
"resType",
"==",
"\"numericalTrace\"",
"and",
"interParamName",
"in",
"paramTrace",
".",
"indepNames",
":",
"val",
"=",
"paramTrace",
".",
"getInterp1dValues",
"(",
"value",
",",
"interParamName",
",",
"statsToReturn",
"=",
"[",
"\"mean\"",
"]",
")",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"if",
"len",
"(",
"val",
")",
"==",
"1",
":",
"val",
"=",
"val",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"This case has not been implemented yet.\"",
")",
"df",
".",
"loc",
"[",
"ind",
",",
"\"Values\"",
"]",
"=",
"float",
"(",
"val",
")"
] | interpValues should be a dictionary where the keys are the parameter names
for the independent variable for which interpolation should be run and
the values are the value to which the parameter should be interpolated. | [
"interpValues",
"should",
"be",
"a",
"dictionnary",
"where",
"the",
"keys",
"are",
"the",
"parameter",
"names",
"for",
"the",
"independant",
"variable",
"for",
"which",
"interpolation",
"should",
"be",
"run",
"and",
"the",
"values",
"are",
"the",
"value",
"to",
"which",
"the",
"parameter",
"should",
"be",
"interpolated",
"."
] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/paramSample.py#L391-L412 |
hammerlab/stanity | stanity/psisloo.py | loo_compare | def loo_compare(psisloo1, psisloo2):
"""
Compares two models using pointwise approximate leave-one-out cross validation.
For the method to be valid, the two models should have been fit on the same input data.
Parameters
-------------------
psisloo1 : Psisloo object for model1
psisloo2 : Psisloo object for model2
Returns
-------------------
Dict with two values:
diff: difference in elpd (estimated log predictive density)
between two models, where a positive value indicates
that model2 is a better fit than model1.
se_diff: estimated standard error of the difference
between model2 & model1.
"""
## TODO: confirm that dimensions for psisloo1 & psisloo2 are the same
loores = psisloo1.pointwise.join(
psisloo2.pointwise,
lsuffix = '_m1',
rsuffix = '_m2')
loores['pw_diff'] = loores.pointwise_elpd_m2 - loores.pointwise_elpd_m1
sum_elpd_diff = loores.apply(numpy.sum).pw_diff
sd_elpd_diff = loores.apply(numpy.std).pw_diff
elpd_diff = {
'diff' : sum_elpd_diff,
'se_diff' : math.sqrt(len(loores.pw_diff)) * sd_elpd_diff
}
return elpd_diff | python | def loo_compare(psisloo1, psisloo2):
"""
Compares two models using pointwise approximate leave-one-out cross validation.
For the method to be valid, the two models should have been fit on the same input data.
Parameters
-------------------
psisloo1 : Psisloo object for model1
psisloo2 : Psisloo object for model2
Returns
-------------------
Dict with two values:
diff: difference in elpd (estimated log predictive density)
between two models, where a positive value indicates
that model2 is a better fit than model1.
se_diff: estimated standard error of the difference
between model2 & model1.
"""
## TODO: confirm that dimensions for psisloo1 & psisloo2 are the same
loores = psisloo1.pointwise.join(
psisloo2.pointwise,
lsuffix = '_m1',
rsuffix = '_m2')
loores['pw_diff'] = loores.pointwise_elpd_m2 - loores.pointwise_elpd_m1
sum_elpd_diff = loores.apply(numpy.sum).pw_diff
sd_elpd_diff = loores.apply(numpy.std).pw_diff
elpd_diff = {
'diff' : sum_elpd_diff,
'se_diff' : math.sqrt(len(loores.pw_diff)) * sd_elpd_diff
}
return elpd_diff | [
"def",
"loo_compare",
"(",
"psisloo1",
",",
"psisloo2",
")",
":",
"## TODO: confirm that dimensions for psisloo1 & psisloo2 are the same",
"loores",
"=",
"psisloo1",
".",
"pointwise",
".",
"join",
"(",
"psisloo2",
".",
"pointwise",
",",
"lsuffix",
"=",
"'_m1'",
",",
"rsuffix",
"=",
"'_m2'",
")",
"loores",
"[",
"'pw_diff'",
"]",
"=",
"loores",
".",
"pointwise_elpd_m2",
"-",
"loores",
".",
"pointwise_elpd_m1",
"sum_elpd_diff",
"=",
"loores",
".",
"apply",
"(",
"numpy",
".",
"sum",
")",
".",
"pw_diff",
"sd_elpd_diff",
"=",
"loores",
".",
"apply",
"(",
"numpy",
".",
"std",
")",
".",
"pw_diff",
"elpd_diff",
"=",
"{",
"'diff'",
":",
"sum_elpd_diff",
",",
"'se_diff'",
":",
"math",
".",
"sqrt",
"(",
"len",
"(",
"loores",
".",
"pw_diff",
")",
")",
"*",
"sd_elpd_diff",
"}",
"return",
"elpd_diff"
] | Compares two models using pointwise approximate leave-one-out cross validation.
For the method to be valid, the two models should have been fit on the same input data.
Parameters
-------------------
psisloo1 : Psisloo object for model1
psisloo2 : Psisloo object for model2
Returns
-------------------
Dict with two values:
diff: difference in elpd (estimated log predictive density)
between two models, where a positive value indicates
that model2 is a better fit than model1.
se_diff: estimated standard error of the difference
between model2 & model1. | [
"Compares",
"two",
"models",
"using",
"pointwise",
"approximate",
"leave",
"-",
"one",
"-",
"out",
"cross",
"validation",
".",
"For",
"the",
"method",
"to",
"be",
"valid",
"the",
"two",
"models",
"should",
"have",
"been",
"fit",
"on",
"the",
"same",
"input",
"data",
"."
] | train | https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psisloo.py#L104-L143 |
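The elpd-difference arithmetic inside loo_compare, reproduced on fabricated pointwise values so no fitted Psisloo objects are needed:

import math
import numpy
import pandas

elpd_m1 = pandas.Series([-1.2, -0.8, -1.5, -0.9, -1.1])  # fabricated pointwise elpd
elpd_m2 = pandas.Series([-1.0, -0.7, -1.4, -0.9, -1.0])

pw_diff = elpd_m2 - elpd_m1
diff = pw_diff.sum()                                     # positive favours model2
se_diff = math.sqrt(len(pw_diff)) * numpy.std(pw_diff)   # numpy.std, as in loo_compare
print(diff, se_diff)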
hammerlab/stanity | stanity/psisloo.py | Psisloo.plot | def plot(self):
""" Graphical summary of pointwise pareto-k importance-sampling indices
Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis)
"""
seaborn.pointplot(
y = self.pointwise.pareto_k,
x = self.pointwise.index,
join = False) | python | def plot(self):
""" Graphical summary of pointwise pareto-k importance-sampling indices
Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis)
"""
seaborn.pointplot(
y = self.pointwise.pareto_k,
x = self.pointwise.index,
join = False) | [
"def",
"plot",
"(",
"self",
")",
":",
"seaborn",
".",
"pointplot",
"(",
"y",
"=",
"self",
".",
"pointwise",
".",
"pareto_k",
",",
"x",
"=",
"self",
".",
"pointwise",
".",
"index",
",",
"join",
"=",
"False",
")"
] | Graphical summary of pointwise pareto-k importance-sampling indices
Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis) | [
"Graphical",
"summary",
"of",
"pointwise",
"pareto",
"-",
"k",
"importance",
"-",
"sampling",
"indices"
] | train | https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psisloo.py#L65-L74 |
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.add_layer | def add_layer(self, formula='', thickness=np.NaN, density=np.NaN):
"""provide another way to define the layers (stack)
Parameters:
===========
formula: string
ex: 'CoAg2'
ex: 'Al'
thickness: float (in mm)
density: float (g/cm3)
"""
if formula == '':
return
_new_stack = _utilities.formula_to_dictionary(formula=formula,
thickness=thickness,
density=density,
database=self.database)
# check if density has been defined
self.__lock_density_if_defined(stack=_new_stack)
new_stack = self.__update_stack_with_isotopes_infos(stack=_new_stack)
self.stack = {**self.stack, **new_stack}
# calculate stack_sigma, layer density, atoms_per_cm3 ...
self.__math_on_stack() | python | def add_layer(self, formula='', thickness=np.NaN, density=np.NaN):
"""provide another way to define the layers (stack)
Parameters:
===========
formula: string
ex: 'CoAg2'
ex: 'Al'
thickness: float (in mm)
density: float (g/cm3)
"""
if formula == '':
return
_new_stack = _utilities.formula_to_dictionary(formula=formula,
thickness=thickness,
density=density,
database=self.database)
# check if density has been defined
self.__lock_density_if_defined(stack=_new_stack)
new_stack = self.__update_stack_with_isotopes_infos(stack=_new_stack)
self.stack = {**self.stack, **new_stack}
# calculate stack_sigma, layer density, atoms_per_cm3 ...
self.__math_on_stack() | [
"def",
"add_layer",
"(",
"self",
",",
"formula",
"=",
"''",
",",
"thickness",
"=",
"np",
".",
"NaN",
",",
"density",
"=",
"np",
".",
"NaN",
")",
":",
"if",
"formula",
"==",
"''",
":",
"return",
"_new_stack",
"=",
"_utilities",
".",
"formula_to_dictionary",
"(",
"formula",
"=",
"formula",
",",
"thickness",
"=",
"thickness",
",",
"density",
"=",
"density",
",",
"database",
"=",
"self",
".",
"database",
")",
"# check if density has been defined",
"self",
".",
"__lock_density_if_defined",
"(",
"stack",
"=",
"_new_stack",
")",
"new_stack",
"=",
"self",
".",
"__update_stack_with_isotopes_infos",
"(",
"stack",
"=",
"_new_stack",
")",
"self",
".",
"stack",
"=",
"{",
"*",
"*",
"self",
".",
"stack",
",",
"*",
"*",
"new_stack",
"}",
"# calculate stack_sigma, layer density, atoms_per_cm3 ...",
"self",
".",
"__math_on_stack",
"(",
")"
] | provide another way to define the layers (stack)
Parameters:
===========
formula: string
ex: 'CoAg2'
ex: 'Al'
thickness: float (in mm)
density: float (g/cm3) | [
"provide",
"another",
"way",
"to",
"define",
"the",
"layers",
"(",
"stack",
")"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L118-L143 |
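A hypothetical stack definition via add_layer; the formulas, thicknesses and densities are illustrative, and the default Resonance() constructor arguments are assumed:

from ImagingReso.resonance import Resonance  # import path assumed from the repo layout

o_reso = Resonance()
o_reso.add_layer(formula='CoAg2', thickness=0.025, density=9.8)  # mm, g/cm3
o_reso.add_layer(formula='Al', thickness=0.1)  # density left unset in this sketch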
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.get_isotopic_ratio | def get_isotopic_ratio(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric ratios will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
compound = str(compound)
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_compound = str(_compound)
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_isotopic_ratio(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
list_istopes = _stack[compound][element]['isotopes']['list']
list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
iso_ratio = zip(list_istopes, list_ratio)
_stoichiometric_ratio = {}
for _iso, _ratio in iso_ratio:
_stoichiometric_ratio[_iso] = _ratio
return _stoichiometric_ratio | python | def get_isotopic_ratio(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric ratios will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
compound = str(compound)
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_compound = str(_compound)
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_isotopic_ratio(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
list_istopes = _stack[compound][element]['isotopes']['list']
list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
iso_ratio = zip(list_istopes, list_ratio)
_stoichiometric_ratio = {}
for _iso, _ratio in iso_ratio:
_stoichiometric_ratio[_iso] = _ratio
return _stoichiometric_ratio | [
"def",
"get_isotopic_ratio",
"(",
"self",
",",
"compound",
"=",
"''",
",",
"element",
"=",
"''",
")",
":",
"_stack",
"=",
"self",
".",
"stack",
"compound",
"=",
"str",
"(",
"compound",
")",
"if",
"compound",
"==",
"''",
":",
"_list_compounds",
"=",
"_stack",
".",
"keys",
"(",
")",
"list_all_dict",
"=",
"{",
"}",
"for",
"_compound",
"in",
"_list_compounds",
":",
"_compound",
"=",
"str",
"(",
"_compound",
")",
"_list_element",
"=",
"_stack",
"[",
"_compound",
"]",
"[",
"'elements'",
"]",
"list_all_dict",
"[",
"_compound",
"]",
"=",
"{",
"}",
"for",
"_element",
"in",
"_list_element",
":",
"list_all_dict",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"=",
"self",
".",
"get_isotopic_ratio",
"(",
"compound",
"=",
"_compound",
",",
"element",
"=",
"_element",
")",
"return",
"list_all_dict",
"# checking compound is valid",
"list_compounds",
"=",
"_stack",
".",
"keys",
"(",
")",
"if",
"compound",
"not",
"in",
"list_compounds",
":",
"list_compounds_joined",
"=",
"', '",
".",
"join",
"(",
"list_compounds",
")",
"raise",
"ValueError",
"(",
"\"Compound '{}' could not be find in {}\"",
".",
"format",
"(",
"compound",
",",
"list_compounds_joined",
")",
")",
"# checking element is valid",
"if",
"element",
"==",
"''",
":",
"# we assume that the element and compounds names matched",
"element",
"=",
"compound",
"list_element",
"=",
"_stack",
"[",
"compound",
"]",
".",
"keys",
"(",
")",
"if",
"element",
"not",
"in",
"list_element",
":",
"list_element_joined",
"=",
"', '",
".",
"join",
"(",
"list_element",
")",
"raise",
"ValueError",
"(",
"\"Element '{}' should be any of those elements: {}\"",
".",
"format",
"(",
"element",
",",
"list_element_joined",
")",
")",
"list_istopes",
"=",
"_stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'list'",
"]",
"list_ratio",
"=",
"_stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"iso_ratio",
"=",
"zip",
"(",
"list_istopes",
",",
"list_ratio",
")",
"_stoichiometric_ratio",
"=",
"{",
"}",
"for",
"_iso",
",",
"_ratio",
"in",
"iso_ratio",
":",
"_stoichiometric_ratio",
"[",
"_iso",
"]",
"=",
"_ratio",
"return",
"_stoichiometric_ratio"
] | returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric ratios will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack | [
"returns",
"the",
"list",
"of",
"isotopes",
"for",
"the",
"element",
"of",
"the",
"compound",
"defined",
"with",
"their",
"stoichiometric",
"values"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L145-L196 |
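A minimal usage sketch for get_isotopic_ratio. The Resonance constructor and add_layer arguments below are assumptions for illustration; only the get_isotopic_ratio calls follow the record above.

from ImagingReso.resonance import Resonance

# assumed setup: energy grid and layer arguments are illustrative
o_reso = Resonance(energy_min=1, energy_max=100, energy_step=0.1)
o_reso.add_layer(formula='Ag', thickness_mm=0.5)

# with compound/element given: {isotope: ratio} for that element
print(o_reso.get_isotopic_ratio(compound='Ag', element='Ag'))
# with no arguments: nested {compound: {element: {isotope: ratio}}} for the whole stack
print(o_reso.get_isotopic_ratio())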
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.set_isotopic_ratio | def set_isotopic_ratio(self, compound='', element='', list_ratio=[]):
"""defines the new set of ratio of the compound/element and trigger the calculation to update the density
Parameters:
===========
compound: string (default is ''). Name of compound
element: string (default is ''). Name of element
list_ratio: list (default is []). list of new stoichiometric_ratio
Raises:
=======
ValueError if compound does not exist
ValueError if element does not exist
ValueError if list_ratio does not have the right format
"""
_stack = self.stack
list_compounds = _stack.keys()
if compound not in _stack.keys():
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
old_list_ratio = _stack[compound][element]['isotopes']['list']
if not (len(old_list_ratio) == len(list_ratio)):
raise ValueError("New list of ratio ({} elements) does not match old list size ({} elements!".format(len(
list_ratio), len(old_list_ratio)))
self.stack[compound][element]['isotopes']['isotopic_ratio'] = list_ratio
self.__update_molar_mass(compound=compound, element=element)
self.__update_density(compound=compound, element=element)
# update entire stack
self.__math_on_stack() | python | def set_isotopic_ratio(self, compound='', element='', list_ratio=[]):
"""defines the new set of ratio of the compound/element and trigger the calculation to update the density
Parameters:
===========
compound: string (default is ''). Name of compound
element: string (default is ''). Name of element
list_ratio: list (default is []). list of new stoichiometric_ratio
Raises:
=======
ValueError if compound does not exist
ValueError if element does not exist
ValueError if list_ratio does not have the right format
"""
_stack = self.stack
list_compounds = _stack.keys()
if compound not in _stack.keys():
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
old_list_ratio = _stack[compound][element]['isotopes']['list']
if not (len(old_list_ratio) == len(list_ratio)):
raise ValueError("New list of ratio ({} elements) does not match old list size ({} elements!".format(len(
list_ratio), len(old_list_ratio)))
self.stack[compound][element]['isotopes']['isotopic_ratio'] = list_ratio
self.__update_molar_mass(compound=compound, element=element)
self.__update_density(compound=compound, element=element)
# update entire stack
self.__math_on_stack() | [
"def",
"set_isotopic_ratio",
"(",
"self",
",",
"compound",
"=",
"''",
",",
"element",
"=",
"''",
",",
"list_ratio",
"=",
"[",
"]",
")",
":",
"_stack",
"=",
"self",
".",
"stack",
"list_compounds",
"=",
"_stack",
".",
"keys",
"(",
")",
"if",
"compound",
"not",
"in",
"_stack",
".",
"keys",
"(",
")",
":",
"list_compounds_joined",
"=",
"', '",
".",
"join",
"(",
"list_compounds",
")",
"raise",
"ValueError",
"(",
"\"Compound '{}' could not be find in {}\"",
".",
"format",
"(",
"compound",
",",
"list_compounds_joined",
")",
")",
"if",
"element",
"==",
"''",
":",
"# we assume that the element and compounds names matched",
"element",
"=",
"compound",
"list_element",
"=",
"_stack",
"[",
"compound",
"]",
".",
"keys",
"(",
")",
"if",
"element",
"not",
"in",
"list_element",
":",
"list_element_joined",
"=",
"', '",
".",
"join",
"(",
"list_element",
")",
"raise",
"ValueError",
"(",
"\"Element '{}' should be any of those elements: {}\"",
".",
"format",
"(",
"element",
",",
"list_element_joined",
")",
")",
"old_list_ratio",
"=",
"_stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'list'",
"]",
"if",
"not",
"(",
"len",
"(",
"old_list_ratio",
")",
"==",
"len",
"(",
"list_ratio",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"New list of ratio ({} elements) does not match old list size ({} elements!\"",
".",
"format",
"(",
"len",
"(",
"list_ratio",
")",
",",
"len",
"(",
"old_list_ratio",
")",
")",
")",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"=",
"list_ratio",
"self",
".",
"__update_molar_mass",
"(",
"compound",
"=",
"compound",
",",
"element",
"=",
"element",
")",
"self",
".",
"__update_density",
"(",
"compound",
"=",
"compound",
",",
"element",
"=",
"element",
")",
"# update entire stack",
"self",
".",
"__math_on_stack",
"(",
")"
] | defines the new set of ratios of the compound/element and triggers the calculation to update the density
Parameters:
===========
compound: string (default is ''). Name of compound
element: string (default is ''). Name of element
list_ratio: list (default is []). list of new stoichiometric_ratio
Raises:
=======
ValueError if compound does not exist
ValueError if element does not exist
ValueError if list_ratio does not have the right format | [
"defines",
"the",
"new",
"set",
"of",
"ratio",
"of",
"the",
"compound",
"/",
"element",
"and",
"trigger",
"the",
"calculation",
"to",
"update",
"the",
"density"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L198-L238 |
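A short follow-up sketch for set_isotopic_ratio, reusing the assumed o_reso object above; the new list must have the same length as the current isotope list, otherwise the ValueError shown in the record is raised.

current = o_reso.get_isotopic_ratio(compound='Ag', element='Ag')
new_ratios = [0.0] * len(current)
new_ratios[0] = 1.0  # fully enrich the first listed isotope; ratios should sum to 1
o_reso.set_isotopic_ratio(compound='Ag', element='Ag', list_ratio=new_ratios)
# molar mass, density and the whole signal stack are recomputed automatically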
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.get_density | def get_density(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their density
Parameters:
===========
compound: string (default is empty). If empty, all the densities will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_density(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compile, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
return _stack[compound][element]['density']['value'] | python | def get_density(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their density
Parameters:
===========
compound: string (default is empty). If empty, all the densities will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_density(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compile, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
return _stack[compound][element]['density']['value'] | [
"def",
"get_density",
"(",
"self",
",",
"compound",
"=",
"''",
",",
"element",
"=",
"''",
")",
":",
"_stack",
"=",
"self",
".",
"stack",
"if",
"compound",
"==",
"''",
":",
"_list_compounds",
"=",
"_stack",
".",
"keys",
"(",
")",
"list_all_dict",
"=",
"{",
"}",
"for",
"_compound",
"in",
"_list_compounds",
":",
"_list_element",
"=",
"_stack",
"[",
"_compound",
"]",
"[",
"'elements'",
"]",
"list_all_dict",
"[",
"_compound",
"]",
"=",
"{",
"}",
"for",
"_element",
"in",
"_list_element",
":",
"list_all_dict",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"=",
"self",
".",
"get_density",
"(",
"compound",
"=",
"_compound",
",",
"element",
"=",
"_element",
")",
"return",
"list_all_dict",
"# checking compound is valid",
"list_compounds",
"=",
"_stack",
".",
"keys",
"(",
")",
"if",
"compound",
"not",
"in",
"list_compounds",
":",
"list_compounds_joined",
"=",
"', '",
".",
"join",
"(",
"list_compounds",
")",
"raise",
"ValueError",
"(",
"\"Compound '{}' could not be find in {}\"",
".",
"format",
"(",
"compile",
",",
"list_compounds_joined",
")",
")",
"# checking element is valid",
"if",
"element",
"==",
"''",
":",
"# we assume that the element and compounds names matched",
"element",
"=",
"compound",
"list_element",
"=",
"_stack",
"[",
"compound",
"]",
".",
"keys",
"(",
")",
"if",
"element",
"not",
"in",
"list_element",
":",
"list_element_joined",
"=",
"', '",
".",
"join",
"(",
"list_element",
")",
"raise",
"ValueError",
"(",
"\"Element '{}' should be any of those elements: {}\"",
".",
"format",
"(",
"element",
",",
"list_element_joined",
")",
")",
"return",
"_stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]"
] | returns the density of the element of the compound defined
Parameters:
===========
compound: string (default is empty). If empty, all the densities will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack | [
"returns",
"the",
"list",
"of",
"isotopes",
"for",
"the",
"element",
"of",
"the",
"compound",
"defined",
"with",
"their",
"density"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L240-L281 |
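A usage sketch for get_density under the same assumed setup as above.

# single float when compound/element are given
print(o_reso.get_density(compound='Ag', element='Ag'))
# nested {compound: {element: density}} dict when called with no arguments
print(o_reso.get_density())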
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__math_on_stack | def __math_on_stack(self, used_lock=False):
"""will perform all the various update of the stack, such as populating the stack_sigma, caluclate the density of the
layers....etc. """
# populate stack_sigma (Sigma vs Energy for every element)
self.__get_sigmas()
# populate compound density (if none provided)
self.__update_layer_density()
# populate compound molar mass
# self.__update_layer_molar_mass() ### included in __calculate_atoms_per_cm3
# populate atoms_per_cm3
self.__calculate_atoms_per_cm3(used_lock=used_lock)
# calculate transmission and attenuation
self.__calculate_transmission_attenuation() | python | def __math_on_stack(self, used_lock=False):
"""will perform all the various update of the stack, such as populating the stack_sigma, caluclate the density of the
layers....etc. """
# populate stack_sigma (Sigma vs Energy for every element)
self.__get_sigmas()
# populate compound density (if none provided)
self.__update_layer_density()
# populate compound molar mass
# self.__update_layer_molar_mass() ### included in __calculate_atoms_per_cm3
# populate atoms_per_cm3
self.__calculate_atoms_per_cm3(used_lock=used_lock)
# calculate transmission and attenuation
self.__calculate_transmission_attenuation() | [
"def",
"__math_on_stack",
"(",
"self",
",",
"used_lock",
"=",
"False",
")",
":",
"# populate stack_sigma (Sigma vs Energy for every element)",
"self",
".",
"__get_sigmas",
"(",
")",
"# populate compound density (if none provided)",
"self",
".",
"__update_layer_density",
"(",
")",
"# populate compound molar mass",
"# self.__update_layer_molar_mass() ### included in __calculate_atoms_per_cm3",
"# populate atoms_per_cm3",
"self",
".",
"__calculate_atoms_per_cm3",
"(",
"used_lock",
"=",
"used_lock",
")",
"# calculate transmission and attenuation",
"self",
".",
"__calculate_transmission_attenuation",
"(",
")"
] | will perform all the various updates of the stack, such as populating the stack_sigma, calculating the density of the
layers....etc. | [
"will",
"perform",
"all",
"the",
"various",
"update",
"of",
"the",
"stack",
"such",
"as",
"populating",
"the",
"stack_sigma",
"caluclate",
"the",
"density",
"of",
"the",
"layers",
"....",
"etc",
"."
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L283-L300 |
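__calculate_transmission_attenuation is not included in this record, but the quantity the pipeline ultimately produces follows the standard exp(-sigma * N * d) attenuation law. A self-contained numpy sketch of that relation, with all values assumed:

import numpy as np

sigma_b = np.array([10.0, 80.0, 10.0])        # cross-sections in barns (assumed)
atoms_per_cm3 = 5.8e22                        # assumed number density
thickness_cm = 0.05                           # assumed layer thickness
miu_per_cm = sigma_b * 1e-24 * atoms_per_cm3  # 1 barn = 1e-24 cm^2
transmission = np.exp(-miu_per_cm * thickness_cm)
attenuation = 1.0 - transmission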
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__lock_density_if_defined | def __lock_density_if_defined(self, stack: dict):
"""lock (True) the density lock if the density has been been defined during initialization
Store the resulting dictionary into density_lock
Parameters:
===========
stack: dictionary (optional)
if not provided, the entire stack will be used
"""
if self.stack == {}:
density_lock = {}
else:
density_lock = self.density_lock
for _compound in stack.keys():
_density = stack[_compound]['density']['value']
if np.isnan(_density):
density_lock[_compound] = False
else:
density_lock[_compound] = True
self.density_lock = density_lock | python | def __lock_density_if_defined(self, stack: dict):
"""lock (True) the density lock if the density has been been defined during initialization
Store the resulting dictionary into density_lock
Parameters:
===========
stack: dictionary (optional)
if not provided, the entire stack will be used
"""
if self.stack == {}:
density_lock = {}
else:
density_lock = self.density_lock
for _compound in stack.keys():
_density = stack[_compound]['density']['value']
if np.isnan(_density):
density_lock[_compound] = False
else:
density_lock[_compound] = True
self.density_lock = density_lock | [
"def",
"__lock_density_if_defined",
"(",
"self",
",",
"stack",
":",
"dict",
")",
":",
"if",
"self",
".",
"stack",
"==",
"{",
"}",
":",
"density_lock",
"=",
"{",
"}",
"else",
":",
"density_lock",
"=",
"self",
".",
"density_lock",
"for",
"_compound",
"in",
"stack",
".",
"keys",
"(",
")",
":",
"_density",
"=",
"stack",
"[",
"_compound",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
"if",
"np",
".",
"isnan",
"(",
"_density",
")",
":",
"density_lock",
"[",
"_compound",
"]",
"=",
"False",
"else",
":",
"density_lock",
"[",
"_compound",
"]",
"=",
"True",
"self",
".",
"density_lock",
"=",
"density_lock"
] | lock (True) the density lock if the density has been defined during initialization
Store the resulting dictionary into density_lock
Parameters:
===========
stack: dictionary (optional)
if not provided, the entire stack will be used | [
"lock",
"(",
"True",
")",
"the",
"density",
"lock",
"if",
"the",
"density",
"has",
"been",
"been",
"defined",
"during",
"initialization",
"Store",
"the",
"resulting",
"dictionary",
"into",
"density_lock"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L302-L323 |
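The locking rule reduces to 'density explicitly provided -> locked'. A standalone sketch with an assumed two-layer stack:

import numpy as np

stack = {'Ag': {'density': {'value': 10.49}},    # user-provided -> locked
         'Gd': {'density': {'value': np.nan}}}   # NaN -> recomputed later
density_lock = {name: not np.isnan(layer['density']['value'])
                for name, layer in stack.items()}
# -> {'Ag': True, 'Gd': False}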
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__calculate_atoms_per_cm3 | def __calculate_atoms_per_cm3(self, used_lock=False):
"""calculate for each element, the atoms per cm3"""
stack = self.stack
_density_lock = self.density_lock
for _name_of_compound in stack.keys():
if used_lock and _density_lock[_name_of_compound]:
continue
molar_mass_layer, atoms_per_cm3_layer = _utilities.get_atoms_per_cm3_of_layer(
compound_dict=stack[_name_of_compound])
# Update layer molar mass
stack[_name_of_compound]['molar_mass'] = {'value': molar_mass_layer,
'units': 'g/mol'}
# Update atoms per cm3
stack[_name_of_compound]['atoms_per_cm3'] = atoms_per_cm3_layer
for _index, _name_of_ele in enumerate(stack[_name_of_compound]['elements']):
stack[_name_of_compound][_name_of_ele]['atoms_per_cm3'] = atoms_per_cm3_layer * \
stack[_name_of_compound][
'stoichiometric_ratio'][_index]
self.stack = stack | python | def __calculate_atoms_per_cm3(self, used_lock=False):
"""calculate for each element, the atoms per cm3"""
stack = self.stack
_density_lock = self.density_lock
for _name_of_compound in stack.keys():
if used_lock and _density_lock[_name_of_compound]:
continue
molar_mass_layer, atoms_per_cm3_layer = _utilities.get_atoms_per_cm3_of_layer(
compound_dict=stack[_name_of_compound])
# Update layer molar mass
stack[_name_of_compound]['molar_mass'] = {'value': molar_mass_layer,
'units': 'g/mol'}
# Update atoms per cm3
stack[_name_of_compound]['atoms_per_cm3'] = atoms_per_cm3_layer
for _index, _name_of_ele in enumerate(stack[_name_of_compound]['elements']):
stack[_name_of_compound][_name_of_ele]['atoms_per_cm3'] = atoms_per_cm3_layer * \
stack[_name_of_compound][
'stoichiometric_ratio'][_index]
self.stack = stack | [
"def",
"__calculate_atoms_per_cm3",
"(",
"self",
",",
"used_lock",
"=",
"False",
")",
":",
"stack",
"=",
"self",
".",
"stack",
"_density_lock",
"=",
"self",
".",
"density_lock",
"for",
"_name_of_compound",
"in",
"stack",
".",
"keys",
"(",
")",
":",
"if",
"used_lock",
"and",
"_density_lock",
"[",
"_name_of_compound",
"]",
":",
"continue",
"molar_mass_layer",
",",
"atoms_per_cm3_layer",
"=",
"_utilities",
".",
"get_atoms_per_cm3_of_layer",
"(",
"compound_dict",
"=",
"stack",
"[",
"_name_of_compound",
"]",
")",
"# Update layer molar mass",
"stack",
"[",
"_name_of_compound",
"]",
"[",
"'molar_mass'",
"]",
"=",
"{",
"'value'",
":",
"molar_mass_layer",
",",
"'units'",
":",
"'g/mol'",
"}",
"# Update atoms per cm3",
"stack",
"[",
"_name_of_compound",
"]",
"[",
"'atoms_per_cm3'",
"]",
"=",
"atoms_per_cm3_layer",
"for",
"_index",
",",
"_name_of_ele",
"in",
"enumerate",
"(",
"stack",
"[",
"_name_of_compound",
"]",
"[",
"'elements'",
"]",
")",
":",
"stack",
"[",
"_name_of_compound",
"]",
"[",
"_name_of_ele",
"]",
"[",
"'atoms_per_cm3'",
"]",
"=",
"atoms_per_cm3_layer",
"*",
"stack",
"[",
"_name_of_compound",
"]",
"[",
"'stoichiometric_ratio'",
"]",
"[",
"_index",
"]",
"self",
".",
"stack",
"=",
"stack"
] | calculate for each element, the atoms per cm3 | [
"calculate",
"for",
"each",
"element",
"the",
"atoms",
"per",
"cm3"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L396-L415 |
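_utilities.get_atoms_per_cm3_of_layer is not shown in this record; assuming the usual N = rho * N_A / M number density, the per-layer and per-element values computed above look like:

N_A = 6.02214076e23        # Avogadro constant, 1/mol
density_gcm3 = 10.49       # assumed layer density, g/cm3
molar_mass_gmol = 107.87   # assumed layer molar mass, g/mol
atoms_per_cm3_layer = density_gcm3 * N_A / molar_mass_gmol
stoichiometric_ratio = [1]                                   # single-element layer
atoms_per_cm3_element = atoms_per_cm3_layer * stoichiometric_ratio[0]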
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__update_stack_with_isotopes_infos | def __update_stack_with_isotopes_infos(self, stack: dict):
"""retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack"""
for _key in stack:
_elements = stack[_key]['elements']
for _element in _elements:
_dict = _utilities.get_isotope_dicts(element=_element, database=self.database)
stack[_key][_element] = _dict
stack = self.__fill_missing_keys(stack=stack)
return stack | python | def __update_stack_with_isotopes_infos(self, stack: dict):
"""retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack"""
for _key in stack:
_elements = stack[_key]['elements']
for _element in _elements:
_dict = _utilities.get_isotope_dicts(element=_element, database=self.database)
stack[_key][_element] = _dict
stack = self.__fill_missing_keys(stack=stack)
return stack | [
"def",
"__update_stack_with_isotopes_infos",
"(",
"self",
",",
"stack",
":",
"dict",
")",
":",
"for",
"_key",
"in",
"stack",
":",
"_elements",
"=",
"stack",
"[",
"_key",
"]",
"[",
"'elements'",
"]",
"for",
"_element",
"in",
"_elements",
":",
"_dict",
"=",
"_utilities",
".",
"get_isotope_dicts",
"(",
"element",
"=",
"_element",
",",
"database",
"=",
"self",
".",
"database",
")",
"stack",
"[",
"_key",
"]",
"[",
"_element",
"]",
"=",
"_dict",
"stack",
"=",
"self",
".",
"__fill_missing_keys",
"(",
"stack",
"=",
"stack",
")",
"return",
"stack"
] | retrieve the isotopes, isotopes file names, mass and atomic_ratio from each element in stack | [
"retrieve",
"the",
"isotopes",
"isotopes",
"file",
"names",
"mass",
"and",
"atomic_ratio",
"from",
"each",
"element",
"in",
"stack"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L432-L441 |
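A standalone sketch of the traversal pattern this method applies; get_isotope_dicts is the real helper, while the stack literal and lookup payload here are illustrative:

def enrich(stack, lookup):
    # attach a per-element info dict next to each layer's 'elements' list
    for layer_name in stack:
        for element in stack[layer_name]['elements']:
            stack[layer_name][element] = lookup(element)
    return stack

demo = enrich({'AgI': {'elements': ['Ag', 'I']}},
              lookup=lambda ele: {'isotopes': 'looked up for ' + ele})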
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__update_layer_density | def __update_layer_density(self):
"""calculate or update the layer density"""
_stack = self.stack
_density_lock = self.density_lock
list_compound = _stack.keys()
for _key in list_compound:
if _density_lock[_key]:
continue
_list_ratio = _stack[_key]['stoichiometric_ratio']
_list_density = []
for _element in _stack[_key]['elements']:
_list_density.append(_stack[_key][_element]['density']['value'])
_compound_density = _utilities.get_compound_density(list_density=_list_density,
list_ratio=_list_ratio)
_stack[_key]['density']['value'] = _compound_density
self.stack = _stack | python | def __update_layer_density(self):
"""calculate or update the layer density"""
_stack = self.stack
_density_lock = self.density_lock
list_compound = _stack.keys()
for _key in list_compound:
if _density_lock[_key]:
continue
_list_ratio = _stack[_key]['stoichiometric_ratio']
_list_density = []
for _element in _stack[_key]['elements']:
_list_density.append(_stack[_key][_element]['density']['value'])
_compound_density = _utilities.get_compound_density(list_density=_list_density,
list_ratio=_list_ratio)
_stack[_key]['density']['value'] = _compound_density
self.stack = _stack | [
"def",
"__update_layer_density",
"(",
"self",
")",
":",
"_stack",
"=",
"self",
".",
"stack",
"_density_lock",
"=",
"self",
".",
"density_lock",
"list_compound",
"=",
"_stack",
".",
"keys",
"(",
")",
"for",
"_key",
"in",
"list_compound",
":",
"if",
"_density_lock",
"[",
"_key",
"]",
":",
"continue",
"_list_ratio",
"=",
"_stack",
"[",
"_key",
"]",
"[",
"'stoichiometric_ratio'",
"]",
"_list_density",
"=",
"[",
"]",
"for",
"_element",
"in",
"_stack",
"[",
"_key",
"]",
"[",
"'elements'",
"]",
":",
"_list_density",
".",
"append",
"(",
"_stack",
"[",
"_key",
"]",
"[",
"_element",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
")",
"_compound_density",
"=",
"_utilities",
".",
"get_compound_density",
"(",
"list_density",
"=",
"_list_density",
",",
"list_ratio",
"=",
"_list_ratio",
")",
"_stack",
"[",
"_key",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
"=",
"_compound_density",
"self",
".",
"stack",
"=",
"_stack"
] | calculate or update the layer density | [
"calculate",
"or",
"update",
"the",
"layer",
"density"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L443-L460 |
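get_compound_density is not part of this record; assuming it is the stoichiometry-weighted mean of the elemental densities, the per-layer computation looks like:

list_density = [10.49, 4.93]   # assumed elemental densities, g/cm3
list_ratio = [1, 1]            # stoichiometric ratios
compound_density = (sum(d * r for d, r in zip(list_density, list_ratio))
                    / sum(list_ratio))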
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__update_density | def __update_density(self, compound='', element=''):
"""Re-calculate the density of the element given due to stoichiometric changes as
well as the compound density (if density is not locked)
Parameters:
===========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_density_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_density = self.stack[compound][element]['isotopes']['density']['value']
ratio_density = zip(list_ratio, list_density)
for _ratio, _density in ratio_density:
_density_element += np.float(_ratio) * np.float(_density)
self.stack[compound][element]['density']['value'] = _density_element
_density_lock = self.density_lock
if not _density_lock[compound]:
self.__update_layer_density() | python | def __update_density(self, compound='', element=''):
"""Re-calculate the density of the element given due to stoichiometric changes as
well as the compound density (if density is not locked)
Parameters:
===========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_density_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_density = self.stack[compound][element]['isotopes']['density']['value']
ratio_density = zip(list_ratio, list_density)
for _ratio, _density in ratio_density:
_density_element += np.float(_ratio) * np.float(_density)
self.stack[compound][element]['density']['value'] = _density_element
_density_lock = self.density_lock
if not _density_lock[compound]:
self.__update_layer_density() | [
"def",
"__update_density",
"(",
"self",
",",
"compound",
"=",
"''",
",",
"element",
"=",
"''",
")",
":",
"_density_element",
"=",
"0",
"list_ratio",
"=",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"list_density",
"=",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
"ratio_density",
"=",
"zip",
"(",
"list_ratio",
",",
"list_density",
")",
"for",
"_ratio",
",",
"_density",
"in",
"ratio_density",
":",
"_density_element",
"+=",
"np",
".",
"float",
"(",
"_ratio",
")",
"*",
"np",
".",
"float",
"(",
"_density",
")",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
"=",
"_density_element",
"_density_lock",
"=",
"self",
".",
"density_lock",
"if",
"not",
"_density_lock",
"[",
"compound",
"]",
":",
"self",
".",
"__update_layer_density",
"(",
")"
] | Re-calculate the density of the element given due to stoichiometric changes as
well as the compound density (if density is not locked)
Parameters:
===========
compound: string (default is '') name of compound
element: string (default is '') name of element | [
"Re",
"-",
"calculate",
"the",
"density",
"of",
"the",
"element",
"given",
"due",
"to",
"stoichiometric",
"changes",
"as",
"well",
"as",
"the",
"compound",
"density",
"(",
"if",
"density",
"is",
"not",
"locked",
")"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L462-L481 |
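The elemental density computed above is a plain ratio-weighted sum over the isotopes; a two-isotope sketch with assumed numbers:

list_ratio = [0.518, 0.482]       # isotopic ratios (assumed)
list_density = [10.406, 10.600]   # per-isotope densities, g/cm3 (assumed)
density_element = sum(float(r) * float(d)
                      for r, d in zip(list_ratio, list_density))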
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__update_molar_mass | def __update_molar_mass(self, compound='', element=''):
"""Re-calculate the molar mass of the element given due to stoichiometric changes
Parameters:
==========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_molar_mass_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_mass = self.stack[compound][element]['isotopes']['mass']['value']
ratio_mass = zip(list_ratio, list_mass)
for _ratio, _mass in ratio_mass:
_molar_mass_element += np.float(_ratio) * np.float(_mass)
self.stack[compound][element]['molar_mass']['value'] = _molar_mass_element | python | def __update_molar_mass(self, compound='', element=''):
"""Re-calculate the molar mass of the element given due to stoichiometric changes
Parameters:
==========
compound: string (default is '') name of compound
element: string (default is '') name of element
"""
_molar_mass_element = 0
list_ratio = self.stack[compound][element]['isotopes']['isotopic_ratio']
list_mass = self.stack[compound][element]['isotopes']['mass']['value']
ratio_mass = zip(list_ratio, list_mass)
for _ratio, _mass in ratio_mass:
_molar_mass_element += np.float(_ratio) * np.float(_mass)
self.stack[compound][element]['molar_mass']['value'] = _molar_mass_element | [
"def",
"__update_molar_mass",
"(",
"self",
",",
"compound",
"=",
"''",
",",
"element",
"=",
"''",
")",
":",
"_molar_mass_element",
"=",
"0",
"list_ratio",
"=",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"list_mass",
"=",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'mass'",
"]",
"[",
"'value'",
"]",
"ratio_mass",
"=",
"zip",
"(",
"list_ratio",
",",
"list_mass",
")",
"for",
"_ratio",
",",
"_mass",
"in",
"ratio_mass",
":",
"_molar_mass_element",
"+=",
"np",
".",
"float",
"(",
"_ratio",
")",
"*",
"np",
".",
"float",
"(",
"_mass",
")",
"self",
".",
"stack",
"[",
"compound",
"]",
"[",
"element",
"]",
"[",
"'molar_mass'",
"]",
"[",
"'value'",
"]",
"=",
"_molar_mass_element"
] | Re-calculate the molar mass of the element given due to stoichiometric changes
Parameters:
==========
compound: string (default is '') name of compound
element: string (default is '') name of element | [
"Re",
"-",
"calculate",
"the",
"molar",
"mass",
"of",
"the",
"element",
"given",
"due",
"to",
"stoichiometric",
"changes"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L498-L512 |
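The same weighted-sum pattern gives the molar mass; with approximate natural abundances and masses for 107-Ag/109-Ag:

list_ratio = [0.518, 0.482]      # approximate natural abundances of 107-Ag / 109-Ag
list_mass = [106.905, 108.905]   # isotope masses, g/mol
molar_mass_element = sum(r * m for r, m in zip(list_ratio, list_mass))
# ~107.87 g/mol, the familiar atomic weight of silver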
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.__get_sigmas | def __get_sigmas(self):
"""will populate the stack_sigma dictionary with the energy and sigma array
for all the compounds/elements and isotopes"""
stack_sigma = {}
_stack = self.stack
_file_path = os.path.abspath(os.path.dirname(__file__))
_database_folder = os.path.join(_file_path, 'reference_data', self.database)
_list_compounds = _stack.keys()
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
stack_sigma[_compound] = {}
for _element in _list_element:
stack_sigma[_compound][_element] = {}
_list_isotopes = _stack[_compound][_element]['isotopes']['list']
_list_file_names = _stack[_compound][_element]['isotopes']['file_names']
_list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio']
_iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio)
stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio
# _dict_sigma_isotopes_sum = {}
_sigma_all_isotopes = 0
_energy_all_isotopes = 0
for _iso, _file, _ratio in _iso_file_ratio:
stack_sigma[_compound][_element][_iso] = {}
_file = os.path.join(_database_folder, _file)
_dict = _utilities.get_sigma(database_file_name=_file,
e_min=self.energy_min,
e_max=self.energy_max,
e_step=self.energy_step)
stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV']
stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio
stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b']
# sigma for all isotopes with their isotopic ratio
_sigma_all_isotopes += _dict['sigma_b'] * _ratio
_energy_all_isotopes += _dict['energy_eV']
# energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes
_mean_energy_all_isotopes = _energy_all_isotopes / len(_list_isotopes)
stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes
stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes
self.stack_sigma = stack_sigma | python | def __get_sigmas(self):
"""will populate the stack_sigma dictionary with the energy and sigma array
for all the compounds/elements and isotopes"""
stack_sigma = {}
_stack = self.stack
_file_path = os.path.abspath(os.path.dirname(__file__))
_database_folder = os.path.join(_file_path, 'reference_data', self.database)
_list_compounds = _stack.keys()
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
stack_sigma[_compound] = {}
for _element in _list_element:
stack_sigma[_compound][_element] = {}
_list_isotopes = _stack[_compound][_element]['isotopes']['list']
_list_file_names = _stack[_compound][_element]['isotopes']['file_names']
_list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio']
_iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio)
stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio
# _dict_sigma_isotopes_sum = {}
_sigma_all_isotopes = 0
_energy_all_isotopes = 0
for _iso, _file, _ratio in _iso_file_ratio:
stack_sigma[_compound][_element][_iso] = {}
_file = os.path.join(_database_folder, _file)
_dict = _utilities.get_sigma(database_file_name=_file,
e_min=self.energy_min,
e_max=self.energy_max,
e_step=self.energy_step)
stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV']
stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio
stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b']
# sigma for all isotopes with their isotopic ratio
_sigma_all_isotopes += _dict['sigma_b'] * _ratio
_energy_all_isotopes += _dict['energy_eV']
# energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes
_mean_energy_all_isotopes = _energy_all_isotopes / len(_list_isotopes)
stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes
stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes
self.stack_sigma = stack_sigma | [
"def",
"__get_sigmas",
"(",
"self",
")",
":",
"stack_sigma",
"=",
"{",
"}",
"_stack",
"=",
"self",
".",
"stack",
"_file_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"_database_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_file_path",
",",
"'reference_data'",
",",
"self",
".",
"database",
")",
"_list_compounds",
"=",
"_stack",
".",
"keys",
"(",
")",
"for",
"_compound",
"in",
"_list_compounds",
":",
"_list_element",
"=",
"_stack",
"[",
"_compound",
"]",
"[",
"'elements'",
"]",
"stack_sigma",
"[",
"_compound",
"]",
"=",
"{",
"}",
"for",
"_element",
"in",
"_list_element",
":",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"=",
"{",
"}",
"_list_isotopes",
"=",
"_stack",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'list'",
"]",
"_list_file_names",
"=",
"_stack",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'file_names'",
"]",
"_list_isotopic_ratio",
"=",
"_stack",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"_iso_file_ratio",
"=",
"zip",
"(",
"_list_isotopes",
",",
"_list_file_names",
",",
"_list_isotopic_ratio",
")",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"'isotopic_ratio'",
"]",
"=",
"_list_isotopic_ratio",
"# _dict_sigma_isotopes_sum = {}",
"_sigma_all_isotopes",
"=",
"0",
"_energy_all_isotpes",
"=",
"0",
"for",
"_iso",
",",
"_file",
",",
"_ratio",
"in",
"_iso_file_ratio",
":",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"_iso",
"]",
"=",
"{",
"}",
"_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_database_folder",
",",
"_file",
")",
"_dict",
"=",
"_utilities",
".",
"get_sigma",
"(",
"database_file_name",
"=",
"_file",
",",
"e_min",
"=",
"self",
".",
"energy_min",
",",
"e_max",
"=",
"self",
".",
"energy_max",
",",
"e_step",
"=",
"self",
".",
"energy_step",
")",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"_iso",
"]",
"[",
"'energy_eV'",
"]",
"=",
"_dict",
"[",
"'energy_eV'",
"]",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"_iso",
"]",
"[",
"'sigma_b'",
"]",
"=",
"_dict",
"[",
"'sigma_b'",
"]",
"*",
"_ratio",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"_iso",
"]",
"[",
"'sigma_b_raw'",
"]",
"=",
"_dict",
"[",
"'sigma_b'",
"]",
"# sigma for all isotopes with their isotopic ratio",
"_sigma_all_isotopes",
"+=",
"_dict",
"[",
"'sigma_b'",
"]",
"*",
"_ratio",
"_energy_all_isotpes",
"+=",
"_dict",
"[",
"'energy_eV'",
"]",
"# energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes",
"_mean_energy_all_isotopes",
"=",
"_energy_all_isotpes",
"/",
"len",
"(",
"_list_isotopes",
")",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"'energy_eV'",
"]",
"=",
"_mean_energy_all_isotopes",
"stack_sigma",
"[",
"_compound",
"]",
"[",
"_element",
"]",
"[",
"'sigma_b'",
"]",
"=",
"_sigma_all_isotopes",
"self",
".",
"stack_sigma",
"=",
"stack_sigma"
] | will populate the stack_sigma dictionary with the energy and sigma array
for all the compounds/elements and isotopes | [
"will",
"populate",
"the",
"stack_sigma",
"dictionary",
"with",
"the",
"energy",
"and",
"sigma",
"array",
"for",
"all",
"the",
"compound",
"/",
"element",
"and",
"isotopes"
] | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L514-L560 |
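The per-element cross-section built above is the ratio-weighted sum of the isotope cross-sections, and the shared energy axis is the mean of the per-isotope axes (which implicitly assumes every isotope was interpolated onto a grid of the same length). A standalone numpy sketch with assumed arrays:

import numpy as np

energy_1 = np.linspace(1.0, 100.0, 5)
energy_2 = np.linspace(1.0, 100.0, 5)
sigma_1 = np.array([9.0, 7.0, 5.0, 3.0, 1.0])   # barns (assumed)
sigma_2 = np.array([2.0, 2.0, 2.0, 2.0, 2.0])   # barns (assumed)
ratios = [0.7, 0.3]

sigma_element = ratios[0] * sigma_1 + ratios[1] * sigma_2
energy_element = np.mean([energy_1, energy_2], axis=0)  # averaged x-axis, as in the method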
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.plot | def plot(self, y_axis='attenuation', x_axis='energy',
logx=False, logy=False,
mixed=True, all_layers=False, all_elements=False,
all_isotopes=False, items_to_plot=None,
time_unit='us', offset_us=0., source_to_detector_m=16.,
time_resolution_us=0.16, t_start_us=1,
plotly=False, ax_mpl=None,
fmt='-', ms='2', lw='1.5', alpha=1):
# offset delay values is normal 2.99 us with NONE actual MCP delay settings
"""display the transmission or attenuation of compound, element and/or isotopes specified
Parameters:
===========
:param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number']
:type x_axis: str
:param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm']
:type y_axis: str
:param logx: True -> display x in log scale
:type logx: boolean.
:param logy: True -> display y in log scale
:type logy: boolean.
:param mixed: boolean. True -> display the total of each layer
False -> not displayed
:param all_layers: boolean. True -> display all layers
False -> not displayed
:param all_elements: boolean. True -> display all elements signal
False -> not displayed
:param all_isotopes: boolean. True -> display all isotopes signal
False -> not displayed
:param items_to_plot: array that describes what to plot
ex:
[['CoAg','Ag','107-Ag'], ['CoAg']]
if the list is empty, everything is plotted
:param time_unit: string. Must be either ['s'|'us'|'ns']
Note: this will be used only when x_axis='time'
:param offset_us: default: 0
Note: only used when x_axis='number' or 'time'
:param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
:param time_resolution_us: Note: this will be used only when x_axis='number'
:param t_start_us: when the first acquisition occurred. default: 1
Note: this will be used only when x_axis='number'
:param plotly: whether to use plotly for display.
:type plotly: bool
:param ax_mpl: matplotlib.axes to plot against
:type ax_mpl: matplotlib.axes
:param fmt: matplotlib.axes.plot kwargs
:type fmt: str
:param ms: matplotlib.axes.plot kwargs
:type ms: float
:param lw: matplotlib.axes.plot kwargs
:type lw: float
:param alpha: matplotlib.axes.plot kwargs
:type alpha: float
"""
if x_axis not in x_type_list:
raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list))
if time_unit not in time_unit_list:
raise ValueError("Please specify the time unit using one from '{}'.".format(time_unit_list))
if y_axis not in y_type_list:
raise ValueError("Please specify the y-axis type using one from '{}'.".format(y_type_list))
# figure size
# plt.figure(figsize=(8, 8))
# stack from self
_stack_signal = self.stack_signal
_stack = self.stack
_stack_sigma = self.stack_sigma
_x_axis = self.total_signal['energy_eV']
x_axis_label = None
# Creating the matplotlib graph..
if ax_mpl is None:
fig_mpl, ax_mpl = plt.subplots()
"""X-axis"""
# determine values and labels for x-axis with options from
# 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)'
if x_axis == 'energy':
x_axis_label = 'Energy (eV)'
if x_axis == 'lambda':
x_axis_label = u"Wavelength (\u212B)"
_x_axis = _utilities.ev_to_angstroms(array=_x_axis)
if x_axis == 'time':
if time_unit == 's':
x_axis_label = 'Time (s)'
_x_axis = _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
if time_unit == 'us':
x_axis_label = 'Time (us)'
_x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
if time_unit == 'ns':
x_axis_label = 'Time (ns)'
_x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}"
.format(x_axis_label, source_to_detector_m, offset_us))
if x_axis == 'number':
x_axis_label = 'Image number (#)'
_x_axis = _utilities.ev_to_image_number(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us,
time_resolution_us=time_resolution_us,
t_start_us=t_start_us)
print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}"
.format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us))
if x_axis_label is None:
raise ValueError("x_axis_label does NOT exist, please check.")
"""Y-axis"""
# determine to plot transmission or attenuation
# determine to put transmission or attenuation words for y-axis
y_axis_tag = y_axis
if y_axis == 'transmission':
y_axis_label = 'Neutron Transmission'
elif y_axis == 'attenuation':
y_axis_label = 'Neutron Attenuation'
elif y_axis == 'sigma':
y_axis_tag = 'sigma_b'
y_axis_label = 'Cross-section (barns)'
elif y_axis == 'sigma_raw':
y_axis_tag = 'sigma_b_raw'
y_axis_label = 'Cross-section (barns)'
else:
y_axis_tag = 'miu_per_cm'
y_axis_label = "Attenuation coefficient (cm\u207B\u00B9)"
if y_axis_tag[:5] == 'sigma':
mixed = False
all_layers = False
print("'y_axis='sigma'' is selected. Auto force 'mixed=False', 'all_layers=False'")
if y_axis_tag[-3:] == 'raw':
all_elements = False
print("'y_axis='sigma_raw'' is selected. Auto force 'all_elements=False'")
if y_axis_tag == 'miu_per_cm':
mixed = False
print("'y_axis='miu_per_cm'' is selected. Auto force 'mixed=False'")
# Plotting begins
if mixed:
_y_axis = self.total_signal[y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="Total")
if all_layers:
for _compound in _stack.keys():
_y_axis = _stack_signal[_compound][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_compound)
if all_elements:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
if y_axis_tag[:5] != 'sigma':
_y_axis = _stack_signal[_compound][_element][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}".format(_compound, _element))
else:
_y_axis = _stack_sigma[_compound][_element]['sigma_b']
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}".format(_compound, _element))
if all_isotopes:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
for _isotope in _stack[_compound][_element]['isotopes']['list']:
if y_axis_tag[:5] != 'sigma':
_y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}/{}".format(_compound, _element, _isotope))
else:
_y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}/{}".format(_compound, _element, _isotope))
"""Y-axis for specified items_to_plot"""
if items_to_plot is not None:
for _path_to_plot in items_to_plot:
_path_to_plot = list(_path_to_plot)
if y_axis_tag[:5] != 'sigma':
_live_path = _stack_signal
else:
_len_of_path = len(_path_to_plot)
if y_axis_tag[-3:] == 'raw':
if _len_of_path < 3:
raise ValueError("'y_axis={}' is not supported for layer or element levels '{}'.".format(
y_axis_tag, _path_to_plot[-1]))
else:
if _len_of_path < 2:
raise ValueError("'y_axis={}' is not supported for layer level '{}'.".format(
y_axis_tag, _path_to_plot[-1]))
_live_path = _stack_sigma
_label = "/".join(_path_to_plot)
while _path_to_plot:
_item = _path_to_plot.pop(0)
_live_path = _live_path[_item]
_y_axis = _live_path[y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_label)
if y_axis_tag[:5] != 'sigma' and y_axis_tag != 'miu_per_cm':
ax_mpl.set_ylim(-0.01, 1.01)
if logy is True:
ax_mpl.set_yscale('log')
if logx is True:
ax_mpl.set_xscale('log')
ax_mpl.set_xlabel(x_axis_label)
ax_mpl.set_ylabel(y_axis_label)
if not plotly:
ax_mpl.legend(loc='best')
# plt.tight_layout()
return ax_mpl
else:
fig_mpl = ax_mpl.get_figure()
plotly_fig = tls.mpl_to_plotly(fig_mpl)
plotly_fig.layout.showlegend = True
return plotly_fig | python | def plot(self, y_axis='attenuation', x_axis='energy',
logx=False, logy=False,
mixed=True, all_layers=False, all_elements=False,
all_isotopes=False, items_to_plot=None,
time_unit='us', offset_us=0., source_to_detector_m=16.,
time_resolution_us=0.16, t_start_us=1,
plotly=False, ax_mpl=None,
fmt='-', ms='2', lw='1.5', alpha=1):
# offset delay values is normal 2.99 us with NONE actual MCP delay settings
"""display the transmission or attenuation of compound, element and/or isotopes specified
Parameters:
===========
:param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number']
:type x_axis: str
:param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm']
:type y_axis: str
:param logx: True -> display x in log scale
:type logx: boolean.
:param logy: True -> display y in log scale
:type logy: boolean.
:param mixed: boolean. True -> display the total of each layer
False -> not displayed
:param all_layers: boolean. True -> display all layers
False -> not displayed
:param all_elements: boolean. True -> display all elements signal
False -> not displayed
:param all_isotopes: boolean. True -> display all isotopes signal
False -> not displayed
:param items_to_plot: array that describes what to plot
ex:
[['CoAg','Ag','107-Ag'], ['CoAg']]
if the list is empty, everything is plotted
:param time_unit: string. Must be either ['s'|'us'|'ns']
Note: this will be used only when x_axis='time'
:param offset_us: default: 0
Note: only used when x_axis='number' or 'time'
:param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
:param time_resolution_us: Note: this will be used only when x_axis='number'
:param t_start_us: when the first acquisition occurred. default: 1
Note: this will be used only when x_axis='number'
:param plotly: whether to use plotly for display.
:type plotly: bool
:param ax_mpl: matplotlib.axes to plot against
:type ax_mpl: matplotlib.axes
:param fmt: matplotlib.axes.plot kwargs
:type fmt: str
:param ms: matplotlib.axes.plot kwargs
:type ms: float
:param lw: matplotlib.axes.plot kwargs
:type lw: float
:param alpha: matplotlib.axes.plot kwargs
:type alpha: float
"""
if x_axis not in x_type_list:
raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list))
if time_unit not in time_unit_list:
raise ValueError("Please specify the time unit using one from '{}'.".format(time_unit_list))
if y_axis not in y_type_list:
raise ValueError("Please specify the y-axis type using one from '{}'.".format(y_type_list))
# figure size
# plt.figure(figsize=(8, 8))
# stack from self
_stack_signal = self.stack_signal
_stack = self.stack
_stack_sigma = self.stack_sigma
_x_axis = self.total_signal['energy_eV']
x_axis_label = None
# Creating the matplotlib graph..
if ax_mpl is None:
fig_mpl, ax_mpl = plt.subplots()
"""X-axis"""
# determine values and labels for x-axis with options from
# 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)'
if x_axis == 'energy':
x_axis_label = 'Energy (eV)'
if x_axis == 'lambda':
x_axis_label = u"Wavelength (\u212B)"
_x_axis = _utilities.ev_to_angstroms(array=_x_axis)
if x_axis == 'time':
if time_unit == 's':
x_axis_label = 'Time (s)'
_x_axis = _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
if time_unit == 'us':
x_axis_label = 'Time (us)'
_x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
if time_unit == 'ns':
x_axis_label = 'Time (ns)'
_x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}"
.format(x_axis_label, source_to_detector_m, offset_us))
if x_axis == 'number':
x_axis_label = 'Image number (#)'
_x_axis = _utilities.ev_to_image_number(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us,
time_resolution_us=time_resolution_us,
t_start_us=t_start_us)
print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}"
.format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us))
if x_axis_label is None:
raise ValueError("x_axis_label does NOT exist, please check.")
"""Y-axis"""
# determine to plot transmission or attenuation
# determine to put transmission or attenuation words for y-axis
y_axis_tag = y_axis
if y_axis == 'transmission':
y_axis_label = 'Neutron Transmission'
elif y_axis == 'attenuation':
y_axis_label = 'Neutron Attenuation'
elif y_axis == 'sigma':
y_axis_tag = 'sigma_b'
y_axis_label = 'Cross-section (barns)'
elif y_axis == 'sigma_raw':
y_axis_tag = 'sigma_b_raw'
y_axis_label = 'Cross-section (barns)'
else:
y_axis_tag = 'miu_per_cm'
y_axis_label = "Attenuation coefficient (cm\u207B\u00B9)"
if y_axis_tag[:5] == 'sigma':
mixed = False
all_layers = False
print("'y_axis='sigma'' is selected. Auto force 'mixed=False', 'all_layers=False'")
if y_axis_tag[-3:] == 'raw':
all_elements = False
print("'y_axis='sigma_raw'' is selected. Auto force 'all_elements=False'")
if y_axis_tag == 'miu_per_cm':
mixed = False
print("'y_axis='miu_per_cm'' is selected. Auto force 'mixed=False'")
# Plotting begins
if mixed:
_y_axis = self.total_signal[y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="Total")
if all_layers:
for _compound in _stack.keys():
_y_axis = _stack_signal[_compound][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_compound)
if all_elements:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
if y_axis_tag[:5] != 'sigma':
_y_axis = _stack_signal[_compound][_element][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}".format(_compound, _element))
else:
_y_axis = _stack_sigma[_compound][_element]['sigma_b']
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}".format(_compound, _element))
if all_isotopes:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
for _isotope in _stack[_compound][_element]['isotopes']['list']:
if y_axis_tag[:5] != 'sigma':
_y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}/{}".format(_compound, _element, _isotope))
else:
_y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha,
label="{}/{}/{}".format(_compound, _element, _isotope))
"""Y-axis for specified items_to_plot"""
if items_to_plot is not None:
for _path_to_plot in items_to_plot:
_path_to_plot = list(_path_to_plot)
if y_axis_tag[:5] != 'sigma':
_live_path = _stack_signal
else:
_len_of_path = len(_path_to_plot)
if y_axis_tag[-3:] == 'raw':
if _len_of_path < 3:
raise ValueError("'y_axis={}' is not supported for layer or element levels '{}'.".format(
y_axis_tag, _path_to_plot[-1]))
else:
if _len_of_path < 2:
raise ValueError("'y_axis={}' is not supported for layer level '{}'.".format(
y_axis_tag, _path_to_plot[-1]))
_live_path = _stack_sigma
_label = "/".join(_path_to_plot)
while _path_to_plot:
_item = _path_to_plot.pop(0)
_live_path = _live_path[_item]
_y_axis = _live_path[y_axis_tag]
ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_label)
if y_axis_tag[:5] != 'sigma' and y_axis_tag != 'miu_per_cm':
ax_mpl.set_ylim(-0.01, 1.01)
if logy is True:
ax_mpl.set_yscale('log')
if logx is True:
ax_mpl.set_xscale('log')
ax_mpl.set_xlabel(x_axis_label)
ax_mpl.set_ylabel(y_axis_label)
if not plotly:
ax_mpl.legend(loc='best')
# plt.tight_layout()
return ax_mpl
else:
fig_mpl = ax_mpl.get_figure()
plotly_fig = tls.mpl_to_plotly(fig_mpl)
plotly_fig.layout.showlegend = True
        return plotly_fig | display the transmission or attenuation of compound, element and/or isotopes specified
Parameters:
===========
        :param x_axis: x type for display. Must be one of ['energy'|'lambda'|'time'|'number']
        :type x_axis: str
        :param y_axis: y type for display. Must be one of ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm']
:type y_axis: str
:param logx: True -> display x in log scale
:type logx: boolean.
:param logy: True -> display y in log scale
:type logy: boolean.
:param mixed: boolean. True -> display the total of each layer
False -> not displayed
:param all_layers: boolean. True -> display all layers
False -> not displayed
:param all_elements: boolean. True -> display all elements signal
False -> not displayed
:param all_isotopes: boolean. True -> display all isotopes signal
False -> not displayed
:param items_to_plot: array that describes what to plot
ex:
[['CoAg','Ag','107-Ag'], ['CoAg']]
if the dictionary is empty, everything is exported
:param time_unit: string. Must be either ['s'|'us'|'ns']
Note: this will be used only when x_axis='time'
:param offset_us: default: 0
Note: only used when x_axis='number' or 'time'
:param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
:param time_resolution_us: Note: this will be used only when x_axis='number'
        :param t_start_us: when the first acquisition occurred. default: 1
Note: this will be used only when x_axis='number'
:param plotly: control to use plotly to display or not.
:type plotly: bool
:param ax_mpl: matplotlib.axes to plot against
:type ax_mpl: matplotlib.axes
:param fmt: matplotlib.axes.plot kwargs
:type fmt: str
:param ms: matplotlib.axes.plot kwargs
:type ms: float
:param lw: matplotlib.axes.plot kwargs
:type lw: float
:param alpha: matplotlib.axes.plot kwargs
        :type alpha: float | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L562-L783 |
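Usage note: a minimal sketch of driving the `plot` method documented above. The constructor and `add_layer` arguments are assumptions about the ImagingReso API, not taken from this record.

import matplotlib.pyplot as plt
from ImagingReso.resonance import Resonance

# hypothetical sample setup; the argument names here are assumptions
o_reso = Resonance(energy_min=1, energy_max=300, energy_step=0.01)
o_reso.add_layer(formula='Ag', thickness=0.15)

# attenuation vs. energy on a log-scaled x-axis, using the documented defaults
ax = o_reso.plot(y_axis='attenuation', x_axis='energy', logx=True, mixed=True)
plt.show()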
ornlneutronimaging/ImagingReso | ImagingReso/resonance.py | Resonance.export | def export(self, output_type='df', filename=None, x_axis='energy', y_axis='attenuation', mixed=True,
all_layers=False, all_elements=False, all_isotopes=False, items_to_export=None,
offset_us=0., source_to_detector_m=16.,
t_start_us=1, time_resolution_us=0.16, time_unit='us'):
"""
output x and y values to clipboard or .csv file
output the transmission or attenuation or sigma of compound, element and/or isotopes specified
        'sigma_b' exported for each isotope is the product of (sigma * isotopic ratio)
'atoms_per_cm3' of each element is also exported in 'sigma' mode based on molar mass within stack.
:param output_type: export type : ['df', 'csv', 'clip']
:type output_type: str
:param mixed: True -> display the total of each layer
False -> not displayed
:type mixed: boolean
:param filename: string. filename (with .csv suffix) you would like to save as
None -> export to clipboard
:type filename: string
        :param x_axis: string. x type for export. Must be in ['energy', 'lambda', 'time', 'number']
        :param y_axis: string. y type for export. Must be in ['transmission', 'attenuation', 'sigma', 'sigma_raw', 'miu_per_cm']
:param all_layers: boolean. True -> export all layers
False -> not export
:param all_elements: boolean. True -> export all elements signal
False -> not export
:param all_isotopes: boolean. True -> export all isotopes signal
False -> not export
:param items_to_export: array that describes what to export
ex:
[['CoAg','Ag','107-Ag'], ['CoAg']]
if the dictionary is empty, everything is exported
:param time_unit: string. Must be either 's' or 'us' or 'ns'
Note: this will be used only when x_axis='time'
:param offset_us: default: 0
Note: only used when x_axis='number' or 'time'
:param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
:param time_resolution_us: Note: this will be used only when x_axis='number'
        :param t_start_us: when the first acquisition occurred. default: 1
Note: this will be used only when x_axis='number'
:return: simulated resonance signals or sigma in the form of 'clipboard' or '.csv file' or 'pd.DataFrame'
"""
if x_axis not in x_type_list:
raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list))
if time_unit not in time_unit_list:
raise ValueError("Please specify the time unit using one from '{}'.".format(time_unit_list))
if y_axis not in y_type_list:
raise ValueError("Please specify the y-axis type using one from '{}'.".format(y_type_list))
if output_type not in export_type_list:
raise ValueError("Please specify export type using one from '{}'.".format(export_type_list))
# stack from self
_stack_signal = self.stack_signal
_stack = self.stack
_x_axis = self.total_signal['energy_eV']
x_axis_label = None
df = pd.DataFrame()
"""X-axis"""
# determine values and labels for x-axis with options from
# 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)'
if x_axis == 'energy':
x_axis_label = 'Energy (eV)'
if x_axis == 'lambda':
x_axis_label = u"Wavelength (\u212B)"
_x_axis = _utilities.ev_to_angstroms(array=_x_axis)
if x_axis == 'time':
if time_unit == 's':
x_axis_label = 'Time (s)'
_x_axis = _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
if time_unit == 'us':
x_axis_label = 'Time (us)'
_x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
if time_unit == 'ns':
x_axis_label = 'Time (ns)'
_x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us)
print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}"
.format(x_axis_label, source_to_detector_m, offset_us))
if x_axis == 'number':
x_axis_label = 'Image number (#)'
_x_axis = _utilities.ev_to_image_number(array=_x_axis,
source_to_detector_m=source_to_detector_m,
offset_us=offset_us,
time_resolution_us=time_resolution_us,
t_start_us=t_start_us)
print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}"
.format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us))
if x_axis_label is None:
raise ValueError("x_axis_label does NOT exist, please check.")
df[x_axis_label] = _x_axis
"""Y-axis"""
if y_axis[:5] != 'sigma':
# export transmission or attenuation or miu_per_cm
y_axis_tag = y_axis
if y_axis_tag == 'miu_per_cm':
mixed = False
print("'y_axis='miu_per_cm'' is selected. Auto force 'mixed=False'")
if mixed:
_y_axis = self.total_signal[y_axis_tag]
df['Total_' + y_axis_tag] = _y_axis
if items_to_export is None:
# export based on specified level : layer|element|isotope
if all_layers:
for _compound in _stack.keys():
_y_axis = _stack_signal[_compound][y_axis_tag]
df[_compound] = _y_axis
if all_elements:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
_y_axis = _stack_signal[_compound][_element][y_axis_tag]
df[_compound + '/' + _element] = _y_axis
if all_isotopes:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
for _isotope in _stack[_compound][_element]['isotopes']['list']:
_y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag]
df[_compound + '/' + _element + '/' + _isotope] = _y_axis
else:
# export specified transmission or attenuation
for _path_to_export in items_to_export:
_path_to_export = list(_path_to_export)
_live_path = _stack_signal
_label = "/".join(_path_to_export)
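                # walk down the nested signal dict one path segment at a time, e.g. ['CoAg', 'Ag'] -> _stack_signal['CoAg']['Ag']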
while _path_to_export:
_item = _path_to_export.pop(0)
_live_path = _live_path[_item]
_y_axis = _live_path[y_axis_tag]
df[_label] = _y_axis
else:
# export sigma
if y_axis == 'sigma':
y_axis_tag = 'sigma_b'
        else:
            y_axis_tag = 'sigma_b_raw'
_stack_sigma = self.stack_sigma
if items_to_export is None:
for _compound in _stack.keys():
for _element in _stack[_compound]['elements']:
_y_axis = _stack_sigma[_compound][_element]['sigma_b'] # No 'sigma_b_raw' at this level
df[_compound + '/' + _element + '/atoms_per_cm3'] = _stack[_compound][_element]['atoms_per_cm3']
df[_compound + '/' + _element] = _y_axis
if all_isotopes:
for _isotope in _stack[_compound][_element]['isotopes']['list']:
_y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag]
df[_compound + '/' + _element + '/' + _isotope] = _y_axis
else:
# export specified sigma
for _path_to_export in items_to_export:
if y_axis_tag[-3:] == 'raw':
if len(_path_to_export) < 3:
raise ValueError(
"Getting raw sigma of '{}' at layer or element level is not supported. "
"If it is a single element layer, please follow "
"['layer', 'element', 'isotope'] format.".format(_path_to_export[0]))
else:
if len(_path_to_export) < 2:
raise ValueError(
"Getting weighted sigma of '{}' at layer level is not supported. "
"If it is a single element layer, please follow "
"['layer', 'element'] format.".format(_path_to_export[0]))
_path_to_export = list(_path_to_export)
_live_path = _stack_sigma
_label = "/".join(_path_to_export)
while _path_to_export:
_item = _path_to_export.pop(0)
_live_path = _live_path[_item]
_y_axis = _live_path[y_axis_tag]
df[_label] = _y_axis
if len(df.columns) <= 1:
raise ValueError("No y values have been selected to export!")
if output_type == 'csv':
if filename is None:
filename = 'data.csv'
            if not filename.endswith('.csv'):
                filename += '.csv'
df.to_csv(filename, index=False)
print("Exporting to file ('./{}') completed.".format(filename))
elif output_type == 'clip':
df.to_clipboard(excel=True, index=False)
print('Exporting to clipboard completed.')
else: # output_type == 'df'
        return df | python | output x and y values to clipboard or .csv file
output the transmission or attenuation or sigma of compound, element and/or isotopes specified
'sigma_b' exported for each isotope is the product resulted from (sigma * isotopic ratio)
'atoms_per_cm3' of each element is also exported in 'sigma' mode based on molar mass within stack.
:param output_type: export type : ['df', 'csv', 'clip']
:type output_type: str
:param mixed: True -> display the total of each layer
False -> not displayed
:type mixed: boolean
:param filename: string. filename (with .csv suffix) you would like to save as
None -> export to clipboard
:type filename: string
        :param x_axis: string. x type for export. Must be in ['energy', 'lambda', 'time', 'number']
        :param y_axis: string. y type for export. Must be in ['transmission', 'attenuation', 'sigma', 'sigma_raw', 'miu_per_cm']
:param all_layers: boolean. True -> export all layers
False -> not export
:param all_elements: boolean. True -> export all elements signal
False -> not export
:param all_isotopes: boolean. True -> export all isotopes signal
False -> not export
:param items_to_export: array that describes what to export
ex:
[['CoAg','Ag','107-Ag'], ['CoAg']]
if the dictionary is empty, everything is exported
:param time_unit: string. Must be either 's' or 'us' or 'ns'
Note: this will be used only when x_axis='time'
:param offset_us: default: 0
Note: only used when x_axis='number' or 'time'
:param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time'
:param time_resolution_us: Note: this will be used only when x_axis='number'
        :param t_start_us: when the first acquisition occurred. default: 1
Note: this will be used only when x_axis='number'
        :return: simulated resonance signals or sigma in the form of 'clipboard' or '.csv file' or 'pd.DataFrame' | train | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L785-L978 |
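Usage note: a short sketch of the `export` method above, reusing the hypothetical `o_reso` object from the previous example; the item path below assumes a single-element silver layer.

# return the simulated signal as a pandas DataFrame
df = o_reso.export(output_type='df', x_axis='energy', y_axis='attenuation', mixed=True)

# or write one specific isotope signal straight to a .csv file
o_reso.export(output_type='csv', filename='signal.csv',
              items_to_export=[['Ag', 'Ag', '107-Ag']])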
spockNinja/py-yaml-builder | yaml_builder/__init__.py | main | def main():
"""Builds a yaml file"""
parser = argparse.ArgumentParser(description='Compose a yaml file.')
parser.add_argument(
'root',
type=argparse.FileType('r'),
help='The root yaml file to compose.'
)
args = parser.parse_args()
result = yaml.load(args.root, Loader=ComposeLoader)
    print(yaml.dump(result)) | python | Builds a yaml file | train | https://github.com/spockNinja/py-yaml-builder/blob/9a7fb3067afe107397cebd07d950dbb4238a8730/yaml_builder/__init__.py#L7-L20 |
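Usage note: the same composition can be driven programmatically; the `ComposeLoader` import path below is an assumption (the record only shows that the loader is referenced from this module).

import yaml
from yaml_builder import ComposeLoader  # assumed import path for the custom loader

# programmatic equivalent of running the CLI with 'root.yaml' as the argument
with open('root.yaml') as root:
    composed = yaml.load(root, Loader=ComposeLoader)
print(yaml.dump(composed))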
rapidpro/expressions | python/temba_expressions/dates.py | DateParser._parse | def _parse(self, text, mode):
"""
Returns a date, datetime or time depending on what information is available
"""
if text is None or not text.strip():
return None
# first try to parse as an ISO8601 date, if it doesn't work we'll try other options
if len(text) >= 16:
try:
parsed = iso8601.parse_date(text, default_timezone=None)
if not parsed.tzinfo:
parsed = self._timezone.localize(parsed)
return parsed
except iso8601.ParseError:
pass
# split the text into numerical and text tokens
tokens = regex.findall(r'([0-9]+|[^\W\d]+)', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0)
# get the possibilities for each token
token_possibilities = []
for token in tokens:
possibilities = self._get_token_possibilities(token, mode)
if len(possibilities) > 0:
token_possibilities.append(possibilities)
# see what valid sequences we can make
sequences = self._get_possible_sequences(mode, len(token_possibilities), self._date_style)
for sequence in sequences:
match = OrderedDict()
for c in range(len(sequence)):
component = sequence[c]
value = token_possibilities[c].get(component, None)
match[component] = value
if value is None:
break
else:
# try to make a valid result from this and return if successful
obj = self._make_result(match, self._now, self._timezone)
if obj is not None:
return obj
        return None | python | Returns a date, datetime or time depending on what information is available | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L98-L145 |
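Usage note: the token split used by `_parse` can be tried in isolation; this snippet reuses the exact pattern and flags from the function body.

import regex

text = '1st Jan 2015 09:30 AM'
tokens = regex.findall(r'([0-9]+|[^\W\d]+)', text,
                       flags=regex.MULTILINE | regex.UNICODE | regex.V0)
print(tokens)  # ['1', 'st', 'Jan', '2015', '09', '30', 'AM']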
rapidpro/expressions | python/temba_expressions/dates.py | DateParser._get_possible_sequences | def _get_possible_sequences(cls, mode, length, date_style):
"""
Gets possible component sequences in the given mode
:param mode: the mode
:param length: the length (only returns sequences of this length)
:param date_style: whether dates are usually entered day first or month first
:return:
"""
sequences = []
date_sequences = cls.DATE_SEQUENCES_DAY_FIRST if date_style == DateStyle.DAY_FIRST else cls.DATE_SEQUENCES_MONTH_FIRST
if mode == Mode.DATE or mode == Mode.AUTO:
for seq in date_sequences:
if len(seq) == length:
sequences.append(seq)
elif mode == Mode.TIME:
for seq in cls.TIME_SEQUENCES:
if len(seq) == length:
sequences.append(seq)
if mode == Mode.DATETIME or mode == Mode.AUTO:
for date_seq in date_sequences:
for time_seq in cls.TIME_SEQUENCES:
if len(date_seq) + len(time_seq) == length:
sequences.append(date_seq + time_seq)
        return sequences | python | Gets possible component sequences in the given mode
:param mode: the mode
:param length: the length (only returns sequences of this length)
:param date_style: whether dates are usually entered day first or month first
        :return: the possible component sequences of the given length | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L148-L175 |
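Illustration: a toy version of the length filtering performed above. The real sequence tables are class constants not shown in this record, so the tuples below are placeholders.

# placeholder sequences; the real tables live on DateParser as class attributes
date_sequences = [('day', 'month'), ('day', 'month', 'year')]
time_sequences = [('hour', 'minute'), ('hour', 'minute', 'second')]

# datetime mode keeps every date+time combination of the requested total length
length = 5
combined = [d + t for d in date_sequences for t in time_sequences
            if len(d) + len(t) == length]
print(combined)
# [('day', 'month', 'hour', 'minute', 'second'),
#  ('day', 'month', 'year', 'hour', 'minute')]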
rapidpro/expressions | python/temba_expressions/dates.py | DateParser._get_token_possibilities | def _get_token_possibilities(cls, token, mode):
"""
        Returns all possible component types of a token without regard to its context. For example "26" could be a year,
        day or minute, but can't be a month or an hour.
:param token: the token to classify
:param mode: the parse mode
:return: the dict of possible types and values if token was of that type
"""
token = token.lower().strip()
possibilities = {}
try:
as_int = int(token)
if mode != Mode.TIME:
if 1 <= as_int <= 9999 and (len(token) == 2 or len(token) == 4):
possibilities[Component.YEAR] = as_int
if 1 <= as_int <= 12:
possibilities[Component.MONTH] = as_int
if 1 <= as_int <= 31:
possibilities[Component.DAY] = as_int
if mode != Mode.DATE:
if 0 <= as_int <= 23:
possibilities[Component.HOUR] = as_int
if 0 <= as_int <= 59:
possibilities[Component.MINUTE] = as_int
if 0 <= as_int <= 59:
possibilities[Component.SECOND] = as_int
if len(token) == 3 or len(token) == 6 or len(token) == 9:
nano = 0
if len(token) == 3: # millisecond precision
nano = as_int * 1000000
elif len(token) == 6: # microsecond precision
nano = as_int * 1000
elif len(token) == 9:
nano = as_int
possibilities[Component.NANO] = nano
if len(token) == 4:
hour = as_int // 100
minute = as_int - (hour * 100)
if 1 <= hour <= 24 and 1 <= minute <= 59:
possibilities[Component.HOUR_AND_MINUTE] = as_int
except ValueError:
if mode != Mode.TIME:
# could it be a month alias?
month = MONTHS_BY_ALIAS.get(token, None)
if month is not None:
possibilities[Component.MONTH] = month
if mode != Mode.DATE:
# could it be an AM/PM marker?
is_am_marker = token == "am"
is_pm_marker = token == "pm"
if is_am_marker or is_pm_marker:
possibilities[Component.AM_PM] = cls.AM if is_am_marker else cls.PM
# offset parsing is limited to Z meaning UTC for now
if token == "z":
possibilities[Component.OFFSET] = 0
        return possibilities | python | Returns all possible component types of a token without regard to its context. For example "26" could be a year,
        day or minute, but can't be a month or an hour.
:param token: the token to classify
:param mode: the parse mode
        :return: the dict of possible types and values if token was of that type | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L178-L239 |
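Illustration: following the rules above, the docstring's own example token "26" classifies as below (assuming `DateParser`, `Mode` and `Component` are importable from the same module, and that the method is a classmethod as the `cls` parameter suggests).

from temba_expressions.dates import DateParser, Mode, Component  # assumed imports

# '26' is a plausible 2-digit year, day, minute or second, but not a month
# (1..12), an hour (0..23) or a nano field (which needs 3, 6 or 9 digits)
possibilities = DateParser._get_token_possibilities('26', Mode.AUTO)
assert possibilities == {
    Component.YEAR: 26,
    Component.DAY: 26,
    Component.MINUTE: 26,
    Component.SECOND: 26,
}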
rapidpro/expressions | python/temba_expressions/dates.py | DateParser._make_result | def _make_result(cls, values, now, timezone):
"""
Makes a date or datetime or time object from a map of component values
:param values: the component values
        :param now: the current date and time
        :param timezone: the current timezone
        :return: the date, datetime, time or None if the values are invalid
"""
date = None
time = None
if Component.MONTH in values:
year = cls._year_from_2digits(values.get(Component.YEAR, now.year), now.year)
month = values[Component.MONTH]
day = values.get(Component.DAY, 1)
try:
date = datetime.date(year, month, day)
except ValueError:
return None # not a valid date
if (Component.HOUR in values and Component.MINUTE in values) or Component.HOUR_AND_MINUTE in values:
if Component.HOUR_AND_MINUTE in values:
combined = values[Component.HOUR_AND_MINUTE]
hour = combined // 100
minute = combined - (hour * 100)
second = 0
nano = 0
else:
hour = values[Component.HOUR]
minute = values[Component.MINUTE]
second = values.get(Component.SECOND, 0)
nano = values.get(Component.NANO, 0)
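            # convert a 12-hour clock reading to 24-hour time using the AM/PM marker, if any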
if hour < 12 and values.get(Component.AM_PM) == cls.PM:
hour += 12
elif hour == 12 and values.get(Component.AM_PM) == cls.AM:
hour -= 12
try:
time = datetime.time(hour, minute, second, microsecond=nano // 1000)
except ValueError:
return None # not a valid time
if Component.OFFSET in values:
timezone = pytz.FixedOffset(values[Component.OFFSET] // 60)
if date is not None and time is not None:
return timezone.localize(datetime.datetime.combine(date, time))
elif date is not None:
return date
elif time is not None:
return time
else:
        return None | python | Makes a date or datetime or time object from a map of component values
:param values: the component values
:param now: the current now
:param timezone: the current timezone
:return: the date, datetime, time or none if values are invalid | [
"Makes",
"a",
"date",
"or",
"datetime",
"or",
"time",
"object",
"from",
"a",
"map",
"of",
"component",
"values",
":",
"param",
"values",
":",
"the",
"component",
"values",
":",
"param",
"now",
":",
"the",
"current",
"now",
":",
"param",
"timezone",
":",
"the",
"current",
"timezone",
":",
"return",
":",
"the",
"date",
"datetime",
"time",
"or",
"none",
"if",
"values",
"are",
"invalid"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L242-L295 |
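
A minimal usage sketch for _make_result. It assumes Component and DateParser are importable from temba_expressions.dates and that the method is exposed as a classmethod, as its cls parameter suggests; neither detail is verified here.

import datetime
import pytz
from temba_expressions.dates import DateParser, Component

now = datetime.datetime(2015, 8, 12, 10, 30)
tz = pytz.timezone("Africa/Kigali")

# date components only -> a datetime.date (2-digit year resolved against now)
values = {Component.YEAR: 14, Component.MONTH: 7, Component.DAY: 25}
print(DateParser._make_result(values, now, tz))   # 2014-07-25

# adding a combined hour/minute component -> a timezone-aware datetime
values[Component.HOUR_AND_MINUTE] = 1330          # i.e. 13:30
print(DateParser._make_result(values, now, tz))   # 2014-07-25 13:30:00+02:00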
rapidpro/expressions | python/temba_expressions/dates.py | DateParser._year_from_2digits | def _year_from_2digits(short_year, current_year):
"""
Converts a relative 2-digit year to an absolute 4-digit year
:param short_year: the relative year
:param current_year: the current year
:return: the absolute year
"""
if short_year < 100:
short_year += current_year - (current_year % 100)
if abs(short_year - current_year) >= 50:
if short_year < current_year:
return short_year + 100
else:
return short_year - 100
return short_year | python | def _year_from_2digits(short_year, current_year):
"""
Converts a relative 2-digit year to an absolute 4-digit year
:param short_year: the relative year
:param current_year: the current year
:return: the absolute year
"""
if short_year < 100:
short_year += current_year - (current_year % 100)
if abs(short_year - current_year) >= 50:
if short_year < current_year:
return short_year + 100
else:
return short_year - 100
return short_year | [
"def",
"_year_from_2digits",
"(",
"short_year",
",",
"current_year",
")",
":",
"if",
"short_year",
"<",
"100",
":",
"short_year",
"+=",
"current_year",
"-",
"(",
"current_year",
"%",
"100",
")",
"if",
"abs",
"(",
"short_year",
"-",
"current_year",
")",
">=",
"50",
":",
"if",
"short_year",
"<",
"current_year",
":",
"return",
"short_year",
"+",
"100",
"else",
":",
"return",
"short_year",
"-",
"100",
"return",
"short_year"
] | Converts a relative 2-digit year to an absolute 4-digit year
:param short_year: the relative year
:param current_year: the current year
:return: the absolute year | [
"Converts",
"a",
"relative",
"2",
"-",
"digit",
"year",
"to",
"an",
"absolute",
"4",
"-",
"digit",
"year",
":",
"param",
"short_year",
":",
"the",
"relative",
"year",
":",
"param",
"current_year",
":",
"the",
"current",
"year",
":",
"return",
":",
"the",
"absolute",
"year"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L298-L312 |
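
The pivot behaviour is easiest to see with concrete values. The helper takes no self/cls argument, so it is presumably a staticmethod; the import path below is assumed from the file layout.

from temba_expressions.dates import DateParser

# expansion keeps the result within +/-50 years of the current year
assert DateParser._year_from_2digits(15, 2015) == 2015
assert DateParser._year_from_2digits(64, 2015) == 2064    # 49 years ahead: kept
assert DateParser._year_from_2digits(99, 2015) == 1999    # 84 years ahead: pulled back a century
assert DateParser._year_from_2digits(1982, 2015) == 1982  # already 4 digits: unchanged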
anteater/anteater | anteater/src/patch_scan.py | prepare_patchset | def prepare_patchset(project, patchset, binaries, ips, urls):
""" Create black/white lists and default / project waivers
and iterates over patchset file """
# Get Various Lists / Project Waivers
lists = get_lists.GetLists()
# Get file name black list and project waivers
file_audit_list, file_audit_project_list = lists.file_audit_list(project)
# Get file content black list and project waivers
flag_list, ignore_list = lists.file_content_list(project)
# Get URL Ignore Lists
url_ignore = lists.url_ignore(project)
# Get URL Ignore Lists
ip_ignore = lists.ip_ignore(project)
# Get File Ignore Lists
file_ignore = lists.file_ignore()
# Get Directory Ignore Lists
ignore_directories = lists.ignore_directories(project)
if binaries or ips or urls:
try:
apikey = os.environ["VT_KEY"]
except KeyError:
logger.error("Please set your virustotal.com API key as an environment variable")
sys.exit(1)
try:
vt_rate_type = config.get('config', 'vt_rate_type')
except six.moves.configparser.NoSectionError:
logger.error("A config section is required for vt_rate_type with a public | private option ")
sys.exit(1)
pattern = re.compile(r'\bpublic\b|\bprivate\b')
if not pattern.match(vt_rate_type):
logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
sys.exit(1)
else:
apikey = ""
# Open patch set to get file list
try:
fo = open(patchset, 'r')
lines = fo.readlines()
except IOError:
logger.error('%s does not exist', patchset)
sys.exit(1)
for line in lines:
patch_file = line.strip('\n')
# Perform binary and file / content checks
scan_patch(project, patch_file, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey)
# Process final result
process_failure(project) | python | def prepare_patchset(project, patchset, binaries, ips, urls):
""" Create black/white lists and default / project waivers
and iterates over patchset file """
# Get Various Lists / Project Waivers
lists = get_lists.GetLists()
# Get file name black list and project waivers
file_audit_list, file_audit_project_list = lists.file_audit_list(project)
# Get file content black list and project waivers
flag_list, ignore_list = lists.file_content_list(project)
# Get URL Ignore Lists
url_ignore = lists.url_ignore(project)
# Get URL Ignore Lists
ip_ignore = lists.ip_ignore(project)
# Get File Ignore Lists
file_ignore = lists.file_ignore()
# Get Directory Ignore Lists
ignore_directories = lists.ignore_directories(project)
if binaries or ips or urls:
try:
apikey = os.environ["VT_KEY"]
except KeyError:
logger.error("Please set your virustotal.com API key as an environment variable")
sys.exit(1)
try:
vt_rate_type = config.get('config', 'vt_rate_type')
except six.moves.configparser.NoSectionError:
logger.error("A config section is required for vt_rate_type with a public | private option ")
sys.exit(1)
pattern = re.compile(r'\bpublic\b|\bprivate\b')
if not pattern.match(vt_rate_type):
logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
sys.exit(1)
else:
apikey = ""
# Open patch set to get file list
try:
fo = open(patchset, 'r')
lines = fo.readlines()
except IOError:
logger.error('%s does not exist', patchset)
sys.exit(1)
for line in lines:
patch_file = line.strip('\n')
# Perform binary and file / content checks
scan_patch(project, patch_file, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey)
# Process final result
process_failure(project) | [
"def",
"prepare_patchset",
"(",
"project",
",",
"patchset",
",",
"binaries",
",",
"ips",
",",
"urls",
")",
":",
"# Get Various Lists / Project Waivers",
"lists",
"=",
"get_lists",
".",
"GetLists",
"(",
")",
"# Get file name black list and project waivers",
"file_audit_list",
",",
"file_audit_project_list",
"=",
"lists",
".",
"file_audit_list",
"(",
"project",
")",
"# Get file content black list and project waivers",
"flag_list",
",",
"ignore_list",
"=",
"lists",
".",
"file_content_list",
"(",
"project",
")",
"# Get URL Ignore Lists",
"url_ignore",
"=",
"lists",
".",
"url_ignore",
"(",
"project",
")",
"# Get URL Ignore Lists",
"ip_ignore",
"=",
"lists",
".",
"ip_ignore",
"(",
"project",
")",
"# Get File Ignore Lists",
"file_ignore",
"=",
"lists",
".",
"file_ignore",
"(",
")",
"# Get Directory Ignore Lists",
"ignore_directories",
"=",
"lists",
".",
"ignore_directories",
"(",
"project",
")",
"if",
"binaries",
"or",
"ips",
"or",
"urls",
":",
"try",
":",
"apikey",
"=",
"os",
".",
"environ",
"[",
"\"VT_KEY\"",
"]",
"except",
"KeyError",
":",
"logger",
".",
"error",
"(",
"\"Please set your virustotal.com API key as an environment variable\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"vt_rate_type",
"=",
"config",
".",
"get",
"(",
"'config'",
",",
"'vt_rate_type'",
")",
"except",
"six",
".",
"moves",
".",
"configparser",
".",
"NoSectionError",
":",
"logger",
".",
"error",
"(",
"\"A config section is required for vt_rate_type with a public | private option \"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"patten",
"=",
"re",
".",
"compile",
"(",
"r'\\bpublic\\b|\\bprivate\\b'",
")",
"if",
"not",
"patten",
".",
"match",
"(",
"vt_rate_type",
")",
":",
"logger",
".",
"error",
"(",
"\"Unrecognized %s option for vt_rate_type\"",
",",
"vt_rate_type",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"apikey",
"=",
"\"\"",
"# Open patch set to get file list",
"try",
":",
"fo",
"=",
"open",
"(",
"patchset",
",",
"'r'",
")",
"lines",
"=",
"fo",
".",
"readlines",
"(",
")",
"except",
"IOError",
":",
"logger",
".",
"error",
"(",
"'%s does not exist'",
",",
"patchset",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"for",
"line",
"in",
"lines",
":",
"patch_file",
"=",
"line",
".",
"strip",
"(",
"'\\n'",
")",
"# Perform binary and file / content checks",
"scan_patch",
"(",
"project",
",",
"patch_file",
",",
"binaries",
",",
"ips",
",",
"urls",
",",
"file_audit_list",
",",
"file_audit_project_list",
",",
"flag_list",
",",
"ignore_list",
",",
"file_ignore",
",",
"ignore_directories",
",",
"url_ignore",
",",
"ip_ignore",
",",
"apikey",
")",
"# Process final result",
"process_failure",
"(",
"project",
")"
] | Create black/white lists and default / project waivers
and iterates over patchset file | [
"Create",
"black",
"/",
"white",
"lists",
"and",
"default",
"/",
"project",
"waivers",
"and",
"iterates",
"over",
"patchset",
"file"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L42-L101 |
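
A sketch of a typical invocation; "patchset.txt" is a hypothetical file listing one changed file path per line, and VT_KEY is only consulted when the binaries/ips/urls checks are enabled.

import os
from anteater.src.patch_scan import prepare_patchset  # assumed import path

os.environ.setdefault("VT_KEY", "<your virustotal api key>")
prepare_patchset(project="myproject", patchset="patchset.txt",
                 binaries=True, ips=False, urls=False)
# exits with status 1 via process_failure() if any check flagged a violation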
anteater/anteater | anteater/src/patch_scan.py | scan_patch | def scan_patch(project, patch_file, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, file_ignore,
ignore_directories, url_ignore, ip_ignore, apikey):
"""
Scan actions for each committed file in patch set
"""
global failure
split_path = patch_file.split(project + '/', 1)[-1]
if not any(x in split_path for x in ignore_directories):
if is_binary(patch_file) and binaries:
hashlist = get_lists.GetLists()
binary_hash = hashlist.binary_hash(project, split_path)
with open(patch_file, 'rb') as afile:
hasher = hashlib.sha256()
buf = afile.read()
hasher.update(buf)
sha256hash = hasher.hexdigest()
if sha256hash in binary_hash:
logger.info('Found matching file hash for: %s',
patch_file)
else:
logger.info('sha256hash: %s', sha256hash)
logger.error('Non Whitelisted Binary file: %s',
patch_file)
scan_binary(patch_file, project, sha256hash, apikey)
failure = True
with open(reports_dir + "binaries-" + project + ".log", "a") \
as gate_report:
gate_report.write('Non Whitelisted Binary file: {0}\n'.
format(patch_file))
else:
# Check file names / extensions
if file_audit_list.search(patch_file) and not \
file_audit_project_list.search(patch_file):
match = file_audit_list.search(patch_file)
logger.error('Blacklisted file: %s', patch_file)
logger.error('Matched String: %s', match.group())
failure = True
with open(reports_dir + "file-names_" + project + ".log", "a") \
as gate_report:
gate_report.write('Blacklisted file: {0}\n'.
format(patch_file))
gate_report.write('Matched String: {0}'.
format(match.group()))
# Open file to check for blacklisted content
if not is_binary(patch_file):
try:
fo = open(patch_file, 'r')
lines = fo.readlines()
file_exists = True
except IOError:
file_exists = False
if file_exists and not patch_file.endswith(tuple(file_ignore)):
for line in lines:
# Find IP Addresses and send for report to Virus Total
if ips:
ipaddr = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
if ipaddr:
ipaddr = ipaddr[0]
if re.search(ip_ignore, ipaddr):
logger.info('%s is in IP ignore list.', ipaddr)
else:
try:
ipaddress.ip_address(ipaddr).is_global
scan_ipaddr(ipaddr, apikey)
except:
pass # Ok to pass here, as this captures the odd string which is not an IP Address
# Check for URLs and send for report to Virus Total
if urls:
url = re.search("(?P<url>https?://[^\s]+)", line) or re.search("(?P<url>www[^\s]+)", line)
if url:
url = url.group("url")
if re.search(url_ignore, url):
logger.info('%s is in URL ignore list.', url)
else:
scan_url(url, apikey)
# Perform search within text files
for key, value in flag_list.items():
regex = value['regex']
desc = value['desc']
if re.search(regex, line) and not re.search(
ignore_list, line):
logger.error('File contains violation: %s', patch_file)
logger.error('Flagged Content: %s', line.rstrip())
logger.error('Rationale: %s', desc.rstrip())
failure = True
with open(reports_dir + "contents_" + project + ".log", "a") as gate_report:
gate_report.write('File contains violation: {0}\n'.format(patch_file))
gate_report.write('Flagged Content: {0}'.format(line))
gate_report.write('Matched Regular Exp: {0}\n'.format(regex))
gate_report.write('Rationale: {0}\n'.format(desc.rstrip())) | python | def scan_patch(project, patch_file, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, file_ignore,
ignore_directories, url_ignore, ip_ignore, apikey):
"""
Scan actions for each committed file in patch set
"""
global failure
split_path = patch_file.split(project + '/', 1)[-1]
if not any(x in split_path for x in ignore_directories):
if is_binary(patch_file) and binaries:
hashlist = get_lists.GetLists()
binary_hash = hashlist.binary_hash(project, split_path)
with open(patch_file, 'rb') as afile:
hasher = hashlib.sha256()
buf = afile.read()
hasher.update(buf)
sha256hash = hasher.hexdigest()
if sha256hash in binary_hash:
logger.info('Found matching file hash for: %s',
patch_file)
else:
logger.info('sha256hash: %s', sha256hash)
logger.error('Non Whitelisted Binary file: %s',
patch_file)
scan_binary(patch_file, project, sha256hash, apikey)
failure = True
with open(reports_dir + "binaries-" + project + ".log", "a") \
as gate_report:
gate_report.write('Non Whitelisted Binary file: {0}\n'.
format(patch_file))
else:
# Check file names / extensions
if file_audit_list.search(patch_file) and not \
file_audit_project_list.search(patch_file):
match = file_audit_list.search(patch_file)
logger.error('Blacklisted file: %s', patch_file)
logger.error('Matched String: %s', match.group())
failure = True
with open(reports_dir + "file-names_" + project + ".log", "a") \
as gate_report:
gate_report.write('Blacklisted file: {0}\n'.
format(patch_file))
gate_report.write('Matched String: {0}'.
format(match.group()))
# Open file to check for blacklisted content
if not is_binary(patch_file):
try:
fo = open(patch_file, 'r')
lines = fo.readlines()
file_exists = True
except IOError:
file_exists = False
if file_exists and not patch_file.endswith(tuple(file_ignore)):
for line in lines:
# Find IP Addresses and send for report to Virus Total
if ips:
ipaddr = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
if ipaddr:
ipaddr = ipaddr[0]
if re.search(ip_ignore, ipaddr):
logger.info('%s is in IP ignore list.', ipaddr)
else:
try:
ipaddress.ip_address(ipaddr).is_global
scan_ipaddr(ipaddr, apikey)
except:
pass # Ok to pass here, as this captures the odd string which is not an IP Address
# Check for URLs and send for report to Virus Total
if urls:
url = re.search("(?P<url>https?://[^\s]+)", line) or re.search("(?P<url>www[^\s]+)", line)
if url:
url = url.group("url")
if re.search(url_ignore, url):
logger.info('%s is in URL ignore list.', url)
else:
scan_url(url, apikey)
# Perform search within text files
for key, value in flag_list.items():
regex = value['regex']
desc = value['desc']
if re.search(regex, line) and not re.search(
ignore_list, line):
logger.error('File contains violation: %s', patch_file)
logger.error('Flagged Content: %s', line.rstrip())
logger.error('Rationale: %s', desc.rstrip())
failure = True
with open(reports_dir + "contents_" + project + ".log", "a") as gate_report:
gate_report.write('File contains violation: {0}\n'.format(patch_file))
gate_report.write('Flagged Content: {0}'.format(line))
gate_report.write('Matched Regular Exp: {0}\n'.format(regex))
gate_report.write('Rationale: {0}\n'.format(desc.rstrip())) | [
"def",
"scan_patch",
"(",
"project",
",",
"patch_file",
",",
"binaries",
",",
"ips",
",",
"urls",
",",
"file_audit_list",
",",
"file_audit_project_list",
",",
"flag_list",
",",
"ignore_list",
",",
"file_ignore",
",",
"ignore_directories",
",",
"url_ignore",
",",
"ip_ignore",
",",
"apikey",
")",
":",
"global",
"failure",
"split_path",
"=",
"patch_file",
".",
"split",
"(",
"project",
"+",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"if",
"not",
"any",
"(",
"x",
"in",
"split_path",
"for",
"x",
"in",
"ignore_directories",
")",
":",
"if",
"is_binary",
"(",
"patch_file",
")",
"and",
"binaries",
":",
"hashlist",
"=",
"get_lists",
".",
"GetLists",
"(",
")",
"binary_hash",
"=",
"hashlist",
".",
"binary_hash",
"(",
"project",
",",
"split_path",
")",
"with",
"open",
"(",
"patch_file",
",",
"'rb'",
")",
"as",
"afile",
":",
"hasher",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"buf",
"=",
"afile",
".",
"read",
"(",
")",
"hasher",
".",
"update",
"(",
"buf",
")",
"sha256hash",
"=",
"hasher",
".",
"hexdigest",
"(",
")",
"if",
"sha256hash",
"in",
"binary_hash",
":",
"logger",
".",
"info",
"(",
"'Found matching file hash for: %s'",
",",
"patch_file",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'sha256hash: %s'",
",",
"sha256hash",
")",
"logger",
".",
"error",
"(",
"'Non Whitelisted Binary file: %s'",
",",
"patch_file",
")",
"scan_binary",
"(",
"patch_file",
",",
"project",
",",
"sha256hash",
",",
"apikey",
")",
"failure",
"=",
"True",
"with",
"open",
"(",
"reports_dir",
"+",
"\"binaries-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'Non Whitelisted Binary file: {0}\\n'",
".",
"format",
"(",
"patch_file",
")",
")",
"else",
":",
"# Check file names / extensions",
"if",
"file_audit_list",
".",
"search",
"(",
"patch_file",
")",
"and",
"not",
"file_audit_project_list",
".",
"search",
"(",
"patch_file",
")",
":",
"match",
"=",
"file_audit_list",
".",
"search",
"(",
"patch_file",
")",
"logger",
".",
"error",
"(",
"'Blacklisted file: %s'",
",",
"patch_file",
")",
"logger",
".",
"error",
"(",
"'Matched String: %s'",
",",
"match",
".",
"group",
"(",
")",
")",
"failure",
"=",
"True",
"with",
"open",
"(",
"reports_dir",
"+",
"\"file-names_\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'Blacklisted file: {0}\\n'",
".",
"format",
"(",
"patch_file",
")",
")",
"gate_report",
".",
"write",
"(",
"'Matched String: {0}'",
".",
"format",
"(",
"match",
".",
"group",
"(",
")",
")",
")",
"# Open file to check for blacklisted content",
"if",
"not",
"is_binary",
"(",
"patch_file",
")",
":",
"try",
":",
"fo",
"=",
"open",
"(",
"patch_file",
",",
"'r'",
")",
"lines",
"=",
"fo",
".",
"readlines",
"(",
")",
"file_exists",
"=",
"True",
"except",
"IOError",
":",
"file_exists",
"=",
"False",
"if",
"file_exists",
"and",
"not",
"patch_file",
".",
"endswith",
"(",
"tuple",
"(",
"file_ignore",
")",
")",
":",
"for",
"line",
"in",
"lines",
":",
"# Find IP Addresses and send for report to Virus Total",
"if",
"ips",
":",
"ipaddr",
"=",
"re",
".",
"findall",
"(",
"r'(?:\\d{1,3}\\.)+(?:\\d{1,3})'",
",",
"line",
")",
"if",
"ipaddr",
":",
"ipaddr",
"=",
"ipaddr",
"[",
"0",
"]",
"if",
"re",
".",
"search",
"(",
"ip_ignore",
",",
"ipaddr",
")",
":",
"logger",
".",
"info",
"(",
"'%s is in IP ignore list.'",
",",
"ipaddr",
")",
"else",
":",
"try",
":",
"ipaddress",
".",
"ip_address",
"(",
"ipaddr",
")",
".",
"is_global",
"scan_ipaddr",
"(",
"ipaddr",
",",
"apikey",
")",
"except",
":",
"pass",
"# Ok to pass here, as this captures the odd string which is not an IP Address",
"# Check for URLs and send for report to Virus Total",
"if",
"urls",
":",
"url",
"=",
"re",
".",
"search",
"(",
"\"(?P<url>https?://[^\\s]+)\"",
",",
"line",
")",
"or",
"re",
".",
"search",
"(",
"\"(?P<url>www[^\\s]+)\"",
",",
"line",
")",
"if",
"url",
":",
"url",
"=",
"url",
".",
"group",
"(",
"\"url\"",
")",
"if",
"re",
".",
"search",
"(",
"url_ignore",
",",
"url",
")",
":",
"logger",
".",
"info",
"(",
"'%s is in URL ignore list.'",
",",
"url",
")",
"else",
":",
"scan_url",
"(",
"url",
",",
"apikey",
")",
"# Perform search within text files",
"for",
"key",
",",
"value",
"in",
"flag_list",
".",
"items",
"(",
")",
":",
"regex",
"=",
"value",
"[",
"'regex'",
"]",
"desc",
"=",
"value",
"[",
"'desc'",
"]",
"if",
"re",
".",
"search",
"(",
"regex",
",",
"line",
")",
"and",
"not",
"re",
".",
"search",
"(",
"ignore_list",
",",
"line",
")",
":",
"logger",
".",
"error",
"(",
"'File contains violation: %s'",
",",
"patch_file",
")",
"logger",
".",
"error",
"(",
"'Flagged Content: %s'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"logger",
".",
"error",
"(",
"'Rationale: %s'",
",",
"desc",
".",
"rstrip",
"(",
")",
")",
"failure",
"=",
"True",
"with",
"open",
"(",
"reports_dir",
"+",
"\"contents_\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'File contains violation: {0}\\n'",
".",
"format",
"(",
"patch_file",
")",
")",
"gate_report",
".",
"write",
"(",
"'Flagged Content: {0}'",
".",
"format",
"(",
"line",
")",
")",
"gate_report",
".",
"write",
"(",
"'Matched Regular Exp: {0}\\n'",
".",
"format",
"(",
"regex",
")",
")",
"gate_report",
".",
"write",
"(",
"'Rationale: {0}\\n'",
".",
"format",
"(",
"desc",
".",
"rstrip",
"(",
")",
")",
")"
] | Scan actions for each committed file in patch set | [
"Scan",
"actions",
"for",
"each",
"commited",
"file",
"in",
"patch",
"set"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L104-L206 |
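
The binary whitelist check at the top of scan_patch reduces to comparing a SHA-256 digest against the project's known hashes; a small self-contained sketch of just that step (the whitelist entry and file name are hypothetical).

import hashlib

def sha256_of(path):
    # the same digest computation scan_patch applies to binary files
    with open(path, "rb") as afile:
        return hashlib.sha256(afile.read()).hexdigest()

# this entry happens to be the digest of an empty file
whitelist = {"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}
print(sha256_of("empty.bin") in whitelist)  # True when empty.bin is zero bytes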
anteater/anteater | anteater/src/patch_scan.py | scan_binary | def scan_binary(patch_file, project, sha256hash, apikey):
"""
Sends Binary (sha256hash) to Virus Total API
"""
v_api = virus_total.VirusTotal()
while True:
binary_report = v_api.binary_report(sha256hash, apikey)
response_code = binary_report['response_code']
# report does not exist, need to scan
if response_code == 0:
logger.info('Performing new scan of %s.', patch_file)
scan_file = v_api.scan_file(patch_file, apikey)
logger.info('VirusTotal Response: %s', scan_file['verbose_msg'])
logger.info('Report will be rendered at: %s', scan_file['permalink'])
binary_report = v_api.binary_report(sha256hash, apikey)
# Item is still queued
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 1:
logger.info('Report found, job complete.')
break
positives = binary_report['positives']
if positives == 0:
negative_report(binary_report, sha256hash, project, patch_file)
else:
positive_report(binary_report, sha256hash, project, patch_file) | python | def scan_binary(patch_file, project, sha256hash, apikey):
"""
Sends Binary (sha256hash) to Virus Total API
"""
v_api = virus_total.VirusTotal()
while True:
binary_report = v_api.binary_report(sha256hash, apikey)
response_code = binary_report['response_code']
# report does not exist, need to scan
if response_code == 0:
logger.info('Performing new scan of %s.', patch_file)
scan_file = v_api.scan_file(patch_file, apikey)
logger.info('VirusTotal Response: %s', scan_file['verbose_msg'])
logger.info('Report will be rendered at: %s', scan_file['permalink'])
binary_report = v_api.binary_report(sha256hash, apikey)
# Item is still queued
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 1:
logger.info('Report found, job complete.')
break
positives = binary_report['positives']
if positives == 0:
negative_report(binary_report, sha256hash, project, patch_file)
else:
positive_report(binary_report, sha256hash, project, patch_file) | [
"def",
"scan_binary",
"(",
"patch_file",
",",
"project",
",",
"sha256hash",
",",
"apikey",
")",
":",
"v_api",
"=",
"virus_total",
".",
"VirusTotal",
"(",
")",
"while",
"True",
":",
"binary_report",
"=",
"v_api",
".",
"binary_report",
"(",
"sha256hash",
",",
"apikey",
")",
"response_code",
"=",
"binary_report",
"[",
"'response_code'",
"]",
"# report does not exist, need to scan",
"if",
"response_code",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'Performing new scan of %s.'",
",",
"patch_file",
")",
"scan_file",
"=",
"v_api",
".",
"scan_file",
"(",
"patch_file",
",",
"apikey",
")",
"logger",
".",
"info",
"(",
"'VirusTotal Response: %s'",
",",
"scan_file",
"[",
"'verbose_msg'",
"]",
")",
"logger",
".",
"info",
"(",
"'Report will be rendered at: %s'",
",",
"scan_file",
"[",
"'permalink'",
"]",
")",
"binary_report",
"=",
"v_api",
".",
"binary_report",
"(",
"sha256hash",
",",
"apikey",
")",
"# Item is still queued",
"if",
"response_code",
"==",
"-",
"2",
":",
"logger",
".",
"info",
"(",
"'Report job still queued..'",
")",
"if",
"response_code",
"==",
"1",
":",
"logger",
".",
"info",
"(",
"'Report found, job complete.'",
")",
"break",
"positives",
"=",
"binary_report",
"[",
"'positives'",
"]",
"if",
"positives",
"==",
"0",
":",
"negative_report",
"(",
"binary_report",
",",
"sha256hash",
",",
"project",
",",
"patch_file",
")",
"else",
":",
"positive_report",
"(",
"binary_report",
",",
"sha256hash",
",",
"project",
",",
"patch_file",
")"
] | Sends Binary (sha256hash) to Virus Total API | [
"Sends",
"Binary",
"(",
"sha256hash",
")",
"to",
"Virus",
"Total",
"API"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L209-L240 |
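
Calling scan_binary directly, assuming VT_KEY is set in the environment and "suspect.bin" is a hypothetical path.

import hashlib
import os
from anteater.src.patch_scan import scan_binary  # assumed import path

with open("suspect.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
# polls the report endpoint until response_code == 1, submitting a fresh
# scan first when no report exists yet (response_code == 0)
scan_binary("suspect.bin", "myproject", digest, os.environ["VT_KEY"])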
anteater/anteater | anteater/src/patch_scan.py | negative_report | def negative_report(binary_report, sha256hash, project, patch_file):
"""
If no match is made and file is clean
"""
report_url = binary_report['permalink']
scan_date = binary_report['scan_date']
logger.info('File scan date for %s shows a clean status on: %s', patch_file, scan_date)
logger.info('Full report available here: %s', report_url)
logger.info('The following sha256 hash can be used in your %s.yaml file to suppress this scan:', project)
logger.info('%s', sha256hash)
with open(reports_dir + "binaries-" + project + ".log", "a") as gate_report:
gate_report.write('Non Whitelisted Binary: {}\n'.format(patch_file))
gate_report.write('File scan date for {} shows a clean status on {}\n'.format(patch_file, scan_date))
gate_report.write('The following sha256 hash can be used in your {}.yaml file to suppress this scan:\n'.format(project))
gate_report.write('{}\n'.format(sha256hash)) | python | def negative_report(binary_report, sha256hash, project, patch_file):
"""
If no match is made and file is clean
"""
report_url = binary_report['permalink']
scan_date = binary_report['scan_date']
logger.info('File scan date for %s shows a clean status on: %s', patch_file, scan_date)
logger.info('Full report available here: %s', report_url)
logger.info('The following sha256 hash can be used in your %s.yaml file to suppress this scan:', project)
logger.info('%s', sha256hash)
with open(reports_dir + "binaries-" + project + ".log", "a") as gate_report:
gate_report.write('Non Whitelisted Binary: {}\n'.format(patch_file))
gate_report.write('File scan date for {} shows a clean status on {}\n'.format(patch_file, scan_date))
gate_report.write('The following sha256 hash can be used in your {}.yaml file to suppress this scan:\n'.format(project))
gate_report.write('{}\n'.format(sha256hash)) | [
"def",
"negative_report",
"(",
"binary_report",
",",
"sha256hash",
",",
"project",
",",
"patch_file",
")",
":",
"report_url",
"=",
"binary_report",
"[",
"'permalink'",
"]",
"scan_date",
"=",
"binary_report",
"[",
"'scan_date'",
"]",
"logger",
".",
"info",
"(",
"'File scan date for %s shows a clean status on: %s'",
",",
"patch_file",
",",
"scan_date",
")",
"logger",
".",
"info",
"(",
"'Full report avaliable here: %s'",
",",
"report_url",
")",
"logger",
".",
"info",
"(",
"'The following sha256 hash can be used in your %s.yaml file to suppress this scan:'",
",",
"project",
")",
"logger",
".",
"info",
"(",
"'%s'",
",",
"sha256hash",
")",
"with",
"open",
"(",
"reports_dir",
"+",
"\"binaries-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'Non Whitelisted Binary: {}\\n'",
".",
"format",
"(",
"patch_file",
")",
")",
"gate_report",
".",
"write",
"(",
"'File scan date for {} shows a clean status on {}\\n'",
".",
"format",
"(",
"patch_file",
",",
"scan_date",
")",
")",
"gate_report",
".",
"write",
"(",
"'The following sha256 hash can be used in your {}.yaml file to suppress this scan:\\n'",
".",
"format",
"(",
"project",
")",
")",
"gate_report",
".",
"write",
"(",
"'{}\\n'",
".",
"format",
"(",
"sha256hash",
")",
")"
] | If no match is made and file is clean | [
"If",
"no",
"match",
"is",
"made",
"and",
"file",
"is",
"clean"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L243-L257 |
anteater/anteater | anteater/src/patch_scan.py | positive_report | def positive_report(binary_report, sha256hash, project, patch_file):
"""
If a Positive match is found
"""
global failure
failure = True
report_url = binary_report['permalink']
scan_date = binary_report['scan_date']
logger.error("Virus Found!")
logger.info('File scan date for %s shows an infected status on: %s', patch_file, scan_date)
logger.info('Full report available here: %s', report_url)
"""
If a Positive match is found
"""
global failure
failure = True
report_url = binary_report['permalink']
scan_date = binary_report['scan_date']
logger.error("Virus Found!")
logger.info('File scan date for %s shows a infected status on: %s', patch_file, scan_date)
logger.info('Full report avaliable here: %s', report_url) | [
"def",
"positive_report",
"(",
"binary_report",
",",
"sha256hash",
",",
"project",
",",
"patch_file",
")",
":",
"failure",
"=",
"True",
"report_url",
"=",
"binary_report",
"[",
"'permalink'",
"]",
"scan_date",
"=",
"binary_report",
"[",
"'scan_date'",
"]",
"logger",
".",
"error",
"(",
"\"Virus Found!\"",
")",
"logger",
".",
"info",
"(",
"'File scan date for %s shows a infected status on: %s'",
",",
"patch_file",
",",
"scan_date",
")",
"logger",
".",
"info",
"(",
"'Full report avaliable here: %s'",
",",
"report_url",
")"
] | If a Positive match is found | [
"If",
"a",
"Positive",
"match",
"is",
"found"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L260-L269 |
anteater/anteater | anteater/src/patch_scan.py | scan_ipaddr | def scan_ipaddr(ipaddr, apikey):
"""
If an IP Address is found, scan it
"""
logger.info('Query VirusTotal API for Public IP Found: %s', ipaddr)
v_api = virus_total.VirusTotal()
scan_ip = v_api.send_ip(ipaddr, apikey)
response_code = scan_ip['response_code']
verbose_msg = scan_ip['verbose_msg']
urls = scan_ip['detected_urls']
if urls:
global failure
failure = True
logger.error('%s has been known to resolve to malicious urls', ipaddr)
for url in urls:
logger.error('%s on date: %s', url['url'], url['scan_date'])
else:
logger.info('%s has no record of resolving to malicious urls', ipaddr) | python | def scan_ipaddr(ipaddr, apikey):
"""
If an IP Address is found, scan it
"""
logger.info('Query VirusTotal API for Public IP Found: %s', ipaddr)
v_api = virus_total.VirusTotal()
scan_ip = v_api.send_ip(ipaddr, apikey)
response_code = scan_ip['response_code']
verbose_msg = scan_ip['verbose_msg']
urls = scan_ip['detected_urls']
if urls:
global failure
failure = True
logger.error('%s has been known to resolve to malicious urls', ipaddr)
for url in urls:
logger.error('%s on date: %s', url['url'], url['scan_date'])
else:
logger.info('%s has no record of resolving to malicious urls', ipaddr) | [
"def",
"scan_ipaddr",
"(",
"ipaddr",
",",
"apikey",
")",
":",
"logger",
".",
"info",
"(",
"'Query VirusTotal API for Public IP Found: %s'",
",",
"ipaddr",
")",
"v_api",
"=",
"virus_total",
".",
"VirusTotal",
"(",
")",
"scan_ip",
"=",
"v_api",
".",
"send_ip",
"(",
"ipaddr",
",",
"apikey",
")",
"response_code",
"=",
"scan_ip",
"[",
"'response_code'",
"]",
"verbose_msg",
"=",
"scan_ip",
"[",
"'verbose_msg'",
"]",
"urls",
"=",
"scan_ip",
"[",
"'detected_urls'",
"]",
"if",
"urls",
":",
"failure",
"=",
"True",
"logger",
".",
"error",
"(",
"'%s has been known to resolve to malicious urls'",
",",
"ipaddr",
")",
"for",
"url",
"in",
"urls",
":",
"logger",
".",
"error",
"(",
"'%s on date: %s'",
",",
"url",
"[",
"'url'",
"]",
",",
"url",
"[",
"'scan_date'",
"]",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'%s has no record of resolving to malicious urls'",
",",
"ipaddr",
")"
] | If an IP Address is found, scan it | [
"If",
"an",
"IP",
"Address",
"is",
"found",
"scan",
"it"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L272-L289 |
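
The caller (scan_patch) pairs a deliberately loose regex with ipaddress validation before invoking this function; version strings such as "1.2.3" match the regex but fail validation, which is what the try/except around is_global filters out. A standalone sketch of that gate:

import ipaddress
import re

for line in ["server at 8.8.8.8", "bumped pkg to 1.2.3"]:
    found = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
    if found:
        candidate = found[0]
        try:
            ipaddress.ip_address(candidate)   # "1.2.3" raises ValueError
            print(candidate, "-> would be passed to scan_ipaddr")
        except ValueError:
            print(candidate, "-> not an IP address, skipped")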
anteater/anteater | anteater/src/patch_scan.py | scan_url | def scan_url(url, apikey):
"""
If URL is found, scan it
"""
logger.info('Found what I believe is a URL: %s', url)
v_api = virus_total.VirusTotal()
while True:
url_report = v_api.url_report(url, apikey)
response_code = url_report['response_code']
# report does not exist, need to scan
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 0:
logger.info('No report for %s', url)
break
if response_code == 1:
logger.info('Report found, job complete for %s.', url)
break
try:
positives = url_report['positives']
if positives > 0:
detected = False
for site, results in url_report['scans'].items():
if results['detected']:
detected = True
global failure
failure = True
logger.error("%s is recorded as a %s by %s", url, results['result'], site)
if detected:
logger.error("Full report available here: %s", url_report['permalink'])
else:
logger.info("%s is recorded as a clean", url)
except:
pass | python | def scan_url(url, apikey):
"""
If URL is found, scan it
"""
logger.info('Found what I believe is a URL: %s', url)
v_api = virus_total.VirusTotal()
while True:
url_report = v_api.url_report(url, apikey)
response_code = url_report['response_code']
# report does not exist, need to scan
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 0:
logger.info('No report for %s', url)
break
if response_code == 1:
logger.info('Report found, job complete for %s.', url)
break
try:
positives = url_report['positives']
if positives > 0:
detected = False
for site, results in url_report['scans'].items():
if results['detected']:
detected = True
global failure
failure = True
logger.error("%s is recorded as a %s by %s", url, results['result'], site)
if detected:
logger.error("Full report available here: %s", url_report['permalink'])
else:
logger.info("%s is recorded as a clean", url)
except:
pass | [
"def",
"scan_url",
"(",
"url",
",",
"apikey",
")",
":",
"logger",
".",
"info",
"(",
"'Found what I believe is a URL: %s'",
",",
"url",
")",
"v_api",
"=",
"virus_total",
".",
"VirusTotal",
"(",
")",
"while",
"True",
":",
"url_report",
"=",
"v_api",
".",
"url_report",
"(",
"url",
",",
"apikey",
")",
"response_code",
"=",
"url_report",
"[",
"'response_code'",
"]",
"# report does not exist, need to scan",
"if",
"response_code",
"==",
"-",
"2",
":",
"logger",
".",
"info",
"(",
"'Report job still queued..'",
")",
"if",
"response_code",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'No report for %s'",
",",
"url",
")",
"break",
"if",
"response_code",
"==",
"1",
":",
"logger",
".",
"info",
"(",
"'Report found, job complete for %s.'",
",",
"url",
")",
"break",
"try",
":",
"positives",
"=",
"url_report",
"[",
"'positives'",
"]",
"if",
"positives",
">",
"0",
":",
"for",
"site",
",",
"results",
"in",
"url_report",
"[",
"'scans'",
"]",
".",
"items",
"(",
")",
":",
"if",
"results",
"[",
"'detected'",
"]",
":",
"detected",
"=",
"True",
"failure",
"=",
"True",
"logger",
".",
"error",
"(",
"\"%s is recorded as a %s by %s\"",
",",
"url",
",",
"results",
"[",
"'result'",
"]",
",",
"site",
")",
"if",
"detected",
":",
"logger",
".",
"error",
"(",
"\"Full report available here: %s\"",
",",
"url_report",
"[",
"'permalink'",
"]",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"%s is recorded as a clean\"",
",",
"url",
")",
"except",
":",
"pass"
] | If URL is found, scan it | [
"If",
"URL",
"is",
"found",
"scan",
"it"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L292-L327 |
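
URL candidates are extracted from each line before this function runs (see scan_patch); the scheme-qualified pattern is tried first, so bare www. links only act as a fallback:

import re

line = "see https://example.com/page and www.example.org/docs for details"
m = re.search(r"(?P<url>https?://[^\s]+)", line) or \
    re.search(r"(?P<url>www[^\s]+)", line)
if m:
    print(m.group("url"))  # https://example.com/page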
anteater/anteater | anteater/src/patch_scan.py | process_failure | def process_failure(project):
"""
If any scan operations register a failure, sys.exit(1) is called
to allow build to register a failure
"""
if failure:
lists = get_lists.GetLists()
report_url = lists.report_url(project)
if report_url:
print(report_url)
sys.exit(1) | python | def process_failure(project):
"""
If any scan operations register a failure, sys.exit(1) is called
to allow build to register a failure
"""
if failure:
lists = get_lists.GetLists()
report_url = lists.report_url(project)
if report_url:
print(report_url)
sys.exit(1) | [
"def",
"process_failure",
"(",
"project",
")",
":",
"if",
"failure",
":",
"lists",
"=",
"get_lists",
".",
"GetLists",
"(",
")",
"report_url",
"=",
"lists",
".",
"report_url",
"(",
"project",
")",
"if",
"report_url",
":",
"print",
"(",
"report_url",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | If any scan operations register a failure, sys.exit(1) is called
to allow build to register a failure | [
"If",
"any",
"scan",
"operations",
"register",
"a",
"failure",
"sys",
".",
"exit",
"(",
"1",
")",
"is",
"called",
"to",
"allow",
"build",
"to",
"register",
"a",
"failure"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/patch_scan.py#L330-L340 |
RI-imaging/nrefocus | nrefocus/metrics.py | average_gradient | def average_gradient(data, *kwargs):
""" Compute average gradient norm of an image
"""
return np.average(np.array(np.gradient(data))**2) | python | def average_gradient(data, *kwargs):
""" Compute average gradient norm of an image
"""
return np.average(np.array(np.gradient(data))**2) | [
"def",
"average_gradient",
"(",
"data",
",",
"*",
"kwargs",
")",
":",
"return",
"np",
".",
"average",
"(",
"np",
".",
"array",
"(",
"np",
".",
"gradient",
"(",
"data",
")",
")",
"**",
"2",
")"
] | Compute average gradient norm of an image | [
"Compute",
"average",
"gradient",
"norm",
"of",
"an",
"image"
] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L4-L7 |
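
A quick numeric check of what average_gradient returns, namely the mean of the squared gradient components; the import path is assumed from the file layout.

import numpy as np
from nrefocus.metrics import average_gradient  # assumed import path

data = np.arange(9.0).reshape(3, 3)
# np.gradient gives 3.0 everywhere along rows and 1.0 along columns,
# so the mean of the squared components is (9 + 1) / 2
print(average_gradient(data))  # 5.0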
RI-imaging/nrefocus | nrefocus/metrics.py | contrast_rms | def contrast_rms(data, *kwargs):
""" Compute RMS contrast norm of an image
"""
av = np.average(data, *kwargs)
mal = 1 / (data.shape[0] * data.shape[1])
return np.sqrt(mal * np.sum(np.square(data - av))) | python | def contrast_rms(data, *kwargs):
""" Compute RMS contrast norm of an image
"""
av = np.average(data, *kwargs)
mal = 1 / (data.shape[0] * data.shape[1])
return np.sqrt(mal * np.sum(np.square(data - av))) | [
"def",
"contrast_rms",
"(",
"data",
",",
"*",
"kwargs",
")",
":",
"av",
"=",
"np",
".",
"average",
"(",
"data",
",",
"*",
"kwargs",
")",
"mal",
"=",
"1",
"/",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"*",
"data",
".",
"shape",
"[",
"1",
"]",
")",
"return",
"np",
".",
"sqrt",
"(",
"mal",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"square",
"(",
"data",
"-",
"av",
")",
")",
")"
] | Compute RMS contrast norm of an image | [
"Compute",
"RMS",
"contrast",
"norm",
"of",
"an",
"image"
] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L10-L15 |
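
For 2d input, contrast_rms reduces to the population standard deviation, which makes a handy sanity check:

import numpy as np
from nrefocus.metrics import contrast_rms  # assumed import path

data = np.array([[0.0, 2.0],
                 [0.0, 2.0]])
# the mean is 1.0 and every pixel deviates from it by exactly 1.0
print(contrast_rms(data))  # 1.0
print(np.std(data))        # 1.0, the same quantity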
RI-imaging/nrefocus | nrefocus/metrics.py | spectral | def spectral(data, lambd, *kwargs):
""" Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to optical
limit of detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels
"""
# Set up fast fourier transform
# if not data.dtype == np.dtype(np.complex):
# data = np.array(data, dtype=np.complex)
# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,
# direction="forward", flags=_fftwflags)
# fftdata = np.zeros(data.shape, dtype=np.complex)
# fftplan.guru_execute_dft(data, fftdata)
# fftw.destroy_plan(fftplan)
fftdata = np.fft.fftn(data)
# Filter Fourier transform
fftdata[0, 0] = 0
kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1)
ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1)
kmax = (2 * np.pi) / (2 * lambd)
fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0
spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
return spec | python | def spectral(data, lambd, *kwargs):
""" Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to optical
limit of detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels
"""
# Set up fast fourier transform
# if not data.dtype == np.dtype(np.complex):
# data = np.array(data, dtype=np.complex)
# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,
# direction="forward", flags=_fftwflags)
# fftdata = np.zeros(data.shape, dtype=np.complex)
# fftplan.guru_execute_dft(data, fftdata)
# fftw.destroy_plan(fftplan)
fftdata = np.fft.fftn(data)
# Filter Fourier transform
fftdata[0, 0] = 0
kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1)
ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1)
kmax = (2 * np.pi) / (2 * lambd)
fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0
spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
return spec | [
"def",
"spectral",
"(",
"data",
",",
"lambd",
",",
"*",
"kwargs",
")",
":",
"# Set up fast fourier transform",
"# if not data.dtype == np.dtype(np.complex):",
"# data = np.array(data, dtype=np.complex)",
"# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,",
"# direction=\"forward\", flags=_fftwflags)",
"# fftdata = np.zeros(data.shape, dtype=np.complex)",
"# fftplan.guru_execute_dft(data, fftdata)",
"# fftw.destroy_plan(fftplan)",
"fftdata",
"=",
"np",
".",
"fft",
".",
"fftn",
"(",
"data",
")",
"# Filter Fourier transform",
"fftdata",
"[",
"0",
",",
"0",
"]",
"=",
"0",
"kx",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"ky",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"kmax",
"=",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"/",
"(",
"2",
"*",
"lambd",
")",
"fftdata",
"[",
"np",
".",
"where",
"(",
"kx",
"**",
"2",
"+",
"ky",
"**",
"2",
">",
"kmax",
"**",
"2",
")",
"]",
"=",
"0",
"spec",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"log",
"(",
"1",
"+",
"np",
".",
"abs",
"(",
"fftdata",
")",
")",
")",
"/",
"np",
".",
"sqrt",
"(",
"np",
".",
"prod",
"(",
"data",
".",
"shape",
")",
")",
"return",
"spec"
] | Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to optical
limit of detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels | [
"Compute",
"spectral",
"contrast",
"of",
"image"
] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L18-L52 |
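
A usage sketch; with kmax = 2*pi/(2*lambd), only spatial frequencies below the optical resolution limit contribute, so a larger lambd (wavelength in pixels) filters more aggressively. The random image is purely illustrative.

import numpy as np
from nrefocus.metrics import spectral  # assumed import path

rng = np.random.RandomState(42)
img = rng.rand(64, 64)
print(spectral(img, lambd=2.0))  # a single float
print(spectral(img, lambd=8.0))  # stronger low-pass, smaller value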
MacHu-GWU/dataIO-project | dataIO/textfile.py | is_gzip_file | def is_gzip_file(abspath):
"""Parse file extension.
- *.json: uncompressed, utf-8 encoded json file
- *.gz: compressed, utf-8 encoded json file
"""
abspath = abspath.lower()
_, ext = os.path.splitext(abspath)
if ext in [".gz", ".zip"]:
is_gzip = True
else:
is_gzip = False
return is_gzip | python | def is_gzip_file(abspath):
"""Parse file extension.
- *.json: uncompressed, utf-8 encoded json file
- *.gz: compressed, utf-8 encoded json file
"""
abspath = abspath.lower()
_, ext = os.path.splitext(abspath)
if ext in [".gz", ".zip"]:
is_gzip = True
else:
is_gzip = False
return is_gzip | [
"def",
"is_gzip_file",
"(",
"abspath",
")",
":",
"abspath",
"=",
"abspath",
".",
"lower",
"(",
")",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"abspath",
")",
"if",
"ext",
"in",
"[",
"\".gz\"",
",",
"\".zip\"",
"]",
":",
"is_gzip",
"=",
"True",
"else",
":",
"is_gzip",
"=",
"False",
"return",
"is_gzip"
] | Parse file extension.
- *.json: uncompressed, utf-8 encoded json file
- *.gz: compressed, utf-8 encoded json file | [
"Parse",
"file",
"extension",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L29-L41 |
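
The decision is made purely on the lower-cased file extension:

from dataIO.textfile import is_gzip_file  # assumed import path

print(is_gzip_file("data.json"))     # False
print(is_gzip_file("DATA.JSON.GZ"))  # True, ".gz"
print(is_gzip_file("archive.zip"))   # True, ".zip"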
MacHu-GWU/dataIO-project | dataIO/textfile.py | write | def write(s, path, encoding="utf-8"):
"""Write string to text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "wb") as f:
if is_gzip:
f.write(zlib.compress(s.encode(encoding)))
else:
f.write(s.encode(encoding)) | python | def write(s, path, encoding="utf-8"):
"""Write string to text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "wb") as f:
if is_gzip:
f.write(zlib.compress(s.encode(encoding)))
else:
f.write(s.encode(encoding)) | [
"def",
"write",
"(",
"s",
",",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"is_gzip",
"=",
"is_gzip_file",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"if",
"is_gzip",
":",
"f",
".",
"write",
"(",
"zlib",
".",
"compress",
"(",
"s",
".",
"encode",
"(",
"encoding",
")",
")",
")",
"else",
":",
"f",
".",
"write",
"(",
"s",
".",
"encode",
"(",
"encoding",
")",
")"
] | Write string to text file. | [
"Write",
"string",
"to",
"text",
"file",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L44-L53 |
MacHu-GWU/dataIO-project | dataIO/textfile.py | read | def read(path, encoding="utf-8"):
"""Read string from text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "rb") as f:
if is_gzip:
return zlib.decompress(f.read()).decode(encoding)
else:
return f.read().decode(encoding) | python | def read(path, encoding="utf-8"):
"""Read string from text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "rb") as f:
if is_gzip:
return zlib.decompress(f.read()).decode(encoding)
else:
return f.read().decode(encoding) | [
"def",
"read",
"(",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"is_gzip",
"=",
"is_gzip_file",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"if",
"is_gzip",
":",
"return",
"zlib",
".",
"decompress",
"(",
"f",
".",
"read",
"(",
")",
")",
".",
"decode",
"(",
"encoding",
")",
"else",
":",
"return",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"encoding",
")"
] | Read string from text file. | [
"Read",
"string",
"from",
"text",
"file",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L63-L72 |
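
write and read round-trip cleanly. Note a design quirk visible in the code: for a .gz path the payload is a raw zlib stream (zlib.compress), not the gzip container format, so the paired read() can decompress it but the gzip tool cannot.

from dataIO.textfile import read, write  # assumed import path

s = "hello dataIO"
write(s, "demo.txt")      # plain utf-8 text
write(s, "demo.txt.gz")   # zlib-compressed bytes
assert read("demo.txt") == s
assert read("demo.txt.gz") == s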
MacHu-GWU/dataIO-project | dataIO/textfile.py | smartread | def smartread(path):
"""Read text from file, automatically detect encoding. ``chardet`` required.
"""
with open(path, "rb") as f:
content = f.read()
result = chardet.detect(content)
return content.decode(result["encoding"]) | python | def smartread(path):
"""Read text from file, automatically detect encoding. ``chardet`` required.
"""
with open(path, "rb") as f:
content = f.read()
result = chardet.detect(content)
return content.decode(result["encoding"]) | [
"def",
"smartread",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"result",
"=",
"chardet",
".",
"detect",
"(",
"content",
")",
"return",
"content",
".",
"decode",
"(",
"result",
"[",
"\"encoding\"",
"]",
")"
] | Read text from file, automatically detect encoding. ``chardet`` required. | [
"Read",
"text",
"from",
"file",
"automatically",
"detect",
"encoding",
".",
"chardet",
"required",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L82-L88 |
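
smartread lets chardet sniff the byte stream, so a file in an unknown legacy encoding can be decoded without guessing ("latin1.txt" is hypothetical).

from dataIO.textfile import smartread  # assumed import path

text = smartread("latin1.txt")  # decoded with whatever encoding chardet detects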
MacHu-GWU/dataIO-project | dataIO/textfile.py | to_utf8 | def to_utf8(path, output_path=None):
"""Convert any text file to utf8 encoding.
"""
if output_path is None:
basename, ext = os.path.splitext(path)
output_path = basename + "-UTF8Encode" + ext
text = smartread(path)
write(text, output_path) | python | def to_utf8(path, output_path=None):
"""Convert any text file to utf8 encoding.
"""
if output_path is None:
basename, ext = os.path.splitext(path)
output_path = basename + "-UTF8Encode" + ext
text = smartread(path)
write(text, output_path) | [
"def",
"to_utf8",
"(",
"path",
",",
"output_path",
"=",
"None",
")",
":",
"if",
"output_path",
"is",
"None",
":",
"basename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"output_path",
"=",
"basename",
"+",
"\"-UTF8Encode\"",
"+",
"ext",
"text",
"=",
"smartread",
"(",
"path",
")",
"write",
"(",
"text",
",",
"output_path",
")"
] | Convert any text file to utf8 encoding. | [
"Convert",
"any",
"text",
"file",
"to",
"utf8",
"encoding",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L91-L99 |
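
By default the converted copy lands next to the original with a "-UTF8Encode" suffix; pass output_path to pick the destination ("legacy.txt" is hypothetical).

from dataIO.textfile import to_utf8  # assumed import path

to_utf8("legacy.txt")                                 # -> legacy-UTF8Encode.txt
to_utf8("legacy.txt", output_path="legacy-utf8.txt")  # explicit destination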
MacHu-GWU/dataIO-project | dataIO/textfile.py | readlines | def readlines(path, encoding="utf-8", skiplines=None, nlines=None, strip='right'):
"""skip n lines and fetch the next n lines.
:param skiplines: default None, skip first n lines
:param nlines: default None, yield next n lines
:param strip: default 'right', available options None, 'left', 'right', 'both'
**中文文档**
Skip the first #skiplines lines, then read #nlines lines. Strings can be pre-processed with strip.
"""
strip_method = str(strip).lower()
if strip_method in _strip_method_mapping:
strip_func = _strip_method_mapping[strip_method]
else:
raise ValueError("'strip' keyword has to be one of "
"None, 'left', 'right', 'both'.")
with open(path, "rb") as file:
if skiplines:
for _ in range(skiplines):
next(file)
if nlines:
for _ in range(nlines):
yield strip_func(next(file).decode(encoding))
else:
for line in file:
yield strip_func(line.decode(encoding)) | python | def readlines(path, encoding="utf-8", skiplines=None, nlines=None, strip='right'):
"""skip n lines and fetch the next n lines.
:param skiplines: default None, skip first n lines
:param nlines: default None, yield next n lines
:param strip: default 'right', available options None, 'left', 'right', 'both'
**中文文档**
Skip the first #skiplines lines, then read #nlines lines. Strings can be pre-processed with strip.
"""
strip_method = str(strip).lower()
if strip_method in _strip_method_mapping:
strip_func = _strip_method_mapping[strip_method]
else:
raise ValueError("'strip' keyword has to be one of "
"None, 'left', 'right', 'both'.")
with open(path, "rb") as file:
if skiplines:
for _ in range(skiplines):
next(file)
if nlines:
for _ in range(nlines):
yield strip_func(next(file).decode(encoding))
else:
for line in file:
yield strip_func(line.decode(encoding)) | [
"def",
"readlines",
"(",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"skiplines",
"=",
"None",
",",
"nlines",
"=",
"None",
",",
"strip",
"=",
"'right'",
")",
":",
"strip_method",
"=",
"str",
"(",
"strip",
")",
".",
"lower",
"(",
")",
"if",
"strip_method",
"in",
"_strip_method_mapping",
":",
"strip_func",
"=",
"_strip_method_mapping",
"[",
"strip_method",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"'strip' keyword has to be one of \"",
"\"None, 'left', 'right', 'both'.\"",
")",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"file",
":",
"if",
"skiplines",
":",
"for",
"_",
"in",
"range",
"(",
"skiplines",
")",
":",
"next",
"(",
"file",
")",
"if",
"nlines",
":",
"for",
"_",
"in",
"range",
"(",
"nlines",
")",
":",
"yield",
"strip_func",
"(",
"next",
"(",
"file",
")",
".",
"decode",
"(",
"encoding",
")",
")",
"else",
":",
"for",
"line",
"in",
"file",
":",
"yield",
"strip_func",
"(",
"line",
".",
"decode",
"(",
"encoding",
")",
")"
] | skip n lines and fetch the next n lines.
:param skiplines: default None, skip first n lines
:param nlines: default None, yield next n lines
:param strip: default 'right', available options None, 'left', 'right', 'both'
**中文文档**
Skip the first #skiplines lines, then read #nlines lines. Strings can be pre-processed with strip. | [
"skip",
"n",
"lines",
"and",
"fetch",
"the",
"next",
"n",
"lines",
".",
":",
"param",
"skiplines",
":",
"default",
"None",
"skip",
"first",
"n",
"lines",
":",
"param",
"nlines",
":",
"default",
"None",
"yield",
"next",
"n",
"lines",
":",
"param",
"strip",
":",
"default",
"None",
"available",
"option",
"left",
"right",
"both",
"**",
"中文文档",
"**",
"跳过前#skiplines行",
"然后读取#nlines行。可对字符串进行strip预处理。"
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L127-L155 |
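
A usage sketch: skip the header lines of a (hypothetical) file and pull the next two, right-stripped by default.

from dataIO.textfile import readlines  # assumed import path

for line in readlines("data.txt", skiplines=2, nlines=2):
    print(line)  # lines 3 and 4 of the file, trailing whitespace removed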
MacHu-GWU/dataIO-project | dataIO/textfile.py | readchunks | def readchunks(path, encoding="utf-8", skiplines=None, chunksize=None, strip='right'):
"""skip n lines and fetch the next n lines as a chunk, and repeat fetching.
:param skiplines: default None, skip first n lines
:param chunksize: default None (size-1 chunk), lines chunk size
:param strip: default 'right', available options None, 'left', 'right', 'both'
**中文文档**
Skip the first #skiplines lines, then yield #chunksize lines at a time. Strings can be pre-processed with strip.
"""
strip_method = str(strip).lower()
if strip_method in _strip_method_mapping:
strip_func = _strip_method_mapping[strip_method]
else:
raise ValueError("'strip' keyword has to be one of "
"None, 'left', 'right', 'both'.")
with open(path, "rb") as file:
if skiplines:
for _ in range(skiplines):
next(file)
if chunksize is None:
chunksize = 1
elif not isinstance(chunksize, int_type):
raise ValueError("'chunksize' has to be None or an integer.")
chunk = list()
while 1:
for _ in range(chunksize):
chunk.append(strip_func(next(file).decode(encoding)))
if len(chunk) < chunksize:
break
yield chunk
chunk = list()
yield chunk | python | def readchunks(path, encoding="utf-8", skiplines=None, chunksize=None, strip='right'):
"""skip n lines and fetch the next n lines as a chunk, and repeat fetching.
:param skiplines: default None, skip first n lines
:param chunksize: default None (size-1 chunk), lines chunk size
:param strip: default 'right', available options None, 'left', 'right', 'both'
**中文文档**
Skip the first #skiplines lines, then yield #chunksize lines at a time. Strings can be pre-processed with strip.
"""
strip_method = str(strip).lower()
if strip_method in _strip_method_mapping:
strip_func = _strip_method_mapping[strip_method]
else:
raise ValueError("'strip' keyword has to be one of "
"None, 'left', 'right', 'both'.")
with open(path, "rb") as file:
if skiplines:
for _ in range(skiplines):
next(file)
if chunksize is None:
chunksize = 1
elif not isinstance(chunksize, int_type):
raise ValueError("'chunksize' has to be None or an integer.")
chunk = list()
while 1:
for _ in range(chunksize):
chunk.append(strip_func(next(file).decode(encoding)))
if len(chunk) < chunksize:
break
yield chunk
chunk = list()
yield chunk | [
"def",
"readchunks",
"(",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"skiplines",
"=",
"None",
",",
"chunksize",
"=",
"None",
",",
"strip",
"=",
"'right'",
")",
":",
"strip_method",
"=",
"str",
"(",
"strip",
")",
".",
"lower",
"(",
")",
"if",
"strip_method",
"in",
"_strip_method_mapping",
":",
"strip_func",
"=",
"_strip_method_mapping",
"[",
"strip_method",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"'strip' keyword has to be one of \"",
"\"None, 'left', 'right', 'both'.\"",
")",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"file",
":",
"if",
"skiplines",
":",
"for",
"_",
"in",
"range",
"(",
"skiplines",
")",
":",
"next",
"(",
"file",
")",
"if",
"chunksize",
"is",
"None",
":",
"chunksize",
"=",
"1",
"elif",
"not",
"isinstance",
"(",
"chunksize",
",",
"int_type",
")",
":",
"raise",
"ValueError",
"(",
"\"'chunksize' has to be None or an integer.\"",
")",
"chunk",
"=",
"list",
"(",
")",
"while",
"1",
":",
"for",
"_",
"in",
"range",
"(",
"chunksize",
")",
":",
"chunk",
".",
"append",
"(",
"strip_func",
"(",
"next",
"(",
"file",
")",
".",
"decode",
"(",
"encoding",
")",
")",
")",
"if",
"len",
"(",
"chunk",
")",
"<",
"chunksize",
":",
"break",
"yield",
"chunk",
"chunk",
"=",
"list",
"(",
")",
"yield",
"chunk"
] | skip n lines and fetch the next n lines as a chunk, and repeat fetching.
:param skiplines: default None, skip first n lines
:param chunksize: default None (size-1 chunk), lines chunk size
:param strip: default None, available option 'left', 'right', 'both'
**中文文档**
跳过前#skiplines行, 每次读取#chunksize行yield。可对字符串进行strip预处理。 | [
"skip",
"n",
"lines",
"and",
"fetch",
"the",
"next",
"n",
"lines",
"as",
"a",
"chunk",
"and",
"repeat",
"fetching",
".",
":",
"param",
"skiplines",
":",
"default",
"None",
"skip",
"first",
"n",
"lines",
":",
"param",
"chunksize",
":",
"default",
"None",
"(",
"size",
"-",
"1",
"chunk",
")",
"lines",
"chunk",
"size",
":",
"param",
"strip",
":",
"default",
"None",
"avaliable",
"option",
"left",
"right",
"both",
"**",
"中文文档",
"**",
"跳过前#skiplines行",
"每次读取#chunksize行yield。可对字符串进行strip预处理。"
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L158-L194 |
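A hedged usage sketch for readchunks as defined above (the file name is illustrative; the import path is assumed). One caveat worth noting: the bare next(file) inside the generator raises StopIteration at end of file, so the trailing yield of a partial final chunk is unreachable as written: pre-PEP 479 the generator simply ends there, and on Python 3.7+ the StopIteration surfaces as a RuntimeError, so a try/except StopIteration guard around the inner loop would be needed for the last short chunk to be yielded as intended.

from dataIO import textfile  # import path assumed from the repo layout

# Skip a two-line header, then consume the file 100 lines at a time;
# each chunk is a list of decoded, right-stripped strings.
for chunk in textfile.readchunks("records.txt", skiplines=2, chunksize=100):
    print(len(chunk), "lines in this chunk")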
rapidpro/expressions | python/temba_expressions/evaluator.py | EvaluationContext._coerce_to_supported_type | def _coerce_to_supported_type(cls, value):
"""
Since we let users populate the context with whatever they want, this ensures the resolved value is something
which the expression engine understands.
:param value: the resolved value
:return: the value converted to a supported data type
"""
if value is None:
return "" # empty string rather than none
elif isinstance(value, dict):
if '*' in value:
return value['*']
elif '__default__' in value:
return value['__default__']
else:
return json.dumps(value, separators=(',', ':')) # return serialized JSON if no default
elif isinstance(value, bool):
return value
elif isinstance(value, float) or isinstance(value, int):
return Decimal(value)
else:
return value | python | def _coerce_to_supported_type(cls, value):
"""
Since we let users populate the context with whatever they want, this ensures the resolved value is something
which the expression engine understands.
:param value: the resolved value
:return: the value converted to a supported data type
"""
if value is None:
return "" # empty string rather than none
elif isinstance(value, dict):
if '*' in value:
return value['*']
elif '__default__' in value:
return value['__default__']
else:
return json.dumps(value, separators=(',', ':')) # return serialized JSON if no default
elif isinstance(value, bool):
return value
elif isinstance(value, float) or isinstance(value, int):
return Decimal(value)
else:
return value | [
"def",
"_coerce_to_supported_type",
"(",
"cls",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"\"\"",
"# empty string rather than none",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"'*'",
"in",
"value",
":",
"return",
"value",
"[",
"'*'",
"]",
"elif",
"'__default__'",
"in",
"value",
":",
"return",
"value",
"[",
"'__default__'",
"]",
"else",
":",
"return",
"json",
".",
"dumps",
"(",
"value",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
")",
"# return serialized JSON if no default",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"float",
")",
"or",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"Decimal",
"(",
"value",
")",
"else",
":",
"return",
"value"
] | Since we let users populate the context with whatever they want, this ensures the resolved value is something
which the expression engine understands.
:param value: the resolved value
:return: the value converted to a supported data type | [
"Since",
"we",
"let",
"users",
"populate",
"the",
"context",
"with",
"whatever",
"they",
"want",
"this",
"ensures",
"the",
"resolved",
"value",
"is",
"something",
"which",
"the",
"expression",
"engine",
"understands",
".",
":",
"param",
"value",
":",
"the",
"resolved",
"value",
":",
"return",
":",
"the",
"value",
"converted",
"to",
"a",
"supported",
"data",
"type"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L87-L108 |
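A standalone sketch of the coercion rules above, mirroring the branches outside the class for illustration (the sample values are hypothetical). Note that the bool branch must precede the numeric one, since bool is a subclass of int in Python:

import json
from decimal import Decimal

def coerce(value):
    # Mirrors EvaluationContext._coerce_to_supported_type above.
    if value is None:
        return ""                       # empty string rather than None
    elif isinstance(value, dict):
        if '*' in value:
            return value['*']
        elif '__default__' in value:
            return value['__default__']
        return json.dumps(value, separators=(',', ':'))
    elif isinstance(value, bool):       # before the numeric check
        return value
    elif isinstance(value, (float, int)):
        return Decimal(value)
    return value

assert coerce(None) == ""
assert coerce({'name': 'Bob', '__default__': 'Bob'}) == 'Bob'
assert coerce({'a': 1}) == '{"a":1}'    # serialized JSON when no default
assert coerce(True) is True             # stays a bool, not Decimal(1)
assert coerce(3) == Decimal(3)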
rapidpro/expressions | python/temba_expressions/evaluator.py | Evaluator.evaluate_template | def evaluate_template(self, template, context, url_encode=False, strategy=EvaluationStrategy.COMPLETE):
"""
Evaluates a template string, e.g. "Hello @contact.name you have @(contact.reports * 2) reports"
:param template: the template string
:param context: the evaluation context
:param url_encode: whether or not values should be URL encoded
:return: a tuple of the evaluated template and a list of evaluation errors
"""
input_chars = list(template)
output_chars = []
errors = []
state = State.BODY
current_expression_chars = []
current_expression_terminated = False
parentheses_level = 0
# determines whether the given character is a word character, i.e. \w in a regex
is_word_char = lambda c: c and (c.isalnum() or c == '_')
for pos, ch in enumerate(input_chars):
# in order to determine if the b in a.b terminates an identifier, we have to peek two characters ahead as it
# could be a.b. (b terminates) or a.b.c (b doesn't terminate)
next_ch = input_chars[pos + 1] if (pos < (len(input_chars) - 1)) else None
next_next_ch = input_chars[pos + 2] if (pos < (len(input_chars) - 2)) else None
if state == State.BODY:
if ch == self._expression_prefix and (is_word_char(next_ch) or next_ch == '('):
state = State.PREFIX
current_expression_chars = [ch]
elif ch == self._expression_prefix and next_ch == self._expression_prefix:
state = State.ESCAPED_PREFIX
else:
output_chars.append(ch)
elif state == State.PREFIX:
if is_word_char(ch):
# we're parsing an expression like @XXX
state = State.IDENTIFIER
elif ch == '(':
# we're parsing an expression like @(1 + 2)
state = State.BALANCED
parentheses_level += 1
current_expression_chars.append(ch)
elif state == State.IDENTIFIER:
current_expression_chars.append(ch)
elif state == State.BALANCED:
if ch == '(':
parentheses_level += 1
elif ch == ')':
parentheses_level -= 1
elif ch == '"':
state = State.STRING_LITERAL
current_expression_chars.append(ch)
# expression terminates if parentheses balance
if parentheses_level == 0:
current_expression_terminated = True
elif state == State.STRING_LITERAL:
if ch == '"':
state = State.BALANCED
current_expression_chars.append(ch)
elif state == State.ESCAPED_PREFIX:
state = State.BODY
output_chars.append(ch)
# identifier can terminate expression in 3 ways:
# 1. next char is null (i.e. end of the input)
# 2. next char is not a word character or period
# 3. next char is a period, but it's not followed by a word character
if state == State.IDENTIFIER:
if not next_ch or (not is_word_char(next_ch) and next_ch != '.') or (next_ch == '.' and not is_word_char(next_next_ch)):
current_expression_terminated = True
if current_expression_terminated:
expression = ''.join(current_expression_chars)
output_chars.append(self._resolve_expression_block(expression, context, url_encode, strategy, errors))
current_expression_chars = []
current_expression_terminated = False
state = State.BODY
# if last expression didn't terminate - add to output as is
if not current_expression_terminated and current_expression_chars:
output_chars.append(''.join(current_expression_chars))
output = ''.join(output_chars) # joining is fastest way to build strings in Python
return output, errors | python | def evaluate_template(self, template, context, url_encode=False, strategy=EvaluationStrategy.COMPLETE):
"""
Evaluates a template string, e.g. "Hello @contact.name you have @(contact.reports * 2) reports"
:param template: the template string
:param context: the evaluation context
:param url_encode: whether or not values should be URL encoded
:return: a tuple of the evaluated template and a list of evaluation errors
"""
input_chars = list(template)
output_chars = []
errors = []
state = State.BODY
current_expression_chars = []
current_expression_terminated = False
parentheses_level = 0
# determines whether the given character is a word character, i.e. \w in a regex
is_word_char = lambda c: c and (c.isalnum() or c == '_')
for pos, ch in enumerate(input_chars):
# in order to determine if the b in a.b terminates an identifier, we have to peek two characters ahead as it
# could be a.b. (b terminates) or a.b.c (b doesn't terminate)
next_ch = input_chars[pos + 1] if (pos < (len(input_chars) - 1)) else None
next_next_ch = input_chars[pos + 2] if (pos < (len(input_chars) - 2)) else None
if state == State.BODY:
if ch == self._expression_prefix and (is_word_char(next_ch) or next_ch == '('):
state = State.PREFIX
current_expression_chars = [ch]
elif ch == self._expression_prefix and next_ch == self._expression_prefix:
state = State.ESCAPED_PREFIX
else:
output_chars.append(ch)
elif state == State.PREFIX:
if is_word_char(ch):
# we're parsing an expression like @XXX
state = State.IDENTIFIER
elif ch == '(':
# we're parsing an expression like @(1 + 2)
state = State.BALANCED
parentheses_level += 1
current_expression_chars.append(ch)
elif state == State.IDENTIFIER:
current_expression_chars.append(ch)
elif state == State.BALANCED:
if ch == '(':
parentheses_level += 1
elif ch == ')':
parentheses_level -= 1
elif ch == '"':
state = State.STRING_LITERAL
current_expression_chars.append(ch)
# expression terminates if parentheses balance
if parentheses_level == 0:
current_expression_terminated = True
elif state == State.STRING_LITERAL:
if ch == '"':
state = State.BALANCED
current_expression_chars.append(ch)
elif state == State.ESCAPED_PREFIX:
state = State.BODY
output_chars.append(ch)
# identifier can terminate expression in 3 ways:
# 1. next char is null (i.e. end of the input)
# 2. next char is not a word character or period
# 3. next char is a period, but it's not followed by a word character
if state == State.IDENTIFIER:
if not next_ch or (not is_word_char(next_ch) and next_ch != '.') or (next_ch == '.' and not is_word_char(next_next_ch)):
current_expression_terminated = True
if current_expression_terminated:
expression = ''.join(current_expression_chars)
output_chars.append(self._resolve_expression_block(expression, context, url_encode, strategy, errors))
current_expression_chars = []
current_expression_terminated = False
state = State.BODY
# if last expression didn't terminate - add to output as is
if not current_expression_terminated and current_expression_chars:
output_chars.append(''.join(current_expression_chars))
output = ''.join(output_chars) # joining is fastest way to build strings in Python
return output, errors | [
"def",
"evaluate_template",
"(",
"self",
",",
"template",
",",
"context",
",",
"url_encode",
"=",
"False",
",",
"strategy",
"=",
"EvaluationStrategy",
".",
"COMPLETE",
")",
":",
"input_chars",
"=",
"list",
"(",
"template",
")",
"output_chars",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"state",
"=",
"State",
".",
"BODY",
"current_expression_chars",
"=",
"[",
"]",
"current_expression_terminated",
"=",
"False",
"parentheses_level",
"=",
"0",
"# determines whether the given character is a word character, i.e. \\w in a regex",
"is_word_char",
"=",
"lambda",
"c",
":",
"c",
"and",
"(",
"c",
".",
"isalnum",
"(",
")",
"or",
"c",
"==",
"'_'",
")",
"for",
"pos",
",",
"ch",
"in",
"enumerate",
"(",
"input_chars",
")",
":",
"# in order to determine if the b in a.b terminates an identifier, we have to peek two characters ahead as it",
"# could be a.b. (b terminates) or a.b.c (b doesn't terminate)",
"next_ch",
"=",
"input_chars",
"[",
"pos",
"+",
"1",
"]",
"if",
"(",
"pos",
"<",
"(",
"len",
"(",
"input_chars",
")",
"-",
"1",
")",
")",
"else",
"None",
"next_next_ch",
"=",
"input_chars",
"[",
"pos",
"+",
"2",
"]",
"if",
"(",
"pos",
"<",
"(",
"len",
"(",
"input_chars",
")",
"-",
"2",
")",
")",
"else",
"None",
"if",
"state",
"==",
"State",
".",
"BODY",
":",
"if",
"ch",
"==",
"self",
".",
"_expression_prefix",
"and",
"(",
"is_word_char",
"(",
"next_ch",
")",
"or",
"next_ch",
"==",
"'('",
")",
":",
"state",
"=",
"State",
".",
"PREFIX",
"current_expression_chars",
"=",
"[",
"ch",
"]",
"elif",
"ch",
"==",
"self",
".",
"_expression_prefix",
"and",
"next_ch",
"==",
"self",
".",
"_expression_prefix",
":",
"state",
"=",
"State",
".",
"ESCAPED_PREFIX",
"else",
":",
"output_chars",
".",
"append",
"(",
"ch",
")",
"elif",
"state",
"==",
"State",
".",
"PREFIX",
":",
"if",
"is_word_char",
"(",
"ch",
")",
":",
"# we're parsing an expression like @XXX",
"state",
"=",
"State",
".",
"IDENTIFIER",
"elif",
"ch",
"==",
"'('",
":",
"# we're parsing an expression like @(1 + 2)",
"state",
"=",
"State",
".",
"BALANCED",
"parentheses_level",
"+=",
"1",
"current_expression_chars",
".",
"append",
"(",
"ch",
")",
"elif",
"state",
"==",
"State",
".",
"IDENTIFIER",
":",
"current_expression_chars",
".",
"append",
"(",
"ch",
")",
"elif",
"state",
"==",
"State",
".",
"BALANCED",
":",
"if",
"ch",
"==",
"'('",
":",
"parentheses_level",
"+=",
"1",
"elif",
"ch",
"==",
"')'",
":",
"parentheses_level",
"-=",
"1",
"elif",
"ch",
"==",
"'\"'",
":",
"state",
"=",
"State",
".",
"STRING_LITERAL",
"current_expression_chars",
".",
"append",
"(",
"ch",
")",
"# expression terminates if parentheses balance",
"if",
"parentheses_level",
"==",
"0",
":",
"current_expression_terminated",
"=",
"True",
"elif",
"state",
"==",
"State",
".",
"STRING_LITERAL",
":",
"if",
"ch",
"==",
"'\"'",
":",
"state",
"=",
"State",
".",
"BALANCED",
"current_expression_chars",
".",
"append",
"(",
"ch",
")",
"elif",
"state",
"==",
"State",
".",
"ESCAPED_PREFIX",
":",
"state",
"=",
"State",
".",
"BODY",
"output_chars",
".",
"append",
"(",
"ch",
")",
"# identifier can terminate expression in 3 ways:",
"# 1. next char is null (i.e. end of the input)",
"# 2. next char is not a word character or period",
"# 3. next char is a period, but it's not followed by a word character",
"if",
"state",
"==",
"State",
".",
"IDENTIFIER",
":",
"if",
"not",
"next_ch",
"or",
"(",
"not",
"is_word_char",
"(",
"next_ch",
")",
"and",
"next_ch",
"!=",
"'.'",
")",
"or",
"(",
"next_ch",
"==",
"'.'",
"and",
"not",
"is_word_char",
"(",
"next_next_ch",
")",
")",
":",
"current_expression_terminated",
"=",
"True",
"if",
"current_expression_terminated",
":",
"expression",
"=",
"''",
".",
"join",
"(",
"current_expression_chars",
")",
"output_chars",
".",
"append",
"(",
"self",
".",
"_resolve_expression_block",
"(",
"expression",
",",
"context",
",",
"url_encode",
",",
"strategy",
",",
"errors",
")",
")",
"current_expression_chars",
"=",
"[",
"]",
"current_expression_terminated",
"=",
"False",
"state",
"=",
"State",
".",
"BODY",
"# if last expression didn't terminate - add to output as is",
"if",
"not",
"current_expression_terminated",
"and",
"current_expression_chars",
":",
"output_chars",
".",
"append",
"(",
"''",
".",
"join",
"(",
"current_expression_chars",
")",
")",
"output",
"=",
"''",
".",
"join",
"(",
"output_chars",
")",
"# joining is fastest way to build strings in Python",
"return",
"output",
",",
"errors"
] | Evaluates a template string, e.g. "Hello @contact.name you have @(contact.reports * 2) reports"
:param template: the template string
:param context: the evaluation context
:param url_encode: whether or not values should be URL encoded
:return: a tuple of the evaluated template and a list of evaluation errors | [
"Evaluates",
"a",
"template",
"string",
"e",
".",
"g",
".",
"Hello"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L143-L234 |
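A hedged usage sketch for evaluate_template. Only evaluate_template's own signature is confirmed by the record above; the Evaluator and EvaluationContext constructor arguments are assumptions:

from temba_expressions.evaluator import Evaluator, EvaluationContext

evaluator = Evaluator(expression_prefix='@',
                      allowed_top_levels=('contact',))   # args assumed
context = EvaluationContext(
    variables={'contact': {'name': 'Bob', 'reports': 2,
                           '__default__': 'Bob'}})       # args assumed

output, errors = evaluator.evaluate_template(
    "Hello @contact.name you have @(contact.reports * 2) reports", context)
# Expected: output == "Hello Bob you have 4 reports" and errors == []
# A doubled prefix ("@@") is the escape and emits a literal "@".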
rapidpro/expressions | python/temba_expressions/evaluator.py | Evaluator._resolve_expression_block | def _resolve_expression_block(self, expression, context, url_encode, strategy, errors):
"""
Resolves an expression block found in the template, e.g. @(...). If an evaluation error occurs, expression is
returned as is.
"""
try:
body = expression[1:] # strip prefix
# if expression doesn't start with ( then check it's an allowed top level context reference
if not body.startswith('('):
top_level = body.split('.')[0].lower()
if top_level not in self._allowed_top_levels:
return expression
evaluated = self.evaluate_expression(body, context, strategy)
# convert result to string
result = conversions.to_string(evaluated, context)
return urlquote(result) if url_encode else result
except EvaluationError as e:
logger.debug("EvaluationError: %s" % str(e))
# if we can't evaluate expression, include it as is in the output
errors.append(str(e))
return expression | python | def _resolve_expression_block(self, expression, context, url_encode, strategy, errors):
"""
Resolves an expression block found in the template, e.g. @(...). If an evaluation error occurs, expression is
returned as is.
"""
try:
body = expression[1:] # strip prefix
# if expression doesn't start with ( then check it's an allowed top level context reference
if not body.startswith('('):
top_level = body.split('.')[0].lower()
if top_level not in self._allowed_top_levels:
return expression
evaluated = self.evaluate_expression(body, context, strategy)
# convert result to string
result = conversions.to_string(evaluated, context)
return urlquote(result) if url_encode else result
except EvaluationError as e:
logger.debug("EvaluationError: %s" % str(e))
# if we can't evaluate expression, include it as is in the output
errors.append(str(e))
return expression | [
"def",
"_resolve_expression_block",
"(",
"self",
",",
"expression",
",",
"context",
",",
"url_encode",
",",
"strategy",
",",
"errors",
")",
":",
"try",
":",
"body",
"=",
"expression",
"[",
"1",
":",
"]",
"# strip prefix",
"# if expression doesn't start with ( then check it's an allowed top level context reference",
"if",
"not",
"body",
".",
"startswith",
"(",
"'('",
")",
":",
"top_level",
"=",
"body",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"if",
"top_level",
"not",
"in",
"self",
".",
"_allowed_top_levels",
":",
"return",
"expression",
"evaluated",
"=",
"self",
".",
"evaluate_expression",
"(",
"body",
",",
"context",
",",
"strategy",
")",
"# convert result to string",
"result",
"=",
"conversions",
".",
"to_string",
"(",
"evaluated",
",",
"context",
")",
"return",
"urlquote",
"(",
"result",
")",
"if",
"url_encode",
"else",
"result",
"except",
"EvaluationError",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"EvaluationError: %s\"",
"%",
"str",
"(",
"e",
")",
")",
"# if we can't evaluate expression, include it as is in the output",
"errors",
".",
"append",
"(",
"str",
"(",
"e",
")",
")",
"return",
"expression"
] | Resolves an expression block found in the template, e.g. @(...). If an evaluation error occurs, expression is
returned as is. | [
"Resolves",
"an",
"expression",
"block",
"found",
"in",
"the",
"template",
"e",
".",
"g",
"."
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L236-L261 |
rapidpro/expressions | python/temba_expressions/evaluator.py | Evaluator.evaluate_expression | def evaluate_expression(self, expression, context, strategy=EvaluationStrategy.COMPLETE):
"""
Evaluates a single expression, e.g. "contact.reports * 2"
:param expression: the expression string
:param context: the evaluation context
:param strategy: the evaluation strategy
:return: the evaluated expression value
"""
from .gen.ExcellentLexer import ExcellentLexer
from .gen.ExcellentParser import ExcellentParser
stream = InputStream(expression)
lexer = ExcellentLexer(stream)
tokens = CommonTokenStream(lexer)
parser = ExcellentParser(tokens)
parser._errHandler = BailErrorStrategy()
try:
tree = parser.parse()
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Expression '%s' parsed as %s" % (expression, tree.toStringTree()))
except ParseCancellationException as ex:
message = None
if ex.args and isinstance(ex.args[0], NoViableAltException):
token = ex.args[0].offendingToken
if token is not None and token.type != ExcellentParser.EOF:
message = "Expression error at: %s" % token.text
if message is None:
message = "Expression is invalid"
raise EvaluationError(message, ex)
if strategy == EvaluationStrategy.RESOLVE_AVAILABLE:
resolved = self._resolve_available(tokens, context)
if resolved is not None:
return resolved
visitor = ExcellentVisitor(self._function_manager, context)
return visitor.visit(tree) | python | def evaluate_expression(self, expression, context, strategy=EvaluationStrategy.COMPLETE):
"""
Evaluates a single expression, e.g. "contact.reports * 2"
:param expression: the expression string
:param context: the evaluation context
:param strategy: the evaluation strategy
:return: the evaluated expression value
"""
from .gen.ExcellentLexer import ExcellentLexer
from .gen.ExcellentParser import ExcellentParser
stream = InputStream(expression)
lexer = ExcellentLexer(stream)
tokens = CommonTokenStream(lexer)
parser = ExcellentParser(tokens)
parser._errHandler = BailErrorStrategy()
try:
tree = parser.parse()
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Expression '%s' parsed as %s" % (expression, tree.toStringTree()))
except ParseCancellationException as ex:
message = None
if ex.args and isinstance(ex.args[0], NoViableAltException):
token = ex.args[0].offendingToken
if token is not None and token.type != ExcellentParser.EOF:
message = "Expression error at: %s" % token.text
if message is None:
message = "Expression is invalid"
raise EvaluationError(message, ex)
if strategy == EvaluationStrategy.RESOLVE_AVAILABLE:
resolved = self._resolve_available(tokens, context)
if resolved is not None:
return resolved
visitor = ExcellentVisitor(self._function_manager, context)
return visitor.visit(tree) | [
"def",
"evaluate_expression",
"(",
"self",
",",
"expression",
",",
"context",
",",
"strategy",
"=",
"EvaluationStrategy",
".",
"COMPLETE",
")",
":",
"from",
".",
"gen",
".",
"ExcellentLexer",
"import",
"ExcellentLexer",
"from",
".",
"gen",
".",
"ExcellentParser",
"import",
"ExcellentParser",
"stream",
"=",
"InputStream",
"(",
"expression",
")",
"lexer",
"=",
"ExcellentLexer",
"(",
"stream",
")",
"tokens",
"=",
"CommonTokenStream",
"(",
"lexer",
")",
"parser",
"=",
"ExcellentParser",
"(",
"tokens",
")",
"parser",
".",
"_errHandler",
"=",
"BailErrorStrategy",
"(",
")",
"try",
":",
"tree",
"=",
"parser",
".",
"parse",
"(",
")",
"if",
"logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"debug",
"(",
"\"Expression '%s' parsed as %s\"",
"%",
"(",
"expression",
",",
"tree",
".",
"toStringTree",
"(",
")",
")",
")",
"except",
"ParseCancellationException",
"as",
"ex",
":",
"message",
"=",
"None",
"if",
"ex",
".",
"args",
"and",
"isinstance",
"(",
"ex",
".",
"args",
"[",
"0",
"]",
",",
"NoViableAltException",
")",
":",
"token",
"=",
"ex",
".",
"args",
"[",
"0",
"]",
".",
"offendingToken",
"if",
"token",
"is",
"not",
"None",
"and",
"token",
".",
"type",
"!=",
"ExcellentParser",
".",
"EOF",
":",
"message",
"=",
"\"Expression error at: %s\"",
"%",
"token",
".",
"text",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"\"Expression is invalid\"",
"raise",
"EvaluationError",
"(",
"message",
",",
"ex",
")",
"if",
"strategy",
"==",
"EvaluationStrategy",
".",
"RESOLVE_AVAILABLE",
":",
"resolved",
"=",
"self",
".",
"_resolve_available",
"(",
"tokens",
",",
"context",
")",
"if",
"resolved",
"is",
"not",
"None",
":",
"return",
"resolved",
"visitor",
"=",
"ExcellentVisitor",
"(",
"self",
".",
"_function_manager",
",",
"context",
")",
"return",
"visitor",
".",
"visit",
"(",
"tree",
")"
] | Evaluates a single expression, e.g. "contact.reports * 2"
:param expression: the expression string
:param context: the evaluation context
:param strategy: the evaluation strategy
:return: the evaluated expression value | [
"Evaluates",
"a",
"single",
"expression",
"e",
".",
"g",
".",
"contact",
".",
"reports",
"*",
"2",
":",
"param",
"expression",
":",
"the",
"expression",
"string",
":",
"param",
"context",
":",
"the",
"evaluation",
"context",
":",
"param",
"strategy",
":",
"the",
"evaluation",
"strategy",
":",
"return",
":",
"the",
"evaluated",
"expression",
"value"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L263-L304 |
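The expression-only entry point can be sketched the same way, reusing the evaluator and context from the template sketch above:

# Evaluates a bare expression: no '@' prefix, no surrounding text.
value = evaluator.evaluate_expression("contact.reports * 2", context)
# -> Decimal(4) with the context above. A parse failure raises
# EvaluationError("Expression error at: <token>") when an offending
# token is known, or EvaluationError("Expression is invalid") otherwise,
# matching the ParseCancellationException handler shown above.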
rapidpro/expressions | python/temba_expressions/evaluator.py | Evaluator._resolve_available | def _resolve_available(self, tokens, context):
"""
Checks the token stream for context references and if there are missing references - substitutes available
references and returns a partially evaluated expression.
:param tokens: the token stream (all tokens fetched)
:param context: the evaluation context
:return: the partially evaluated expression or none if expression can be fully evaluated
"""
from .gen.ExcellentParser import ExcellentParser
has_missing = False
output_components = []
for t in range(len(tokens.tokens) - 1): # we can ignore the final EOF token
token = tokens.get(t)
next_token = tokens.get(t + 1)
# if token is a NAME not followed by ( then it's a context reference
if token.type == ExcellentParser.NAME and next_token.type != ExcellentParser.LPAREN:
try:
output_components.append(context.resolve_variable(token.text))
except EvaluationError:
has_missing = True
output_components.append(token)
else:
output_components.append(token)
# if we don't have missing context references, perform evaluation as normal
if not has_missing:
return None
# re-combine the tokens and context values back into an expression
output = [self._expression_prefix]
for output_component in output_components:
if isinstance(output_component, Token):
comp_val = output_component.text
else:
comp_val = conversions.to_repr(output_component, context)
output.append(comp_val)
return ''.join(output) | python | def _resolve_available(self, tokens, context):
"""
Checks the token stream for context references and if there are missing references - substitutes available
references and returns a partially evaluated expression.
:param tokens: the token stream (all tokens fetched)
:param context: the evaluation context
:return: the partially evaluated expression or none if expression can be fully evaluated
"""
from .gen.ExcellentParser import ExcellentParser
has_missing = False
output_components = []
for t in range(len(tokens.tokens) - 1): # we can ignore the final EOF token
token = tokens.get(t)
next_token = tokens.get(t + 1)
# if token is a NAME not followed by ( then it's a context reference
if token.type == ExcellentParser.NAME and next_token.type != ExcellentParser.LPAREN:
try:
output_components.append(context.resolve_variable(token.text))
except EvaluationError:
has_missing = True
output_components.append(token)
else:
output_components.append(token)
# if we don't have missing context references, perform evaluation as normal
if not has_missing:
return None
# re-combine the tokens and context values back into an expression
output = [self._expression_prefix]
for output_component in output_components:
if isinstance(output_component, Token):
comp_val = output_component.text
else:
comp_val = conversions.to_repr(output_component, context)
output.append(comp_val)
return ''.join(output) | [
"def",
"_resolve_available",
"(",
"self",
",",
"tokens",
",",
"context",
")",
":",
"from",
".",
"gen",
".",
"ExcellentParser",
"import",
"ExcellentParser",
"has_missing",
"=",
"False",
"output_components",
"=",
"[",
"]",
"for",
"t",
"in",
"range",
"(",
"len",
"(",
"tokens",
".",
"tokens",
")",
"-",
"1",
")",
":",
"# we can ignore the final EOF token",
"token",
"=",
"tokens",
".",
"get",
"(",
"t",
")",
"next_token",
"=",
"tokens",
".",
"get",
"(",
"t",
"+",
"1",
")",
"# if token is a NAME not followed by ( then it's a context reference",
"if",
"token",
".",
"type",
"==",
"ExcellentParser",
".",
"NAME",
"and",
"next_token",
".",
"type",
"!=",
"ExcellentParser",
".",
"LPAREN",
":",
"try",
":",
"output_components",
".",
"append",
"(",
"context",
".",
"resolve_variable",
"(",
"token",
".",
"text",
")",
")",
"except",
"EvaluationError",
":",
"has_missing",
"=",
"True",
"output_components",
".",
"append",
"(",
"token",
")",
"else",
":",
"output_components",
".",
"append",
"(",
"token",
")",
"# if we don't have missing context references, perform evaluation as normal",
"if",
"not",
"has_missing",
":",
"return",
"None",
"# re-combine the tokens and context values back into an expression",
"output",
"=",
"[",
"self",
".",
"_expression_prefix",
"]",
"for",
"output_component",
"in",
"output_components",
":",
"if",
"isinstance",
"(",
"output_component",
",",
"Token",
")",
":",
"comp_val",
"=",
"output_component",
".",
"text",
"else",
":",
"comp_val",
"=",
"conversions",
".",
"to_repr",
"(",
"output_component",
",",
"context",
")",
"output",
".",
"append",
"(",
"comp_val",
")",
"return",
"''",
".",
"join",
"(",
"output",
")"
] | Checks the token stream for context references and if there are missing references - substitutes available
references and returns a partially evaluated expression.
:param tokens: the token stream (all tokens fetched)
:param context: the evaluation context
:return: the partially evaluated expression or none if expression can be fully evaluated | [
"Checks",
"the",
"token",
"stream",
"for",
"context",
"references",
"and",
"if",
"there",
"are",
"missing",
"references",
"-",
"substitutes",
"available",
"references",
"and",
"returns",
"a",
"partially",
"evaluated",
"expression",
".",
":",
"param",
"tokens",
":",
"the",
"token",
"stream",
"(",
"all",
"tokens",
"fetched",
")",
":",
"param",
"context",
":",
"the",
"evaluation",
"context",
":",
"return",
":",
"the",
"partially",
"evaluated",
"expression",
"or",
"none",
"if",
"expression",
"can",
"be",
"fully",
"evaluated"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L306-L347 |
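An illustration of the partial-evaluation path above, again reusing the evaluator and context from the template sketch. With RESOLVE_AVAILABLE, resolvable NAME tokens are substituted and missing ones are left in place, and a re-prefixed expression string is returned instead of a final value. This sketch assumes dotted names lex as single NAME tokens and that to_repr renders string values with quotes:

from temba_expressions.evaluator import EvaluationStrategy

# 'contact.name' resolves to "Bob"; 'flow.response' is not in the context.
partial = evaluator.evaluate_expression(
    'contact.name & flow.response', context,
    EvaluationStrategy.RESOLVE_AVAILABLE)
# -> something like '@"Bob"&flow.response': the '@' prefix is prepended
#    and the surviving tokens are re-joined without spaces, as in the
#    ''.join(output) step shown above.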
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitFunctionCall | def visitFunctionCall(self, ctx):
"""
expression : fnname LPAREN parameters? RPAREN
"""
func_name = ctx.fnname().getText()
if ctx.parameters() is not None:
parameters = self.visit(ctx.parameters())
else:
parameters = []
return self._functions.invoke_function(self._eval_context, func_name, parameters) | python | def visitFunctionCall(self, ctx):
"""
expression : fnname LPAREN parameters? RPAREN
"""
func_name = ctx.fnname().getText()
if ctx.parameters() is not None:
parameters = self.visit(ctx.parameters())
else:
parameters = []
return self._functions.invoke_function(self._eval_context, func_name, parameters) | [
"def",
"visitFunctionCall",
"(",
"self",
",",
"ctx",
")",
":",
"func_name",
"=",
"ctx",
".",
"fnname",
"(",
")",
".",
"getText",
"(",
")",
"if",
"ctx",
".",
"parameters",
"(",
")",
"is",
"not",
"None",
":",
"parameters",
"=",
"self",
".",
"visit",
"(",
"ctx",
".",
"parameters",
"(",
")",
")",
"else",
":",
"parameters",
"=",
"[",
"]",
"return",
"self",
".",
"_functions",
".",
"invoke_function",
"(",
"self",
".",
"_eval_context",
",",
"func_name",
",",
"parameters",
")"
] | expression : fnname LPAREN parameters? RPAREN | [
"expression",
":",
"fnname",
"LPAREN",
"parameters?",
"RPAREN"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L359-L370 |
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitNegation | def visitNegation(self, ctx):
"""
expression: MINUS expression
"""
return -conversions.to_decimal(self.visit(ctx.expression()), self._eval_context) | python | def visitNegation(self, ctx):
"""
expression: MINUS expression
"""
return -conversions.to_decimal(self.visit(ctx.expression()), self._eval_context) | [
"def",
"visitNegation",
"(",
"self",
",",
"ctx",
")",
":",
"return",
"-",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
")",
")",
",",
"self",
".",
"_eval_context",
")"
] | expression: MINUS expression | [
"expression",
":",
"MINUS",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L378-L382 |
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitExponentExpression | def visitExponentExpression(self, ctx):
"""
expression: expression EXPONENT expression
"""
arg1 = conversions.to_decimal(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_decimal(self.visit(ctx.expression(1)), self._eval_context)
return conversions.to_decimal(decimal_pow(arg1, arg2), ctx) | python | def visitExponentExpression(self, ctx):
"""
expression: expression EXPONENT expression
"""
arg1 = conversions.to_decimal(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_decimal(self.visit(ctx.expression(1)), self._eval_context)
return conversions.to_decimal(decimal_pow(arg1, arg2), ctx) | [
"def",
"visitExponentExpression",
"(",
"self",
",",
"ctx",
")",
":",
"arg1",
"=",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"arg2",
"=",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"return",
"conversions",
".",
"to_decimal",
"(",
"decimal_pow",
"(",
"arg1",
",",
"arg2",
")",
",",
"ctx",
")"
] | expression: expression EXPONENT expression | [
"expression",
":",
"expression",
"EXPONENT",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L384-L390 |
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitMultiplicationOrDivisionExpression | def visitMultiplicationOrDivisionExpression(self, ctx):
"""
expression: expression (TIMES | DIVIDE) expression
"""
is_mul = ctx.TIMES() is not None
arg1 = conversions.to_decimal(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_decimal(self.visit(ctx.expression(1)), self._eval_context)
if not is_mul and arg2 == Decimal(0):
raise EvaluationError("Division by zero")
return arg1 * arg2 if is_mul else arg1 / arg2 | python | def visitMultiplicationOrDivisionExpression(self, ctx):
"""
expression: expression (TIMES | DIVIDE) expression
"""
is_mul = ctx.TIMES() is not None
arg1 = conversions.to_decimal(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_decimal(self.visit(ctx.expression(1)), self._eval_context)
if not is_mul and arg2 == Decimal(0):
raise EvaluationError("Division by zero")
return arg1 * arg2 if is_mul else arg1 / arg2 | [
"def",
"visitMultiplicationOrDivisionExpression",
"(",
"self",
",",
"ctx",
")",
":",
"is_mul",
"=",
"ctx",
".",
"TIMES",
"(",
")",
"is",
"not",
"None",
"arg1",
"=",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"arg2",
"=",
"conversions",
".",
"to_decimal",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"if",
"not",
"is_mul",
"and",
"arg2",
"==",
"Decimal",
"(",
"0",
")",
":",
"raise",
"EvaluationError",
"(",
"\"Division by zero\"",
")",
"return",
"arg1",
"*",
"arg2",
"if",
"is_mul",
"else",
"arg1",
"/",
"arg2"
] | expression: expression (TIMES | DIVIDE) expression | [
"expression",
":",
"expression",
"(",
"TIMES",
"|",
"DIVIDE",
")",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L392-L404 |
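The division guard above surfaces as an EvaluationError rather than Python's ZeroDivisionError. Reusing the evaluator and context from the template sketch (the EvaluationError import path is assumed):

from temba_expressions import EvaluationError  # import path assumed

try:
    evaluator.evaluate_expression("10 / 0", context)
except EvaluationError as e:
    print(e)  # "Division by zero"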
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitAdditionOrSubtractionExpression | def visitAdditionOrSubtractionExpression(self, ctx):
"""
expression: expression (PLUS | MINUS) expression
"""
is_add = ctx.PLUS() is not None
arg1 = self.visit(ctx.expression(0))
arg2 = self.visit(ctx.expression(1))
# first try as decimals
try:
_arg1 = conversions.to_decimal(arg1, self._eval_context)
_arg2 = conversions.to_decimal(arg2, self._eval_context)
return _arg1 + _arg2 if is_add else _arg1 - _arg2
except EvaluationError:
pass
# then as date + something
try:
_arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
if isinstance(arg2, datetime.time):
# upgrade our date to datetime
_arg1 = conversions.to_datetime(_arg1, self._eval_context)
# convert time value to a duration
_arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond)
else:
_arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
return _arg1 + _arg2 if is_add else _arg1 - _arg2
except EvaluationError as ex:
raise EvaluationError("Expression could not be evaluated as decimal or date arithmetic", ex) | python | def visitAdditionOrSubtractionExpression(self, ctx):
"""
expression: expression (PLUS | MINUS) expression
"""
is_add = ctx.PLUS() is not None
arg1 = self.visit(ctx.expression(0))
arg2 = self.visit(ctx.expression(1))
# first try as decimals
try:
_arg1 = conversions.to_decimal(arg1, self._eval_context)
_arg2 = conversions.to_decimal(arg2, self._eval_context)
return _arg1 + _arg2 if is_add else _arg1 - _arg2
except EvaluationError:
pass
# then as date + something
try:
_arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
if isinstance(arg2, datetime.time):
# upgrade our date to datetime
_arg1 = conversions.to_datetime(_arg1, self._eval_context)
# convert time value to a duration
_arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond)
else:
_arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
return _arg1 + _arg2 if is_add else _arg1 - _arg2
except EvaluationError as ex:
raise EvaluationError("Expression could not be evaluated as decimal or date arithmetic", ex) | [
"def",
"visitAdditionOrSubtractionExpression",
"(",
"self",
",",
"ctx",
")",
":",
"is_add",
"=",
"ctx",
".",
"PLUS",
"(",
")",
"is",
"not",
"None",
"arg1",
"=",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
"arg2",
"=",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
"# first try as decimals",
"try",
":",
"_arg1",
"=",
"conversions",
".",
"to_decimal",
"(",
"arg1",
",",
"self",
".",
"_eval_context",
")",
"_arg2",
"=",
"conversions",
".",
"to_decimal",
"(",
"arg2",
",",
"self",
".",
"_eval_context",
")",
"return",
"_arg1",
"+",
"_arg2",
"if",
"is_add",
"else",
"_arg1",
"-",
"_arg2",
"except",
"EvaluationError",
":",
"pass",
"# then as date + something",
"try",
":",
"_arg1",
"=",
"conversions",
".",
"to_date_or_datetime",
"(",
"arg1",
",",
"self",
".",
"_eval_context",
")",
"if",
"isinstance",
"(",
"arg2",
",",
"datetime",
".",
"time",
")",
":",
"# upgrade our date to datetime",
"_arg1",
"=",
"conversions",
".",
"to_datetime",
"(",
"_arg1",
",",
"self",
".",
"_eval_context",
")",
"# convert time value to a duration",
"_arg2",
"=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"arg2",
".",
"hour",
",",
"minutes",
"=",
"arg2",
".",
"minute",
",",
"seconds",
"=",
"arg2",
".",
"second",
",",
"microseconds",
"=",
"arg2",
".",
"microsecond",
")",
"else",
":",
"_arg2",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"conversions",
".",
"to_integer",
"(",
"arg2",
",",
"self",
".",
"_eval_context",
")",
")",
"return",
"_arg1",
"+",
"_arg2",
"if",
"is_add",
"else",
"_arg1",
"-",
"_arg2",
"except",
"EvaluationError",
"as",
"ex",
":",
"raise",
"EvaluationError",
"(",
"\"Expression could not be evaluated as decimal or date arithmetic\"",
",",
"ex",
")"
] | expression: expression (PLUS | MINUS) expression | [
"expression",
":",
"expression",
"(",
"PLUS",
"|",
"MINUS",
")",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L406-L438 |
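The two-stage fallback above overloads '+' and '-': both operands are first tried as decimals, and only then as date-plus-duration. A sketch of both paths, reusing the evaluator and context from the template sketch; the DATE() function is assumed to be registered with the function manager and is not shown in this record:

# Decimal path: both operands convert cleanly to Decimal.
evaluator.evaluate_expression("2 + 3", context)   # -> Decimal(5)

# Date path: date + integer is treated as adding that many days,
# and date + time upgrades the date to a datetime before adding.
evaluator.evaluate_expression("DATE(2024, 1, 31) + 1", context)
# -> the date 2024-02-01, assuming DATE() exists and returns a date.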
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitComparisonExpression | def visitComparisonExpression(self, ctx):
"""
expression: expression (LTE | LT | GTE | GT) expression
"""
arg1, arg2 = conversions.to_same(self.visit(ctx.expression(0)), self.visit(ctx.expression(1)), self._eval_context)
if isinstance(arg1, str):
# string comparison is case-insensitive
compared = (arg1.lower() > arg2.lower()) - (arg1.lower() < arg2.lower())
else:
compared = (arg1 > arg2) - (arg1 < arg2)
if ctx.LTE() is not None:
return compared <= 0
elif ctx.LT() is not None:
return compared < 0
elif ctx.GTE() is not None:
return compared >= 0
else: # GT
return compared > 0 | python | def visitComparisonExpression(self, ctx):
"""
expression: expression (LTE | LT | GTE | GT) expression
"""
arg1, arg2 = conversions.to_same(self.visit(ctx.expression(0)), self.visit(ctx.expression(1)), self._eval_context)
if isinstance(arg1, str):
# string comparison is case-insensitive
compared = (arg1.lower() > arg2.lower()) - (arg1.lower() < arg2.lower())
else:
compared = (arg1 > arg2) - (arg1 < arg2)
if ctx.LTE() is not None:
return compared <= 0
elif ctx.LT() is not None:
return compared < 0
elif ctx.GTE() is not None:
return compared >= 0
else: # GT
return compared > 0 | [
"def",
"visitComparisonExpression",
"(",
"self",
",",
"ctx",
")",
":",
"arg1",
",",
"arg2",
"=",
"conversions",
".",
"to_same",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"if",
"isinstance",
"(",
"arg1",
",",
"str",
")",
":",
"# string comparison is case-insensitive",
"compared",
"=",
"(",
"arg1",
".",
"lower",
"(",
")",
">",
"arg2",
".",
"lower",
"(",
")",
")",
"-",
"(",
"arg1",
".",
"lower",
"(",
")",
"<",
"arg2",
".",
"lower",
"(",
")",
")",
"else",
":",
"compared",
"=",
"(",
"arg1",
">",
"arg2",
")",
"-",
"(",
"arg1",
"<",
"arg2",
")",
"if",
"ctx",
".",
"LTE",
"(",
")",
"is",
"not",
"None",
":",
"return",
"compared",
"<=",
"0",
"elif",
"ctx",
".",
"LT",
"(",
")",
"is",
"not",
"None",
":",
"return",
"compared",
"<",
"0",
"elif",
"ctx",
".",
"GTE",
"(",
")",
"is",
"not",
"None",
":",
"return",
"compared",
">=",
"0",
"else",
":",
"# GT",
"return",
"compared",
">",
"0"
] | expression: expression (LTE | LT | GTE | GT) expression | [
"expression",
":",
"expression",
"(",
"LTE",
"|",
"LT",
"|",
"GTE",
"|",
"GT",
")",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L440-L459 |
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitEqualityExpression | def visitEqualityExpression(self, ctx):
"""
expression: expression (EQ | NEQ) expression
"""
arg1, arg2 = conversions.to_same(self.visit(ctx.expression(0)), self.visit(ctx.expression(1)), self._eval_context)
if isinstance(arg1, str):
# string equality is case-insensitive
equal = arg1.lower() == arg2.lower()
else:
equal = arg1 == arg2
return equal if ctx.EQ() is not None else not equal | python | def visitEqualityExpression(self, ctx):
"""
expression: expression (EQ | NEQ) expression
"""
arg1, arg2 = conversions.to_same(self.visit(ctx.expression(0)), self.visit(ctx.expression(1)), self._eval_context)
if isinstance(arg1, str):
# string equality is case-insensitive
equal = arg1.lower() == arg2.lower()
else:
equal = arg1 == arg2
return equal if ctx.EQ() is not None else not equal | [
"def",
"visitEqualityExpression",
"(",
"self",
",",
"ctx",
")",
":",
"arg1",
",",
"arg2",
"=",
"conversions",
".",
"to_same",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"if",
"isinstance",
"(",
"arg1",
",",
"str",
")",
":",
"# string equality is case-insensitive",
"equal",
"=",
"arg1",
".",
"lower",
"(",
")",
"==",
"arg2",
".",
"lower",
"(",
")",
"else",
":",
"equal",
"=",
"arg1",
"==",
"arg2",
"return",
"equal",
"if",
"ctx",
".",
"EQ",
"(",
")",
"is",
"not",
"None",
"else",
"not",
"equal"
] | expression: expression (EQ | NEQ) expression | [
"expression",
":",
"expression",
"(",
"EQ",
"|",
"NEQ",
")",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L461-L473 |
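Both the ordering and equality visitors above compare strings case-insensitively after coercing the two sides to a common type via to_same. Reusing the evaluator and context from the template sketch, and assuming '=' lexes as the EQ token as in Excel-style grammars:

evaluator.evaluate_expression('"HELLO" = "hello"', context)   # -> True
evaluator.evaluate_expression('"apple" < "BANANA"', context)  # -> True
# Ordering is decided on lower-cased copies, so case never affects it.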
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitConcatenation | def visitConcatenation(self, ctx):
"""
expression: expression AMPERSAND expression
"""
arg1 = conversions.to_string(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_string(self.visit(ctx.expression(1)), self._eval_context)
return arg1 + arg2 | python | def visitConcatenation(self, ctx):
"""
expression: expression AMPERSAND expression
"""
arg1 = conversions.to_string(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_string(self.visit(ctx.expression(1)), self._eval_context)
return arg1 + arg2 | [
"def",
"visitConcatenation",
"(",
"self",
",",
"ctx",
")",
":",
"arg1",
"=",
"conversions",
".",
"to_string",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"arg2",
"=",
"conversions",
".",
"to_string",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"return",
"arg1",
"+",
"arg2"
] | expression: expression AMPERSAND expression | [
"expression",
":",
"expression",
"AMPERSAND",
"expression"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L475-L481 |
rapidpro/expressions | python/temba_expressions/evaluator.py | ExcellentVisitor.visitContextReference | def visitContextReference(self, ctx):
"""
expression: NAME
"""
identifier = ctx.NAME().getText()
return self._eval_context.resolve_variable(identifier) | python | def visitContextReference(self, ctx):
"""
expression: NAME
"""
identifier = ctx.NAME().getText()
return self._eval_context.resolve_variable(identifier) | [
"def",
"visitContextReference",
"(",
"self",
",",
"ctx",
")",
":",
"identifier",
"=",
"ctx",
".",
"NAME",
"(",
")",
".",
"getText",
"(",
")",
"return",
"self",
".",
"_eval_context",
".",
"resolve_variable",
"(",
"identifier",
")"
] | expression: NAME | [
"expression",
":",
"NAME"
] | train | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L508-L513 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.load_cache | def load_cache(self):
"""Load the cached Zotero data."""
with open(self.cache_path, "rb") as f:
print("Loading cached Zotero data...")
cache = pickle.load(f)
self._references = cache[self.CACHE_REFERENCE_LIST]
self.reference_types = cache[self.CACHE_REFERENCE_TYPES]
self.reference_templates = cache[self.CACHE_REFERENCE_TEMPLATES]
print("Cached Zotero data loaded.") | python | def load_cache(self):
"""Load the cached Zotero data."""
with open(self.cache_path, "rb") as f:
print("Loading cached Zotero data...")
cache = pickle.load(f)
self._references = cache[self.CACHE_REFERENCE_LIST]
self.reference_types = cache[self.CACHE_REFERENCE_TYPES]
self.reference_templates = cache[self.CACHE_REFERENCE_TEMPLATES]
print("Cached Zotero data loaded.") | [
"def",
"load_cache",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"cache_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"print",
"(",
"\"Loading cached Zotero data...\"",
")",
"cache",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"self",
".",
"_references",
"=",
"cache",
"[",
"self",
".",
"CACHE_REFERENCE_LIST",
"]",
"self",
".",
"reference_types",
"=",
"cache",
"[",
"self",
".",
"CACHE_REFERENCE_TYPES",
"]",
"self",
".",
"reference_templates",
"=",
"cache",
"[",
"self",
".",
"CACHE_REFERENCE_TEMPLATES",
"]",
"print",
"(",
"\"Cached Zotero data loaded.\"",
")"
] | Load the cached Zotero data. | [
"Load",
"the",
"cached",
"Zotero",
"data",
"."
] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L38-L46 |
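A hedged sketch around load_cache. The cache file layout follows the three keys read above; writing it with pickle like this is an assumption about how the rest of the class produces the cache, and zotero_wrap stands for an already-constructed ZoteroWrap instance (the constructor is not shown in this record):

import pickle

cache = {
    ZoteroWrap.CACHE_REFERENCE_LIST: [],        # list of reference dicts
    ZoteroWrap.CACHE_REFERENCE_TYPES: [],       # item type names
    ZoteroWrap.CACHE_REFERENCE_TEMPLATES: {},   # type -> field template
}
with open(zotero_wrap.cache_path, "wb") as f:
    pickle.dump(cache, f)

zotero_wrap.load_cache()  # repopulates _references, reference_types,
                          # and reference_templates from the pickle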