repository_name (stringlengths 7–55) | func_path_in_repository (stringlengths 4–223) | func_name (stringlengths 1–134) | whole_func_string (stringlengths 75–104k) | language (stringclasses 1 value) | func_code_string (stringlengths 75–104k) | func_code_tokens (sequencelengths 19–28.4k) | func_documentation_string (stringlengths 1–46.9k) | func_documentation_tokens (sequencelengths 1–1.97k) | split_name (stringclasses 1 value) | func_code_url (stringlengths 87–315) |
---|---|---|---|---|---|---|---|---|---|---|
Metatab/metapack | metapack/jupyter/script.py | ScriptIPython.system_piped | def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (cannot end in '&', as background processes are
not supported). Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1)) | python | def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (cannot end in '&', as background processes are
not supported). Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1)) | [
"def",
"system_piped",
"(",
"self",
",",
"cmd",
")",
":",
"if",
"cmd",
".",
"rstrip",
"(",
")",
".",
"endswith",
"(",
"'&'",
")",
":",
"# this is *far* from a rigorous test",
"# We do not support backgrounding processes because we either use",
"# pexpect or pipes to read from. Users can always just call",
"# os.system() or use ip.system=ip.system_raw",
"# if they really want a background process.",
"raise",
"OSError",
"(",
"\"Background processes not supported.\"",
")",
"# we explicitly do NOT return the subprocess status code, because",
"# a non-None value would trigger :func:`sys.displayhook` calls.",
"# Instead, we store the exit_code in user_ns.",
"self",
".",
"user_ns",
"[",
"'_exit_code'",
"]",
"=",
"system",
"(",
"self",
".",
"var_expand",
"(",
"cmd",
",",
"depth",
"=",
"1",
")",
")"
] | Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (cannot end in '&', as background processes are
not supported). Should not be a command that expects input
other than simple text. | [
"Call",
"the",
"given",
"cmd",
"in",
"a",
"subprocess",
"piping",
"stdout",
"/",
"err"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L61-L82 |
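A minimal, self-contained sketch of the contract above, using only the stdlib; the standalone `system_piped` function and the `user_ns` dict are illustrative stand-ins, not the Metapack/IPython API:

```python
import subprocess

def system_piped(cmd, user_ns):
    # Reject backgrounded commands, mirroring the '&' check above.
    if cmd.rstrip().endswith('&'):
        raise OSError("Background processes not supported.")
    # Run with stdout/stderr inherited; stash the exit code in the
    # namespace instead of returning it, so nothing gets displayed.
    user_ns['_exit_code'] = subprocess.call(cmd, shell=True)

ns = {}
system_piped("echo hello", ns)
print(ns['_exit_code'])            # 0 on success

try:
    system_piped("sleep 10 &", ns)
except OSError as e:
    print(e)                       # Background processes not supported.
```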
Metatab/metapack | metapack/jupyter/script.py | ScriptIPython.var_expand | def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth + 1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd | python | def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth + 1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd | [
"def",
"var_expand",
"(",
"self",
",",
"cmd",
",",
"depth",
"=",
"0",
",",
"formatter",
"=",
"DollarFormatter",
"(",
")",
")",
":",
"ns",
"=",
"self",
".",
"user_ns",
".",
"copy",
"(",
")",
"try",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"depth",
"+",
"1",
")",
"except",
"ValueError",
":",
"# This is thrown if there aren't that many frames on the stack,",
"# e.g. if a script called run_line_magic() directly.",
"pass",
"else",
":",
"ns",
".",
"update",
"(",
"frame",
".",
"f_locals",
")",
"try",
":",
"# We have to use .vformat() here, because 'self' is a valid and common",
"# name, and expanding **ns for .format() would make it collide with",
"# the 'self' argument of the method.",
"cmd",
"=",
"formatter",
".",
"vformat",
"(",
"cmd",
",",
"args",
"=",
"[",
"]",
",",
"kwargs",
"=",
"ns",
")",
"except",
"Exception",
":",
"# if formatter couldn't format, just let it go untransformed",
"pass",
"return",
"cmd"
] | Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace. | [
"Expand",
"python",
"variables",
"in",
"a",
"string",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L85-L112 |
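A hedged sketch of the same expansion logic built on the stdlib `string.Formatter` (IPython's `DollarFormatter` also understands `$var`; plain `Formatter` handles only `{var}`, which is enough to show the idea). It also shows why `.vformat(cmd, args=[], kwargs=ns)` is used instead of `cmd.format(**ns)`: a namespace key literally named `self` would collide with the method's own `self` argument under `**` expansion.

```python
import sys
from string import Formatter

def var_expand(cmd, user_ns, depth=0, formatter=Formatter()):
    ns = dict(user_ns)
    try:
        # Walk depth + 1 frames up to reach the caller's local namespace.
        frame = sys._getframe(depth + 1)
    except ValueError:
        pass                        # not that many frames on the stack
    else:
        ns.update(frame.f_locals)
    try:
        return formatter.vformat(cmd, args=[], kwargs=ns)
    except Exception:
        return cmd                  # leave untransformed on failure

user_ns = {'name': 'world'}
local_var = 42
print(var_expand("echo {name} {local_var}", user_ns))
# -> echo world 42  (local_var is picked up from the caller's frame)
```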
Metatab/metapack | metapack/jupyter/script.py | ScriptIPython.shebang | def shebang(self, line, cell):
"""Run a cell via a shell command
The `%%script` line is like the #! line of script,
specifying a program (bash, perl, ruby, etc.) with which to run.
The rest of the cell is run by that program.
Examples
--------
::
In [1]: %%script bash
...: for i in 1 2 3; do
...: echo $i
...: done
1
2
3
"""
argv = arg_split(line, posix=not sys.platform.startswith('win'))
args, cmd = self.shebang.parser.parse_known_args(argv)
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
print("Couldn't find program: %r" % cmd[0])
return
else:
raise
if not cell.endswith('\n'):
cell += '\n'
cell = cell.encode('utf8', 'replace')
if args.bg:
self.bg_processes.append(p)
self._gc_bg_processes()
if args.out:
self.shell.user_ns[args.out] = p.stdout
if args.err:
self.shell.user_ns[args.err] = p.stderr
self.job_manager.new(self._run_script, p, cell, daemon=True)
if args.proc:
self.shell.user_ns[args.proc] = p
return
try:
out, err = p.communicate(cell)
except KeyboardInterrupt:
try:
p.send_signal(signal.SIGINT)
time.sleep(0.1)
if p.poll() is not None:
print("Process is interrupted.")
return
p.terminate()
time.sleep(0.1)
if p.poll() is not None:
print("Process is terminated.")
return
p.kill()
print("Process is killed.")
except OSError:
pass
except Exception as e:
print("Error while terminating subprocess (pid=%i): %s" \
% (p.pid, e))
return
out = py3compat.bytes_to_str(out)
err = py3compat.bytes_to_str(err)
if args.out:
self.shell.user_ns[args.out] = out
else:
sys.stdout.write(out)
sys.stdout.flush()
if args.err:
self.shell.user_ns[args.err] = err
else:
sys.stderr.write(err)
sys.stderr.flush() | python | def shebang(self, line, cell):
"""Run a cell via a shell command
The `%%script` line is like the #! line of script,
specifying a program (bash, perl, ruby, etc.) with which to run.
The rest of the cell is run by that program.
Examples
--------
::
In [1]: %%script bash
...: for i in 1 2 3; do
...: echo $i
...: done
1
2
3
"""
argv = arg_split(line, posix=not sys.platform.startswith('win'))
args, cmd = self.shebang.parser.parse_known_args(argv)
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
print("Couldn't find program: %r" % cmd[0])
return
else:
raise
if not cell.endswith('\n'):
cell += '\n'
cell = cell.encode('utf8', 'replace')
if args.bg:
self.bg_processes.append(p)
self._gc_bg_processes()
if args.out:
self.shell.user_ns[args.out] = p.stdout
if args.err:
self.shell.user_ns[args.err] = p.stderr
self.job_manager.new(self._run_script, p, cell, daemon=True)
if args.proc:
self.shell.user_ns[args.proc] = p
return
try:
out, err = p.communicate(cell)
except KeyboardInterrupt:
try:
p.send_signal(signal.SIGINT)
time.sleep(0.1)
if p.poll() is not None:
print("Process is interrupted.")
return
p.terminate()
time.sleep(0.1)
if p.poll() is not None:
print("Process is terminated.")
return
p.kill()
print("Process is killed.")
except OSError:
pass
except Exception as e:
print("Error while terminating subprocess (pid=%i): %s" \
% (p.pid, e))
return
out = py3compat.bytes_to_str(out)
err = py3compat.bytes_to_str(err)
if args.out:
self.shell.user_ns[args.out] = out
else:
sys.stdout.write(out)
sys.stdout.flush()
if args.err:
self.shell.user_ns[args.err] = err
else:
sys.stderr.write(err)
sys.stderr.flush() | [
"def",
"shebang",
"(",
"self",
",",
"line",
",",
"cell",
")",
":",
"argv",
"=",
"arg_split",
"(",
"line",
",",
"posix",
"=",
"not",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
")",
"args",
",",
"cmd",
"=",
"self",
".",
"shebang",
".",
"parser",
".",
"parse_known_args",
"(",
"argv",
")",
"try",
":",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"stdin",
"=",
"PIPE",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"print",
"(",
"\"Couldn't find program: %r\"",
"%",
"cmd",
"[",
"0",
"]",
")",
"return",
"else",
":",
"raise",
"if",
"not",
"cell",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"cell",
"+=",
"'\\n'",
"cell",
"=",
"cell",
".",
"encode",
"(",
"'utf8'",
",",
"'replace'",
")",
"if",
"args",
".",
"bg",
":",
"self",
".",
"bg_processes",
".",
"append",
"(",
"p",
")",
"self",
".",
"_gc_bg_processes",
"(",
")",
"if",
"args",
".",
"out",
":",
"self",
".",
"shell",
".",
"user_ns",
"[",
"args",
".",
"out",
"]",
"=",
"p",
".",
"stdout",
"if",
"args",
".",
"err",
":",
"self",
".",
"shell",
".",
"user_ns",
"[",
"args",
".",
"err",
"]",
"=",
"p",
".",
"stderr",
"self",
".",
"job_manager",
".",
"new",
"(",
"self",
".",
"_run_script",
",",
"p",
",",
"cell",
",",
"daemon",
"=",
"True",
")",
"if",
"args",
".",
"proc",
":",
"self",
".",
"shell",
".",
"user_ns",
"[",
"args",
".",
"proc",
"]",
"=",
"p",
"return",
"try",
":",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
"cell",
")",
"except",
"KeyboardInterrupt",
":",
"try",
":",
"p",
".",
"send_signal",
"(",
"signal",
".",
"SIGINT",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"p",
".",
"poll",
"(",
")",
"is",
"not",
"None",
":",
"print",
"(",
"\"Process is interrupted.\"",
")",
"return",
"p",
".",
"terminate",
"(",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"p",
".",
"poll",
"(",
")",
"is",
"not",
"None",
":",
"print",
"(",
"\"Process is terminated.\"",
")",
"return",
"p",
".",
"kill",
"(",
")",
"print",
"(",
"\"Process is killed.\"",
")",
"except",
"OSError",
":",
"pass",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"Error while terminating subprocess (pid=%i): %s\"",
"%",
"(",
"p",
".",
"pid",
",",
"e",
")",
")",
"return",
"out",
"=",
"py3compat",
".",
"bytes_to_str",
"(",
"out",
")",
"err",
"=",
"py3compat",
".",
"bytes_to_str",
"(",
"err",
")",
"if",
"args",
".",
"out",
":",
"self",
".",
"shell",
".",
"user_ns",
"[",
"args",
".",
"out",
"]",
"=",
"out",
"else",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"out",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"args",
".",
"err",
":",
"self",
".",
"shell",
".",
"user_ns",
"[",
"args",
".",
"err",
"]",
"=",
"err",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"err",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")"
] | Run a cell via a shell command
The `%%script` line is like the #! line of script,
specifying a program (bash, perl, ruby, etc.) with which to run.
The rest of the cell is run by that program.
Examples
--------
::
In [1]: %%script bash
...: for i in 1 2 3; do
...: echo $i
...: done
1
2
3 | [
"Run",
"a",
"cell",
"via",
"a",
"shell",
"command"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L119-L199 |
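The core of the `%%script` behaviour, reduced to a runnable stdlib sketch: no `--bg`/`--out`/`--err`/`--proc` handling, and the interpreter is a plain string rather than a parsed magic line (assumes a `python3` on PATH):

```python
import sys
from subprocess import Popen, PIPE

def run_cell(line, cell):
    cmd = line.split()              # e.g. ['python3', '-']
    if not cell.endswith('\n'):
        cell += '\n'                # some interpreters need a final newline
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # Feed the cell body to the program and relay its output.
    out, err = p.communicate(cell.encode('utf8', 'replace'))
    sys.stdout.write(out.decode('utf8', 'replace'))
    sys.stderr.write(err.decode('utf8', 'replace'))

run_cell("python3 -", "for i in (1, 2, 3):\n    print(i)\n")
# prints 1, 2, 3: the same shape as the %%script bash example above
```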
project-rig/rig | rig/place_and_route/place/rcm.py | _get_vertices_neighbours | def _get_vertices_neighbours(nets):
"""Generate a listing of each vertex's immedate neighbours in an undirected
interpretation of a graph.
Returns
-------
{vertex: {vertex: weight, ...}), ...}
"""
zero_fn = (lambda: 0)
vertices_neighbours = defaultdict(lambda: defaultdict(zero_fn))
for net in nets:
if net.weight != 0:
for sink in net.sinks:
vertices_neighbours[net.source][sink] += net.weight
vertices_neighbours[sink][net.source] += net.weight
return vertices_neighbours | python | def _get_vertices_neighbours(nets):
"""Generate a listing of each vertex's immedate neighbours in an undirected
interpretation of a graph.
Returns
-------
{vertex: {vertex: weight, ...}), ...}
"""
zero_fn = (lambda: 0)
vertices_neighbours = defaultdict(lambda: defaultdict(zero_fn))
for net in nets:
if net.weight != 0:
for sink in net.sinks:
vertices_neighbours[net.source][sink] += net.weight
vertices_neighbours[sink][net.source] += net.weight
return vertices_neighbours | [
"def",
"_get_vertices_neighbours",
"(",
"nets",
")",
":",
"zero_fn",
"=",
"(",
"lambda",
":",
"0",
")",
"vertices_neighbours",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"zero_fn",
")",
")",
"for",
"net",
"in",
"nets",
":",
"if",
"net",
".",
"weight",
"!=",
"0",
":",
"for",
"sink",
"in",
"net",
".",
"sinks",
":",
"vertices_neighbours",
"[",
"net",
".",
"source",
"]",
"[",
"sink",
"]",
"+=",
"net",
".",
"weight",
"vertices_neighbours",
"[",
"sink",
"]",
"[",
"net",
".",
"source",
"]",
"+=",
"net",
".",
"weight",
"return",
"vertices_neighbours"
] | Generate a listing of each vertex's immediate neighbours in an undirected
interpretation of a graph.
Returns
-------
{vertex: {vertex: weight, ...}), ...} | [
"Generate",
"a",
"listing",
"of",
"each",
"vertex",
"s",
"immedate",
"neighbours",
"in",
"an",
"undirected",
"interpretation",
"of",
"a",
"graph",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L13-L28 |
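A worked example; `Net` is a namedtuple stand-in for rig's net type, since the function only reads `.source`, `.sinks` and `.weight`:

```python
from collections import defaultdict, namedtuple

Net = namedtuple('Net', 'source sinks weight')

def get_vertices_neighbours(nets):
    # vertex -> {neighbour -> summed edge weight}, symmetric by construction
    neighbours = defaultdict(lambda: defaultdict(lambda: 0))
    for net in nets:
        if net.weight != 0:         # zero-weight nets add no edges
            for sink in net.sinks:
                neighbours[net.source][sink] += net.weight
                neighbours[sink][net.source] += net.weight
    return neighbours

nets = [Net('a', ['b', 'c'], 1), Net('b', ['c'], 2), Net('c', ['a'], 0)]
n = get_vertices_neighbours(nets)
print(dict(n['c']))   # {'a': 1, 'b': 2}; the zero-weight net is ignored
```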
project-rig/rig | rig/place_and_route/place/rcm.py | _dfs | def _dfs(vertex, vertices_neighbours):
"""Generate all the vertices connected to the supplied vertex in
depth-first-search order.
"""
visited = set()
to_visit = deque([vertex])
while to_visit:
vertex = to_visit.pop()
if vertex not in visited:
yield vertex
visited.add(vertex)
to_visit.extend(vertices_neighbours[vertex]) | python | def _dfs(vertex, vertices_neighbours):
"""Generate all the vertices connected to the supplied vertex in
depth-first-search order.
"""
visited = set()
to_visit = deque([vertex])
while to_visit:
vertex = to_visit.pop()
if vertex not in visited:
yield vertex
visited.add(vertex)
to_visit.extend(vertices_neighbours[vertex]) | [
"def",
"_dfs",
"(",
"vertex",
",",
"vertices_neighbours",
")",
":",
"visited",
"=",
"set",
"(",
")",
"to_visit",
"=",
"deque",
"(",
"[",
"vertex",
"]",
")",
"while",
"to_visit",
":",
"vertex",
"=",
"to_visit",
".",
"pop",
"(",
")",
"if",
"vertex",
"not",
"in",
"visited",
":",
"yield",
"vertex",
"visited",
".",
"add",
"(",
"vertex",
")",
"to_visit",
".",
"extend",
"(",
"vertices_neighbours",
"[",
"vertex",
"]",
")"
] | Generate all the vertices connected to the supplied vertex in
depth-first-search order. | [
"Generate",
"all",
"the",
"vertices",
"connected",
"to",
"the",
"supplied",
"vertex",
"in",
"depth",
"-",
"first",
"-",
"search",
"order",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L31-L42 |
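A quick check of the traversal order; any mapping from vertex to an iterable of neighbours works, including the weighted dicts built by the previous row (iterating a dict yields its keys):

```python
from collections import deque

def dfs(vertex, vertices_neighbours):
    visited = set()
    to_visit = deque([vertex])
    while to_visit:
        vertex = to_visit.pop()     # pop() is LIFO, hence depth-first
        if vertex not in visited:
            yield vertex
            visited.add(vertex)
            to_visit.extend(vertices_neighbours[vertex])

graph = {'a': ['b', 'c'], 'b': ['a', 'd'], 'c': ['a'], 'd': ['b']}
print(list(dfs('a', graph)))        # ['a', 'c', 'b', 'd']
```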
project-rig/rig | rig/place_and_route/place/rcm.py | _get_connected_subgraphs | def _get_connected_subgraphs(vertices, vertices_neighbours):
"""Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
"""
remaining_vertices = set(vertices)
subgraphs = []
while remaining_vertices:
subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours))
remaining_vertices.difference_update(subgraph)
subgraphs.append(subgraph)
return subgraphs | python | def _get_connected_subgraphs(vertices, vertices_neighbours):
"""Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
"""
remaining_vertices = set(vertices)
subgraphs = []
while remaining_vertices:
subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours))
remaining_vertices.difference_update(subgraph)
subgraphs.append(subgraph)
return subgraphs | [
"def",
"_get_connected_subgraphs",
"(",
"vertices",
",",
"vertices_neighbours",
")",
":",
"remaining_vertices",
"=",
"set",
"(",
"vertices",
")",
"subgraphs",
"=",
"[",
"]",
"while",
"remaining_vertices",
":",
"subgraph",
"=",
"set",
"(",
"_dfs",
"(",
"remaining_vertices",
".",
"pop",
"(",
")",
",",
"vertices_neighbours",
")",
")",
"remaining_vertices",
".",
"difference_update",
"(",
"subgraph",
")",
"subgraphs",
".",
"append",
"(",
"subgraph",
")",
"return",
"subgraphs"
] | Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...] | [
"Break",
"a",
"graph",
"containing",
"unconnected",
"subgraphs",
"into",
"a",
"list",
"of",
"connected",
"subgraphs",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L45-L60 |
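An example with two disconnected components; the `dfs` sketch from the previous row is repeated so the block runs on its own:

```python
from collections import deque

def dfs(vertex, vertices_neighbours):
    visited, to_visit = set(), deque([vertex])
    while to_visit:
        vertex = to_visit.pop()
        if vertex not in visited:
            yield vertex
            visited.add(vertex)
            to_visit.extend(vertices_neighbours[vertex])

def get_connected_subgraphs(vertices, vertices_neighbours):
    remaining = set(vertices)
    subgraphs = []
    while remaining:
        # Everything reachable from an arbitrary remaining vertex
        # forms one connected component.
        subgraph = set(dfs(remaining.pop(), vertices_neighbours))
        remaining.difference_update(subgraph)
        subgraphs.append(subgraph)
    return subgraphs

neighbours = {'a': ['b'], 'b': ['a', 'c'], 'c': ['b'],
              'x': ['y'], 'y': ['x']}
print(get_connected_subgraphs(neighbours.keys(), neighbours))
# -> [{'a', 'b', 'c'}, {'x', 'y'}] (component order may vary)
```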
project-rig/rig | rig/place_and_route/place/rcm.py | _cuthill_mckee | def _cuthill_mckee(vertices, vertices_neighbours):
"""Yield the Cuthill-McKee order for a connected, undirected graph.
`Wikipedia
<https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides
a good introduction to the Cuthill-McKee algorithm. The RCM algorithm
attempts to order vertices in a graph such that their adjacency matrix's
bandwidth is reduced. In brief the RCM algorithm is a breadth-first search
with the following tweaks:
* The search starts from the vertex with the lowest degree.
* Vertices discovered in each layer of the search are sorted by ascending
order of their degree in the output.
.. warning::
This function must not be called on a disconnected or empty graph.
Returns
-------
[vertex, ...]
"""
vertices_degrees = {v: sum(itervalues(vertices_neighbours[v]))
for v in vertices}
peripheral_vertex = min(vertices, key=(lambda v: vertices_degrees[v]))
visited = set([peripheral_vertex])
cm_order = [peripheral_vertex]
previous_layer = set([peripheral_vertex])
while len(cm_order) < len(vertices):
adjacent = set()
for vertex in previous_layer:
adjacent.update(vertices_neighbours[vertex])
adjacent.difference_update(visited)
visited.update(adjacent)
cm_order.extend(sorted(adjacent, key=(lambda v: vertices_degrees[v])))
previous_layer = adjacent
return cm_order | python | def _cuthill_mckee(vertices, vertices_neighbours):
"""Yield the Cuthill-McKee order for a connected, undirected graph.
`Wikipedia
<https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides
a good introduction to the Cuthill-McKee algorithm. The RCM algorithm
attempts to order vertices in a graph such that their adjacency matrix's
bandwidth is reduced. In brief the RCM algorithm is a breadth-first search
with the following tweaks:
* The search starts from the vertex with the lowest degree.
* Vertices discovered in each layer of the search are sorted by ascending
order of their degree in the output.
.. warning::
This function must not be called on a disconnected or empty graph.
Returns
-------
[vertex, ...]
"""
vertices_degrees = {v: sum(itervalues(vertices_neighbours[v]))
for v in vertices}
peripheral_vertex = min(vertices, key=(lambda v: vertices_degrees[v]))
visited = set([peripheral_vertex])
cm_order = [peripheral_vertex]
previous_layer = set([peripheral_vertex])
while len(cm_order) < len(vertices):
adjacent = set()
for vertex in previous_layer:
adjacent.update(vertices_neighbours[vertex])
adjacent.difference_update(visited)
visited.update(adjacent)
cm_order.extend(sorted(adjacent, key=(lambda v: vertices_degrees[v])))
previous_layer = adjacent
return cm_order | [
"def",
"_cuthill_mckee",
"(",
"vertices",
",",
"vertices_neighbours",
")",
":",
"vertices_degrees",
"=",
"{",
"v",
":",
"sum",
"(",
"itervalues",
"(",
"vertices_neighbours",
"[",
"v",
"]",
")",
")",
"for",
"v",
"in",
"vertices",
"}",
"peripheral_vertex",
"=",
"min",
"(",
"vertices",
",",
"key",
"=",
"(",
"lambda",
"v",
":",
"vertices_degrees",
"[",
"v",
"]",
")",
")",
"visited",
"=",
"set",
"(",
"[",
"peripheral_vertex",
"]",
")",
"cm_order",
"=",
"[",
"peripheral_vertex",
"]",
"previous_layer",
"=",
"set",
"(",
"[",
"peripheral_vertex",
"]",
")",
"while",
"len",
"(",
"cm_order",
")",
"<",
"len",
"(",
"vertices",
")",
":",
"adjacent",
"=",
"set",
"(",
")",
"for",
"vertex",
"in",
"previous_layer",
":",
"adjacent",
".",
"update",
"(",
"vertices_neighbours",
"[",
"vertex",
"]",
")",
"adjacent",
".",
"difference_update",
"(",
"visited",
")",
"visited",
".",
"update",
"(",
"adjacent",
")",
"cm_order",
".",
"extend",
"(",
"sorted",
"(",
"adjacent",
",",
"key",
"=",
"(",
"lambda",
"v",
":",
"vertices_degrees",
"[",
"v",
"]",
")",
")",
")",
"previous_layer",
"=",
"adjacent",
"return",
"cm_order"
] | Yield the Cuthill-McKee order for a connected, undirected graph.
`Wikipedia
<https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides
a good introduction to the Cuthill-McKee algorithm. The RCM algorithm
attempts to order vertices in a graph such that their adjacency matrix's
bandwidth is reduced. In brief the RCM algorithm is a breadth-first search
with the following tweaks:
* The search starts from the vertex with the lowest degree.
* Vertices discovered in each layer of the search are sorted by ascending
order of their degree in the output.
.. warning::
This function must not be called on a disconnected or empty graph.
Returns
-------
[vertex, ...] | [
"Yield",
"the",
"Cuthill",
"-",
"McKee",
"order",
"for",
"a",
"connected",
"undirected",
"graph",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L63-L103 |
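A worked example on a small weighted graph: degrees are weighted (the sum of incident edge weights), the search starts at the minimum-degree vertex, and each BFS layer is appended in ascending degree order. `dict.values()` stands in for six's `itervalues` so the block runs standalone:

```python
def cuthill_mckee(vertices, vertices_neighbours):
    degrees = {v: sum(vertices_neighbours[v].values()) for v in vertices}
    start = min(vertices, key=lambda v: degrees[v])   # peripheral vertex
    visited, cm_order, previous_layer = {start}, [start], {start}
    while len(cm_order) < len(vertices):
        adjacent = set()
        for vertex in previous_layer:
            adjacent.update(vertices_neighbours[vertex])
        adjacent.difference_update(visited)
        visited.update(adjacent)
        # Within a layer, lower-degree vertices come first.
        cm_order.extend(sorted(adjacent, key=lambda v: degrees[v]))
        previous_layer = adjacent
    return cm_order

# a --1-- b --1-- c     weighted degrees: a=1, b=5, c=1, d=3
#         |
#         3
#         |
#         d
adj = {'a': {'b': 1}, 'b': {'a': 1, 'c': 1, 'd': 3},
       'c': {'b': 1}, 'd': {'b': 3}}
print(cuthill_mckee(['a', 'b', 'c', 'd'], adj))       # ['a', 'b', 'c', 'd']
```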
project-rig/rig | rig/place_and_route/place/rcm.py | rcm_vertex_order | def rcm_vertex_order(vertices_resources, nets):
"""A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer.
"""
vertices_neighbours = _get_vertices_neighbours(nets)
for subgraph_vertices in _get_connected_subgraphs(vertices_resources,
vertices_neighbours):
cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours)
for vertex in reversed(cm_order):
yield vertex | python | def rcm_vertex_order(vertices_resources, nets):
"""A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer.
"""
vertices_neighbours = _get_vertices_neighbours(nets)
for subgraph_vertices in _get_connected_subgraphs(vertices_resources,
vertices_neighbours):
cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours)
for vertex in reversed(cm_order):
yield vertex | [
"def",
"rcm_vertex_order",
"(",
"vertices_resources",
",",
"nets",
")",
":",
"vertices_neighbours",
"=",
"_get_vertices_neighbours",
"(",
"nets",
")",
"for",
"subgraph_vertices",
"in",
"_get_connected_subgraphs",
"(",
"vertices_resources",
",",
"vertices_neighbours",
")",
":",
"cm_order",
"=",
"_cuthill_mckee",
"(",
"subgraph_vertices",
",",
"vertices_neighbours",
")",
"for",
"vertex",
"in",
"reversed",
"(",
"cm_order",
")",
":",
"yield",
"vertex"
] | A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer. | [
"A",
"generator",
"which",
"iterates",
"over",
"the",
"vertices",
"in",
"Reverse",
"-",
"Cuthill",
"-",
"McKee",
"order",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L106-L117 |
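End to end, the reverse ordering is just each component's Cuthill-McKee order read backwards. This sketch assumes the `Net`, `get_vertices_neighbours`, `get_connected_subgraphs` and `cuthill_mckee` stand-ins from the previous rows are already defined:

```python
def rcm_vertex_order(vertices_resources, nets):
    neighbours = get_vertices_neighbours(nets)
    for component in get_connected_subgraphs(vertices_resources, neighbours):
        # reversed() turns Cuthill-McKee into Reverse-Cuthill-McKee.
        yield from reversed(cuthill_mckee(component, neighbours))

nets = [Net('a', ['b'], 1), Net('b', ['c'], 1), Net('b', ['d'], 3)]
vertices_resources = {'a': {}, 'b': {}, 'c': {}, 'd': {}}
print(list(rcm_vertex_order(vertices_resources, nets)))
# e.g. ['d', 'c', 'b', 'a']; the degree tie between 'a' and 'c' makes
# the start vertex (and hence the exact order) run-dependent
```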
project-rig/rig | rig/place_and_route/place/rcm.py | rcm_chip_order | def rcm_chip_order(machine):
"""A generator which iterates over a set of chips in a machine in
Reverse-Cuthill-McKee order.
For use as a chip ordering for the sequential placer.
"""
# Convert the Machine description into a placement-problem-style-graph
# where the vertices are chip coordinate tuples (x, y) and each net
# represents the links leaving each chip. This allows us to re-use the
# rcm_vertex_order function above to generate an RCM ordering of chips in
# the machine.
vertices = list(machine)
nets = []
for (x, y) in vertices:
neighbours = []
for link in Links:
if (x, y, link) in machine:
dx, dy = link.to_vector()
neighbour = ((x + dx) % machine.width,
(y + dy) % machine.height)
# In principle if the link to chip is marked as working, that
# chip should be working. In practice this might not be the
# case (especially for carelessly hand-defined Machine
# objects).
if neighbour in machine:
neighbours.append(neighbour)
nets.append(Net((x, y), neighbours))
return rcm_vertex_order(vertices, nets) | python | def rcm_chip_order(machine):
"""A generator which iterates over a set of chips in a machine in
Reverse-Cuthill-McKee order.
For use as a chip ordering for the sequential placer.
"""
# Convert the Machine description into a placement-problem-style-graph
# where the vertices are chip coordinate tuples (x, y) and each net
# represents the links leaving each chip. This allows us to re-use the
# rcm_vertex_order function above to generate an RCM ordering of chips in
# the machine.
vertices = list(machine)
nets = []
for (x, y) in vertices:
neighbours = []
for link in Links:
if (x, y, link) in machine:
dx, dy = link.to_vector()
neighbour = ((x + dx) % machine.width,
(y + dy) % machine.height)
# In principle if the link to chip is marked as working, that
# chip should be working. In practice this might not be the
# case (especially for carelessly hand-defined Machine
# objects).
if neighbour in machine:
neighbours.append(neighbour)
nets.append(Net((x, y), neighbours))
return rcm_vertex_order(vertices, nets) | [
"def",
"rcm_chip_order",
"(",
"machine",
")",
":",
"# Convert the Machine description into a placement-problem-style-graph",
"# where the vertices are chip coordinate tuples (x, y) and each net",
"# represents the links leaving each chip. This allows us to re-use the",
"# rcm_vertex_order function above to generate an RCM ordering of chips in",
"# the machine.",
"vertices",
"=",
"list",
"(",
"machine",
")",
"nets",
"=",
"[",
"]",
"for",
"(",
"x",
",",
"y",
")",
"in",
"vertices",
":",
"neighbours",
"=",
"[",
"]",
"for",
"link",
"in",
"Links",
":",
"if",
"(",
"x",
",",
"y",
",",
"link",
")",
"in",
"machine",
":",
"dx",
",",
"dy",
"=",
"link",
".",
"to_vector",
"(",
")",
"neighbour",
"=",
"(",
"(",
"x",
"+",
"dx",
")",
"%",
"machine",
".",
"width",
",",
"(",
"y",
"+",
"dy",
")",
"%",
"machine",
".",
"height",
")",
"# In principle if the link to chip is marked as working, that",
"# chip should be working. In practice this might not be the",
"# case (especially for carelessly hand-defined Machine",
"# objects).",
"if",
"neighbour",
"in",
"machine",
":",
"neighbours",
".",
"append",
"(",
"neighbour",
")",
"nets",
".",
"append",
"(",
"Net",
"(",
"(",
"x",
",",
"y",
")",
",",
"neighbours",
")",
")",
"return",
"rcm_vertex_order",
"(",
"vertices",
",",
"nets",
")"
] | A generator which iterates over a set of chips in a machine in
Reverse-Cuthill-McKee order.
For use as a chip ordering for the sequential placer. | [
"A",
"generator",
"which",
"iterates",
"over",
"a",
"set",
"of",
"chips",
"in",
"a",
"machine",
"in",
"Reverse",
"-",
"Cuthill",
"-",
"McKee",
"order",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L120-L149 |
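The Machine-to-graph conversion sketched on a toy 2x2 torus, with plain stand-ins for rig's `Machine` and `Links` (only the pieces the function reads: membership tests, width/height, and link vectors; the vectors below are illustrative, not rig's actual link set):

```python
from collections import namedtuple

Net = namedtuple('Net', 'source sinks')

WIDTH, HEIGHT = 2, 2
LINK_VECTORS = [(1, 0), (0, 1), (1, 1)]
chips = {(x, y) for x in range(WIDTH) for y in range(HEIGHT)}

nets = []
for (x, y) in sorted(chips):
    neighbours = []
    for dx, dy in LINK_VECTORS:
        # Torus wrap-around, exactly as in the row above.
        neighbour = ((x + dx) % WIDTH, (y + dy) % HEIGHT)
        if neighbour in chips:      # skip dead or missing chips
            neighbours.append(neighbour)
    nets.append(Net((x, y), neighbours))

print(nets[0])   # Net(source=(0, 0), sinks=[(1, 0), (0, 1), (1, 1)])
```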
project-rig/rig | rig/place_and_route/place/rcm.py | place | def place(vertices_resources, nets, machine, constraints):
"""Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to produce an ordering of vertices that would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
rcm_vertex_order(vertices_resources, nets),
rcm_chip_order(machine)) | python | def place(vertices_resources, nets, machine, constraints):
"""Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to produce an ordering of vertices that would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
rcm_vertex_order(vertices_resources, nets),
rcm_chip_order(machine)) | [
"def",
"place",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
")",
":",
"return",
"sequential_place",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"rcm_vertex_order",
"(",
"vertices_resources",
",",
"nets",
")",
",",
"rcm_chip_order",
"(",
"machine",
")",
")"
] | Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to yield an ordering of vertices which would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default. | [
"Assigns",
"vertices",
"to",
"chips",
"in",
"Reverse",
"-",
"Cuthill",
"-",
"McKee",
"(",
"RCM",
")",
"order",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rcm.py#L152-L180 |
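A hypothetical usage sketch against rig's public API; the import locations and names follow rig's documentation but may differ across versions, and the machine size and resource amounts are purely illustrative:

```python
from rig.netlist import Net
from rig.place_and_route import Machine, Cores
from rig.place_and_route.place.rcm import place

machine = Machine(width=2, height=2)       # a 2x2 SpiNNaker machine
v0, v1 = object(), object()                # vertices can be any object
vertices_resources = {v0: {Cores: 1}, v1: {Cores: 1}}
nets = [Net(v0, [v1])]

placements = place(vertices_resources, nets, machine, constraints=[])
print(placements)                          # e.g. {v0: (0, 0), v1: (0, 0)}
```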
NicolasLM/spinach | spinach/contrib/datadog.py | register_datadog | def register_datadog(tracer=None, namespace: Optional[str]=None,
service: str='spinach'):
"""Register the Datadog integration.
Exceptions making jobs fail are recorded on the Datadog trace.
:param tracer: optionally use a custom ddtrace Tracer instead of the global
one.
:param namespace: optionally only register the Datadog integration for a
particular Spinach :class:`Engine`
:param service: Datadog service associated with the trace, defaults to
`spinach`
"""
if tracer is None:
from ddtrace import tracer
@signals.job_started.connect_via(namespace)
def job_started(namespace, job, **kwargs):
tracer.trace(
'spinach.task', service=service, span_type='worker',
resource=job.task_name
)
@signals.job_finished.connect_via(namespace)
def job_finished(namespace, job, **kwargs):
root_span = tracer.current_root_span()
for attr in job.__slots__:
root_span.set_tag(attr, getattr(job, attr))
root_span.finish()
@signals.job_failed.connect_via(namespace)
def job_failed(namespace, job, **kwargs):
root_span = tracer.current_root_span()
root_span.set_traceback()
@signals.job_schedule_retry.connect_via(namespace)
def job_schedule_retry(namespace, job, **kwargs):
root_span = tracer.current_root_span()
root_span.set_traceback() | python | def register_datadog(tracer=None, namespace: Optional[str]=None,
service: str='spinach'):
"""Register the Datadog integration.
Exceptions making jobs fail are recorded on the Datadog trace.
:param tracer: optionally use a custom ddtrace Tracer instead of the global
one.
:param namespace: optionally only register the Datadog integration for a
particular Spinach :class:`Engine`
:param service: Datadog service associated with the trace, defaults to
`spinach`
"""
if tracer is None:
from ddtrace import tracer
@signals.job_started.connect_via(namespace)
def job_started(namespace, job, **kwargs):
tracer.trace(
'spinach.task', service=service, span_type='worker',
resource=job.task_name
)
@signals.job_finished.connect_via(namespace)
def job_finished(namespace, job, **kwargs):
root_span = tracer.current_root_span()
for attr in job.__slots__:
root_span.set_tag(attr, getattr(job, attr))
root_span.finish()
@signals.job_failed.connect_via(namespace)
def job_failed(namespace, job, **kwargs):
root_span = tracer.current_root_span()
root_span.set_traceback()
@signals.job_schedule_retry.connect_via(namespace)
def job_schedule_retry(namespace, job, **kwargs):
root_span = tracer.current_root_span()
root_span.set_traceback() | [
"def",
"register_datadog",
"(",
"tracer",
"=",
"None",
",",
"namespace",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"service",
":",
"str",
"=",
"'spinach'",
")",
":",
"if",
"tracer",
"is",
"None",
":",
"from",
"ddtrace",
"import",
"tracer",
"@",
"signals",
".",
"job_started",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_started",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"tracer",
".",
"trace",
"(",
"'spinach.task'",
",",
"service",
"=",
"service",
",",
"span_type",
"=",
"'worker'",
",",
"resource",
"=",
"job",
".",
"task_name",
")",
"@",
"signals",
".",
"job_finished",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_finished",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"root_span",
"=",
"tracer",
".",
"current_root_span",
"(",
")",
"for",
"attr",
"in",
"job",
".",
"__slots__",
":",
"root_span",
".",
"set_tag",
"(",
"attr",
",",
"getattr",
"(",
"job",
",",
"attr",
")",
")",
"root_span",
".",
"finish",
"(",
")",
"@",
"signals",
".",
"job_failed",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_failed",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"root_span",
"=",
"tracer",
".",
"current_root_span",
"(",
")",
"root_span",
".",
"set_traceback",
"(",
")",
"@",
"signals",
".",
"job_schedule_retry",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_schedule_retry",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"root_span",
"=",
"tracer",
".",
"current_root_span",
"(",
")",
"root_span",
".",
"set_traceback",
"(",
")"
] | Register the Datadog integration.
Exceptions making jobs fail are recorded on the Datadog trace.
:param tracer: optionally use a custom ddtrace Tracer instead of the global
one.
:param namespace: optionally only register the Datadog integration for a
particular Spinach :class:`Engine`
:param service: Datadog service associated with the trace, defaults to
`spinach` | [
"Register",
"the",
"Datadog",
"integration",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/contrib/datadog.py#L6-L44 |
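A hedged usage sketch wiring the integration into a Spinach `Engine` (the `Engine`, `Tasks` and `MemoryBroker` names follow Spinach's documented API, and `ddtrace` must be installed for the default-tracer fallback inside `register_datadog`):

```python
from spinach import Engine, MemoryBroker, Tasks
from spinach.contrib.datadog import register_datadog

tasks = Tasks()

@tasks.task(name='compute')
def compute():
    print('computing')

spin = Engine(MemoryBroker(), namespace='spinach')
spin.attach_tasks(tasks)

# Register before running jobs so the started/finished/failed signals
# open, tag and close a ddtrace span around every job in this namespace.
register_datadog(namespace='spinach')
```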
Metatab/metapack | metapack/rowgenerator.py | copy_reference | def copy_reference(resource, doc, env, *args, **kwargs):
"""A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency
The function will generate rows from a reference that has the same name as the resource term
"""
yield from doc.reference(resource.name) | python | def copy_reference(resource, doc, env, *args, **kwargs):
"""A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency
The function will generate rows from a reference that has the same name as the resource term
"""
yield from doc.reference(resource.name) | [
"def",
"copy_reference",
"(",
"resource",
",",
"doc",
",",
"env",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"yield",
"from",
"doc",
".",
"reference",
"(",
"resource",
".",
"name",
")"
] | A row-generating function that yields from a reference. This permits an upstream package to be
copied and modified by this package, while being formally referenced as a dependency
The function will generate rows from a reference that has the same name as the resource term | [
"A",
"row",
"-",
"generating",
"function",
"that",
"yields",
"from",
"a",
"reference",
".",
"This",
"permits",
"an",
"upstream",
"package",
"to",
"be",
"copied",
"and",
"modified",
"by",
"this",
"package",
"while",
"being",
"formally",
"referenced",
"as",
"a",
"dependency"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/rowgenerator.py#L164-L171 |
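The mechanism is plain generator delegation; a toy stand-in for `doc.reference(name)` (illustrative, not the metapack API) makes the `yield from` behaviour visible:

```python
def reference_rows(name):
    yield ('id', 'value')          # header row, then data rows
    yield (1, 'a')
    yield (2, 'b')

def copy_reference(name):
    # Re-emit every row from the upstream reference, unchanged.
    yield from reference_rows(name)

print(list(copy_reference('upstream')))
# [('id', 'value'), (1, 'a'), (2, 'b')]
```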
Metatab/metapack | metapack/rowgenerator.py | copy_reference_group | def copy_reference_group(resource, doc, env, *args, **kwargs):
"""
A Row generating function that copies all of the references that have the same 'Group' argument as this reference
The 'RefArgs' argument is a comma seperated list of arguments from the references that will be prepended to each
row.
:param resource:
:param doc:
:param env:
:param args:
:param kwargs:
:return:
"""
all_headers = []
# Combine all of the headers into a list of tuples by position
for ref in doc.references():
if ref.get_value('Group') == resource.get_value('Group'):
for row in ref.iterrowproxy():
all_headers.append(list(row.keys()))
break
# For each position, add the headers that are not already in the header set.
# this merges the headers from all datasets, maintaining the order. mostly.
headers = []
for e in zip(*all_headers):
for c in set(e):
if c not in headers:
headers.append(c)
if resource.get_value('RefArgs'):
ref_args = [e.strip() for e in resource.get_value('RefArgs').strip().split(',')]
else:
ref_args = []
yield ref_args+headers
for ref in doc.references():
if ref.get_value('Group') == resource.get_value('Group'):
ref_args_values = [ ref.get_value(e) for e in ref_args]
for row in ref.iterdict:
yield ref_args_values + [ row.get(c) for c in headers] | python | def copy_reference_group(resource, doc, env, *args, **kwargs):
"""
A Row generating function that copies all of the references that have the same 'Group' argument as this reference
The 'RefArgs' argument is a comma seperated list of arguments from the references that will be prepended to each
row.
:param resource:
:param doc:
:param env:
:param args:
:param kwargs:
:return:
"""
all_headers = []
# Combine all of the headers into a list of tuples by position
for ref in doc.references():
if ref.get_value('Group') == resource.get_value('Group'):
for row in ref.iterrowproxy():
all_headers.append(list(row.keys()))
break
# For each position, add the headers that are not already in the header set.
# this merges the headers from all datasets, maintaining the order. mostly.
headers = []
for e in zip(*all_headers):
for c in set(e):
if c not in headers:
headers.append(c)
if resource.get_value('RefArgs'):
ref_args = [e.strip() for e in resource.get_value('RefArgs').strip().split(',')]
else:
ref_args = []
yield ref_args+headers
for ref in doc.references():
if ref.get_value('Group') == resource.get_value('Group'):
ref_args_values = [ ref.get_value(e) for e in ref_args]
for row in ref.iterdict:
yield ref_args_values + [ row.get(c) for c in headers] | [
"def",
"copy_reference_group",
"(",
"resource",
",",
"doc",
",",
"env",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"all_headers",
"=",
"[",
"]",
"# Combine all of the headers into a list of tuples by position",
"for",
"ref",
"in",
"doc",
".",
"references",
"(",
")",
":",
"if",
"ref",
".",
"get_value",
"(",
"'Group'",
")",
"==",
"resource",
".",
"get_value",
"(",
"'Group'",
")",
":",
"for",
"row",
"in",
"ref",
".",
"iterrowproxy",
"(",
")",
":",
"all_headers",
".",
"append",
"(",
"list",
"(",
"row",
".",
"keys",
"(",
")",
")",
")",
"break",
"# For each position, add the headers that are not already in the header set.",
"# this merges the headers from all datasets, maintaining the order. mostly.",
"headers",
"=",
"[",
"]",
"for",
"e",
"in",
"zip",
"(",
"*",
"all_headers",
")",
":",
"for",
"c",
"in",
"set",
"(",
"e",
")",
":",
"if",
"c",
"not",
"in",
"headers",
":",
"headers",
".",
"append",
"(",
"c",
")",
"if",
"resource",
".",
"get_value",
"(",
"'RefArgs'",
")",
":",
"ref_args",
"=",
"[",
"e",
".",
"strip",
"(",
")",
"for",
"e",
"in",
"resource",
".",
"get_value",
"(",
"'RefArgs'",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"','",
")",
"]",
"else",
":",
"ref_args",
"=",
"[",
"]",
"yield",
"ref_args",
"+",
"headers",
"for",
"ref",
"in",
"doc",
".",
"references",
"(",
")",
":",
"if",
"ref",
".",
"get_value",
"(",
"'Group'",
")",
"==",
"resource",
".",
"get_value",
"(",
"'Group'",
")",
":",
"ref_args_values",
"=",
"[",
"ref",
".",
"get_value",
"(",
"e",
")",
"for",
"e",
"in",
"ref_args",
"]",
"for",
"row",
"in",
"ref",
".",
"iterdict",
":",
"yield",
"ref_args_values",
"+",
"[",
"row",
".",
"get",
"(",
"c",
")",
"for",
"c",
"in",
"headers",
"]"
] | A Row generating function that copies all of the references that have the same 'Group' argument as this reference
The 'RefArgs' argument is a comma-separated list of arguments from the references that will be prepended to each
row.
:param resource:
:param doc:
:param env:
:param args:
:param kwargs:
:return: | [
"A",
"Row",
"generating",
"function",
"that",
"copies",
"all",
"of",
"the",
"references",
"that",
"have",
"the",
"same",
"Group",
"argument",
"as",
"this",
"reference"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/rowgenerator.py#L173-L218 |
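The header-merge step in isolation: headers are zipped by position and the first occurrence of each name wins, which is why the source comment says order is maintained "mostly" (where two sources disagree at a position, `set` iteration order decides):

```python
all_headers = [['id', 'name', 'year'],
               ['id', 'name', 'state']]

headers = []
for e in zip(*all_headers):    # position-wise tuples: ('id', 'id'), ...
    for c in set(e):
        if c not in headers:
            headers.append(c)

print(headers)
# ['id', 'name', 'year', 'state'] or ['id', 'name', 'state', 'year'],
# depending on set order at the position where the sources disagree
```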
Metatab/metapack | metapack/package/filesystem.py | FileSystemPackageBuilder.is_older_than_metadata | def is_older_than_metadata(self):
"""
Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns
False if the time of either can't be determined
:param path: Optional extra save path, used in save_path()
"""
try:
path = self.doc_file.path
except AttributeError:
path = self.doc_file
source_ref = self._doc.ref.path
try:
age_diff = getmtime(source_ref) - getmtime(path)
return age_diff > 0
except (FileNotFoundError, OSError):
return False | python | def is_older_than_metadata(self):
"""
Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns
False if the time of either can't be determined
:param path: Optional extra save path, used in save_path()
"""
try:
path = self.doc_file.path
except AttributeError:
path = self.doc_file
source_ref = self._doc.ref.path
try:
age_diff = getmtime(source_ref) - getmtime(path)
return age_diff > 0
except (FileNotFoundError, OSError):
return False | [
"def",
"is_older_than_metadata",
"(",
"self",
")",
":",
"try",
":",
"path",
"=",
"self",
".",
"doc_file",
".",
"path",
"except",
"AttributeError",
":",
"path",
"=",
"self",
".",
"doc_file",
"source_ref",
"=",
"self",
".",
"_doc",
".",
"ref",
".",
"path",
"try",
":",
"age_diff",
"=",
"getmtime",
"(",
"source_ref",
")",
"-",
"getmtime",
"(",
"path",
")",
"return",
"age_diff",
">",
"0",
"except",
"(",
"FileNotFoundError",
",",
"OSError",
")",
":",
"return",
"False"
] | Return True if the package save file is older than the metadata. If it is, it should be rebuilt. Returns
False if the time of either can't be determined
:param path: Optional extra save path, used in save_path() | [
"Return",
"True",
"if",
"the",
"package",
"save",
"file",
"is",
"older",
"than",
"the",
"metadata",
".",
"If",
"it",
"is",
"it",
"should",
"be",
"rebuilt",
".",
"Returns",
"False",
"if",
"the",
"time",
"of",
"either",
"can",
"t",
"be",
"determined"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/filesystem.py#L82-L104 |
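The staleness test in isolation, runnable against two temporary files; the 1.1-second sleep keeps the demonstration safe on filesystems with coarse mtime resolution:

```python
import tempfile
import time
from os.path import getmtime
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
pkg, src = tmp / 'package.csv', tmp / 'metadata.csv'
pkg.write_text('built output')
time.sleep(1.1)
src.write_text('metadata edited after the build')

def is_older_than_metadata(package_path, source_path):
    try:
        # A positive difference means the source changed after the build.
        return (getmtime(source_path) - getmtime(package_path)) > 0
    except (FileNotFoundError, OSError):
        return False               # can't tell, so don't force a rebuild

print(is_older_than_metadata(pkg, src))    # True -> rebuild the package
```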
Metatab/metapack | metapack/package/filesystem.py | FileSystemPackageBuilder._load_resource | def _load_resource(self, source_r, abs_path=False):
"""The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system package on a publicly accessible server. """
from itertools import islice
from metapack.exc import MetapackError
from os.path import splitext
# Refetch the resource ... IIRC b/c the source_r resource may actually be from
# a different package. So r is the resource we want to possibly modify in this package,
# while source_r is from a different souce package, whose data is being loaded into this
# one.
r = self.datafile(source_r.name)
if self.reuse_resources:
self.prt("Re-using data for '{}' ".format(r.name))
else:
self.prt("Loading data for '{}' ".format(r.name))
if not r.name:
raise MetapackError(f"Resource/reference term has no name: {str(r)}")
# Special handling for SQL should not be done here; it should be done in Rowgenerators, probably.
if r.term_is('root.sql'):
new_r = self.doc['Resources'].new_term('Root.Datafile', '')
new_r.name = r.name
self.doc.remove_term(r)
r = new_r
r.url = 'data/' + r.name + '.csv' # Re-writing the URL for the resource.
path = join(self.package_path.path, r.url)
makedirs(dirname(path), exist_ok=True)
if not self.reuse_resources or not exists(path):
if self.reuse_resources:
self.prt("Resource {} doesn't exist, rebuilding".format(path))
if exists(path):
remove(path)
gen = islice(source_r, 1, None)
headers = source_r.headers
self.write_csv(path, headers, gen)
for k, v in source_r.post_iter_meta.items():
r[k] = v
try:
if source_r.errors:
for col_name, errors in source_r.errors.items():
self.warn("ERRORS for column '{}' ".format(col_name))
for e in islice(errors,5):
self.warn(' {}'.format(e))
if len(errors) > 5:
self.warn("... and {} more ".format(len(errors)-5))
except AttributeError:
pass # Maybe generator does not track errors
if source_r.errors:
self.err("Resource processing generated conversion errors")
# Writing between resources so row-generating programs and notebooks can
# access previously created resources. We have to clean the doc before writing it
ref = self._write_doc()
# What a wreck ... we also have to get rid of the 'Transform' values, since the CSV files
# that are written don't need them, and a lot of intermediate processing ( specifically,
# jupyter Notebooks, ) does not load them.
p = FileSystemPackageBuilder(ref, self.package_root)
p._clean_doc()
ref = p._write_doc() | python | def _load_resource(self, source_r, abs_path=False):
"""The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system package on a publicly accessible server. """
from itertools import islice
from metapack.exc import MetapackError
from os.path import splitext
# Refetch the resource ... IIRC b/c the source_r resource may actually be from
# a different package. So r is the resource we want to possibly modify in this package,
# while source_r is from a different source package, whose data is being loaded into this
# one.
r = self.datafile(source_r.name)
if self.reuse_resources:
self.prt("Re-using data for '{}' ".format(r.name))
else:
self.prt("Loading data for '{}' ".format(r.name))
if not r.name:
raise MetapackError(f"Resource/reference term has no name: {str(r)}")
# Special handling for SQL should not be done here; it should be done in Rowgenerators, probably.
if r.term_is('root.sql'):
new_r = self.doc['Resources'].new_term('Root.Datafile', '')
new_r.name = r.name
self.doc.remove_term(r)
r = new_r
r.url = 'data/' + r.name + '.csv' # Re-writing the URL for the resource.
path = join(self.package_path.path, r.url)
makedirs(dirname(path), exist_ok=True)
if not self.reuse_resources or not exists(path):
if self.reuse_resources:
self.prt("Resource {} doesn't exist, rebuilding".format(path))
if exists(path):
remove(path)
gen = islice(source_r, 1, None)
headers = source_r.headers
self.write_csv(path, headers, gen)
for k, v in source_r.post_iter_meta.items():
r[k] = v
try:
if source_r.errors:
for col_name, errors in source_r.errors.items():
self.warn("ERRORS for column '{}' ".format(col_name))
for e in islice(errors,5):
self.warn(' {}'.format(e))
if len(errors) > 5:
self.warn("... and {} more ".format(len(errors)-5))
except AttributeError:
pass # Maybe generator does not track errors
if source_r.errors:
self.err("Resource processing generated conversion errors")
# Writing between resources so row-generating programs and notebooks can
# access previously created resources. We have to clean the doc before writing it
ref = self._write_doc()
# What a wreck ... we also have to get rid of the 'Transform' values, since the CSV files
# that are written don't need them, and a lot of intermediate processsing ( specifically,
# jupyter Notebooks, ) does not load them.
p = FileSystemPackageBuilder(ref, self.package_root)
p._clean_doc()
ref = p._write_doc() | [
"def",
"_load_resource",
"(",
"self",
",",
"source_r",
",",
"abs_path",
"=",
"False",
")",
":",
"from",
"itertools",
"import",
"islice",
"from",
"metapack",
".",
"exc",
"import",
"MetapackError",
"from",
"os",
".",
"path",
"import",
"splitext",
"# Refetch the resource ... IIRC b/c the source_r resource may actually be from",
"# a different package. So r is the resource we want to possibly modify in this package,",
"# while source_r is from a different souce package, whose data is being loaded into this",
"# one.",
"r",
"=",
"self",
".",
"datafile",
"(",
"source_r",
".",
"name",
")",
"if",
"self",
".",
"reuse_resources",
":",
"self",
".",
"prt",
"(",
"\"Re-using data for '{}' \"",
".",
"format",
"(",
"r",
".",
"name",
")",
")",
"else",
":",
"self",
".",
"prt",
"(",
"\"Loading data for '{}' \"",
".",
"format",
"(",
"r",
".",
"name",
")",
")",
"if",
"not",
"r",
".",
"name",
":",
"raise",
"MetapackError",
"(",
"f\"Resource/reference term has no name: {str(r)}\"",
")",
"# Special handing for SQL should not be done here; it should be done in Rowgenerators, probably.",
"if",
"r",
".",
"term_is",
"(",
"'root.sql'",
")",
":",
"new_r",
"=",
"self",
".",
"doc",
"[",
"'Resources'",
"]",
".",
"new_term",
"(",
"'Root.Datafile'",
",",
"''",
")",
"new_r",
".",
"name",
"=",
"r",
".",
"name",
"self",
".",
"doc",
".",
"remove_term",
"(",
"r",
")",
"r",
"=",
"new_r",
"r",
".",
"url",
"=",
"'data/'",
"+",
"r",
".",
"name",
"+",
"'.csv'",
"# Re-writing the URL for the resource.",
"path",
"=",
"join",
"(",
"self",
".",
"package_path",
".",
"path",
",",
"r",
".",
"url",
")",
"makedirs",
"(",
"dirname",
"(",
"path",
")",
",",
"exist_ok",
"=",
"True",
")",
"if",
"not",
"self",
".",
"reuse_resources",
"or",
"not",
"exists",
"(",
"path",
")",
":",
"if",
"self",
".",
"reuse_resources",
":",
"self",
".",
"prt",
"(",
"\"Resource {} doesn't exist, rebuilding\"",
".",
"format",
"(",
"path",
")",
")",
"if",
"exists",
"(",
"path",
")",
":",
"remove",
"(",
"path",
")",
"gen",
"=",
"islice",
"(",
"source_r",
",",
"1",
",",
"None",
")",
"headers",
"=",
"source_r",
".",
"headers",
"self",
".",
"write_csv",
"(",
"path",
",",
"headers",
",",
"gen",
")",
"for",
"k",
",",
"v",
"in",
"source_r",
".",
"post_iter_meta",
".",
"items",
"(",
")",
":",
"r",
"[",
"k",
"]",
"=",
"v",
"try",
":",
"if",
"source_r",
".",
"errors",
":",
"for",
"col_name",
",",
"errors",
"in",
"source_r",
".",
"errors",
".",
"items",
"(",
")",
":",
"self",
".",
"warn",
"(",
"\"ERRORS for column '{}' \"",
".",
"format",
"(",
"col_name",
")",
")",
"for",
"e",
"in",
"islice",
"(",
"errors",
",",
"5",
")",
":",
"self",
".",
"warn",
"(",
"' {}'",
".",
"format",
"(",
"e",
")",
")",
"if",
"len",
"(",
"errors",
")",
">",
"5",
":",
"self",
".",
"warn",
"(",
"\"... and {} more \"",
".",
"format",
"(",
"len",
"(",
"errors",
")",
"-",
"5",
")",
")",
"except",
"AttributeError",
":",
"pass",
"# Maybe generator does not track errors",
"if",
"source_r",
".",
"errors",
":",
"self",
".",
"err",
"(",
"\"Resource processing generated conversion errors\"",
")",
"# Writing between resources so row-generating programs and notebooks can",
"# access previously created resources. We have to clean the doc before writing it",
"ref",
"=",
"self",
".",
"_write_doc",
"(",
")",
"# What a wreck ... we also have to get rid of the 'Transform' values, since the CSV files",
"# that are written don't need them, and a lot of intermediate processsing ( specifically,",
"# jupyter Notebooks, ) does not load them.",
"p",
"=",
"FileSystemPackageBuilder",
"(",
"ref",
",",
"self",
".",
"package_root",
")",
"p",
".",
"_clean_doc",
"(",
")",
"ref",
"=",
"p",
".",
"_write_doc",
"(",
")"
] | The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system package on a publicly accessible server. | [
"The",
"CSV",
"package",
"has",
"no",
"resources",
"so",
"we",
"just",
"need",
"to",
"resolve",
"the",
"URLs",
"to",
"them",
".",
"Usually",
"the",
"CSV",
"package",
"is",
"built",
"from",
"a",
"file",
"system",
"ackage",
"on",
"a",
"publically",
"acessible",
"server",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/filesystem.py#L150-L228 |
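One idiom from the method above, isolated: metapack resource iterators yield a header row first, so `islice(resource, 1, None)` produces the data rows only (in the real method the header text is taken from `source_r.headers` rather than the first row):

```python
from itertools import islice

resource = iter([['id', 'name'], [1, 'alice'], [2, 'bob']])
gen = islice(resource, 1, None)    # skip the header row
print(list(gen))                   # [[1, 'alice'], [2, 'bob']]
```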
Metatab/metapack | metapack/package/filesystem.py | FileSystemPackageBuilder._load_documentation | def _load_documentation(self, term, contents, file_name):
"""Load a single documentation entry"""
try:
title = term['title'].value
except KeyError:
self.warn("Documentation has no title, skipping: '{}' ".format(term.value))
return
if term.term_is('Root.Readme'): # This term type has inline content, not a url
package_sub_dir = 'docs'
else:
try:
eu = term.expanded_url
parsed_url = term.parsed_url
except AttributeError:
parsed_url = eu = parse_app_url(term.value)
# Can't used expanded_url here because expansion makes file system URLS absolute.
if eu.proto == 'file' and not parsed_url.path_is_absolute:
package_sub_dir = parsed_url.fspath.parent
else:
package_sub_dir = 'docs'
path = join(self.package_path.path, package_sub_dir, file_name)
self.prt("Loading documentation for '{}', '{}' to '{}' ".format(title, file_name, path))
makedirs(dirname(path), exist_ok=True)
if exists(path):
remove(path)
with open(path, 'wb') as f:
f.write(contents) | python | def _load_documentation(self, term, contents, file_name):
"""Load a single documentation entry"""
try:
title = term['title'].value
except KeyError:
self.warn("Documentation has no title, skipping: '{}' ".format(term.value))
return
if term.term_is('Root.Readme'): # This term type has inline content, not a url
package_sub_dir = 'docs'
else:
try:
eu = term.expanded_url
parsed_url = term.parsed_url
except AttributeError:
parsed_url = eu = parse_app_url(term.value)
# Can't use expanded_url here because expansion makes file system URLs absolute.
if eu.proto == 'file' and not parsed_url.path_is_absolute:
package_sub_dir = parsed_url.fspath.parent
else:
package_sub_dir = 'docs'
path = join(self.package_path.path, package_sub_dir, file_name)
self.prt("Loading documentation for '{}', '{}' to '{}' ".format(title, file_name, path))
makedirs(dirname(path), exist_ok=True)
if exists(path):
remove(path)
with open(path, 'wb') as f:
f.write(contents) | [
"def",
"_load_documentation",
"(",
"self",
",",
"term",
",",
"contents",
",",
"file_name",
")",
":",
"try",
":",
"title",
"=",
"term",
"[",
"'title'",
"]",
".",
"value",
"except",
"KeyError",
":",
"self",
".",
"warn",
"(",
"\"Documentation has no title, skipping: '{}' \"",
".",
"format",
"(",
"term",
".",
"value",
")",
")",
"return",
"if",
"term",
".",
"term_is",
"(",
"'Root.Readme'",
")",
":",
"# This term type has inline content, not a url",
"package_sub_dir",
"=",
"'docs'",
"else",
":",
"try",
":",
"eu",
"=",
"term",
".",
"expanded_url",
"parsed_url",
"=",
"term",
".",
"parsed_url",
"except",
"AttributeError",
":",
"parsed_url",
"=",
"eu",
"=",
"parse_app_url",
"(",
"term",
".",
"value",
")",
"# Can't used expanded_url here because expansion makes file system URLS absolute.",
"if",
"eu",
".",
"proto",
"==",
"'file'",
"and",
"not",
"parsed_url",
".",
"path_is_absolute",
":",
"package_sub_dir",
"=",
"parsed_url",
".",
"fspath",
".",
"parent",
"else",
":",
"package_sub_dir",
"=",
"'docs'",
"path",
"=",
"join",
"(",
"self",
".",
"package_path",
".",
"path",
",",
"package_sub_dir",
",",
"file_name",
")",
"self",
".",
"prt",
"(",
"\"Loading documentation for '{}', '{}' to '{}' \"",
".",
"format",
"(",
"title",
",",
"file_name",
",",
"path",
")",
")",
"makedirs",
"(",
"dirname",
"(",
"path",
")",
",",
"exist_ok",
"=",
"True",
")",
"if",
"exists",
"(",
"path",
")",
":",
"remove",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"contents",
")"
] | Load a single documentation entry | [
"Load",
"a",
"single",
"documentation",
"entry"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/filesystem.py#L272-L307 |
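Note: the sub-directory choice made by `_load_documentation` can be summarized in a small helper; this is purely illustrative, metapack exposes no such function.

```python
from os.path import join

def doc_target_path(package_root, file_name, relative_parent=None):
    # A relative file URL keeps its own parent directory; inline
    # Root.Readme content and remote URLs go under 'docs'.
    sub_dir = relative_parent if relative_parent is not None else 'docs'
    return join(package_root, sub_dir, file_name)

print(doc_target_path('/pkg', 'readme.md'))                # /pkg/docs/readme.md
print(doc_target_path('/pkg', 'notes.md', 'extra/notes'))  # /pkg/extra/notes/notes.md
```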
narwhaljames/restapi-logging-handler | restapi_logging_handler/restapi_logging_handler.py | serialize | def serialize(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat(sep='T')
return serial
if isinstance(obj, uuid.UUID):
serial = str(obj)
return serial
try:
return obj.__dict__
except AttributeError:
return str(obj)
except Exception as e:
strval = 'unknown obj'
exceptval = 'unknown err'
try:
strval = str(obj)
exceptval = repr(e)
except Exception:
pass
return 'json fail {} {}'.format(exceptval, strval) | python | def serialize(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat(sep='T')
return serial
if isinstance(obj, uuid.UUID):
serial = str(obj)
return serial
try:
return obj.__dict__
except AttributeError:
return str(obj)
except Exception as e:
strval = 'unknown obj'
exceptval = 'unknown err'
try:
strval = str(obj)
exceptval = repr(e)
except Exception:
pass
return 'json fail {} {}'.format(exceptval, strval) | [
"def",
"serialize",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"datetime",
".",
"datetime",
")",
":",
"serial",
"=",
"obj",
".",
"isoformat",
"(",
"sep",
"=",
"'T'",
")",
"return",
"serial",
"if",
"isinstance",
"(",
"obj",
",",
"uuid",
".",
"UUID",
")",
":",
"serial",
"=",
"str",
"(",
"obj",
")",
"return",
"serial",
"try",
":",
"return",
"obj",
".",
"__dict__",
"except",
"AttributeError",
":",
"return",
"str",
"(",
"obj",
")",
"except",
"Exception",
"as",
"e",
":",
"strval",
"=",
"'unknown obj'",
"exceptval",
"=",
"'unknown err'",
"try",
":",
"strval",
"=",
"str",
"(",
"obj",
")",
"exceptval",
"=",
"repr",
"(",
"e",
")",
"except",
"Exception",
":",
"pass",
"return",
"'json fail {} {}'",
".",
"format",
"(",
"exceptval",
",",
"strval",
")"
] | JSON serializer for objects not serializable by default json code | [
"JSON",
"serializer",
"for",
"objects",
"not",
"serializable",
"by",
"default",
"json",
"code"
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/restapi_logging_handler.py#L97-L119 |
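Note: `serialize` is written to be passed as the `default` hook of `json.dumps`; a minimal sketch, assuming the function above is in scope.

```python
import datetime
import json
import uuid

# serialize() lets non-JSON types degrade to strings instead of
# raising TypeError inside json.dumps.
payload = {
    'id': uuid.UUID('12345678-1234-5678-1234-567812345678'),
    'at': datetime.datetime(2020, 1, 2, 3, 4, 5),
}
print(json.dumps(payload, default=serialize))
# {"id": "12345678-1234-5678-1234-567812345678", "at": "2020-01-02T03:04:05"}
```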
narwhaljames/restapi-logging-handler | restapi_logging_handler/restapi_logging_handler.py | RestApiHandler._getPayload | def _getPayload(self, record):
"""
The data that will be sent to the RESTful API
"""
try:
# top level payload items
d = record.__dict__
pid = d.pop('process', 'nopid')
tid = d.pop('thread', 'notid')
payload = {
k: v for (k, v) in d.items()
if k in TOP_KEYS
}
# logging meta attributes
payload['meta'] = {
k: v for (k, v) in d.items()
if k in META_KEYS
}
# everything else goes in details
payload['details'] = {
k: simple_json(v) for (k, v) in d.items()
if k not in self.detail_ignore_set
}
payload['log'] = payload.pop('name', 'n/a')
payload['level'] = payload.pop('levelname', 'n/a')
payload['meta']['line'] = payload['meta'].pop('lineno', 'n/a')
payload['message'] = record.getMessage()
tb = self._getTraceback(record)
if tb:
payload['traceback'] = tb
except Exception as e:
payload = {
'level': 'ERROR',
'message': 'could not format',
'exception': repr(e),
}
payload['pid'] = 'p-{}'.format(pid)
payload['tid'] = 't-{}'.format(tid)
return payload | python | def _getPayload(self, record):
"""
The data that will be sent to the RESTful API
"""
try:
# top level payload items
d = record.__dict__
pid = d.pop('process', 'nopid')
tid = d.pop('thread', 'notid')
payload = {
k: v for (k, v) in d.items()
if k in TOP_KEYS
}
# logging meta attributes
payload['meta'] = {
k: v for (k, v) in d.items()
if k in META_KEYS
}
# everything else goes in details
payload['details'] = {
k: simple_json(v) for (k, v) in d.items()
if k not in self.detail_ignore_set
}
payload['log'] = payload.pop('name', 'n/a')
payload['level'] = payload.pop('levelname', 'n/a')
payload['meta']['line'] = payload['meta'].pop('lineno', 'n/a')
payload['message'] = record.getMessage()
tb = self._getTraceback(record)
if tb:
payload['traceback'] = tb
except Exception as e:
payload = {
'level': 'ERROR',
'message': 'could not format',
'exception': repr(e),
}
payload['pid'] = 'p-{}'.format(pid)
payload['tid'] = 't-{}'.format(tid)
return payload | [
"def",
"_getPayload",
"(",
"self",
",",
"record",
")",
":",
"try",
":",
"# top level payload items",
"d",
"=",
"record",
".",
"__dict__",
"pid",
"=",
"d",
".",
"pop",
"(",
"'process'",
",",
"'nopid'",
")",
"tid",
"=",
"d",
".",
"pop",
"(",
"'thread'",
",",
"'notid'",
")",
"payload",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"d",
".",
"items",
"(",
")",
"if",
"k",
"in",
"TOP_KEYS",
"}",
"# logging meta attributes",
"payload",
"[",
"'meta'",
"]",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"d",
".",
"items",
"(",
")",
"if",
"k",
"in",
"META_KEYS",
"}",
"# everything else goes in details",
"payload",
"[",
"'details'",
"]",
"=",
"{",
"k",
":",
"simple_json",
"(",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"d",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"self",
".",
"detail_ignore_set",
"}",
"payload",
"[",
"'log'",
"]",
"=",
"payload",
".",
"pop",
"(",
"'name'",
",",
"'n/a'",
")",
"payload",
"[",
"'level'",
"]",
"=",
"payload",
".",
"pop",
"(",
"'levelname'",
",",
"'n/a'",
")",
"payload",
"[",
"'meta'",
"]",
"[",
"'line'",
"]",
"=",
"payload",
"[",
"'meta'",
"]",
".",
"pop",
"(",
"'lineno'",
",",
"'n/a'",
")",
"payload",
"[",
"'message'",
"]",
"=",
"record",
".",
"getMessage",
"(",
")",
"tb",
"=",
"self",
".",
"_getTraceback",
"(",
"record",
")",
"if",
"tb",
":",
"payload",
"[",
"'traceback'",
"]",
"=",
"tb",
"except",
"Exception",
"as",
"e",
":",
"payload",
"=",
"{",
"'level'",
":",
"'ERROR'",
",",
"'message'",
":",
"'could not format'",
",",
"'exception'",
":",
"repr",
"(",
"e",
")",
",",
"}",
"payload",
"[",
"'pid'",
"]",
"=",
"'p-{}'",
".",
"format",
"(",
"pid",
")",
"payload",
"[",
"'tid'",
"]",
"=",
"'t-{}'",
".",
"format",
"(",
"tid",
")",
"return",
"payload"
] | The data that will be sent to the RESTful API | [
"The",
"data",
"that",
"will",
"be",
"sent",
"to",
"the",
"RESTful",
"API"
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/restapi_logging_handler.py#L166-L211 |
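Note: `TOP_KEYS` and `META_KEYS` are module-level constants not reproduced in this record; below is a minimal sketch of the same record-splitting idea, with illustrative key sets.

```python
import logging

TOP_KEYS = {'name', 'levelname', 'msg'}           # illustrative values; the
META_KEYS = {'pathname', 'lineno', 'funcName'}    # real sets are module constants

record = logging.LogRecord('demo', logging.INFO, __file__, 42,
                           'hello %s', ('world',), None)
d = dict(record.__dict__)
# Top-level fields, logging metadata, and everything else as details.
top = {k: v for k, v in d.items() if k in TOP_KEYS}
meta = {k: v for k, v in d.items() if k in META_KEYS}
details = {k: v for k, v in d.items() if k not in TOP_KEYS | META_KEYS}
print(top['levelname'], meta['lineno'], len(details))
```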
narwhaljames/restapi-logging-handler | restapi_logging_handler/restapi_logging_handler.py | RestApiHandler._prepPayload | def _prepPayload(self, record):
"""
record: generated from logger module
This preps the payload to be formatted in whatever content-type is
expected from the RESTful API.
returns: a tuple of the data and the http content-type
"""
payload = self._getPayload(record)
json_data = json.dumps(payload, default=serialize)
return {
'json': (json_data, 'application/json')
}.get(self.content_type, (json_data, 'text/plain')) | python | def _prepPayload(self, record):
"""
record: generated from logger module
This preps the payload to be formatted in whatever content-type is
expected from the RESTful API.
returns: a tuple of the data and the http content-type
"""
payload = self._getPayload(record)
json_data = json.dumps(payload, default=serialize)
return {
'json': (json_data, 'application/json')
}.get(self.content_type, (json_data, 'text/plain')) | [
"def",
"_prepPayload",
"(",
"self",
",",
"record",
")",
":",
"payload",
"=",
"self",
".",
"_getPayload",
"(",
"record",
")",
"json_data",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"serialize",
")",
"return",
"{",
"'json'",
":",
"(",
"json_data",
",",
"'application/json'",
")",
"}",
".",
"get",
"(",
"self",
".",
"content_type",
",",
"(",
"json_data",
",",
"'text/plain'",
")",
")"
] | record: generated from logger module
This preps the payload to be formatted in whatever content-type is
expected from the RESTful API.
returns: a tuple of the data and the http content-type | [
"record",
":",
"generated",
"from",
"logger",
"module",
"This",
"preps",
"the",
"payload",
"to",
"be",
"formatted",
"in",
"whatever",
"content",
"-",
"type",
"is",
"expected",
"from",
"the",
"RESTful",
"API",
"."
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/restapi_logging_handler.py#L213-L226 |
narwhaljames/restapi-logging-handler | restapi_logging_handler/restapi_logging_handler.py | RestApiHandler.emit | def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful API
"""
# avoid infinite recursion
if record.name.startswith('requests'):
return
data, header = self._prepPayload(record)
try:
self.session.post(self._getEndpoint(),
data=data,
headers={'content-type': header})
except Exception:
self.handleError(record) | python | def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful API
"""
# avoid infinite recursion
if record.name.startswith('requests'):
return
data, header = self._prepPayload(record)
try:
self.session.post(self._getEndpoint(),
data=data,
headers={'content-type': header})
except Exception:
self.handleError(record) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"# avoid infinite recursion",
"if",
"record",
".",
"name",
".",
"startswith",
"(",
"'requests'",
")",
":",
"return",
"data",
",",
"header",
"=",
"self",
".",
"_prepPayload",
"(",
"record",
")",
"try",
":",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"_getEndpoint",
"(",
")",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"header",
"}",
")",
"except",
"Exception",
":",
"self",
".",
"handleError",
"(",
"record",
")"
] | Override emit() method in handler parent for sending log to RESTful API | [
"Override",
"emit",
"()",
"method",
"in",
"handler",
"parent",
"for",
"sending",
"log",
"to",
"RESTful",
"API"
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/restapi_logging_handler.py#L228-L243 |
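Note: a hedged usage sketch. The endpoint URL and the positional constructor argument are assumptions about `RestApiHandler`'s signature, not confirmed by this record.

```python
import logging

from restapi_logging_handler import RestApiHandler

logger = logging.getLogger('myapp')
logger.setLevel(logging.INFO)
# Endpoint is illustrative; each record is POSTed there as JSON.
# Records from the 'requests' logger are skipped by emit() itself,
# which is what prevents infinite recursion on the POST.
logger.addHandler(RestApiHandler('https://logs.example.com/ingest'))
logger.info('user %s logged in', 'alice')
```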
narwhaljames/restapi-logging-handler | restapi_logging_handler/loggly_handler.py | LogglyHandler._getEndpoint | def _getEndpoint(self, add_tags=None):
"""
Override Build Loggly's RESTful API endpoint
"""
return 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format(
self.custom_token,
self._implodeTags(add_tags=add_tags)
) | python | def _getEndpoint(self, add_tags=None):
"""
Override Build Loggly's RESTful API endpoint
"""
return 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format(
self.custom_token,
self._implodeTags(add_tags=add_tags)
) | [
"def",
"_getEndpoint",
"(",
"self",
",",
"add_tags",
"=",
"None",
")",
":",
"return",
"'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'",
".",
"format",
"(",
"self",
".",
"custom_token",
",",
"self",
".",
"_implodeTags",
"(",
"add_tags",
"=",
"add_tags",
")",
")"
] | Override Build Loggly's RESTful API endpoint | [
"Override",
"Build",
"Loggly",
"s",
"RESTful",
"API",
"endpoint"
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/loggly_handler.py#L109-L117 |
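Note: `_implodeTags` is not shown in this record; assuming it joins tags with commas, the bulk-ingest endpoint comes out as follows.

```python
token = 'YOUR-LOGGLY-TOKEN'
tags = ['prod', 'api']
# Same format string as _getEndpoint(), with the tag-joining assumed.
url = 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format(token, ','.join(tags))
print(url)  # https://logs-01.loggly.com/bulk/YOUR-LOGGLY-TOKEN/tag/prod,api/
```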
narwhaljames/restapi-logging-handler | restapi_logging_handler/loggly_handler.py | LogglyHandler._getPayload | def _getPayload(self, record):
"""
The data that will be sent to loggly.
"""
payload = super(LogglyHandler, self)._getPayload(record)
payload['tags'] = self._implodeTags()
return payload | python | def _getPayload(self, record):
"""
The data that will be sent to loggly.
"""
payload = super(LogglyHandler, self)._getPayload(record)
payload['tags'] = self._implodeTags()
return payload | [
"def",
"_getPayload",
"(",
"self",
",",
"record",
")",
":",
"payload",
"=",
"super",
"(",
"LogglyHandler",
",",
"self",
")",
".",
"_getPayload",
"(",
"record",
")",
"payload",
"[",
"'tags'",
"]",
"=",
"self",
".",
"_implodeTags",
"(",
")",
"return",
"payload"
] | The data that will be sent to loggly. | [
"The",
"data",
"that",
"will",
"be",
"sent",
"to",
"loggly",
"."
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/loggly_handler.py#L128-L135 |
narwhaljames/restapi-logging-handler | restapi_logging_handler/loggly_handler.py | LogglyHandler.emit | def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful
API
"""
pid = os.getpid()
if pid != self.pid:
self.pid = pid
self.logs = []
self.timer = self._flushAndRepeatTimer()
atexit.register(self._stopFlushTimer)
# avoid infinite recursion
if record.name.startswith('requests'):
return
self.logs.append(self._prepPayload(record)) | python | def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful
API
"""
pid = os.getpid()
if pid != self.pid:
self.pid = pid
self.logs = []
self.timer = self._flushAndRepeatTimer()
atexit.register(self._stopFlushTimer)
# avoid infinite recursion
if record.name.startswith('requests'):
return
self.logs.append(self._prepPayload(record)) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"if",
"pid",
"!=",
"self",
".",
"pid",
":",
"self",
".",
"pid",
"=",
"pid",
"self",
".",
"logs",
"=",
"[",
"]",
"self",
".",
"timer",
"=",
"self",
".",
"_flushAndRepeatTimer",
"(",
")",
"atexit",
".",
"register",
"(",
"self",
".",
"_stopFlushTimer",
")",
"# avoid infinite recursion",
"if",
"record",
".",
"name",
".",
"startswith",
"(",
"'requests'",
")",
":",
"return",
"self",
".",
"logs",
".",
"append",
"(",
"self",
".",
"_prepPayload",
"(",
"record",
")",
")"
] | Override emit() method in handler parent for sending log to RESTful
API | [
"Override",
"emit",
"()",
"method",
"in",
"handler",
"parent",
"for",
"sending",
"log",
"to",
"RESTful",
"API"
] | train | https://github.com/narwhaljames/restapi-logging-handler/blob/edaedd3e702e68cfd102bc9fbfa4a33e0c002913/restapi_logging_handler/loggly_handler.py#L183-L200 |
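Note: the pid check in `emit` guards against `fork()`: a child process would otherwise flush records buffered by its parent. A minimal sketch of the same trick:

```python
import os

class ForkAwareBuffer:
    # Detect a pid change (i.e. we are running in a forked child)
    # and start with a fresh buffer instead of the inherited one.
    def __init__(self):
        self.pid = os.getpid()
        self.logs = []

    def append(self, item):
        if os.getpid() != self.pid:
            self.pid = os.getpid()
            self.logs = []          # discard records buffered by the parent
        self.logs.append(item)
```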
Metatab/metapack | metapack/doc.py | MetapackDoc.nonver_name | def nonver_name(self):
"""Return the non versioned name"""
nv = self.as_version(None)
if not nv:
import re
nv = re.sub(r'-[^-]+$', '', self.name)
return nv | python | def nonver_name(self):
"""Return the non versioned name"""
nv = self.as_version(None)
if not nv:
import re
nv = re.sub(r'-[^-]+$', '', self.name)
return nv | [
"def",
"nonver_name",
"(",
"self",
")",
":",
"nv",
"=",
"self",
".",
"as_version",
"(",
"None",
")",
"if",
"not",
"nv",
":",
"import",
"re",
"nv",
"=",
"re",
".",
"sub",
"(",
"r'-[^-]+$'",
",",
"''",
",",
"self",
".",
"name",
")",
"return",
"nv"
] | Return the non versioned name | [
"Return",
"the",
"non",
"versioned",
"name"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L101-L108 |
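Note: a quick check of the regex fallback used when `as_version(None)` returns nothing.

```python
import re

name = 'example.com-dataset-1.2.3'
# Strip the trailing '-<version>' segment to recover the unversioned name.
print(re.sub(r'-[^-]+$', '', name))  # example.com-dataset
```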
Metatab/metapack | metapack/doc.py | MetapackDoc.wrappable_term | def wrappable_term(self, term):
"""Return the Root.Description, possibly combining multiple terms.
:return:
"""
return ' '.join(e.value.strip() for e in self['Root'].find(term) if e and e.value) | python | def wrappable_term(self, term):
"""Return the Root.Description, possibly combining multiple terms.
:return:
"""
return ' '.join(e.value.strip() for e in self['Root'].find(term) if e and e.value) | [
"def",
"wrappable_term",
"(",
"self",
",",
"term",
")",
":",
"return",
"' '",
".",
"join",
"(",
"e",
".",
"value",
".",
"strip",
"(",
")",
"for",
"e",
"in",
"self",
"[",
"'Root'",
"]",
".",
"find",
"(",
"term",
")",
"if",
"e",
"and",
"e",
".",
"value",
")"
] | Return the Root.Description, possibly combining multiple terms.
:return: | [
"Return",
"the",
"Root",
".",
"Description",
"possibly",
"combining",
"multiple",
"terms",
".",
":",
"return",
":"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L123-L128 |
Metatab/metapack | metapack/doc.py | MetapackDoc.set_wrappable_term | def set_wrappable_term(self, v, term):
"""Set the Root.Description, possibly splitting long descriptions across multiple terms. """
import textwrap
for t in self['Root'].find(term):
self.remove_term(t)
for l in textwrap.wrap(v, 80):
self['Root'].new_term(term, l) | python | def set_wrappable_term(self, v, term):
"""Set the Root.Description, possibly splitting long descriptions across multiple terms. """
import textwrap
for t in self['Root'].find(term):
self.remove_term(t)
for l in textwrap.wrap(v, 80):
self['Root'].new_term(term, l) | [
"def",
"set_wrappable_term",
"(",
"self",
",",
"v",
",",
"term",
")",
":",
"import",
"textwrap",
"for",
"t",
"in",
"self",
"[",
"'Root'",
"]",
".",
"find",
"(",
"term",
")",
":",
"self",
".",
"remove_term",
"(",
"t",
")",
"for",
"l",
"in",
"textwrap",
".",
"wrap",
"(",
"v",
",",
"80",
")",
":",
"self",
"[",
"'Root'",
"]",
".",
"new_term",
"(",
"term",
",",
"l",
")"
] | Set the Root.Description, possibly splitting long descriptions across multiple terms. | [
"Set",
"the",
"Root",
".",
"Description",
"possibly",
"splitting",
"long",
"descriptions",
"across",
"multiple",
"terms",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L130-L139 |
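Note: how the `textwrap.wrap(v, 80)` split in `set_wrappable_term` behaves.

```python
import textwrap

description = ('A long description that would not fit comfortably in one '
               'Metatab cell, so it is split across several 80-character '
               'Root.Description terms.')
for line in textwrap.wrap(description, 80):
    print(repr(line))  # each chunk becomes the value of one new term
```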
Metatab/metapack | metapack/doc.py | MetapackDoc.get_lib_module_dict | def get_lib_module_dict(self):
"""Load the 'lib' directory as a python module, so it can be used to provide functions
for rowpipe transforms. This only works filesystem packages"""
from importlib import import_module
if not self.ref:
return {}
u = parse_app_url(self.ref)
if u.scheme == 'file':
if not self.set_sys_path():
return {}
for module_name in self.lib_dir_names:
try:
m = import_module(module_name)
return {k: v for k, v in m.__dict__.items() if not k.startswith('__')}
except ModuleNotFoundError as e:
# We need to know if it is the datapackage's module that is missing
# or if it is a module that it imported
if not module_name in str(e):
raise # If not our module, it's a real error.
continue
else:
return {} | python | def get_lib_module_dict(self):
"""Load the 'lib' directory as a python module, so it can be used to provide functions
for rowpipe transforms. This only works for filesystem packages"""
from importlib import import_module
if not self.ref:
return {}
u = parse_app_url(self.ref)
if u.scheme == 'file':
if not self.set_sys_path():
return {}
for module_name in self.lib_dir_names:
try:
m = import_module(module_name)
return {k: v for k, v in m.__dict__.items() if not k.startswith('__')}
except ModuleNotFoundError as e:
# We need to know if it is the datapackage's module that is missing
# or if it is a module that it imported
if not module_name in str(e):
raise # If not our module, it's a real error.
continue
else:
return {} | [
"def",
"get_lib_module_dict",
"(",
"self",
")",
":",
"from",
"importlib",
"import",
"import_module",
"if",
"not",
"self",
".",
"ref",
":",
"return",
"{",
"}",
"u",
"=",
"parse_app_url",
"(",
"self",
".",
"ref",
")",
"if",
"u",
".",
"scheme",
"==",
"'file'",
":",
"if",
"not",
"self",
".",
"set_sys_path",
"(",
")",
":",
"return",
"{",
"}",
"for",
"module_name",
"in",
"self",
".",
"lib_dir_names",
":",
"try",
":",
"m",
"=",
"import_module",
"(",
"module_name",
")",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"m",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"'__'",
")",
"}",
"except",
"ModuleNotFoundError",
"as",
"e",
":",
"# We need to know if it is the datapackage's module that is missing",
"# or if it is a module that it imported",
"if",
"not",
"module_name",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"# If not our module, it's a real error.",
"continue",
"else",
":",
"return",
"{",
"}"
] | Load the 'lib' directory as a python module, so it can be used to provide functions
for rowpipe transforms. This only works for filesystem packages | [
"Load",
"the",
"lib",
"directory",
"as",
"a",
"python",
"module",
"so",
"it",
"can",
"be",
"used",
"to",
"provide",
"functions",
"for",
"rowpipe",
"transforms",
".",
"This",
"only",
"works",
"filesystem",
"packages"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L187-L219 |
Metatab/metapack | metapack/doc.py | MetapackDoc._repr_html_ | def _repr_html_(self, **kwargs):
"""Produce HTML for Jupyter Notebook"""
from jinja2 import Template
from markdown import markdown as convert_markdown
extensions = [
'markdown.extensions.extra',
'markdown.extensions.admonition'
]
return convert_markdown(self.markdown, extensions) | python | def _repr_html_(self, **kwargs):
"""Produce HTML for Jupyter Notebook"""
from jinja2 import Template
from markdown import markdown as convert_markdown
extensions = [
'markdown.extensions.extra',
'markdown.extensions.admonition'
]
return convert_markdown(self.markdown, extensions) | [
"def",
"_repr_html_",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"jinja2",
"import",
"Template",
"from",
"markdown",
"import",
"markdown",
"as",
"convert_markdown",
"extensions",
"=",
"[",
"'markdown.extensions.extra'",
",",
"'markdown.extensions.admonition'",
"]",
"return",
"convert_markdown",
"(",
"self",
".",
"markdown",
",",
"extensions",
")"
] | Produce HTML for Jupyter Notebook | [
"Produce",
"HTML",
"for",
"Jupyter",
"Notebook"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L233-L243 |
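Note: a minimal sketch of the conversion (requires the Python-Markdown package). `extensions` is passed by keyword here, whereas the record passes the list positionally, which only older releases of the library accept.

```python
from markdown import markdown as convert_markdown

extensions = ['markdown.extensions.extra', 'markdown.extensions.admonition']
md = "# My Package\n\n!!! note\n    Rendered with the admonition extension."
print(convert_markdown(md, extensions=extensions))
```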
Metatab/metapack | metapack/doc.py | MetapackDoc.write_csv | def write_csv(self, path=None):
"""Write CSV file. Sorts the sections before calling the superclass write_csv"""
# Sort the Sections
self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema'])
# Sort Terms in the root section
# Re-wrap the description and abstract
if self.description:
self.description = self.description
if self.abstract:
self.description = self.abstract
t = self['Root'].get_or_new_term('Root.Modified')
t.value = datetime_now()
self.sort_by_term()
return super().write_csv(str(path)) | python | def write_csv(self, path=None):
"""Write CSV file. Sorts the sections before calling the superclass write_csv"""
# Sort the Sections
self.sort_sections(['Root', 'Contacts', 'Documentation', 'References', 'Resources', 'Citations', 'Schema'])
# Sort Terms in the root section
# Re-wrap the description and abstract
if self.description:
self.description = self.description
if self.abstract:
self.description = self.abstract
t = self['Root'].get_or_new_term('Root.Modified')
t.value = datetime_now()
self.sort_by_term()
return super().write_csv(str(path)) | [
"def",
"write_csv",
"(",
"self",
",",
"path",
"=",
"None",
")",
":",
"# Sort the Sections",
"self",
".",
"sort_sections",
"(",
"[",
"'Root'",
",",
"'Contacts'",
",",
"'Documentation'",
",",
"'References'",
",",
"'Resources'",
",",
"'Citations'",
",",
"'Schema'",
"]",
")",
"# Sort Terms in the root section",
"# Re-wrap the description and abstract",
"if",
"self",
".",
"description",
":",
"self",
".",
"description",
"=",
"self",
".",
"description",
"if",
"self",
".",
"abstract",
":",
"self",
".",
"description",
"=",
"self",
".",
"abstract",
"t",
"=",
"self",
"[",
"'Root'",
"]",
".",
"get_or_new_term",
"(",
"'Root.Modified'",
")",
"t",
".",
"value",
"=",
"datetime_now",
"(",
")",
"self",
".",
"sort_by_term",
"(",
")",
"return",
"super",
"(",
")",
".",
"write_csv",
"(",
"str",
"(",
"path",
")",
")"
] | Write CSV file. Sorts the sections before calling the superclass write_csv | [
"Write",
"CSV",
"file",
".",
"Sorts",
"the",
"sections",
"before",
"calling",
"the",
"superclass",
"write_csv"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L283-L304 |
project-rig/rig | rig/wizard.py | dimensions_wizard | def dimensions_wizard():
"""A wizard which attempts to determine the dimensions of a SpiNNaker
system.
.. warning::
Since SC&MP v2.0.0 it is not necessary to know the dimensions of a
SpiNNaker machine in order to boot it. As a result, most applications
will no longer require this wizard step.
Returns ``{"dimensions": (x, y)}`` via the :py:exc:`~rig.wizard.Success`
exception.
"""
option = yield MultipleChoice(
"What type of SpiNNaker system to you have?",
["A single four-chip 'SpiNN-3' board",
"A single forty-eight-chip 'SpiNN-5' board",
"Multiple forty-eight-chip 'SpiNN-5' boards",
"Other"],
None)
assert 0 <= option < 4
if option == 0:
raise Success({"dimensions": (2, 2)})
elif option == 1:
raise Success({"dimensions": (8, 8)})
elif option == 2:
# Infer the system's dimensions from the number of boards supplied
num_boards = yield Text("How many 'SpiNN-5' boards are in the system?")
try:
w, h = standard_system_dimensions(int(num_boards))
except ValueError:
# May fail due to integer conversion or the function rejecting the
# number of boards.
raise Failure(
"'{}' is not a valid number of boards.".format(num_boards))
raise Success({"dimensions": (w, h)})
else:
dimensions = yield Text(
"What are the dimensions of the network in chips (e.g. 24x12)?")
match = re.match(r"\s*(\d+)\s*[xX]\s*(\d+)\s*", dimensions)
if not match:
raise Failure("'{}' is not a valid system size.".format(
dimensions))
else:
w = int(match.group(1))
h = int(match.group(2))
raise Success({"dimensions": (w, h)}) | python | def dimensions_wizard():
"""A wizard which attempts to determine the dimensions of a SpiNNaker
system.
.. warning::
Since SC&MP v2.0.0 it is not necessary to know the dimensions of a
SpiNNaker machine in order to boot it. As a result, most applications
will no longer require this wizard step.
Returns ``{"dimensions": (x, y)}`` via the :py:exc:`~rig.wizard.Success`
exception.
"""
option = yield MultipleChoice(
"What type of SpiNNaker system to you have?",
["A single four-chip 'SpiNN-3' board",
"A single forty-eight-chip 'SpiNN-5' board",
"Multiple forty-eight-chip 'SpiNN-5' boards",
"Other"],
None)
assert 0 <= option < 4
if option == 0:
raise Success({"dimensions": (2, 2)})
elif option == 1:
raise Success({"dimensions": (8, 8)})
elif option == 2:
# Infer the system's dimensions from the number of boards supplied
num_boards = yield Text("How many 'SpiNN-5' boards are in the system?")
try:
w, h = standard_system_dimensions(int(num_boards))
except ValueError:
# May fail due to integer conversion or the function rejecting the
# number of boards.
raise Failure(
"'{}' is not a valid number of boards.".format(num_boards))
raise Success({"dimensions": (w, h)})
else:
dimensions = yield Text(
"What are the dimensions of the network in chips (e.g. 24x12)?")
match = re.match(r"\s*(\d+)\s*[xX]\s*(\d+)\s*", dimensions)
if not match:
raise Failure("'{}' is not a valid system size.".format(
dimensions))
else:
w = int(match.group(1))
h = int(match.group(2))
raise Success({"dimensions": (w, h)}) | [
"def",
"dimensions_wizard",
"(",
")",
":",
"option",
"=",
"yield",
"MultipleChoice",
"(",
"\"What type of SpiNNaker system to you have?\"",
",",
"[",
"\"A single four-chip 'SpiNN-3' board\"",
",",
"\"A single forty-eight-chip 'SpiNN-5' board\"",
",",
"\"Multiple forty-eight-chip 'SpiNN-5' boards\"",
",",
"\"Other\"",
"]",
",",
"None",
")",
"assert",
"0",
"<=",
"option",
"<",
"4",
"if",
"option",
"==",
"0",
":",
"raise",
"Success",
"(",
"{",
"\"dimensions\"",
":",
"(",
"2",
",",
"2",
")",
"}",
")",
"elif",
"option",
"==",
"1",
":",
"raise",
"Success",
"(",
"{",
"\"dimensions\"",
":",
"(",
"8",
",",
"8",
")",
"}",
")",
"elif",
"option",
"==",
"2",
":",
"# Infer the system's dimensions from the number of boards supplied",
"num_boards",
"=",
"yield",
"Text",
"(",
"\"How many 'SpiNN-5' boards are in the system?\"",
")",
"try",
":",
"w",
",",
"h",
"=",
"standard_system_dimensions",
"(",
"int",
"(",
"num_boards",
")",
")",
"except",
"ValueError",
":",
"# May fail due to integer conversion or the function rejecting the",
"# number of boards.",
"raise",
"Failure",
"(",
"\"'{}' is not a valid number of boards.\"",
".",
"format",
"(",
"num_boards",
")",
")",
"raise",
"Success",
"(",
"{",
"\"dimensions\"",
":",
"(",
"w",
",",
"h",
")",
"}",
")",
"else",
":",
"dimensions",
"=",
"yield",
"Text",
"(",
"\"What are the dimensions of the network in chips (e.g. 24x12)?\"",
")",
"match",
"=",
"re",
".",
"match",
"(",
"r\"\\s*(\\d+)\\s*[xX]\\s*(\\d+)\\s*\"",
",",
"dimensions",
")",
"if",
"not",
"match",
":",
"raise",
"Failure",
"(",
"\"'{}' is not a valid system size.\"",
".",
"format",
"(",
"dimensions",
")",
")",
"else",
":",
"w",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"h",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"raise",
"Success",
"(",
"{",
"\"dimensions\"",
":",
"(",
"w",
",",
"h",
")",
"}",
")"
] | A wizard which attempts to determine the dimensions of a SpiNNaker
system.
.. warning::
Since SC&MP v2.0.0 it is not necessary to know the dimensions of a
SpiNNaker machine in order to boot it. As a result, most applications
will no longer require this wizard step.
Returns ``{"dimensions": (x, y)}`` via the :py:exc:`~rig.wizard.Success`
exception. | [
"A",
"wizard",
"which",
"attempts",
"to",
"determine",
"the",
"dimensions",
"of",
"a",
"SpiNNaker",
"system",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L90-L137 |
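Note: driving the wizard's generator protocol by hand, assuming `Success` and `dimensions_wizard` are importable from `rig.wizard` as the record's URL suggests.

```python
from rig.wizard import Success, dimensions_wizard

gen = dimensions_wizard()
try:
    choice = next(gen)      # the MultipleChoice message
    print(choice.question)  # "What type of SpiNNaker system do you have?"
    gen.send(1)             # pick the single forty-eight-chip 'SpiNN-5' board
except Success as s:
    print(s.data)           # {'dimensions': (8, 8)}
```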
project-rig/rig | rig/wizard.py | ip_address_wizard | def ip_address_wizard():
"""A wizard which attempts to determine the IP of a SpiNNaker system.
Returns ``{"ip_address": "..."}`` via the :py:exc:`~rig.wizard.Success`
exception.
"""
option = yield MultipleChoice(
"Would you like to auto-detect the SpiNNaker system's IP address?",
["Auto-detect",
"Manually Enter IP address or hostname"],
0)
assert 0 <= option < 2
if option == 0:
yield Prompt(
"Make sure the SpiNNaker system is switched on and is not booted.")
yield Info("Discovering attached SpiNNaker systems...")
ip_address = listen()
if ip_address is None:
raise Failure(
"Did not discover a locally connected SpiNNaker system.")
elif option == 1: # pragma: no branch
ip_address = yield Text(
"What is the IP address or hostname of the SpiNNaker system?")
if ip_address == "":
raise Failure("No IP address or hostname entered")
raise Success({"ip_address": ip_address}) | python | def ip_address_wizard():
"""A wizard which attempts to determine the IP of a SpiNNaker system.
Returns ``{"ip_address": "..."}`` via the :py:exc:`~rig.wizard.Success`
exception.
"""
option = yield MultipleChoice(
"Would you like to auto-detect the SpiNNaker system's IP address?",
["Auto-detect",
"Manually Enter IP address or hostname"],
0)
assert 0 <= option < 2
if option == 0:
yield Prompt(
"Make sure the SpiNNaker system is switched on and is not booted.")
yield Info("Discovering attached SpiNNaker systems...")
ip_address = listen()
if ip_address is None:
raise Failure(
"Did not discover a locally connected SpiNNaker system.")
elif option == 1: # pragma: no branch
ip_address = yield Text(
"What is the IP address or hostname of the SpiNNaker system?")
if ip_address == "":
raise Failure("No IP address or hostname entered")
raise Success({"ip_address": ip_address}) | [
"def",
"ip_address_wizard",
"(",
")",
":",
"option",
"=",
"yield",
"MultipleChoice",
"(",
"\"Would you like to auto-detect the SpiNNaker system's IP address?\"",
",",
"[",
"\"Auto-detect\"",
",",
"\"Manually Enter IP address or hostname\"",
"]",
",",
"0",
")",
"assert",
"0",
"<=",
"option",
"<",
"2",
"if",
"option",
"==",
"0",
":",
"yield",
"Prompt",
"(",
"\"Make sure the SpiNNaker system is switched on and is not booted.\"",
")",
"yield",
"Info",
"(",
"\"Discovering attached SpiNNaker systems...\"",
")",
"ip_address",
"=",
"listen",
"(",
")",
"if",
"ip_address",
"is",
"None",
":",
"raise",
"Failure",
"(",
"\"Did not discover a locally connected SpiNNaker system.\"",
")",
"elif",
"option",
"==",
"1",
":",
"# pragma: no branch",
"ip_address",
"=",
"yield",
"Text",
"(",
"\"What is the IP address or hostname of the SpiNNaker system?\"",
")",
"if",
"ip_address",
"==",
"\"\"",
":",
"raise",
"Failure",
"(",
"\"No IP address or hostname entered\"",
")",
"raise",
"Success",
"(",
"{",
"\"ip_address\"",
":",
"ip_address",
"}",
")"
] | A wizard which attempts to determine the IP of a SpiNNaker system.
Returns ``{"ip_address": "..."}`` via the :py:exc:`~rig.wizard.Success`
exception. | [
"A",
"wizard",
"which",
"attempts",
"to",
"determine",
"the",
"IP",
"of",
"a",
"SpiNNaker",
"system",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L140-L167 |
project-rig/rig | rig/wizard.py | cat | def cat(*wizards):
"""A higher-order wizard which is the concatenation of a number of other
wizards.
The resulting data is the union of all wizard outputs.
"""
data = {}
for wizard in wizards:
try:
response = None
while True:
response = yield wizard.send(response)
except Success as s:
data.update(s.data)
raise Success(data) | python | def cat(*wizards):
"""A higher-order wizard which is the concatenation of a number of other
wizards.
The resulting data is the union of all wizard outputs.
"""
data = {}
for wizard in wizards:
try:
response = None
while True:
response = yield wizard.send(response)
except Success as s:
data.update(s.data)
raise Success(data) | [
"def",
"cat",
"(",
"*",
"wizards",
")",
":",
"data",
"=",
"{",
"}",
"for",
"wizard",
"in",
"wizards",
":",
"try",
":",
"response",
"=",
"None",
"while",
"True",
":",
"response",
"=",
"yield",
"wizard",
".",
"send",
"(",
"response",
")",
"except",
"Success",
"as",
"s",
":",
"data",
".",
"update",
"(",
"s",
".",
"data",
")",
"raise",
"Success",
"(",
"data",
")"
] | A higher-order wizard which is the concatenation of a number of other
wizards.
The resulting data is the union of all wizard outputs. | [
"A",
"higher",
"-",
"order",
"wizard",
"which",
"is",
"the",
"concatenation",
"of",
"a",
"number",
"of",
"other",
"wizards",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L170-L186 |
project-rig/rig | rig/wizard.py | cli_wrapper | def cli_wrapper(generator):
"""Given a wizard, implements an interactive command-line human-friendly
interface for it.
Parameters
----------
generator
A generator such as one created by calling
:py:func:`rig.wizard.wizard_generator`.
Returns
-------
dict or None
Returns a dictionary containing the results of the wizard or None if
the wizard failed.
"""
first = True
response = None
while True:
# Insert blank lines between prompts
if not first:
print()
first = False
try:
message = generator.send(response)
if isinstance(message, MultipleChoice):
print(message.question)
for num, choice in enumerate(message.options):
print(" {}: {}".format(num, choice))
option = input("Select an option 0-{}{}: ".format(
len(message.options) - 1,
" (default: {})".format(message.default)
if message.default is not None else ""))
if option == "" and message.default is not None:
option = message.default
try:
response = int(option)
except ValueError:
response = -1
if not (0 <= response < len(message.options)):
print("ERROR: {} is not a valid option.".format(option))
return None
elif isinstance(message, Text):
print(message.question)
response = input("> ")
elif isinstance(message, Prompt):
print(message.message)
input("<Press enter to continue>")
response = None
elif isinstance(message, Info): # pragma: no branch
print(message.message)
response = None
except Failure as f:
print("ERROR: {}".format(str(f)))
return None
except Success as s:
return s.data | python | def cli_wrapper(generator):
"""Given a wizard, implements an interactive command-line human-friendly
interface for it.
Parameters
----------
generator
A generator such as one created by calling
:py:func:`rig.wizard.wizard_generator`.
Returns
-------
dict or None
Returns a dictionary containing the results of the wizard or None if
the wizard failed.
"""
first = True
response = None
while True:
# Insert blank lines between prompts
if not first:
print()
first = False
try:
message = generator.send(response)
if isinstance(message, MultipleChoice):
print(message.question)
for num, choice in enumerate(message.options):
print(" {}: {}".format(num, choice))
option = input("Select an option 0-{}{}: ".format(
len(message.options) - 1,
" (default: {})".format(message.default)
if message.default is not None else ""))
if option == "" and message.default is not None:
option = message.default
try:
response = int(option)
except ValueError:
response = -1
if not (0 <= response < len(message.options)):
print("ERROR: {} is not a valid option.".format(option))
return None
elif isinstance(message, Text):
print(message.question)
response = input("> ")
elif isinstance(message, Prompt):
print(message.message)
input("<Press enter to continue>")
response = None
elif isinstance(message, Info): # pragma: no branch
print(message.message)
response = None
except Failure as f:
print("ERROR: {}".format(str(f)))
return None
except Success as s:
return s.data | [
"def",
"cli_wrapper",
"(",
"generator",
")",
":",
"first",
"=",
"True",
"response",
"=",
"None",
"while",
"True",
":",
"# Insert blank lines between prompts",
"if",
"not",
"first",
":",
"print",
"(",
")",
"first",
"=",
"False",
"try",
":",
"message",
"=",
"generator",
".",
"send",
"(",
"response",
")",
"if",
"isinstance",
"(",
"message",
",",
"MultipleChoice",
")",
":",
"print",
"(",
"message",
".",
"question",
")",
"for",
"num",
",",
"choice",
"in",
"enumerate",
"(",
"message",
".",
"options",
")",
":",
"print",
"(",
"\" {}: {}\"",
".",
"format",
"(",
"num",
",",
"choice",
")",
")",
"option",
"=",
"input",
"(",
"\"Select an option 0-{}{}: \"",
".",
"format",
"(",
"len",
"(",
"message",
".",
"options",
")",
"-",
"1",
",",
"\" (default: {})\"",
".",
"format",
"(",
"message",
".",
"default",
")",
"if",
"message",
".",
"default",
"is",
"not",
"None",
"else",
"\"\"",
")",
")",
"if",
"option",
"==",
"\"\"",
"and",
"message",
".",
"default",
"is",
"not",
"None",
":",
"option",
"=",
"message",
".",
"default",
"try",
":",
"response",
"=",
"int",
"(",
"option",
")",
"except",
"ValueError",
":",
"response",
"=",
"-",
"1",
"if",
"not",
"(",
"0",
"<=",
"response",
"<",
"len",
"(",
"message",
".",
"options",
")",
")",
":",
"print",
"(",
"\"ERROR: {} is not a valid option.\"",
".",
"format",
"(",
"option",
")",
")",
"return",
"None",
"elif",
"isinstance",
"(",
"message",
",",
"Text",
")",
":",
"print",
"(",
"message",
".",
"question",
")",
"response",
"=",
"input",
"(",
"\"> \"",
")",
"elif",
"isinstance",
"(",
"message",
",",
"Prompt",
")",
":",
"print",
"(",
"message",
".",
"message",
")",
"input",
"(",
"\"<Press enter to continue>\"",
")",
"response",
"=",
"None",
"elif",
"isinstance",
"(",
"message",
",",
"Info",
")",
":",
"# pragma: no branch",
"print",
"(",
"message",
".",
"message",
")",
"response",
"=",
"None",
"except",
"Failure",
"as",
"f",
":",
"print",
"(",
"\"ERROR: {}\"",
".",
"format",
"(",
"str",
"(",
"f",
")",
")",
")",
"return",
"None",
"except",
"Success",
"as",
"s",
":",
"return",
"s",
".",
"data"
] | Given a wizard, implements an interactive command-line human-friendly
interface for it.
Parameters
----------
generator
A generator such as one created by calling
:py:func:`rig.wizard.wizard_generator`.
Returns
-------
dict or None
Returns a dictionary containing the results of the wizard or None if
the wizard failed. | [
"Given",
"a",
"wizard",
"implements",
"an",
"interactive",
"command",
"-",
"line",
"human",
"-",
"friendly",
"interface",
"for",
"it",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/wizard.py#L189-L250 |
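Note: a typical way to combine the pieces above, assuming all four names are importable from `rig.wizard`.

```python
from rig.wizard import cat, cli_wrapper, dimensions_wizard, ip_address_wizard

# Chain both steps; the result is the union of their Success payloads,
# or None if either step fails.
data = cli_wrapper(cat(ip_address_wizard(), dimensions_wizard()))
if data is not None:
    print(data['ip_address'], data['dimensions'])
```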
project-rig/rig | rig/place_and_route/place/sa/python_kernel.py | _net_cost | def _net_cost(net, placements, has_wrap_around_links, machine):
"""Get the cost of a given net.
This function, in principle at least, should estimate the total network
resources consumed by the given net. In practice this estimate is based on
the size of the bounding-box of the net (i.e. HPWL). This should be
improved at some later time to better account for the effects of large
fan-outs.
Parameters
----------
net : :py:class:`rig.netlist.Net`
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float
"""
# This function is by far the hottest code in the entire algorithm, as a
# result, small performance improvements in here can have significant
# impact on the runtime of the overall algorithm. As an unfortunate side
# effect, this code is rather ugly since many higher-level constructs (e.g.
# min/max) are outrageously slow.
# XXX: This does not account for the hexagonal properties of the SpiNNaker
# topology.
if has_wrap_around_links:
# When wrap-around links exist, we find the minimal bounding box and
# return the HPWL weighted by the net weight. To do this the largest
# gap between any pair of vertices is found::
#
# | x x x |
# ^-------------^
# max gap
#
# The minimal bounding box then goes the other way around::
#
# | x x x |
# ----------^ ^---
# First we collect the x and y coordinates of all vertices in the net
# into a pair of (sorted) lists, xs and ys.
x, y = placements[net.source]
num_vertices = len(net.sinks) + 1
xs = [x] * num_vertices
ys = [y] * num_vertices
i = 1
for v in net.sinks:
x, y = placements[v]
xs[i] = x
ys[i] = y
i += 1
xs.sort()
ys.sort()
# The minimal bounding box is then found as above.
x_max_delta = 0
last_x = xs[-1] - machine.width
for x in xs:
delta = x - last_x
last_x = x
if delta > x_max_delta:
x_max_delta = delta
y_max_delta = 0
last_y = ys[-1] - machine.height
for y in ys:
delta = y - last_y
last_y = y
if delta > y_max_delta:
y_max_delta = delta
return (((machine.width - x_max_delta) +
(machine.height - y_max_delta)) *
net.weight *
math.sqrt(len(net.sinks) + 1))
else:
# When no wrap-around links, find the bounding box around the vertices
# in the net and return the HPWL weighted by the net weight.
x1, y1 = x2, y2 = placements[net.source]
for vertex in net.sinks:
x, y = placements[vertex]
x1 = x if x < x1 else x1
y1 = y if y < y1 else y1
x2 = x if x > x2 else x2
y2 = y if y > y2 else y2
return (((x2 - x1) + (y2 - y1)) *
float(net.weight) *
math.sqrt(len(net.sinks) + 1)) | python | def _net_cost(net, placements, has_wrap_around_links, machine):
"""Get the cost of a given net.
This function, in principle at least, should estimate the total network
resources consumed by the given net. In practice this estimate is based on
the size of the bounding-box of the net (i.e. HPWL). This should be
improved at some later time to better account for the effects of large
fan-outs.
Parameters
----------
net : :py:class:`rig.netlist.Net`
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float
"""
# This function is by far the hottest code in the entire algorithm, as a
# result, small performance improvements in here can have significant
# impact on the runtime of the overall algorithm. As an unfortunate side
# effect, this code is rather ugly since many higher-level constructs (e.g.
# min/max) are outrageously slow.
# XXX: This does not account for the hexagonal properties of the SpiNNaker
# topology.
if has_wrap_around_links:
# When wrap-around links exist, we find the minimal bounding box and
# return the HPWL weighted by the net weight. To do this the largest
# gap between any pair of vertices is found::
#
# | x x x |
# ^-------------^
# max gap
#
# The minimal bounding box then goes the other way around::
#
# | x x x |
# ----------^ ^---
# First we collect the x and y coordinates of all vertices in the net
# into a pair of (sorted) lists, xs and ys.
x, y = placements[net.source]
num_vertices = len(net.sinks) + 1
xs = [x] * num_vertices
ys = [y] * num_vertices
i = 1
for v in net.sinks:
x, y = placements[v]
xs[i] = x
ys[i] = y
i += 1
xs.sort()
ys.sort()
# The minimal bounding box is then found as above.
x_max_delta = 0
last_x = xs[-1] - machine.width
for x in xs:
delta = x - last_x
last_x = x
if delta > x_max_delta:
x_max_delta = delta
y_max_delta = 0
last_y = ys[-1] - machine.height
for y in ys:
delta = y - last_y
last_y = y
if delta > y_max_delta:
y_max_delta = delta
return (((machine.width - x_max_delta) +
(machine.height - y_max_delta)) *
net.weight *
math.sqrt(len(net.sinks) + 1))
else:
# When no wrap-around links, find the bounding box around the vertices
# in the net and return the HPWL weighted by the net weight.
x1, y1 = x2, y2 = placements[net.source]
for vertex in net.sinks:
x, y = placements[vertex]
x1 = x if x < x1 else x1
y1 = y if y < y1 else y1
x2 = x if x > x2 else x2
y2 = y if y > y2 else y2
return (((x2 - x1) + (y2 - y1)) *
float(net.weight) *
math.sqrt(len(net.sinks) + 1)) | [
"def",
"_net_cost",
"(",
"net",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
":",
"# This function is by far the hottest code in the entire algorithm, as a",
"# result, small performance improvements in here can have significant",
"# impact on the runtime of the overall algorithm. As an unfortunate side",
"# effect, this code is rather ugly since many higher-level constructs (e.g.",
"# min/max) are outrageously slow.",
"# XXX: This does not account for the hexagonal properties of the SpiNNaker",
"# topology.",
"if",
"has_wrap_around_links",
":",
"# When wrap-around links exist, we find the minimal bounding box and",
"# return the HPWL weighted by the net weight. To do this the largest",
"# gap between any pair of vertices is found::",
"#",
"# | x x x |",
"# ^-------------^",
"# max gap",
"#",
"# The minimal bounding box then goes the other way around::",
"#",
"# | x x x |",
"# ----------^ ^---",
"# First we collect the x and y coordinates of all vertices in the net",
"# into a pair of (sorted) lists, xs and ys.",
"x",
",",
"y",
"=",
"placements",
"[",
"net",
".",
"source",
"]",
"num_vertices",
"=",
"len",
"(",
"net",
".",
"sinks",
")",
"+",
"1",
"xs",
"=",
"[",
"x",
"]",
"*",
"num_vertices",
"ys",
"=",
"[",
"y",
"]",
"*",
"num_vertices",
"i",
"=",
"1",
"for",
"v",
"in",
"net",
".",
"sinks",
":",
"x",
",",
"y",
"=",
"placements",
"[",
"v",
"]",
"xs",
"[",
"i",
"]",
"=",
"x",
"ys",
"[",
"i",
"]",
"=",
"y",
"i",
"+=",
"1",
"xs",
".",
"sort",
"(",
")",
"ys",
".",
"sort",
"(",
")",
"# The minimal bounding box is then found as above.",
"x_max_delta",
"=",
"0",
"last_x",
"=",
"xs",
"[",
"-",
"1",
"]",
"-",
"machine",
".",
"width",
"for",
"x",
"in",
"xs",
":",
"delta",
"=",
"x",
"-",
"last_x",
"last_x",
"=",
"x",
"if",
"delta",
">",
"x_max_delta",
":",
"x_max_delta",
"=",
"delta",
"y_max_delta",
"=",
"0",
"last_y",
"=",
"ys",
"[",
"-",
"1",
"]",
"-",
"machine",
".",
"height",
"for",
"y",
"in",
"ys",
":",
"delta",
"=",
"y",
"-",
"last_y",
"last_y",
"=",
"y",
"if",
"delta",
">",
"y_max_delta",
":",
"y_max_delta",
"=",
"delta",
"return",
"(",
"(",
"(",
"machine",
".",
"width",
"-",
"x_max_delta",
")",
"+",
"(",
"machine",
".",
"height",
"-",
"y_max_delta",
")",
")",
"*",
"net",
".",
"weight",
"*",
"math",
".",
"sqrt",
"(",
"len",
"(",
"net",
".",
"sinks",
")",
"+",
"1",
")",
")",
"else",
":",
"# When no wrap-around links, find the bounding box around the vertices",
"# in the net and return the HPWL weighted by the net weight.",
"x1",
",",
"y1",
"=",
"x2",
",",
"y2",
"=",
"placements",
"[",
"net",
".",
"source",
"]",
"for",
"vertex",
"in",
"net",
".",
"sinks",
":",
"x",
",",
"y",
"=",
"placements",
"[",
"vertex",
"]",
"x1",
"=",
"x",
"if",
"x",
"<",
"x1",
"else",
"x1",
"y1",
"=",
"y",
"if",
"y",
"<",
"y1",
"else",
"y1",
"x2",
"=",
"x",
"if",
"x",
">",
"x2",
"else",
"x2",
"y2",
"=",
"y",
"if",
"y",
">",
"y2",
"else",
"y2",
"return",
"(",
"(",
"(",
"x2",
"-",
"x1",
")",
"+",
"(",
"y2",
"-",
"y1",
")",
")",
"*",
"float",
"(",
"net",
".",
"weight",
")",
"*",
"math",
".",
"sqrt",
"(",
"len",
"(",
"net",
".",
"sinks",
")",
"+",
"1",
")",
")"
] | Get the cost of a given net.
This function, in principle at least, should estimate the total network
resources consumed by the given net. In practice this estimate is based on
the size of the bounding-box of the net (i.e. HPWL). This should be
improved at some later time to better account for the effects of large
fan-outs.
Parameters
----------
net : :py:class:`rig.netlist.Net`
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float | [
"Get",
"the",
"cost",
"of",
"a",
"given",
"net",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L118-L210 |
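Note: a worked example of the max-gap trick on one axis. The minimal wrap-around extent is the ring width minus the largest gap between consecutive coordinates.

```python
width = 10
xs = sorted([1, 2, 8])              # vertex x coordinates on a 10-wide torus
prev = [xs[-1] - width] + xs[:-1]   # previous coordinate, wrapping the last
max_gap = max(x - p for x, p in zip(xs, prev))  # gaps 3, 1, 6 -> 6
print(width - max_gap)              # minimal x extent: 4 (route 8 -> 1 via wrap)
```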
project-rig/rig | rig/place_and_route/place/sa/python_kernel.py | _vertex_net_cost | def _vertex_net_cost(vertex, v2n, placements, has_wrap_around_links, machine):
"""Get the total cost of the nets connected to the given vertex.
Parameters
----------
vertex
The vertex whose nets we're interested in.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float
"""
total_cost = 0.0
for net in v2n[vertex]:
total_cost += _net_cost(net, placements, has_wrap_around_links,
machine)
return total_cost | python | def _vertex_net_cost(vertex, v2n, placements, has_wrap_around_links, machine):
"""Get the total cost of the nets connected to the given vertex.
Parameters
----------
vertex
The vertex whose nets we're interested in.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float
"""
total_cost = 0.0
for net in v2n[vertex]:
total_cost += _net_cost(net, placements, has_wrap_around_links,
machine)
return total_cost | [
"def",
"_vertex_net_cost",
"(",
"vertex",
",",
"v2n",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
":",
"total_cost",
"=",
"0.0",
"for",
"net",
"in",
"v2n",
"[",
"vertex",
"]",
":",
"total_cost",
"+=",
"_net_cost",
"(",
"net",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
"return",
"total_cost"
] | Get the total cost of the nets connected to the given vertex.
Parameters
----------
vertex
The vertex whose nets we're interested in.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float | [
"Get",
"the",
"total",
"cost",
"of",
"the",
"nets",
"connected",
"to",
"the",
"given",
"vertex",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L213-L234 |
project-rig/rig | rig/place_and_route/place/sa/python_kernel.py | _get_candidate_swap | def _get_candidate_swap(resources, location,
l2v, vertices_resources, fixed_vertices, machine):
"""Given a chip location, select a set of vertices which would have to be
moved elsewhere to accommodate the arrival of the specified set of
resources.
Parameters
----------
resources : {resource: value, ...}
The amount of resources which are required at the specified location.
location : (x, y)
The coordinates of the chip where the resources are sought.
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
[Vertex, ...] or None
If a (possibly empty) list, gives the set of vertices which should be
removed from the specified location to make room.
If None, the situation is impossible.
"""
# The resources already available at the given location
chip_resources = machine[location]
# The set of vertices at that location
vertices = l2v[location]
# The set of vertices to be moved from the location to free up the
# specified amount of resources
to_move = []
# While there's not enough free resource, remove an arbitrary (movable)
# vertex from the chip.
i = 0
while overallocated(subtract_resources(chip_resources, resources)):
if i >= len(vertices):
# Run out of vertices to remove from this chip, thus the situation
# must be impossible.
return None
elif vertices[i] in fixed_vertices:
# Can't move fixed vertices, just skip them.
i += 1
continue
else:
# Work out the cost change when we remove the specified vertex
vertex = vertices[i]
chip_resources = add_resources(chip_resources,
vertices_resources[vertex])
to_move.append(vertex)
i += 1
return to_move | python | def _get_candidate_swap(resources, location,
l2v, vertices_resources, fixed_vertices, machine):
"""Given a chip location, select a set of vertices which would have to be
moved elsewhere to accommodate the arrival of the specified set of
resources.
Parameters
----------
resources : {resource: value, ...}
The amount of resources which are required at the specified location.
location : (x, y)
The coordinates of the chip where the resources are sought.
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
[Vertex, ...] or None
If a (possibly empty) list, gives the set of vertices which should be
removed from the specified location to make room.
If None, the situation is impossible.
"""
# The resources already available at the given location
chip_resources = machine[location]
# The set of vertices at that location
vertices = l2v[location]
# The set of vertices to be moved from the location to free up the
# specified amount of resources
to_move = []
# While there's not enough free resource, remove an arbitrary (movable)
# vertex from the chip.
i = 0
while overallocated(subtract_resources(chip_resources, resources)):
if i >= len(vertices):
# Run out of vertices to remove from this chip, thus the situation
# must be impossible.
return None
elif vertices[i] in fixed_vertices:
# Can't move fixed vertices, just skip them.
i += 1
continue
else:
# Work out the cost change when we remove the specified vertex
vertex = vertices[i]
chip_resources = add_resources(chip_resources,
vertices_resources[vertex])
to_move.append(vertex)
i += 1
return to_move | [
"def",
"_get_candidate_swap",
"(",
"resources",
",",
"location",
",",
"l2v",
",",
"vertices_resources",
",",
"fixed_vertices",
",",
"machine",
")",
":",
"# The resources already available at the given location",
"chip_resources",
"=",
"machine",
"[",
"location",
"]",
"# The set of vertices at that location",
"vertices",
"=",
"l2v",
"[",
"location",
"]",
"# The set of vertices to be moved from the location to free up the",
"# specified amount of resources",
"to_move",
"=",
"[",
"]",
"# While there's not enough free resource, remove an arbitrary (movable)",
"# vertex from the chip.",
"i",
"=",
"0",
"while",
"overallocated",
"(",
"subtract_resources",
"(",
"chip_resources",
",",
"resources",
")",
")",
":",
"if",
"i",
">=",
"len",
"(",
"vertices",
")",
":",
"# Run out of vertices to remove from this chip, thus the situation",
"# must be impossible.",
"return",
"None",
"elif",
"vertices",
"[",
"i",
"]",
"in",
"fixed_vertices",
":",
"# Can't move fixed vertices, just skip them.",
"i",
"+=",
"1",
"continue",
"else",
":",
"# Work out the cost change when we remove the specified vertex",
"vertex",
"=",
"vertices",
"[",
"i",
"]",
"chip_resources",
"=",
"add_resources",
"(",
"chip_resources",
",",
"vertices_resources",
"[",
"vertex",
"]",
")",
"to_move",
".",
"append",
"(",
"vertex",
")",
"i",
"+=",
"1",
"return",
"to_move"
] | Given a chip location, select a set of vertices which would have to be
moved elsewhere to accommodate the arrival of the specified set of
resources.
Parameters
----------
resources : {resource: value, ...}
The amount of resources which are required at the specified location.
location : (x, y)
The coordinates of the chip where the resources are sought.
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
[Vertex, ...] or None
If a (possibly empty) list, gives the set of vertices which should be
removed from the specified location to make room.
If None, the situation is impossible. | [
"Given",
"a",
"chip",
"location",
"select",
"a",
"set",
"of",
"vertices",
"which",
"would",
"have",
"to",
"be",
"moved",
"elsewhere",
"to",
"accommodate",
"the",
"arrival",
"of",
"the",
"specified",
"set",
"of",
"resources",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L237-L292 |
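The record above leans on three resource-arithmetic helpers that are not shown in this excerpt. A minimal sketch of their implied semantics (an assumption for illustration, not rig's actual implementation — the real helpers live elsewhere in the rig package) is:

    def add_resources(a, b):
        # Per-key sum of two {resource: value} dicts.
        return {k: a.get(k, 0) + b.get(k, 0) for k in set(a) | set(b)}

    def subtract_resources(a, b):
        # Per-key difference; a negative value signals over-allocation.
        return {k: a.get(k, 0) - b.get(k, 0) for k in set(a) | set(b)}

    def overallocated(resources):
        # True if any resource has gone negative.
        return any(v < 0 for v in resources.values())

    # e.g. a chip with 2 free cores cannot host a vertex needing 3:
    assert overallocated(subtract_resources({"cores": 2}, {"cores": 3}))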
project-rig/rig | rig/place_and_route/place/sa/python_kernel.py | _swap | def _swap(vas, vas_location, vbs, vbs_location, l2v, vertices_resources,
placements, machine):
"""Swap the positions of two sets of vertices.
Parameters
----------
vas : [vertex, ...]
A set of vertices currently at vas_location.
vas_location : (x, y)
vbs : [vertex, ...]
A set of vertices currently at vbs_location.
vbs_location : (x, y)
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
placements : {vertex: (x, y), ...}
machine : :py:class:`rig.place_and_route.Machine`
"""
# Get the lists of vertices at either location
vas_location2v = l2v[vas_location]
vbs_location2v = l2v[vbs_location]
# Get the resource availability at either location
vas_resources = machine[vas_location]
vbs_resources = machine[vbs_location]
# Move all the vertices in vas into vbs.
for va in vas:
# Update the placements
placements[va] = vbs_location
# Update the location-to-vertex lookup
vas_location2v.remove(va)
vbs_location2v.append(va)
# Update the resource consumption after the move
resources = vertices_resources[va]
vas_resources = add_resources(vas_resources, resources)
vbs_resources = subtract_resources(vbs_resources, resources)
for vb in vbs:
# Update the placements
placements[vb] = vas_location
# Update the location-to-vertex lookup
vbs_location2v.remove(vb)
vas_location2v.append(vb)
# Update the resource consumption after the move
resources = vertices_resources[vb]
vas_resources = subtract_resources(vas_resources, resources)
vbs_resources = add_resources(vbs_resources, resources)
# Update the resources in the machine
machine[vas_location] = vas_resources
machine[vbs_location] = vbs_resources | python | def _swap(vas, vas_location, vbs, vbs_location, l2v, vertices_resources,
placements, machine):
"""Swap the positions of two sets of vertices.
Parameters
----------
vas : [vertex, ...]
A set of vertices currently at vas_location.
vas_location : (x, y)
vbs : [vertex, ...]
A set of vertices currently at vbs_location.
vbs_location : (x, y)
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
placements : {vertex: (x, y), ...}
machine : :py:class:`rig.place_and_route.Machine`
"""
# Get the lists of vertices at either location
vas_location2v = l2v[vas_location]
vbs_location2v = l2v[vbs_location]
# Get the resource availability at either location
vas_resources = machine[vas_location]
vbs_resources = machine[vbs_location]
# Move all the vertices in vas into vbs.
for va in vas:
# Update the placements
placements[va] = vbs_location
# Update the location-to-vertex lookup
vas_location2v.remove(va)
vbs_location2v.append(va)
# Update the resource consumption after the move
resources = vertices_resources[va]
vas_resources = add_resources(vas_resources, resources)
vbs_resources = subtract_resources(vbs_resources, resources)
for vb in vbs:
# Update the placements
placements[vb] = vas_location
# Update the location-to-vertex lookup
vbs_location2v.remove(vb)
vas_location2v.append(vb)
# Update the resource consumption after the move
resources = vertices_resources[vb]
vas_resources = subtract_resources(vas_resources, resources)
vbs_resources = add_resources(vbs_resources, resources)
# Update the resources in the machine
machine[vas_location] = vas_resources
machine[vbs_location] = vbs_resources | [
"def",
"_swap",
"(",
"vas",
",",
"vas_location",
",",
"vbs",
",",
"vbs_location",
",",
"l2v",
",",
"vertices_resources",
",",
"placements",
",",
"machine",
")",
":",
"# Get the lists of vertices at either location",
"vas_location2v",
"=",
"l2v",
"[",
"vas_location",
"]",
"vbs_location2v",
"=",
"l2v",
"[",
"vbs_location",
"]",
"# Get the resource availability at either location",
"vas_resources",
"=",
"machine",
"[",
"vas_location",
"]",
"vbs_resources",
"=",
"machine",
"[",
"vbs_location",
"]",
"# Move all the vertices in vas into vbs.",
"for",
"va",
"in",
"vas",
":",
"# Update the placements",
"placements",
"[",
"va",
"]",
"=",
"vbs_location",
"# Update the location-to-vertex lookup",
"vas_location2v",
".",
"remove",
"(",
"va",
")",
"vbs_location2v",
".",
"append",
"(",
"va",
")",
"# Update the resource consumption after the move",
"resources",
"=",
"vertices_resources",
"[",
"va",
"]",
"vas_resources",
"=",
"add_resources",
"(",
"vas_resources",
",",
"resources",
")",
"vbs_resources",
"=",
"subtract_resources",
"(",
"vbs_resources",
",",
"resources",
")",
"for",
"vb",
"in",
"vbs",
":",
"# Update the placements",
"placements",
"[",
"vb",
"]",
"=",
"vas_location",
"# Update the location-to-vertex lookup",
"vbs_location2v",
".",
"remove",
"(",
"vb",
")",
"vas_location2v",
".",
"append",
"(",
"vb",
")",
"# Update the resource consumption after the move",
"resources",
"=",
"vertices_resources",
"[",
"vb",
"]",
"vas_resources",
"=",
"subtract_resources",
"(",
"vas_resources",
",",
"resources",
")",
"vbs_resources",
"=",
"add_resources",
"(",
"vbs_resources",
",",
"resources",
")",
"# Update the resources in the machine",
"machine",
"[",
"vas_location",
"]",
"=",
"vas_resources",
"machine",
"[",
"vbs_location",
"]",
"=",
"vbs_resources"
] | Swap the positions of two sets of vertices.
Parameters
----------
vas : [vertex, ...]
A set of vertices currently at vas_location.
vas_location : (x, y)
vbs : [vertex, ...]
A set of vertices currently at vbs_location.
vbs_location : (x, y)
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
placements : {vertex: (x, y), ...}
machine : :py:class:`rig.place_and_route.Machine` | [
"Swap",
"the",
"positions",
"of",
"two",
"sets",
"of",
"vertices",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L295-L349 |
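Under those helper semantics, _swap keeps placements, l2v and machine mutually consistent, and swapping the same sets back restores the original state. A tiny self-contained check (vertex names and resource figures are illustrative only, assuming the helper sketches above):

    l2v = {(0, 0): ["a"], (1, 0): ["b"]}
    placements = {"a": (0, 0), "b": (1, 0)}
    vertices_resources = {"a": {"cores": 1}, "b": {"cores": 2}}
    machine = {(0, 0): {"cores": 16}, (1, 0): {"cores": 15}}

    _swap(["a"], (0, 0), ["b"], (1, 0),
          l2v, vertices_resources, placements, machine)
    assert placements == {"a": (1, 0), "b": (0, 0)}
    assert machine == {(0, 0): {"cores": 15}, (1, 0): {"cores": 16}}

    _swap(["a"], (1, 0), ["b"], (0, 0),  # swap back: state is restored
          l2v, vertices_resources, placements, machine)
    assert placements == {"a": (0, 0), "b": (1, 0)}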
project-rig/rig | rig/place_and_route/place/sa/python_kernel.py | _step | def _step(vertices, d_limit, temperature, placements, l2v, v2n,
vertices_resources, fixed_vertices, machine, has_wrap_around_links,
random):
"""Attempt a single swap operation: the kernel of the Simulated Annealing
algorithm.
Parameters
----------
vertices : [vertex, ...]
The set of *movable* vertices.
d_limit : int
The maximum distance over which swaps are allowed.
temperature : float > 0.0 or None
The temperature (i.e. likelihood of accepting a non-advantageous swap).
Higher temperatures mean higher chances of accepting a swap.
placements : {vertex: (x, y), ...}
The positions of all vertices, will be updated if a swap is made.
l2v : {(x, y): [vertex, ...], ...}
Lookup from chip to vertices, will be updated if a swap is made.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
Lookup from vertex to all nets that vertex is in.
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
The set of vertices which must not be moved.
machine : :py:class:`rig.place_and_route.Machine`
Describes the state of the machine including the resources actually
available on each chip given the current placements. Updated if a swap
is made.
has_wrap_around_links : bool
Should the placements attempt to make use of wrap-around links?
random : :py:class:`random.Random`
The random number generator to use.
Returns
-------
(swapped, delta)
swapped is a boolean indicating if a swap was made.
delta is a float indicating the change in cost resulting from the swap
(or 0.0 when no swap is made).
"""
# Special case: If the machine is a singleton, no swaps can be made so just
# terminate.
if machine.width == 1 and machine.height == 1:
return (False, 0.0)
# Select a vertex to swap at random
src_vertex = random.choice(vertices)
# Select a random (nearby) location to swap the vertex with. Note: this is
# guaranteed to be different from the vertex's current location, otherwise
# the swap cannot change the cost of the placements.
# XXX: Does not consider hexagonal properties of the system!
src_location = placements[src_vertex]
dst_location = src_location
while dst_location == src_location:
if has_wrap_around_links:
dst_location = tuple(random.randint(v - d_limit,
v + d_limit) % limit
for v, limit
in [(src_location[0], machine.width),
(src_location[1], machine.height)])
else:
dst_location = tuple(random.randint(max(v - d_limit, 0),
min(v + d_limit, limit-1))
for v, limit
in [(src_location[0], machine.width),
(src_location[1], machine.height)])
# If we've inadvertently selected a dead chip to swap to, abort the swap.
if dst_location not in machine:
return (False, 0.0)
# Find out which vertices (if any) must be swapped out of the destination
# to make room for the vertex we're moving.
src_resources = vertices_resources[src_vertex]
dst_vertices = _get_candidate_swap(src_resources, dst_location,
l2v, vertices_resources,
fixed_vertices, machine)
# The destination simply isn't big enough (no matter how many vertices at
# the destination are moved), abort the swap.
if dst_vertices is None:
return (False, 0.0)
# Make sure that any vertices moved out of the destination will fit in the
# space left in the source location. If there isn't enough space, abort the
# swap.
resources = machine[src_location]
resources = add_resources(resources, src_resources)
for dst_vertex in dst_vertices:
resources = subtract_resources(resources,
vertices_resources[dst_vertex])
if overallocated(resources):
return (False, 0.0)
# Work out the cost of the nets involved *before* swapping
cost_before = _vertex_net_cost(src_vertex, v2n, placements,
has_wrap_around_links, machine)
for dst_vertex in dst_vertices:
cost_before += _vertex_net_cost(dst_vertex, v2n, placements,
has_wrap_around_links, machine)
# Swap the vertices
_swap([src_vertex], src_location,
dst_vertices, dst_location,
l2v, vertices_resources, placements, machine)
# Work out the new cost
cost_after = _vertex_net_cost(src_vertex, v2n, placements,
has_wrap_around_links, machine)
for dst_vertex in dst_vertices:
cost_after += _vertex_net_cost(dst_vertex, v2n, placements,
has_wrap_around_links, machine)
# If the swap was beneficial, keep it, otherwise keep it with a probability
# related to just how bad the cost change is and the temperature.
delta = cost_after - cost_before
if delta <= 0.0 or random.random() < math.exp(-delta/temperature):
# Keep the swap!
return (True, delta)
else:
# Revert the swap
_swap([src_vertex], dst_location,
dst_vertices, src_location,
l2v, vertices_resources, placements, machine)
return (False, 0.0) | python | def _step(vertices, d_limit, temperature, placements, l2v, v2n,
vertices_resources, fixed_vertices, machine, has_wrap_around_links,
random):
"""Attempt a single swap operation: the kernel of the Simulated Annealing
algorithm.
Parameters
----------
vertices : [vertex, ...]
The set of *movable* vertices.
d_limit : int
The maximum distance over which swaps are allowed.
temperature : float > 0.0 or None
The temperature (i.e. likelihood of accepting a non-advantageous swap).
Higher temperatures mean higher chances of accepting a swap.
placements : {vertex: (x, y), ...}
The positions of all vertices, will be updated if a swap is made.
l2v : {(x, y): [vertex, ...], ...}
Lookup from chip to vertices, will be updated if a swap is made.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
Lookup from vertex to all nets that vertex is in.
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
The set of vertices which must not be moved.
machine : :py:class:`rig.place_and_route.Machine`
Describes the state of the machine including the resources actually
available on each chip given the current placements. Updated if a swap
is made.
has_wrap_around_links : bool
Should the placements attempt to make use of wrap-around links?
random : :py:class:`random.Random`
The random number generator to use.
Returns
-------
(swapped, delta)
swapped is a boolean indicating if a swap was made.
delta is a float indicating the change in cost resulting from the swap
(or 0.0 when no swap is made).
"""
# Special case: If the machine is a singleton, no swaps can be made so just
# terminate.
if machine.width == 1 and machine.height == 1:
return (False, 0.0)
# Select a vertex to swap at random
src_vertex = random.choice(vertices)
# Select a random (nearby) location to swap the vertex with. Note: this is
# guaranteed to be different from the vertex's current location, otherwise
# the swap cannot change the cost of the placements.
# XXX: Does not consider hexagonal properties of the system!
src_location = placements[src_vertex]
dst_location = src_location
while dst_location == src_location:
if has_wrap_around_links:
dst_location = tuple(random.randint(v - d_limit,
v + d_limit) % limit
for v, limit
in [(src_location[0], machine.width),
(src_location[1], machine.height)])
else:
dst_location = tuple(random.randint(max(v - d_limit, 0),
min(v + d_limit, limit-1))
for v, limit
in [(src_location[0], machine.width),
(src_location[1], machine.height)])
# If we've inadvertently selected a dead chip to swap to, abort the swap.
if dst_location not in machine:
return (False, 0.0)
# Find out which vertices (if any) must be swapped out of the destination
# to make room for the vertex we're moving.
src_resources = vertices_resources[src_vertex]
dst_vertices = _get_candidate_swap(src_resources, dst_location,
l2v, vertices_resources,
fixed_vertices, machine)
# The destination simply isn't big enough (no matter how many vertices at
# the destination are moved), abort the swap.
if dst_vertices is None:
return (False, 0.0)
# Make sure that any vertices moved out of the destination will fit in the
# space left in the source location. If there isn't enough space, abort the
# swap.
resources = machine[src_location]
resources = add_resources(resources, src_resources)
for dst_vertex in dst_vertices:
resources = subtract_resources(resources,
vertices_resources[dst_vertex])
if overallocated(resources):
return (False, 0.0)
# Work out the cost of the nets involved *before* swapping
cost_before = _vertex_net_cost(src_vertex, v2n, placements,
has_wrap_around_links, machine)
for dst_vertex in dst_vertices:
cost_before += _vertex_net_cost(dst_vertex, v2n, placements,
has_wrap_around_links, machine)
# Swap the vertices
_swap([src_vertex], src_location,
dst_vertices, dst_location,
l2v, vertices_resources, placements, machine)
# Work out the new cost
cost_after = _vertex_net_cost(src_vertex, v2n, placements,
has_wrap_around_links, machine)
for dst_vertex in dst_vertices:
cost_after += _vertex_net_cost(dst_vertex, v2n, placements,
has_wrap_around_links, machine)
# If the swap was beneficial, keep it, otherwise keep it with a probability
# related to just how bad the cost change is and the temperature.
delta = cost_after - cost_before
if delta <= 0.0 or random.random() < math.exp(-delta/temperature):
# Keep the swap!
return (True, delta)
else:
# Revert the swap
_swap([src_vertex], dst_location,
dst_vertices, src_location,
l2v, vertices_resources, placements, machine)
return (False, 0.0) | [
"def",
"_step",
"(",
"vertices",
",",
"d_limit",
",",
"temperature",
",",
"placements",
",",
"l2v",
",",
"v2n",
",",
"vertices_resources",
",",
"fixed_vertices",
",",
"machine",
",",
"has_wrap_around_links",
",",
"random",
")",
":",
"# Special case: If the machine is a singleton, no swaps can be made so just",
"# terminate.",
"if",
"machine",
".",
"width",
"==",
"1",
"and",
"machine",
".",
"height",
"==",
"1",
":",
"return",
"(",
"False",
",",
"0.0",
")",
"# Select a vertex to swap at random",
"src_vertex",
"=",
"random",
".",
"choice",
"(",
"vertices",
")",
"# Select a random (nearby) location to swap the vertex with. Note: this is",
"# guaranteed to be different from the selected vertex, otherwise the swap",
"# cannot change the cost of the placements.",
"# XXX: Does not consider hexagonal properties of the system!",
"src_location",
"=",
"placements",
"[",
"src_vertex",
"]",
"dst_location",
"=",
"src_location",
"while",
"dst_location",
"==",
"src_location",
":",
"if",
"has_wrap_around_links",
":",
"dst_location",
"=",
"tuple",
"(",
"random",
".",
"randint",
"(",
"v",
"-",
"d_limit",
",",
"v",
"+",
"d_limit",
")",
"%",
"limit",
"for",
"v",
",",
"limit",
"in",
"[",
"(",
"src_location",
"[",
"0",
"]",
",",
"machine",
".",
"width",
")",
",",
"(",
"src_location",
"[",
"1",
"]",
",",
"machine",
".",
"height",
")",
"]",
")",
"else",
":",
"dst_location",
"=",
"tuple",
"(",
"random",
".",
"randint",
"(",
"max",
"(",
"v",
"-",
"d_limit",
",",
"0",
")",
",",
"min",
"(",
"v",
"+",
"d_limit",
",",
"limit",
"-",
"1",
")",
")",
"for",
"v",
",",
"limit",
"in",
"[",
"(",
"src_location",
"[",
"0",
"]",
",",
"machine",
".",
"width",
")",
",",
"(",
"src_location",
"[",
"1",
"]",
",",
"machine",
".",
"height",
")",
"]",
")",
"# If we've inadvertently selected a dead chip to swap to, abort the swap.",
"if",
"dst_location",
"not",
"in",
"machine",
":",
"return",
"(",
"False",
",",
"0.0",
")",
"# Find out which vertices (if any) must be swapped out of the destination",
"# to make room for the vertex we're moving.",
"src_resources",
"=",
"vertices_resources",
"[",
"src_vertex",
"]",
"dst_vertices",
"=",
"_get_candidate_swap",
"(",
"src_resources",
",",
"dst_location",
",",
"l2v",
",",
"vertices_resources",
",",
"fixed_vertices",
",",
"machine",
")",
"# The destination simply isn't big enough (no matter how many vertices at",
"# the destination are moved), abort the swap.",
"if",
"dst_vertices",
"is",
"None",
":",
"return",
"(",
"False",
",",
"0.0",
")",
"# Make sure that any vertices moved out of the destination will fit in the",
"# space left in the source location. If there isn't enough space, abort the",
"# swap.",
"resources",
"=",
"machine",
"[",
"src_location",
"]",
"resources",
"=",
"add_resources",
"(",
"resources",
",",
"src_resources",
")",
"for",
"dst_vertex",
"in",
"dst_vertices",
":",
"resources",
"=",
"subtract_resources",
"(",
"resources",
",",
"vertices_resources",
"[",
"dst_vertex",
"]",
")",
"if",
"overallocated",
"(",
"resources",
")",
":",
"return",
"(",
"False",
",",
"0.0",
")",
"# Work out the cost of the nets involved *before* swapping",
"cost_before",
"=",
"_vertex_net_cost",
"(",
"src_vertex",
",",
"v2n",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
"for",
"dst_vertex",
"in",
"dst_vertices",
":",
"cost_before",
"+=",
"_vertex_net_cost",
"(",
"dst_vertex",
",",
"v2n",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
"# Swap the vertices",
"_swap",
"(",
"[",
"src_vertex",
"]",
",",
"src_location",
",",
"dst_vertices",
",",
"dst_location",
",",
"l2v",
",",
"vertices_resources",
",",
"placements",
",",
"machine",
")",
"# Work out the new cost",
"cost_after",
"=",
"_vertex_net_cost",
"(",
"src_vertex",
",",
"v2n",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
"for",
"dst_vertex",
"in",
"dst_vertices",
":",
"cost_after",
"+=",
"_vertex_net_cost",
"(",
"dst_vertex",
",",
"v2n",
",",
"placements",
",",
"has_wrap_around_links",
",",
"machine",
")",
"# If the swap was beneficial, keep it, otherwise keep it with a probability",
"# related to just how bad the cost change is is and the temperature.",
"delta",
"=",
"cost_after",
"-",
"cost_before",
"if",
"delta",
"<=",
"0.0",
"or",
"random",
".",
"random",
"(",
")",
"<",
"math",
".",
"exp",
"(",
"-",
"delta",
"/",
"temperature",
")",
":",
"# Keep the swap!",
"return",
"(",
"True",
",",
"delta",
")",
"else",
":",
"# Revert the swap",
"_swap",
"(",
"[",
"src_vertex",
"]",
",",
"dst_location",
",",
"dst_vertices",
",",
"src_location",
",",
"l2v",
",",
"vertices_resources",
",",
"placements",
",",
"machine",
")",
"return",
"(",
"False",
",",
"0.0",
")"
] | Attempt a single swap operation: the kernel of the Simulated Annealing
algorithm.
Parameters
----------
vertices : [vertex, ...]
The set of *movable* vertices.
d_limit : int
The maximum distance over which swaps are allowed.
temperature : float > 0.0 or None
The temperature (i.e. likelihood of accepting a non-advantageous swap).
Higher temperatures mean higher chances of accepting a swap.
placements : {vertex: (x, y), ...}
The positions of all vertices, will be updated if a swap is made.
l2v : {(x, y): [vertex, ...], ...}
Lookup from chip to vertices, will be updated if a swap is made.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
Lookup from vertex to all nets that vertex is in.
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
The set of vertices which must not be moved.
machine : :py:class:`rig.place_and_route.Machine`
Describes the state of the machine including the resources actually
available on each chip given the current placements. Updated if a swap
is made.
has_wrap_around_links : bool
Should the placements attempt to make use of wrap-around links?
random : :py:class:`random.Random`
The random number generator to use.
Returns
-------
(swapped, delta)
swapped is a boolean indicating if a swap was made.
delta is a float indicating the change in cost resulting from the swap
(or 0.0 when no swap is made). | [
"Attempt",
"a",
"single",
"swap",
"operation",
":",
"the",
"kernel",
"of",
"the",
"Simulated",
"Annealing",
"algorithm",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L352-L478 |
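The accept/reject decision at the end of _step is the standard Metropolis criterion. Isolated as a worked example:

    import math
    import random

    def metropolis_accept(delta, temperature, rng=random):
        # Always keep an improving swap; keep a worsening one with
        # probability exp(-delta / T), which rises with temperature.
        return delta <= 0.0 or rng.random() < math.exp(-delta / temperature)

    # A swap that worsens the cost by 2.0 is kept with probability
    # exp(-2/1) ~= 0.135 at T=1.0, but exp(-2/10) ~= 0.819 at T=10.0.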
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker._reset | def _reset(self):
"""Initialization that must happen before the broker is (re)started."""
self._subscriber_thread = None
self._must_stop = threading.Event()
self._number_periodic_tasks = 0 | python | def _reset(self):
"""Initialization that must happen before the broker is (re)started."""
self._subscriber_thread = None
self._must_stop = threading.Event()
self._number_periodic_tasks = 0 | [
"def",
"_reset",
"(",
"self",
")",
":",
"self",
".",
"_subscriber_thread",
"=",
"None",
"self",
".",
"_must_stop",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"_number_periodic_tasks",
"=",
"0"
] | Initialization that must happen before the broker is (re)started. | [
"Initialization",
"that",
"must",
"happen",
"before",
"the",
"broker",
"is",
"(",
"re",
")",
"started",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L56-L60 |
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker._load_script | def _load_script(self, filename: str) -> Script:
"""Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute.
"""
with open(path.join(here, 'redis_scripts', filename), mode='rb') as f:
script_data = f.read()
rv = self._r.register_script(script_data)
if script_data.startswith(b'-- idempotency protected script'):
self._idempotency_protected_scripts.append(rv)
return rv | python | def _load_script(self, filename: str) -> Script:
"""Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute.
"""
with open(path.join(here, 'redis_scripts', filename), mode='rb') as f:
script_data = f.read()
rv = self._r.register_script(script_data)
if script_data.startswith(b'-- idempotency protected script'):
self._idempotency_protected_scripts.append(rv)
return rv | [
"def",
"_load_script",
"(",
"self",
",",
"filename",
":",
"str",
")",
"->",
"Script",
":",
"with",
"open",
"(",
"path",
".",
"join",
"(",
"here",
",",
"'redis_scripts'",
",",
"filename",
")",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"script_data",
"=",
"f",
".",
"read",
"(",
")",
"rv",
"=",
"self",
".",
"_r",
".",
"register_script",
"(",
"script_data",
")",
"if",
"script_data",
".",
"startswith",
"(",
"b'-- idempotency protected script'",
")",
":",
"self",
".",
"_idempotency_protected_scripts",
".",
"append",
"(",
"rv",
")",
"return",
"rv"
] | Load a Lua script.
Read the Lua script file to generate its Script object. If the script
starts with a magic string, add it to the list of scripts requiring an
idempotency token to execute. | [
"Load",
"a",
"Lua",
"script",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L62-L74 |
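register_script here is the standard redis-py mechanism: it returns a Script callable that executes via EVALSHA and transparently re-loads the source on a NOSCRIPT error. Minimal stand-alone usage:

    import redis

    r = redis.Redis()
    incr_by = r.register_script(
        "return redis.call('INCRBY', KEYS[1], ARGV[1])")
    incr_by(keys=['counter'], args=[5])  # atomically adds 5 server-side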
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.enqueue_jobs | def enqueue_jobs(self, jobs: Iterable[Job]):
"""Enqueue a batch of jobs."""
jobs_to_queue = list()
for job in jobs:
if job.should_start:
job.status = JobStatus.QUEUED
else:
job.status = JobStatus.WAITING
jobs_to_queue.append(job.serialize())
if jobs_to_queue:
self._run_script(
self._enqueue_job,
self._to_namespaced(NOTIFICATIONS_KEY),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
self.namespace,
self._to_namespaced(FUTURE_JOBS_KEY),
*jobs_to_queue
) | python | def enqueue_jobs(self, jobs: Iterable[Job]):
"""Enqueue a batch of jobs."""
jobs_to_queue = list()
for job in jobs:
if job.should_start:
job.status = JobStatus.QUEUED
else:
job.status = JobStatus.WAITING
jobs_to_queue.append(job.serialize())
if jobs_to_queue:
self._run_script(
self._enqueue_job,
self._to_namespaced(NOTIFICATIONS_KEY),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
self.namespace,
self._to_namespaced(FUTURE_JOBS_KEY),
*jobs_to_queue
) | [
"def",
"enqueue_jobs",
"(",
"self",
",",
"jobs",
":",
"Iterable",
"[",
"Job",
"]",
")",
":",
"jobs_to_queue",
"=",
"list",
"(",
")",
"for",
"job",
"in",
"jobs",
":",
"if",
"job",
".",
"should_start",
":",
"job",
".",
"status",
"=",
"JobStatus",
".",
"QUEUED",
"else",
":",
"job",
".",
"status",
"=",
"JobStatus",
".",
"WAITING",
"jobs_to_queue",
".",
"append",
"(",
"job",
".",
"serialize",
"(",
")",
")",
"if",
"jobs_to_queue",
":",
"self",
".",
"_run_script",
"(",
"self",
".",
"_enqueue_job",
",",
"self",
".",
"_to_namespaced",
"(",
"NOTIFICATIONS_KEY",
")",
",",
"self",
".",
"_to_namespaced",
"(",
"RUNNING_JOBS_KEY",
".",
"format",
"(",
"self",
".",
"_id",
")",
")",
",",
"self",
".",
"namespace",
",",
"self",
".",
"_to_namespaced",
"(",
"FUTURE_JOBS_KEY",
")",
",",
"*",
"jobs_to_queue",
")"
] | Enqueue a batch of jobs. | [
"Enqueue",
"a",
"batch",
"of",
"jobs",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L100-L118 |
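_run_script itself is not part of this excerpt. Given the idempotency-token bookkeeping seen in _load_script, a plausible minimal shape — written purely as an assumption, not spinach's actual code — is:

    import uuid

    def _run_script(self, script, *args):
        # Hypothetical sketch: scripts flagged as idempotency-protected
        # get a fresh token prepended so a retried call can be detected
        # server-side; everything is forwarded to the script as ARGV.
        if script in self._idempotency_protected_scripts:
            args = (str(uuid.uuid4()),) + args
        return script(args=args)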
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.get_jobs_from_queue | def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
jobs_json_string = self._run_script(
self._get_jobs_from_queue,
self._to_namespaced(queue),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
JobStatus.RUNNING.value,
max_jobs
)
jobs = json.loads(jobs_json_string.decode())
jobs = [Job.deserialize(job) for job in jobs]
return jobs | python | def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
jobs_json_string = self._run_script(
self._get_jobs_from_queue,
self._to_namespaced(queue),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
JobStatus.RUNNING.value,
max_jobs
)
jobs = json.loads(jobs_json_string.decode())
jobs = [Job.deserialize(job) for job in jobs]
return jobs | [
"def",
"get_jobs_from_queue",
"(",
"self",
",",
"queue",
":",
"str",
",",
"max_jobs",
":",
"int",
")",
"->",
"List",
"[",
"Job",
"]",
":",
"jobs_json_string",
"=",
"self",
".",
"_run_script",
"(",
"self",
".",
"_get_jobs_from_queue",
",",
"self",
".",
"_to_namespaced",
"(",
"queue",
")",
",",
"self",
".",
"_to_namespaced",
"(",
"RUNNING_JOBS_KEY",
".",
"format",
"(",
"self",
".",
"_id",
")",
")",
",",
"JobStatus",
".",
"RUNNING",
".",
"value",
",",
"max_jobs",
")",
"jobs",
"=",
"json",
".",
"loads",
"(",
"jobs_json_string",
".",
"decode",
"(",
")",
")",
"jobs",
"=",
"[",
"Job",
".",
"deserialize",
"(",
"job",
")",
"for",
"job",
"in",
"jobs",
"]",
"return",
"jobs"
] | Get jobs from a queue. | [
"Get",
"jobs",
"from",
"a",
"queue",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L145-L158 |
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.register_periodic_tasks | def register_periodic_tasks(self, tasks: Iterable[Task]):
"""Register tasks that need to be scheduled periodically."""
tasks = [task.serialize() for task in tasks]
self._number_periodic_tasks = len(tasks)
self._run_script(
self._register_periodic_tasks,
math.ceil(datetime.now(timezone.utc).timestamp()),
self._to_namespaced(PERIODIC_TASKS_HASH_KEY),
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
*tasks
) | python | def register_periodic_tasks(self, tasks: Iterable[Task]):
"""Register tasks that need to be scheduled periodically."""
tasks = [task.serialize() for task in tasks]
self._number_periodic_tasks = len(tasks)
self._run_script(
self._register_periodic_tasks,
math.ceil(datetime.now(timezone.utc).timestamp()),
self._to_namespaced(PERIODIC_TASKS_HASH_KEY),
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
*tasks
) | [
"def",
"register_periodic_tasks",
"(",
"self",
",",
"tasks",
":",
"Iterable",
"[",
"Task",
"]",
")",
":",
"tasks",
"=",
"[",
"task",
".",
"serialize",
"(",
")",
"for",
"task",
"in",
"tasks",
"]",
"self",
".",
"_number_periodic_tasks",
"=",
"len",
"(",
"tasks",
")",
"self",
".",
"_run_script",
"(",
"self",
".",
"_register_periodic_tasks",
",",
"math",
".",
"ceil",
"(",
"datetime",
".",
"now",
"(",
"timezone",
".",
"utc",
")",
".",
"timestamp",
"(",
")",
")",
",",
"self",
".",
"_to_namespaced",
"(",
"PERIODIC_TASKS_HASH_KEY",
")",
",",
"self",
".",
"_to_namespaced",
"(",
"PERIODIC_TASKS_QUEUE_KEY",
")",
",",
"*",
"tasks",
")"
] | Register tasks that need to be scheduled periodically. | [
"Register",
"tasks",
"that",
"need",
"to",
"be",
"scheduled",
"periodically",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L203-L213 |
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.inspect_periodic_tasks | def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
"""Get the next periodic task schedule.
Used only for debugging and during tests.
"""
rv = self._r.zrangebyscore(
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
'-inf', '+inf', withscores=True
)
return [(int(r[1]), r[0].decode()) for r in rv] | python | def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
"""Get the next periodic task schedule.
Used only for debugging and during tests.
"""
rv = self._r.zrangebyscore(
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
'-inf', '+inf', withscores=True
)
return [(int(r[1]), r[0].decode()) for r in rv] | [
"def",
"inspect_periodic_tasks",
"(",
"self",
")",
"->",
"List",
"[",
"Tuple",
"[",
"int",
",",
"str",
"]",
"]",
":",
"rv",
"=",
"self",
".",
"_r",
".",
"zrangebyscore",
"(",
"self",
".",
"_to_namespaced",
"(",
"PERIODIC_TASKS_QUEUE_KEY",
")",
",",
"'-inf'",
",",
"'+inf'",
",",
"withscores",
"=",
"True",
")",
"return",
"[",
"(",
"int",
"(",
"r",
"[",
"1",
"]",
")",
",",
"r",
"[",
"0",
"]",
".",
"decode",
"(",
")",
")",
"for",
"r",
"in",
"rv",
"]"
] | Get the next periodic task schedule.
Used only for debugging and during tests. | [
"Get",
"the",
"next",
"periodic",
"task",
"schedule",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L215-L224 |
NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.next_future_periodic_delta | def next_future_periodic_delta(self) -> Optional[float]:
"""Give the amount of seconds before the next periodic task is due."""
rv = self._r.zrangebyscore(
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
'-inf', '+inf', start=0, num=1, withscores=True,
score_cast_func=int
)
if not rv:
return None
now = datetime.now(timezone.utc).timestamp()
next_event_time = rv[0][1]
if next_event_time < now:
return 0
return next_event_time - now | python | def next_future_periodic_delta(self) -> Optional[float]:
"""Give the amount of seconds before the next periodic task is due."""
rv = self._r.zrangebyscore(
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
'-inf', '+inf', start=0, num=1, withscores=True,
score_cast_func=int
)
if not rv:
return None
now = datetime.now(timezone.utc).timestamp()
next_event_time = rv[0][1]
if next_event_time < now:
return 0
return next_event_time - now | [
"def",
"next_future_periodic_delta",
"(",
"self",
")",
"->",
"Optional",
"[",
"float",
"]",
":",
"rv",
"=",
"self",
".",
"_r",
".",
"zrangebyscore",
"(",
"self",
".",
"_to_namespaced",
"(",
"PERIODIC_TASKS_QUEUE_KEY",
")",
",",
"'-inf'",
",",
"'+inf'",
",",
"start",
"=",
"0",
",",
"num",
"=",
"1",
",",
"withscores",
"=",
"True",
",",
"score_cast_func",
"=",
"int",
")",
"if",
"not",
"rv",
":",
"return",
"None",
"now",
"=",
"datetime",
".",
"now",
"(",
"timezone",
".",
"utc",
")",
".",
"timestamp",
"(",
")",
"next_event_time",
"=",
"rv",
"[",
"0",
"]",
"[",
"1",
"]",
"if",
"next_event_time",
"<",
"now",
":",
"return",
"0",
"return",
"next_event_time",
"-",
"now"
] | Give the number of seconds before the next periodic task is due. | [
"Give",
"the",
"amount",
"of",
"seconds",
"before",
"the",
"next",
"periodic",
"task",
"is",
"due",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L227-L242 |
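The periodic-task methods above all treat one sorted set as a schedule: members are tasks, scores are the next due time in integer epoch seconds, so "time until the next event" is a single range query. Stand-alone illustration (the key name is made up for the example):

    import time
    import redis

    r = redis.Redis()
    r.zadd('demo/_periodic_tasks_queue', {'task-a': int(time.time()) + 30})
    rv = r.zrangebyscore('demo/_periodic_tasks_queue', '-inf', '+inf',
                         start=0, num=1, withscores=True)
    if rv:
        delta = max(rv[0][1] - time.time(), 0)  # roughly 30.0 seconds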
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_neutron_agent.py | main | def main():
"""The entry point for the HNV Agent."""
neutron_config.register_agent_state_opts_helper(CONF)
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
hnv_agent = HNVAgent()
# Start everything.
LOG.info("Agent initialized successfully, now running... ")
hnv_agent.daemon_loop() | python | def main():
"""The entry point for the HNV Agent."""
neutron_config.register_agent_state_opts_helper(CONF)
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
hnv_agent = HNVAgent()
# Start everything.
LOG.info("Agent initialized successfully, now running... ")
hnv_agent.daemon_loop() | [
"def",
"main",
"(",
")",
":",
"neutron_config",
".",
"register_agent_state_opts_helper",
"(",
"CONF",
")",
"common_config",
".",
"init",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"neutron_config",
".",
"setup_logging",
"(",
")",
"hnv_agent",
"=",
"HNVAgent",
"(",
")",
"# Start everything.",
"LOG",
".",
"info",
"(",
"\"Agent initialized successfully, now running... \"",
")",
"hnv_agent",
".",
"daemon_loop",
"(",
")"
] | The entry point for the HNV Agent. | [
"The",
"entry",
"point",
"for",
"the",
"HNV",
"Agent",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_neutron_agent.py#L94-L104 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_neutron_agent.py | HNVAgent._provision_network | def _provision_network(self, port_id, net_uuid, network_type,
physical_network, segmentation_id):
"""Provision the network with the received information."""
LOG.info("Provisioning network %s", net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
vswitch_map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = vswitch_map | python | def _provision_network(self, port_id, net_uuid, network_type,
physical_network, segmentation_id):
"""Provision the network with the received information."""
LOG.info("Provisioning network %s", net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
vswitch_map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = vswitch_map | [
"def",
"_provision_network",
"(",
"self",
",",
"port_id",
",",
"net_uuid",
",",
"network_type",
",",
"physical_network",
",",
"segmentation_id",
")",
":",
"LOG",
".",
"info",
"(",
"\"Provisioning network %s\"",
",",
"net_uuid",
")",
"vswitch_name",
"=",
"self",
".",
"_get_vswitch_name",
"(",
"network_type",
",",
"physical_network",
")",
"vswitch_map",
"=",
"{",
"'network_type'",
":",
"network_type",
",",
"'vswitch_name'",
":",
"vswitch_name",
",",
"'ports'",
":",
"[",
"]",
",",
"'vlan_id'",
":",
"segmentation_id",
"}",
"self",
".",
"_network_vswitch_map",
"[",
"net_uuid",
"]",
"=",
"vswitch_map"
] | Provision the network with the received information. | [
"Provision",
"the",
"network",
"with",
"the",
"received",
"information",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_neutron_agent.py#L57-L68 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_neutron_agent.py | HNVAgent._port_bound | def _port_bound(self, port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov):
"""Bind the port to the recived network."""
super(HNVAgent, self)._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id,
port_security_enabled,
set_port_sriov)
LOG.debug("Getting the profile id for the current port.")
profile_id = self._neutron_client.get_port_profile_id(port_id)
LOG.debug("Trying to set port profile id %r for the current port %r.",
profile_id, port_id)
self._utils.set_vswitch_port_profile_id(
switch_port_name=port_id,
profile_id=profile_id,
profile_data=h_const.PROFILE_DATA,
profile_name=h_const.PROFILE_NAME,
net_cfg_instance_id=h_const.NET_CFG_INSTANCE_ID,
cdn_label_id=h_const.CDN_LABEL_ID,
cdn_label_string=h_const.CDN_LABEL_STRING,
vendor_id=h_const.VENDOR_ID,
vendor_name=h_const.VENDOR_NAME) | python | def _port_bound(self, port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov):
"""Bind the port to the recived network."""
super(HNVAgent, self)._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id,
port_security_enabled,
set_port_sriov)
LOG.debug("Getting the profile id for the current port.")
profile_id = self._neutron_client.get_port_profile_id(port_id)
LOG.debug("Trying to set port profile id %r for the current port %r.",
profile_id, port_id)
self._utils.set_vswitch_port_profile_id(
switch_port_name=port_id,
profile_id=profile_id,
profile_data=h_const.PROFILE_DATA,
profile_name=h_const.PROFILE_NAME,
net_cfg_instance_id=h_const.NET_CFG_INSTANCE_ID,
cdn_label_id=h_const.CDN_LABEL_ID,
cdn_label_string=h_const.CDN_LABEL_STRING,
vendor_id=h_const.VENDOR_ID,
vendor_name=h_const.VENDOR_NAME) | [
"def",
"_port_bound",
"(",
"self",
",",
"port_id",
",",
"network_id",
",",
"network_type",
",",
"physical_network",
",",
"segmentation_id",
",",
"port_security_enabled",
",",
"set_port_sriov",
")",
":",
"super",
"(",
"HNVAgent",
",",
"self",
")",
".",
"_port_bound",
"(",
"port_id",
",",
"network_id",
",",
"network_type",
",",
"physical_network",
",",
"segmentation_id",
",",
"port_security_enabled",
",",
"set_port_sriov",
")",
"LOG",
".",
"debug",
"(",
"\"Getting the profile id for the current port.\"",
")",
"profile_id",
"=",
"self",
".",
"_neutron_client",
".",
"get_port_profile_id",
"(",
"port_id",
")",
"LOG",
".",
"debug",
"(",
"\"Trying to set port profile id %r for the current port %r.\"",
",",
"profile_id",
",",
"port_id",
")",
"self",
".",
"_utils",
".",
"set_vswitch_port_profile_id",
"(",
"switch_port_name",
"=",
"port_id",
",",
"profile_id",
"=",
"profile_id",
",",
"profile_data",
"=",
"h_const",
".",
"PROFILE_DATA",
",",
"profile_name",
"=",
"h_const",
".",
"PROFILE_NAME",
",",
"net_cfg_instance_id",
"=",
"h_const",
".",
"NET_CFG_INSTANCE_ID",
",",
"cdn_label_id",
"=",
"h_const",
".",
"CDN_LABEL_ID",
",",
"cdn_label_string",
"=",
"h_const",
".",
"CDN_LABEL_STRING",
",",
"vendor_id",
"=",
"h_const",
".",
"VENDOR_ID",
",",
"vendor_name",
"=",
"h_const",
".",
"VENDOR_NAME",
")"
] | Bind the port to the received network. | [
"Bind",
"the",
"port",
"to",
"the",
"recived",
"network",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_neutron_agent.py#L70-L91 |
project-rig/rig | rig/machine_control/scp_connection.py | SCPConnection.send_scp | def send_scp(self, buffer_size, x, y, p, cmd, arg1=0, arg2=0, arg3=0,
data=b'', expected_args=3, timeout=0.0):
"""Transmit a packet to the SpiNNaker machine and block until an
acknowledgement is received.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK; this determines how many
bytes will be expected in a socket.
x : int
y : int
p : int
cmd : int
arg1 : int
arg2 : int
arg3 : int
data : bytestring
expected_args : int
The number of arguments (0-3) that are expected in the returned
packet.
timeout : float
Additional timeout in seconds to wait for a reply on top of the
default specified upon instantiation.
Returns
-------
:py:class:`~rig.machine_control.packets.SCPPacket`
The packet that was received in acknowledgement of the transmitted
packet.
"""
# This is implemented as a single burst packet sent using the bursty
# interface. This significantly reduces code duplication.
# Construct a callable to retain the returned packet for us
class Callback(object):
def __init__(self):
self.packet = None
def __call__(self, packet):
self.packet = SCPPacket.from_bytestring(
packet, n_args=expected_args
)
# Create the packet to send
callback = Callback()
packets = [
scpcall(x, y, p, cmd, arg1, arg2, arg3, data, callback, timeout)
]
# Send the burst
self.send_scp_burst(buffer_size, 1, packets)
# Return the received packet
assert callback.packet is not None
return callback.packet | python | def send_scp(self, buffer_size, x, y, p, cmd, arg1=0, arg2=0, arg3=0,
data=b'', expected_args=3, timeout=0.0):
"""Transmit a packet to the SpiNNaker machine and block until an
acknowledgement is received.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK; this determines how many
bytes will be expected in a socket.
x : int
y : int
p : int
cmd : int
arg1 : int
arg2 : int
arg3 : int
data : bytestring
expected_args : int
The number of arguments (0-3) that are expected in the returned
packet.
timeout : float
Additional timeout in seconds to wait for a reply on top of the
default specified upon instantiation.
Returns
-------
:py:class:`~rig.machine_control.packets.SCPPacket`
The packet that was received in acknowledgement of the transmitted
packet.
"""
# This is implemented as a single burst packet sent using the bursty
# interface. This significantly reduces code duplication.
# Construct a callable to retain the returned packet for us
class Callback(object):
def __init__(self):
self.packet = None
def __call__(self, packet):
self.packet = SCPPacket.from_bytestring(
packet, n_args=expected_args
)
# Create the packet to send
callback = Callback()
packets = [
scpcall(x, y, p, cmd, arg1, arg2, arg3, data, callback, timeout)
]
# Send the burst
self.send_scp_burst(buffer_size, 1, packets)
# Return the received packet
assert callback.packet is not None
return callback.packet | [
"def",
"send_scp",
"(",
"self",
",",
"buffer_size",
",",
"x",
",",
"y",
",",
"p",
",",
"cmd",
",",
"arg1",
"=",
"0",
",",
"arg2",
"=",
"0",
",",
"arg3",
"=",
"0",
",",
"data",
"=",
"b''",
",",
"expected_args",
"=",
"3",
",",
"timeout",
"=",
"0.0",
")",
":",
"# This is implemented as a single burst packet sent using the bursty",
"# interface. This significantly reduces code duplication.",
"# Construct a callable to retain the returned packet for us",
"class",
"Callback",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"packet",
"=",
"None",
"def",
"__call__",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"packet",
"=",
"SCPPacket",
".",
"from_bytestring",
"(",
"packet",
",",
"n_args",
"=",
"expected_args",
")",
"# Create the packet to send",
"callback",
"=",
"Callback",
"(",
")",
"packets",
"=",
"[",
"scpcall",
"(",
"x",
",",
"y",
",",
"p",
",",
"cmd",
",",
"arg1",
",",
"arg2",
",",
"arg3",
",",
"data",
",",
"callback",
",",
"timeout",
")",
"]",
"# Send the burst",
"self",
".",
"send_scp_burst",
"(",
"buffer_size",
",",
"1",
",",
"packets",
")",
"# Return the received packet",
"assert",
"callback",
".",
"packet",
"is",
"not",
"None",
"return",
"callback",
".",
"packet"
] | Transmit a packet to the SpiNNaker machine and block until an
acknowledgement is received.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK; this determines how many
bytes will be expected in a socket.
x : int
y : int
p : int
cmd : int
arg1 : int
arg2 : int
arg3 : int
data : bytestring
expected_args : int
The number of arguments (0-3) that are expected in the returned
packet.
timeout : float
Additional timeout in seconds to wait for a reply on top of the
default specified upon instantiation.
Returns
-------
:py:class:`~rig.machine_control.packets.SCPPacket`
The packet that was received in acknowledgement of the transmitted
packet. | [
"Transmit",
"a",
"packet",
"to",
"the",
"SpiNNaker",
"machine",
"and",
"block",
"until",
"an",
"acknowledgement",
"is",
"received",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/scp_connection.py#L83-L137 |
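The Callback class above is a general way to get a blocking result out of a callback-style API: capture the value on a one-slot object, run the call, then read it back. The same pattern in isolation (deliver_result is a hypothetical stand-in for any API, like send_scp_burst, that invokes a callback before returning):

    class Capture(object):
        def __init__(self):
            self.value = None
        def __call__(self, value):
            self.value = value

    def deliver_result(callback):  # stand-in for the bursty interface
        callback(42)

    cb = Capture()
    deliver_result(cb)
    assert cb.value == 42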
project-rig/rig | rig/machine_control/scp_connection.py | SCPConnection.send_scp_burst | def send_scp_burst(self, buffer_size, window_size,
parameters_and_callbacks):
"""Send a burst of SCP packets and call a callback for each returned
packet.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK; this determines how many
bytes will be expected in a socket.
window_size : int
Number of packets which can be awaiting replies from the SpiNNaker
board.
parameters_and_callbacks : iterable of :py:class:`.scpcall`
Iterable of :py:class:`.scpcall` elements. These elements can
specify a callback which will be called with the returned packet.
"""
parameters_and_callbacks = iter(parameters_and_callbacks)
self.sock.setblocking(False)
# Calculate the receive length, this should be the smallest power of
# two greater than the required size
max_length = buffer_size + consts.SDP_HEADER_LENGTH
receive_length = int(2**math.ceil(math.log(max_length, 2)))
class TransmittedPacket(object):
"""A packet which has been transmitted and still awaits a response.
"""
__slots__ = ["callback", "packet", "bytestring", "n_tries",
"timeout", "timeout_time"]
def __init__(self, callback, packet, timeout):
self.callback = callback
self.packet = packet
self.bytestring = packet.bytestring
self.n_tries = 1
self.timeout = timeout
self.timeout_time = time.time() + self.timeout
queued_packets = True
outstanding_packets = {}
outstanding_callbacks = collections.deque()
# While there are packets in the queue or packets for which we are
# still awaiting returns then continue to loop.
while queued_packets or outstanding_packets or outstanding_callbacks:
# If there are fewer outstanding packets than the window can take
# and we still might have packets left to send then transmit a
# packet and add it to the list of outstanding packets.
while len(outstanding_packets) < window_size and queued_packets:
try:
args = next(parameters_and_callbacks)
except StopIteration:
queued_packets = False
if queued_packets:
# If we extracted a new packet to send then create a new
# outstanding packet and transmit it.
seq = next(self.seq)
while seq in outstanding_packets:
# The seq should rarely be already taken, it normally
# means that one packet is taking such a long time to
# send that the sequence has wrapped around. It's not
# a problem provided that we don't reuse the number.
seq = next(self.seq)
# Construct the packet that we'll be sending
packet = SCPPacket(
reply_expected=True, tag=0xff, dest_port=0,
dest_cpu=args.p, src_port=7, src_cpu=31,
dest_x=args.x, dest_y=args.y, src_x=0, src_y=0,
cmd_rc=args.cmd, seq=seq,
arg1=args.arg1, arg2=args.arg2, arg3=args.arg3,
data=args.data
)
# Create a reference to this packet so that we know we're
# expecting a response for it and can retransmit it if
# necessary.
outstanding_packets[seq] = TransmittedPacket(
args.callback, packet,
self.default_timeout + args.timeout
)
# Actually send the packet
self.sock.send(outstanding_packets[seq].bytestring)
# Call all outstanding callbacks
while outstanding_callbacks:
callback, packet = outstanding_callbacks.pop()
callback(packet)
# Listen on the socket for an acknowledgement packet, there may not
# be one.
if outstanding_packets:
timeout = min((o.timeout_time for o in
six.itervalues(outstanding_packets)
)) - time.time()
else:
timeout = 0.0
r, w, x = select.select([self.sock], [], [], max(timeout, 0.0))
# Process the received packet (if there is one).
while r:
# Note that 'r' is never changed so this while loop will either
# get skipped if r is empty or loop until 'break' if r is not.
# Since we may receive multiple packets at once, it is better
# to try and pull all out of the socket immediately rather than
# running around the parent loop again and incurring the
# 'select' cost.
try:
ack = self.sock.recv(receive_length)
except IOError:
break
# Extract the sequence number from the bytestring, if possible
rc, seq = struct.unpack_from("<2H", ack,
consts.SDP_HEADER_LENGTH + 2)
# If the code is an error then we respond immediately
if rc != consts.SCPReturnCodes.ok:
if rc in consts.RETRYABLE_SCP_RETURN_CODES:
# If the error is timeout related then treat the packet
# as though it timed out, just discard. This avoids us
# hammering the board when it's most vulnerable.
pass
else:
# For all other errors, we'll just fall over
# immediately.
packet = outstanding_packets.get(seq)
if packet is not None:
packet = packet.packet
raise FatalReturnCodeError(rc, packet)
else:
# Look up the sequence index of packet in the list of
# outstanding packets. We may have already processed a
# response for this packet (indicating that the response
# was delayed and we retransmitted the initial message) in
# which case we can silently ignore the returned packet.
# XXX: There is a danger that a response was so delayed
# that we already reused the seq number... this is probably
# sufficiently unlikely that there is no problem.
outstanding = outstanding_packets.pop(seq, None)
if outstanding is not None:
outstanding_callbacks.appendleft((outstanding.callback,
ack))
# Look through all the remaining outstanding packets, if any of
# them have timed out then we retransmit them.
current_time = time.time()
for outstanding in six.itervalues(outstanding_packets):
if outstanding.timeout_time < current_time:
# This packet has timed out, if we have sent it more than
# the given number of times then raise a timeout error for
# it.
if outstanding.n_tries >= self.n_tries:
raise TimeoutError(
"No response after {} attempts.".format(
self.n_tries),
outstanding.packet)
# Otherwise we retransmit it
self.sock.send(outstanding.bytestring)
outstanding.n_tries += 1
outstanding.timeout_time = (current_time +
outstanding.timeout) | python | def send_scp_burst(self, buffer_size, window_size,
parameters_and_callbacks):
"""Send a burst of SCP packets and call a callback for each returned
packet.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK; this determines how many
bytes will be expected in a socket.
window_size : int
Number of packets which can be awaiting replies from the SpiNNaker
board.
parameters_and_callbacks : iterable of :py:class:`.scpcall`
Iterable of :py:class:`.scpcall` elements. These elements can
specify a callback which will be called with the returned packet.
"""
parameters_and_callbacks = iter(parameters_and_callbacks)
self.sock.setblocking(False)
# Calculate the receive length, this should be the smallest power of
# two greater than the required size
max_length = buffer_size + consts.SDP_HEADER_LENGTH
receive_length = int(2**math.ceil(math.log(max_length, 2)))
class TransmittedPacket(object):
"""A packet which has been transmitted and still awaits a response.
"""
__slots__ = ["callback", "packet", "bytestring", "n_tries",
"timeout", "timeout_time"]
def __init__(self, callback, packet, timeout):
self.callback = callback
self.packet = packet
self.bytestring = packet.bytestring
self.n_tries = 1
self.timeout = timeout
self.timeout_time = time.time() + self.timeout
queued_packets = True
outstanding_packets = {}
outstanding_callbacks = collections.deque()
# While there are packets in the queue or packets for which we are
# still awaiting returns then continue to loop.
while queued_packets or outstanding_packets or outstanding_callbacks:
# If there are fewer outstanding packets than the window can take
# and we still might have packets left to send then transmit a
# packet and add it to the list of outstanding packets.
while len(outstanding_packets) < window_size and queued_packets:
try:
args = next(parameters_and_callbacks)
except StopIteration:
queued_packets = False
if queued_packets:
# If we extracted a new packet to send then create a new
# outstanding packet and transmit it.
seq = next(self.seq)
while seq in outstanding_packets:
# The seq should rarely be already taken, it normally
# means that one packet is taking such a long time to
# send that the sequence has wrapped around. It's not
# a problem provided that we don't reuse the number.
seq = next(self.seq)
# Construct the packet that we'll be sending
packet = SCPPacket(
reply_expected=True, tag=0xff, dest_port=0,
dest_cpu=args.p, src_port=7, src_cpu=31,
dest_x=args.x, dest_y=args.y, src_x=0, src_y=0,
cmd_rc=args.cmd, seq=seq,
arg1=args.arg1, arg2=args.arg2, arg3=args.arg3,
data=args.data
)
# Create a reference to this packet so that we know we're
# expecting a response for it and can retransmit it if
# necessary.
outstanding_packets[seq] = TransmittedPacket(
args.callback, packet,
self.default_timeout + args.timeout
)
# Actually send the packet
self.sock.send(outstanding_packets[seq].bytestring)
# Call all outstanding callbacks
while outstanding_callbacks:
callback, packet = outstanding_callbacks.pop()
callback(packet)
# Listen on the socket for an acknowledgement packet, there may not
# be one.
if outstanding_packets:
timeout = min((o.timeout_time for o in
six.itervalues(outstanding_packets)
)) - time.time()
else:
timeout = 0.0
r, w, x = select.select([self.sock], [], [], max(timeout, 0.0))
# Process the received packet (if there is one).
while r:
# Note that 'r' is never changed so this while loop will either
# get skipped if r is empty or loop until 'break' if r is not.
# Since we may receive multiple packets at once, it is better
# to try and pull all out of the socket immediately rather than
# running around the parent loop again and incuring the
# 'select' cost.
try:
ack = self.sock.recv(receive_length)
except IOError:
break
# Extract the sequence number from the bytestring, if possible
rc, seq = struct.unpack_from("<2H", ack,
consts.SDP_HEADER_LENGTH + 2)
# If the code is an error then we respond immediately
if rc != consts.SCPReturnCodes.ok:
if rc in consts.RETRYABLE_SCP_RETURN_CODES:
# If the error is timeout related then treat the packet
# as though it timed out, just discard. This avoids us
# hammering the board when it's most vulnerable.
pass
else:
# For all other errors, we'll just fall over
# immediately.
packet = outstanding_packets.get(seq)
if packet is not None:
packet = packet.packet
raise FatalReturnCodeError(rc, packet)
else:
# Look up the sequence index of packet in the list of
# outstanding packets. We may have already processed a
# response for this packet (indicating that the response
# was delayed and we retransmitted the initial message) in
# which case we can silently ignore the returned packet.
# XXX: There is a danger that a response was so delayed
# that we already reused the seq number... this is probably
# sufficiently unlikely that there is no problem.
outstanding = outstanding_packets.pop(seq, None)
if outstanding is not None:
outstanding_callbacks.appendleft((outstanding.callback,
ack))
# Look through all the remaining outstanding packets, if any of
# them have timed out then we retransmit them.
current_time = time.time()
for outstanding in six.itervalues(outstanding_packets):
if outstanding.timeout_time < current_time:
# This packet has timed out, if we have sent it more than
# the given number of times then raise a timeout error for
# it.
if outstanding.n_tries >= self.n_tries:
raise TimeoutError(
"No response after {} attempts.".format(
self.n_tries),
outstanding.packet)
# Otherwise we retransmit it
self.sock.send(outstanding.bytestring)
outstanding.n_tries += 1
outstanding.timeout_time = (current_time +
outstanding.timeout) | [
"def",
"send_scp_burst",
"(",
"self",
",",
"buffer_size",
",",
"window_size",
",",
"parameters_and_callbacks",
")",
":",
"parameters_and_callbacks",
"=",
"iter",
"(",
"parameters_and_callbacks",
")",
"self",
".",
"sock",
".",
"setblocking",
"(",
"False",
")",
"# Calculate the receive length, this should be the smallest power of",
"# two greater than the required size",
"max_length",
"=",
"buffer_size",
"+",
"consts",
".",
"SDP_HEADER_LENGTH",
"receive_length",
"=",
"int",
"(",
"2",
"**",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"max_length",
",",
"2",
")",
")",
")",
"class",
"TransmittedPacket",
"(",
"object",
")",
":",
"\"\"\"A packet which has been transmitted and still awaits a response.\n \"\"\"",
"__slots__",
"=",
"[",
"\"callback\"",
",",
"\"packet\"",
",",
"\"bytestring\"",
",",
"\"n_tries\"",
",",
"\"timeout\"",
",",
"\"timeout_time\"",
"]",
"def",
"__init__",
"(",
"self",
",",
"callback",
",",
"packet",
",",
"timeout",
")",
":",
"self",
".",
"callback",
"=",
"callback",
"self",
".",
"packet",
"=",
"packet",
"self",
".",
"bytestring",
"=",
"packet",
".",
"bytestring",
"self",
".",
"n_tries",
"=",
"1",
"self",
".",
"timeout",
"=",
"timeout",
"self",
".",
"timeout_time",
"=",
"time",
".",
"time",
"(",
")",
"+",
"self",
".",
"timeout",
"queued_packets",
"=",
"True",
"outstanding_packets",
"=",
"{",
"}",
"outstanding_callbacks",
"=",
"collections",
".",
"deque",
"(",
")",
"# While there are packets in the queue or packets for which we are",
"# still awaiting returns then continue to loop.",
"while",
"queued_packets",
"or",
"outstanding_packets",
"or",
"outstanding_callbacks",
":",
"# If there are fewer outstanding packets than the window can take",
"# and we still might have packets left to send then transmit a",
"# packet and add it to the list of outstanding packets.",
"while",
"len",
"(",
"outstanding_packets",
")",
"<",
"window_size",
"and",
"queued_packets",
":",
"try",
":",
"args",
"=",
"next",
"(",
"parameters_and_callbacks",
")",
"except",
"StopIteration",
":",
"queued_packets",
"=",
"False",
"if",
"queued_packets",
":",
"# If we extracted a new packet to send then create a new",
"# outstanding packet and transmit it.",
"seq",
"=",
"next",
"(",
"self",
".",
"seq",
")",
"while",
"seq",
"in",
"outstanding_packets",
":",
"# The seq should rarely be already taken, it normally",
"# means that one packet is taking such a long time to",
"# send that the sequence has wrapped around. It's not",
"# a problem provided that we don't reuse the number.",
"seq",
"=",
"next",
"(",
"self",
".",
"seq",
")",
"# Construct the packet that we'll be sending",
"packet",
"=",
"SCPPacket",
"(",
"reply_expected",
"=",
"True",
",",
"tag",
"=",
"0xff",
",",
"dest_port",
"=",
"0",
",",
"dest_cpu",
"=",
"args",
".",
"p",
",",
"src_port",
"=",
"7",
",",
"src_cpu",
"=",
"31",
",",
"dest_x",
"=",
"args",
".",
"x",
",",
"dest_y",
"=",
"args",
".",
"y",
",",
"src_x",
"=",
"0",
",",
"src_y",
"=",
"0",
",",
"cmd_rc",
"=",
"args",
".",
"cmd",
",",
"seq",
"=",
"seq",
",",
"arg1",
"=",
"args",
".",
"arg1",
",",
"arg2",
"=",
"args",
".",
"arg2",
",",
"arg3",
"=",
"args",
".",
"arg3",
",",
"data",
"=",
"args",
".",
"data",
")",
"# Create a reference to this packet so that we know we're",
"# expecting a response for it and can retransmit it if",
"# necessary.",
"outstanding_packets",
"[",
"seq",
"]",
"=",
"TransmittedPacket",
"(",
"args",
".",
"callback",
",",
"packet",
",",
"self",
".",
"default_timeout",
"+",
"args",
".",
"timeout",
")",
"# Actually send the packet",
"self",
".",
"sock",
".",
"send",
"(",
"outstanding_packets",
"[",
"seq",
"]",
".",
"bytestring",
")",
"# Call all outstanding callbacks",
"while",
"outstanding_callbacks",
":",
"callback",
",",
"packet",
"=",
"outstanding_callbacks",
".",
"pop",
"(",
")",
"callback",
"(",
"packet",
")",
"# Listen on the socket for an acknowledgement packet, there may not",
"# be one.",
"if",
"outstanding_packets",
":",
"timeout",
"=",
"min",
"(",
"(",
"o",
".",
"timeout_time",
"for",
"o",
"in",
"six",
".",
"itervalues",
"(",
"outstanding_packets",
")",
")",
")",
"-",
"time",
".",
"time",
"(",
")",
"else",
":",
"timeout",
"=",
"0.0",
"r",
",",
"w",
",",
"x",
"=",
"select",
".",
"select",
"(",
"[",
"self",
".",
"sock",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"max",
"(",
"timeout",
",",
"0.0",
")",
")",
"# Process the received packet (if there is one).",
"while",
"r",
":",
"# Note that 'r' is never changed so this while loop will either",
"# get skipped if r is empty or loop until 'break' of r is not.",
"# Since we may receive multiple packets at once, it is better",
"# to try and pull all out of the socket immediately rather than",
"# running around the parent loop again and incuring the",
"# 'select' cost.",
"try",
":",
"ack",
"=",
"self",
".",
"sock",
".",
"recv",
"(",
"receive_length",
")",
"except",
"IOError",
":",
"break",
"# Extract the sequence number from the bytestring, iff possible",
"rc",
",",
"seq",
"=",
"struct",
".",
"unpack_from",
"(",
"\"<2H\"",
",",
"ack",
",",
"consts",
".",
"SDP_HEADER_LENGTH",
"+",
"2",
")",
"# If the code is an error then we respond immediately",
"if",
"rc",
"!=",
"consts",
".",
"SCPReturnCodes",
".",
"ok",
":",
"if",
"rc",
"in",
"consts",
".",
"RETRYABLE_SCP_RETURN_CODES",
":",
"# If the error is timeout related then treat the packet",
"# as though it timed out, just discard. This avoids us",
"# hammering the board when it's most vulnerable.",
"pass",
"else",
":",
"# For all other errors, we'll just fall over",
"# immediately.",
"packet",
"=",
"outstanding_packets",
".",
"get",
"(",
"seq",
")",
"if",
"packet",
"is",
"not",
"None",
":",
"packet",
"=",
"packet",
".",
"packet",
"raise",
"FatalReturnCodeError",
"(",
"rc",
",",
"packet",
")",
"else",
":",
"# Look up the sequence index of packet in the list of",
"# outstanding packets. We may have already processed a",
"# response for this packet (indicating that the response",
"# was delayed and we retransmitted the initial message) in",
"# which case we can silently ignore the returned packet.",
"# XXX: There is a danger that a response was so delayed",
"# that we already reused the seq number... this is probably",
"# sufficiently unlikely that there is no problem.",
"outstanding",
"=",
"outstanding_packets",
".",
"pop",
"(",
"seq",
",",
"None",
")",
"if",
"outstanding",
"is",
"not",
"None",
":",
"outstanding_callbacks",
".",
"appendleft",
"(",
"(",
"outstanding",
".",
"callback",
",",
"ack",
")",
")",
"# Look through all the remaining outstanding packets, if any of",
"# them have timed out then we retransmit them.",
"current_time",
"=",
"time",
".",
"time",
"(",
")",
"for",
"outstanding",
"in",
"six",
".",
"itervalues",
"(",
"outstanding_packets",
")",
":",
"if",
"outstanding",
".",
"timeout_time",
"<",
"current_time",
":",
"# This packet has timed out, if we have sent it more than",
"# the given number of times then raise a timeout error for",
"# it.",
"if",
"outstanding",
".",
"n_tries",
">=",
"self",
".",
"n_tries",
":",
"raise",
"TimeoutError",
"(",
"\"No response after {} attempts.\"",
".",
"format",
"(",
"self",
".",
"n_tries",
")",
",",
"outstanding",
".",
"packet",
")",
"# Otherwise we retransmit it",
"self",
".",
"sock",
".",
"send",
"(",
"outstanding",
".",
"bytestring",
")",
"outstanding",
".",
"n_tries",
"+=",
"1",
"outstanding",
".",
"timeout_time",
"=",
"(",
"current_time",
"+",
"outstanding",
".",
"timeout",
")"
] | Send a burst of SCP packets and call a callback for each returned
packet.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket.
window_size : int
Number of packets which can be awaiting replies from the SpiNNaker
board.
parameters_and_callbacks: iterable of :py:class:`.scpcall`
Iterable of :py:class:`.scpcall` elements. These elements can
specify a callback which will be called with the returned packet. | [
"Send",
"a",
"burst",
"of",
"SCP",
"packets",
"and",
"call",
"a",
"callback",
"for",
"each",
"returned",
"packet",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/scp_connection.py#L139-L304 |
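Usage sketch (not from the source repository): drives send_scp_burst with a few no-payload commands. It assumes `conn` is an already-connected SCPConnection, and the scpcall namedtuple below is a hypothetical reconstruction whose fields (x, y, p, cmd, arg1-arg3, data, timeout, callback) mirror the attributes read from `args` in the code above; the field order and the cmd value are assumptions.

import collections

# Hypothetical stand-in for the real scpcall record used by the burst API.
scpcall = collections.namedtuple(
    "scpcall", "x y p cmd arg1 arg2 arg3 data timeout callback")

def print_reply(raw_ack):
    # Each callback receives the raw acknowledgement bytestring.
    print("reply of", len(raw_ack), "bytes")

calls = [
    scpcall(x=0, y=0, p=0, cmd=2,  # cmd value is illustrative only
            arg1=0, arg2=0, arg3=0, data=b"",
            timeout=0.0, callback=print_reply)
    for _ in range(4)
]
# Allow up to 8 packets in flight; SARK buffers 256 bytes per packet.
conn.send_scp_burst(buffer_size=256, window_size=8,
                    parameters_and_callbacks=calls)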
project-rig/rig | rig/machine_control/scp_connection.py | SCPConnection.read | def read(self, buffer_size, window_size, x, y, p, address, length_bytes):
"""Read a bytestring from an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes of data will
be read back in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start reading the data.
length_bytes : int
The number of bytes to read from memory. Large reads are
transparently broken into multiple SCP read commands.
Returns
-------
:py:class:`bytes`
The data is read back from memory as a bytestring.
"""
# Prepare the buffer to receive the incoming data
data = bytearray(length_bytes)
mem = memoryview(data)
# Create a callback which will write the data from a packet into a
# memoryview.
def callback(mem, data):
mem[:] = data[6 + consts.SDP_HEADER_LENGTH:]
# Create a generator that will generate request packets and store data
# until all data has been returned
def packets(length_bytes, data):
offset = 0
while length_bytes > 0:
# Get the next block of data
block_size = min((length_bytes, buffer_size))
read_address = address + offset
dtype = consts.address_length_dtype[(read_address % 4,
block_size % 4)]
# Create the call spec and yield
yield scpcall(
x, y, p, consts.SCPCommands.read, read_address,
block_size, dtype,
callback=functools.partial(callback,
mem[offset:offset + block_size])
)
# Update the number of bytes remaining and the offset
offset += block_size
length_bytes -= block_size
# Run the event loop and then return the retrieved data
self.send_scp_burst(buffer_size, window_size,
list(packets(length_bytes, data)))
return bytes(data) | python | def read(self, buffer_size, window_size, x, y, p, address, length_bytes):
"""Read a bytestring from an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes of data will
be read back in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start reading the data.
length_bytes : int
The number of bytes to read from memory. Large reads are
transparently broken into multiple SCP read commands.
Returns
-------
:py:class:`bytes`
The data is read back from memory as a bytestring.
"""
# Prepare the buffer to receive the incoming data
data = bytearray(length_bytes)
mem = memoryview(data)
# Create a callback which will write the data from a packet into a
# memoryview.
def callback(mem, data):
mem[:] = data[6 + consts.SDP_HEADER_LENGTH:]
# Create a generator that will generate request packets and store data
# until all data has been returned
def packets(length_bytes, data):
offset = 0
while length_bytes > 0:
# Get the next block of data
block_size = min((length_bytes, buffer_size))
read_address = address + offset
dtype = consts.address_length_dtype[(read_address % 4,
block_size % 4)]
# Create the call spec and yield
yield scpcall(
x, y, p, consts.SCPCommands.read, read_address,
block_size, dtype,
callback=functools.partial(callback,
mem[offset:offset + block_size])
)
# Update the number of bytes remaining and the offset
offset += block_size
length_bytes -= block_size
# Run the event loop and then return the retrieved data
self.send_scp_burst(buffer_size, window_size,
list(packets(length_bytes, data)))
return bytes(data) | [
"def",
"read",
"(",
"self",
",",
"buffer_size",
",",
"window_size",
",",
"x",
",",
"y",
",",
"p",
",",
"address",
",",
"length_bytes",
")",
":",
"# Prepare the buffer to receive the incoming data",
"data",
"=",
"bytearray",
"(",
"length_bytes",
")",
"mem",
"=",
"memoryview",
"(",
"data",
")",
"# Create a callback which will write the data from a packet into a",
"# memoryview.",
"def",
"callback",
"(",
"mem",
",",
"data",
")",
":",
"mem",
"[",
":",
"]",
"=",
"data",
"[",
"6",
"+",
"consts",
".",
"SDP_HEADER_LENGTH",
":",
"]",
"# Create a generator that will generate request packets and store data",
"# until all data has been returned",
"def",
"packets",
"(",
"length_bytes",
",",
"data",
")",
":",
"offset",
"=",
"0",
"while",
"length_bytes",
">",
"0",
":",
"# Get the next block of data",
"block_size",
"=",
"min",
"(",
"(",
"length_bytes",
",",
"buffer_size",
")",
")",
"read_address",
"=",
"address",
"+",
"offset",
"dtype",
"=",
"consts",
".",
"address_length_dtype",
"[",
"(",
"read_address",
"%",
"4",
",",
"block_size",
"%",
"4",
")",
"]",
"# Create the call spec and yield",
"yield",
"scpcall",
"(",
"x",
",",
"y",
",",
"p",
",",
"consts",
".",
"SCPCommands",
".",
"read",
",",
"read_address",
",",
"block_size",
",",
"dtype",
",",
"callback",
"=",
"functools",
".",
"partial",
"(",
"callback",
",",
"mem",
"[",
"offset",
":",
"offset",
"+",
"block_size",
"]",
")",
")",
"# Update the number of bytes remaining and the offset",
"offset",
"+=",
"block_size",
"length_bytes",
"-=",
"block_size",
"# Run the event loop and then return the retrieved data",
"self",
".",
"send_scp_burst",
"(",
"buffer_size",
",",
"window_size",
",",
"list",
"(",
"packets",
"(",
"length_bytes",
",",
"data",
")",
")",
")",
"return",
"bytes",
"(",
"data",
")"
] | Read a bytestring from an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes of data will
be read back in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start reading the data.
length_bytes : int
The number of bytes to read from memory. Large reads are
transparently broken into multiple SCP read commands.
Returns
-------
:py:class:`bytes`
The data is read back from memory as a bytestring. | [
"Read",
"a",
"bytestring",
"from",
"an",
"address",
"in",
"memory",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/scp_connection.py#L306-L370 |
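A minimal read sketch, assuming `conn` is an open SCPConnection; the coordinates, address, and sizes are illustrative values, not recommendations from the source.

data = conn.read(
    buffer_size=256,         # bytes SARK will return per SCP read
    window_size=8,           # packets allowed in flight at once
    x=0, y=0, p=0,           # chip (0, 0), processor 0
    address=0x60000000,      # hypothetical SDRAM address
    length_bytes=64 * 1024,  # transparently split into 256-byte reads
)
assert len(data) == 64 * 1024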
project-rig/rig | rig/machine_control/scp_connection.py | SCPConnection.write | def write(self, buffer_size, window_size, x, y, p, address, data):
"""Write a bytestring to an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes will be
written in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands.
"""
# While there is still data perform a write: get the block to write
# this time around, determine the data type, perform the write and
# increment the address
def packets(address, data):
end = len(data)
pos = 0
while pos < end:
block = data[pos:pos + buffer_size]
block_size = len(block)
dtype = consts.address_length_dtype[(address % 4,
block_size % 4)]
yield scpcall(x, y, p, consts.SCPCommands.write, address,
block_size, dtype, block)
address += block_size
pos += block_size
# Run the event loop and then return the retrieved data
self.send_scp_burst(buffer_size, window_size,
list(packets(address, data))) | python | def write(self, buffer_size, window_size, x, y, p, address, data):
"""Write a bytestring to an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes will be
written in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands.
"""
# While there is still data perform a write: get the block to write
# this time around, determine the data type, perform the write and
# increment the address
def packets(address, data):
end = len(data)
pos = 0
while pos < end:
block = data[pos:pos + buffer_size]
block_size = len(block)
dtype = consts.address_length_dtype[(address % 4,
block_size % 4)]
yield scpcall(x, y, p, consts.SCPCommands.write, address,
block_size, dtype, block)
address += block_size
pos += block_size
# Run the event loop and then return the retrieved data
self.send_scp_burst(buffer_size, window_size,
list(packets(address, data))) | [
"def",
"write",
"(",
"self",
",",
"buffer_size",
",",
"window_size",
",",
"x",
",",
"y",
",",
"p",
",",
"address",
",",
"data",
")",
":",
"# While there is still data perform a write: get the block to write",
"# this time around, determine the data type, perform the write and",
"# increment the address",
"def",
"packets",
"(",
"address",
",",
"data",
")",
":",
"end",
"=",
"len",
"(",
"data",
")",
"pos",
"=",
"0",
"while",
"pos",
"<",
"end",
":",
"block",
"=",
"data",
"[",
"pos",
":",
"pos",
"+",
"buffer_size",
"]",
"block_size",
"=",
"len",
"(",
"block",
")",
"dtype",
"=",
"consts",
".",
"address_length_dtype",
"[",
"(",
"address",
"%",
"4",
",",
"block_size",
"%",
"4",
")",
"]",
"yield",
"scpcall",
"(",
"x",
",",
"y",
",",
"p",
",",
"consts",
".",
"SCPCommands",
".",
"write",
",",
"address",
",",
"block_size",
",",
"dtype",
",",
"block",
")",
"address",
"+=",
"block_size",
"pos",
"+=",
"block_size",
"# Run the event loop and then return the retrieved data",
"self",
".",
"send_scp_burst",
"(",
"buffer_size",
",",
"window_size",
",",
"list",
"(",
"packets",
"(",
"address",
",",
"data",
")",
")",
")"
] | Write a bytestring to an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes will be
written in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands. | [
"Write",
"a",
"bytestring",
"to",
"an",
"address",
"in",
"memory",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/scp_connection.py#L372-L419 |
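A matching write/read round-trip sketch under the same assumptions (`conn` is an open SCPConnection; the address is hypothetical).

payload = bytes(range(256)) * 16  # 4 KiB test pattern
conn.write(buffer_size=256, window_size=8,
           x=0, y=0, p=0, address=0x60000000, data=payload)

# Read the block back to confirm the round trip.
echoed = conn.read(buffer_size=256, window_size=8,
                   x=0, y=0, p=0, address=0x60000000,
                   length_bytes=len(payload))
assert echoed == payload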
belbio/bel | bel/nanopub/validate.py | convert_msg_to_html | def convert_msg_to_html(msg):
"""Convert \n into a <BR> for an HTML formatted message"""
msg = re.sub("\n", "<br />", msg, flags=re.MULTILINE)
return msg | python | def convert_msg_to_html(msg):
"""Convert \n into a <BR> for an HTML formatted message"""
msg = re.sub("\n", "<br />", msg, flags=re.MULTILINE)
return msg | [
"def",
"convert_msg_to_html",
"(",
"msg",
")",
":",
"msg",
"=",
"re",
".",
"sub",
"(",
"\"\\n\"",
",",
"\"<br />\"",
",",
"msg",
",",
"flags",
"=",
"re",
".",
"MULTILINE",
")",
"return",
"msg"
] | Convert \n into a <br /> for an HTML formatted message | [
"Convert",
"\\",
"n",
"into",
"a",
"<BR",
">",
"for",
"an",
"HTML",
"formatted",
"message"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/validate.py#L15-L19 |
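Behaviour sketch. Note that re.MULTILINE only changes how '^' and '$' match, so it has no effect on the literal "\n" pattern used here; the substitution itself is the whole behaviour.

>>> convert_msg_to_html("Error on line 1\nExpected a namespace")
'Error on line 1<br />Expected a namespace'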
belbio/bel | bel/nanopub/validate.py | validate | def validate(nanopub: dict, error_level: str = "WARNING") -> list:
"""Validate Nanopub
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
The validation result is a list of objects containing
{
'level': 'Warning|Error',
'section': 'Assertion|Annotation|Structure',
'label': '{Error|Warning}-{Assertion|Annotation|Structure}', # to be used for faceting in Elasticsearch
'index': idx, # Index of Assertion or Annotation in Nanopub - starts at 0
'msg': msg, # Error or Warning message
}
Args:
nanopub: nanopub record starting with nanopub...
error_level: return WARNING or just ERROR? defaults to warnings and errors
Returns:
list(dict): [{'level': 'Warning', 'section': 'Assertion', 'label': 'Warning-Assertion', 'index': 0, 'msg': <msg>}]
"""
# Validation results
v = []
bel_version = config["bel"]["lang"]["default_bel_version"]
# Structural checks
try:
if not isinstance(nanopub["nanopub"]["assertions"], list):
msg = "Assertions must be a list/array"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
except Exception as e:
msg = 'Missing nanopub["nanopub"]["assertions"]'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
try:
if (
"name" in nanopub["nanopub"]["type"]
and "version" in nanopub["nanopub"]["type"]
):
pass
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
except Exception as e:
msg = 'Missing or badly formed type - must have nanopub["nanopub"]["type"] = {"name": <name>, "version": <version>}'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
try:
for key in ["uri", "database", "reference"]:
if key in nanopub["nanopub"]["citation"]:
break
else:
msg = 'nanopub["nanopub"]["citation"] must have either a uri, database or reference key.'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
except Exception as e:
msg = 'nanopub["nanopub"] must have a "citation" key with either a uri, database or reference key.'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
# Assertion checks
if "assertions" in nanopub["nanopub"]:
for idx, assertion in enumerate(nanopub["nanopub"]["assertions"]):
bo = bel.lang.belobj.BEL(
bel_version, config["bel_api"]["servers"]["api_url"]
)
belstr = f'{assertion.get("subject")} {assertion.get("relation", "")} {assertion.get("object", "")}'
belstr = belstr.replace("None", "")
try:
messages = (
bo.parse(belstr)
.semantic_validation(error_level=error_level)
.validation_messages
)
for message in messages:
(level, msg) = message
if error_level == "ERROR":
if level == "ERROR":
v.append(
{
"level": f"{level.title()}",
"section": "Assertion",
"label": f"{level.title()}-Assertion",
"index": idx,
"msg": msg,
"msg_html": convert_msg_to_html(msg),
}
)
else:
v.append(
{
"level": f"{level.title()}",
"section": "Assertion",
"label": f"{level.title()}-Assertion",
"index": idx,
"msg": msg,
"msg_html": convert_msg_to_html(msg),
}
)
except Exception as e:
msg = f"Could not parse: {belstr}"
v.append(
{
"level": "Error",
"section": "Assertion",
"label": "Error-Assertion",
"index": idx,
"msg": msg,
"msg_html": msg,
}
)
log.exception(f"Could not parse: {belstr}")
# Annotation checks
if error_level == "WARNING":
for idx, annotation in enumerate(nanopub["nanopub"].get("annotations", [])):
term_type = annotation["type"]
term_id = annotation["id"]
# term_label = annotation['label']
log.info(f"Annotation: {term_type} ID: {term_id}")
search_body = {
"_source": ["src_id", "id", "name", "label", "annotation_types"],
"query": {"term": {"id": term_id}},
}
results = es.search(index="terms", doc_type="term", body=search_body)
if len(results["hits"]["hits"]) > 0:
result = results["hits"]["hits"][0]["_source"]
if term_type not in result["annotation_types"]:
msg = f'Annotation type: {term_type} for {term_id} does not match annotation types in database: {result["annotation_types"]}'
v.append(
{
"level": "Warning",
"section": "Annotation",
"index": idx,
"label": "Warning-Annotation",
"msg": msg,
"msg_html": msg,
}
)
else:
msg = f"Annotation term: {term_id} not found in database"
v.append(
{
"level": "Warning",
"section": "Annotation",
"index": idx,
"label": "Warning-Annotation",
"msg": msg,
"msg_html": msg,
}
)
return v | python | def validate(nanopub: dict, error_level: str = "WARNING") -> list:
"""Validate Nanopub
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
The validation result is a list of objects containing
{
'level': 'Warning|Error',
'section': 'Assertion|Annotation|Structure',
'label': '{Error|Warning}-{Assertion|Annotation|Structure}', # to be used for faceting in Elasticsearch
'index': idx, # Index of Assertion or Annotation in Nanopub - starts at 0
'msg': msg, # Error or Warning message
}
Args:
nanopub: nanopub record starting with nanopub...
error_level: return WARNING or just ERROR? defaults to warnings and errors
Returns:
list(dict): [{'level': 'Warning', 'section': 'Assertion', 'label': 'Warning-Assertion', 'index': 0, 'msg': <msg>}]
"""
# Validation results
v = []
bel_version = config["bel"]["lang"]["default_bel_version"]
# Structural checks
try:
if not isinstance(nanopub["nanopub"]["assertions"], list):
msg = "Assertions must be a list/array"
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
except Exception as e:
msg = 'Missing nanopub["nanopub"]["assertions"]'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
try:
if (
"name" in nanopub["nanopub"]["type"]
and "version" in nanopub["nanopub"]["type"]
):
pass
if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
bel_version = nanopub["nanopub"]["type"]["version"]
except Exception as e:
msg = 'Missing or badly formed type - must have nanopub["nanopub"]["type"] = {"name": <name>, "version": <version>}'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
try:
for key in ["uri", "database", "reference"]:
if key in nanopub["nanopub"]["citation"]:
break
else:
msg = 'nanopub["nanopub"]["citation"] must have either a uri, database or reference key.'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
except Exception as e:
msg = 'nanopub["nanopub"] must have a "citation" key with either a uri, database or reference key.'
v.append(
{
"level": "Error",
"section": "Structure",
"label": "Error-Structure",
"msg": msg,
"msg_html": msg,
}
)
# Assertion checks
if "assertions" in nanopub["nanopub"]:
for idx, assertion in enumerate(nanopub["nanopub"]["assertions"]):
bo = bel.lang.belobj.BEL(
bel_version, config["bel_api"]["servers"]["api_url"]
)
belstr = f'{assertion.get("subject")} {assertion.get("relation", "")} {assertion.get("object", "")}'
belstr = belstr.replace("None", "")
try:
messages = (
bo.parse(belstr)
.semantic_validation(error_level=error_level)
.validation_messages
)
for message in messages:
(level, msg) = message
if error_level == "ERROR":
if level == "ERROR":
v.append(
{
"level": f"{level.title()}",
"section": "Assertion",
"label": f"{level.title()}-Assertion",
"index": idx,
"msg": msg,
"msg_html": convert_msg_to_html(msg),
}
)
else:
v.append(
{
"level": f"{level.title()}",
"section": "Assertion",
"label": f"{level.title()}-Assertion",
"index": idx,
"msg": msg,
"msg_html": convert_msg_to_html(msg),
}
)
except Exception as e:
msg = f"Could not parse: {belstr}"
v.append(
{
"level": "Error",
"section": "Assertion",
"label": "Error-Assertion",
"index": idx,
"msg": msg,
"msg_html": msg,
}
)
log.exception(f"Could not parse: {belstr}")
# Annotation checks
if error_level == "WARNING":
for idx, annotation in enumerate(nanopub["nanopub"].get("annotations", [])):
term_type = annotation["type"]
term_id = annotation["id"]
# term_label = annotation['label']
log.info(f"Annotation: {term_type} ID: {term_id}")
search_body = {
"_source": ["src_id", "id", "name", "label", "annotation_types"],
"query": {"term": {"id": term_id}},
}
results = es.search(index="terms", doc_type="term", body=search_body)
if len(results["hits"]["hits"]) > 0:
result = results["hits"]["hits"][0]["_source"]
if term_type not in result["annotation_types"]:
msg = f'Annotation type: {term_type} for {term_id} does not match annotation types in database: {result["annotation_types"]}'
v.append(
{
"level": "Warning",
"section": "Annotation",
"index": idx,
"label": "Warning-Annotation",
"msg": msg,
"msg_html": msg,
}
)
else:
msg = f"Annotation term: {term_id} not found in database"
v.append(
{
"level": "Warning",
"section": "Annotation",
"index": idx,
"label": "Warning-Annotation",
"msg": msg,
"msg_html": msg,
}
)
return v | [
"def",
"validate",
"(",
"nanopub",
":",
"dict",
",",
"error_level",
":",
"str",
"=",
"\"WARNING\"",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
",",
"str",
"]",
":",
"# Validation results",
"v",
"=",
"[",
"]",
"bel_version",
"=",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"default_bel_version\"",
"]",
"# Structural checks",
"try",
":",
"if",
"not",
"isinstance",
"(",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"assertions\"",
"]",
",",
"list",
")",
":",
"msg",
"=",
"\"Assertions must be a list/array\"",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Error\"",
",",
"\"section\"",
":",
"\"Structure\"",
",",
"\"label\"",
":",
"\"Error-Structure\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'Missing nanopub[\"nanopub\"][\"assertions\"]'",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Error\"",
",",
"\"section\"",
":",
"\"Structure\"",
",",
"\"label\"",
":",
"\"Error-Structure\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"try",
":",
"if",
"(",
"\"name\"",
"in",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"type\"",
"]",
"and",
"\"version\"",
"in",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"type\"",
"]",
")",
":",
"pass",
"if",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"type\"",
"]",
"[",
"\"name\"",
"]",
".",
"upper",
"(",
")",
"==",
"\"BEL\"",
":",
"bel_version",
"=",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"type\"",
"]",
"[",
"\"version\"",
"]",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'Missing or badly formed type - must have nanopub[\"nanopub\"][\"type\"] = {\"name\": <name>, \"version\": <version}'",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Error\"",
",",
"\"section\"",
":",
"\"Structure\"",
",",
"\"label\"",
":",
"\"Error-Structure\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"try",
":",
"for",
"key",
"in",
"[",
"\"uri\"",
",",
"\"database\"",
",",
"\"reference\"",
"]",
":",
"if",
"key",
"in",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"citation\"",
"]",
":",
"break",
"else",
":",
"msg",
"=",
"'nanopub[\"nanopub\"][\"citation\"] must have either a uri, database or reference key.'",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Error\"",
",",
"\"section\"",
":",
"\"Structure\"",
",",
"\"label\"",
":",
"\"Error-Structure\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'nanopub[\"nanopub\"] must have a \"citation\" key with either a uri, database or reference key.'",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Error\"",
",",
"\"section\"",
":",
"\"Structure\"",
",",
"\"label\"",
":",
"\"Error-Structure\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"# Assertion checks",
"if",
"\"assertions\"",
"in",
"nanopub",
"[",
"\"nanopub\"",
"]",
":",
"for",
"idx",
",",
"assertion",
"in",
"enumerate",
"(",
"nanopub",
"[",
"\"nanopub\"",
"]",
"[",
"\"assertions\"",
"]",
")",
":",
"bo",
"=",
"bel",
".",
"lang",
".",
"belobj",
".",
"BEL",
"(",
"bel_version",
",",
"config",
"[",
"\"bel_api\"",
"]",
"[",
"\"servers\"",
"]",
"[",
"\"api_url\"",
"]",
")",
"belstr",
"=",
"f'{assertion.get(\"subject\")} {assertion.get(\"relation\", \"\")} {assertion.get(\"object\", \"\")}'",
"belstr",
"=",
"belstr",
".",
"replace",
"(",
"\"None\"",
",",
"\"\"",
")",
"try",
":",
"messages",
"=",
"(",
"bo",
".",
"parse",
"(",
"belstr",
")",
".",
"semantic_validation",
"(",
"error_level",
"=",
"error_level",
")",
".",
"validation_messages",
")",
"for",
"message",
"in",
"messages",
":",
"(",
"level",
",",
"msg",
")",
"=",
"message",
"if",
"error_level",
"==",
"\"ERROR\"",
":",
"if",
"level",
"==",
"\"ERROR\"",
":",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"f\"{level.title()}\"",
",",
"\"section\"",
":",
"\"Assertion\"",
",",
"\"label\"",
":",
"f\"{level.title()}-Assertion\"",
",",
"\"index\"",
":",
"idx",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"convert_msg_to_html",
"(",
"msg",
")",
",",
"}",
")",
"else",
":",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"f\"{level.title()}\"",
",",
"\"section\"",
":",
"\"Assertion\"",
",",
"\"label\"",
":",
"f\"{level.title()}-Assertion\"",
",",
"\"index\"",
":",
"idx",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"convert_msg_to_html",
"(",
"msg",
")",
",",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"f\"Could not parse: {belstr}\"",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Error\"",
",",
"\"section\"",
":",
"\"Assertion\"",
",",
"\"label\"",
":",
"\"Error-Assertion\"",
",",
"\"index\"",
":",
"idx",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"log",
".",
"exception",
"(",
"f\"Could not parse: {belstr}\"",
")",
"# Annotation checks",
"if",
"error_level",
"==",
"\"WARNING\"",
":",
"for",
"idx",
",",
"annotation",
"in",
"enumerate",
"(",
"nanopub",
"[",
"\"nanopub\"",
"]",
".",
"get",
"(",
"\"annotations\"",
",",
"[",
"]",
")",
")",
":",
"term_type",
"=",
"annotation",
"[",
"\"type\"",
"]",
"term_id",
"=",
"annotation",
"[",
"\"id\"",
"]",
"# term_label = annotation['label']",
"log",
".",
"info",
"(",
"f\"Annotation: {term_type} ID: {term_id}\"",
")",
"search_body",
"=",
"{",
"\"_source\"",
":",
"[",
"\"src_id\"",
",",
"\"id\"",
",",
"\"name\"",
",",
"\"label\"",
",",
"\"annotation_types\"",
"]",
",",
"\"query\"",
":",
"{",
"\"term\"",
":",
"{",
"\"id\"",
":",
"term_id",
"}",
"}",
",",
"}",
"results",
"=",
"es",
".",
"search",
"(",
"index",
"=",
"\"terms\"",
",",
"doc_type",
"=",
"\"term\"",
",",
"body",
"=",
"search_body",
")",
"if",
"len",
"(",
"results",
"[",
"\"hits\"",
"]",
"[",
"\"hits\"",
"]",
")",
">",
"0",
":",
"result",
"=",
"results",
"[",
"\"hits\"",
"]",
"[",
"\"hits\"",
"]",
"[",
"0",
"]",
"[",
"\"_source\"",
"]",
"if",
"term_type",
"not",
"in",
"result",
"[",
"\"annotation_types\"",
"]",
":",
"msg",
"=",
"f'Annotation type: {term_type} for {term_id} does not match annotation types in database: {result[\"annotation_types\"]}'",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Warning\"",
",",
"\"section\"",
":",
"\"Annotation\"",
",",
"\"index\"",
":",
"idx",
",",
"\"label\"",
":",
"\"Warning-Annotation\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"else",
":",
"msg",
"=",
"f\"Annotation term: {term_id} not found in database\"",
"v",
".",
"append",
"(",
"{",
"\"level\"",
":",
"\"Warning\"",
",",
"\"section\"",
":",
"\"Annotation\"",
",",
"\"index\"",
":",
"idx",
",",
"\"label\"",
":",
"\"Warning-Annotation\"",
",",
"\"msg\"",
":",
"msg",
",",
"\"msg_html\"",
":",
"msg",
",",
"}",
")",
"return",
"v"
] | Validate Nanopub
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
The validation result is a list of objects containing
{
'level': 'Warning|Error',
'section': 'Assertion|Annotation|Structure',
'label': '{Error|Warning}-{Assertion|Annotation|Structure}', # to be used for faceting in Elasticsearch
'index': idx, # Index of Assertion or Annotation in Nanopub - starts at 0
'msg': msg, # Error or Warning message
}
Args:
nanopub: nanopub record starting with nanopub...
error_level: return WARNING or just ERROR? defaults to warnings and errors
Returns:
list(dict): [{'level': 'Warning', 'section': 'Assertion', 'label': 'Warning-Assertion', 'index': 0, 'msg': <msg>}] | [
"Validate",
"Nanopub"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/validate.py#L22-L219 |
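A minimal validation sketch with a hypothetical nanopub; the statement, citation, and annotation values are illustrative, and the call assumes the configured BEL API and Elasticsearch term store are reachable.

nanopub = {
    "nanopub": {
        "type": {"name": "BEL", "version": "2.0.0"},
        "citation": {"reference": "PMID:12345678"},  # hypothetical
        "assertions": [
            {"subject": "p(HGNC:AKT1)",
             "relation": "increases",
             "object": "act(p(HGNC:AKT1))"},
        ],
        "annotations": [
            {"type": "Species", "id": "TAX:9606", "label": "human"},
        ],
    }
}

for issue in validate(nanopub, error_level="WARNING"):
    print(issue["label"], "-", issue["msg"])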
belbio/bel | bel/lang/bel_specification.py | get_specification | def get_specification(version: str) -> Mapping[str, Any]:
"""Get BEL Specification
The json file this depends on is generated by belspec_yaml2json as
part of the update_specifications function
Args:
version: e.g. 2.0.0 - used to locate the matching specification file
"""
spec_dir = config["bel"]["lang"]["specifications"]
spec_dict = {}
bel_versions = get_bel_versions()
if version not in bel_versions:
log.error("Cannot get unknown version BEL specification")
return {"error": "unknown version of BEL"}
# use this variable to find our parser file since periods aren't recommended in python module names
version_underscored = version.replace(".", "_")
json_fn = f"{spec_dir}/bel_v{version_underscored}.json"
with open(json_fn, "r") as f:
spec_dict = json.load(f)
return spec_dict | python | def get_specification(version: str) -> Mapping[str, Any]:
"""Get BEL Specification
The json file this depends on is generated by belspec_yaml2json as
part of the update_specifications function
Args:
version: e.g. 2.0.0 - used to locate the matching specification file
"""
spec_dir = config["bel"]["lang"]["specifications"]
spec_dict = {}
bel_versions = get_bel_versions()
if version not in bel_versions:
log.error("Cannot get unknown version BEL specification")
return {"error": "unknown version of BEL"}
# use this variable to find our parser file since periods aren't recommended in python module names
version_underscored = version.replace(".", "_")
json_fn = f"{spec_dir}/bel_v{version_underscored}.json"
with open(json_fn, "r") as f:
spec_dict = json.load(f)
return spec_dict | [
"def",
"get_specification",
"(",
"version",
":",
"str",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"spec_dir",
"=",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"specifications\"",
"]",
"spec_dict",
"=",
"{",
"}",
"bel_versions",
"=",
"get_bel_versions",
"(",
")",
"if",
"version",
"not",
"in",
"bel_versions",
":",
"log",
".",
"error",
"(",
"\"Cannot get unknown version BEL specification\"",
")",
"return",
"{",
"\"error\"",
":",
"\"unknown version of BEL\"",
"}",
"# use this variable to find our parser file since periods aren't recommended in python module names",
"version_underscored",
"=",
"version",
".",
"replace",
"(",
"\".\"",
",",
"\"_\"",
")",
"json_fn",
"=",
"f\"{spec_dir}/bel_v{version_underscored}.json\"",
"with",
"open",
"(",
"json_fn",
",",
"\"r\"",
")",
"as",
"f",
":",
"spec_dict",
"=",
"json",
".",
"load",
"(",
"f",
")",
"return",
"spec_dict"
] | Get BEL Specification
The json file this depends on is generated by belspec_yaml2json as
part of the update_specifications function
Args:
version: e.g. 2.0.0 - used to locate the matching specification file | [
"Get",
"BEL",
"Specification"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L96-L122 |
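Lookup sketch: the version string must be one of get_bel_versions(), otherwise the error dict shown above is returned.

spec = get_specification("2.0.0")  # hypothetical installed version
if "error" in spec:
    print(spec["error"])
else:
    # "admin", "functions", etc. are filled in by belspec_yaml2json below.
    print(sorted(spec.keys()))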
belbio/bel | bel/lang/bel_specification.py | get_bel_versions | def get_bel_versions() -> List[str]:
"""Get BEL Language versions supported
Get the list of all BEL Language versions supported. The file this depends
on is generated by belspec_yaml2json and is kept up to date using
`make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json`
directly as it's added as a command by pip install.
Returns:
List[str]: list of versions
"""
spec_dir = config["bel"]["lang"]["specifications"]
fn = f"{spec_dir}/versions.json"
with open(fn, "r") as f:
versions = json.load(f)
return versions | python | def get_bel_versions() -> List[str]:
"""Get BEL Language versions supported
Get the list of all BEL Language versions supported. The file this depends
on is generated by belspec_yaml2json and is kept up to date using
`make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json`
directly as it's added as a command by pip install.
Returns:
List[str]: list of versions
"""
spec_dir = config["bel"]["lang"]["specifications"]
fn = f"{spec_dir}/versions.json"
with open(fn, "r") as f:
versions = json.load(f)
return versions | [
"def",
"get_bel_versions",
"(",
")",
"->",
"List",
"[",
"str",
"]",
":",
"spec_dir",
"=",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"specifications\"",
"]",
"fn",
"=",
"f\"{spec_dir}/versions.json\"",
"with",
"open",
"(",
"fn",
",",
"\"r\"",
")",
"as",
"f",
":",
"versions",
"=",
"json",
".",
"load",
"(",
"f",
")",
"return",
"versions"
] | Get BEL Language versions supported
Get the list of all BEL Language versions supported. The file this depends
on is generated by belspec_yaml2json and is kept up to date using
`make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json`
directly as it's added as a command by pip install.
Returns:
List[str]: list of versions | [
"Get",
"BEL",
"Language",
"versions",
"supported"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L125-L143 |
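A small sketch that walks every installed version, assuming update_specifications has already populated the specification directory.

for version in get_bel_versions():
    spec = get_specification(version)
    print(version, "->", spec["admin"]["parser_fn"])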
belbio/bel | bel/lang/bel_specification.py | update_specifications | def update_specifications(force: bool = False):
"""Update BEL specifications
Collect BEL specifications from Github BELBio BEL Specification folder
and store in local directory specified in belbio_conf.yaml
Process all BEL Specifications in YAML into an enhanced JSON version
and capture all BEL versions in a separate file for quick access.
"""
spec_dir = config["bel"]["lang"]["specifications"]
if not os.path.isdir(spec_dir):
os.mkdir(spec_dir)
log.info(f"Updating BEL Specifications - stored in {spec_dir}")
# Collect new specifications from Git repository
if config["bel"]["lang"]["specification_github_repo"]:
github_belspec_files(spec_dir, force=force)
# Ensure that files use 'yaml' extension
files = glob.glob(f"{spec_dir}/*.yml")
for fn in files:
new_fn = fn.replace("yml", "yaml")
os.rename(fn, new_fn)
# Convert YAML to enhanced JSON
files = glob.glob(f"{spec_dir}/*.yaml")
versions = {}
for fn in files:
filename = os.path.basename(fn)
check_version = filename.replace("bel_v", "").replace(".yaml", "").replace("_", ".")
json_fn = fn.replace(".yaml", ".json")
version = belspec_yaml2json(fn, json_fn)
if version != check_version:
log.error(
f"Version mis-match for {fn} - fn version: {check_version} version: {version}"
)
versions[version] = filename
with open(f"{spec_dir}/versions.json", "w") as f:
json.dump(list(set(versions)), f, indent=4)
# Convert YAML file to EBNF and then parser module
create_ebnf_parser(files) | python | def update_specifications(force: bool = False):
"""Update BEL specifications
Collect BEL specifications from Github BELBio BEL Specification folder
and store in local directory specified in belbio_conf.yaml
Process all BEL Specifications in YAML into an enhanced JSON version
and capture all BEL versions in a separate file for quick access.
"""
spec_dir = config["bel"]["lang"]["specifications"]
if not os.path.isdir(spec_dir):
os.mkdir(spec_dir)
log.info(f"Updating BEL Specifications - stored in {spec_dir}")
# Collect new specifications from Git repository
if config["bel"]["lang"]["specification_github_repo"]:
github_belspec_files(spec_dir, force=force)
# Ensure that files use 'yaml' extension
files = glob.glob(f"{spec_dir}/*.yml")
for fn in files:
new_fn = fn.replace("yml", "yaml")
os.rename(fn, new_fn)
# Convert YAML to enhanced JSON
files = glob.glob(f"{spec_dir}/*.yaml")
versions = {}
for fn in files:
filename = os.path.basename(fn)
check_version = filename.replace("bel_v", "").replace(".yaml", "").replace("_", ".")
json_fn = fn.replace(".yaml", ".json")
version = belspec_yaml2json(fn, json_fn)
if version != check_version:
log.error(
f"Version mis-match for {fn} - fn version: {check_version} version: {version}"
)
versions[version] = filename
with open(f"{spec_dir}/versions.json", "w") as f:
json.dump(list(set(versions)), f, indent=4)
# Convert YAML file to EBNF and then parser module
create_ebnf_parser(files) | [
"def",
"update_specifications",
"(",
"force",
":",
"bool",
"=",
"False",
")",
":",
"spec_dir",
"=",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"specifications\"",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"spec_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"spec_dir",
")",
"log",
".",
"info",
"(",
"f\"Updating BEL Specifications - stored in {spec_dir}\"",
")",
"# Collect new specifications from Git repository",
"if",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"specification_github_repo\"",
"]",
":",
"github_belspec_files",
"(",
"spec_dir",
",",
"force",
"=",
"force",
")",
"# Ensure that files use 'yaml' extension",
"files",
"=",
"glob",
".",
"glob",
"(",
"f\"{spec_dir}/*.yml\"",
")",
"for",
"fn",
"in",
"files",
":",
"new_fn",
"=",
"fn",
".",
"replace",
"(",
"\"yml\"",
",",
"\"yaml\"",
")",
"os",
".",
"rename",
"(",
"fn",
",",
"new_fn",
")",
"# Convert YAML to enhanced JSON",
"files",
"=",
"glob",
".",
"glob",
"(",
"f\"{spec_dir}/*.yaml\"",
")",
"versions",
"=",
"{",
"}",
"for",
"fn",
"in",
"files",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
"check_version",
"=",
"filename",
".",
"replace",
"(",
"\"bel_v\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\".yaml\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\".\"",
")",
"json_fn",
"=",
"fn",
".",
"replace",
"(",
"\".yaml\"",
",",
"\".json\"",
")",
"version",
"=",
"belspec_yaml2json",
"(",
"fn",
",",
"json_fn",
")",
"if",
"version",
"!=",
"check_version",
":",
"log",
".",
"error",
"(",
"f\"Version mis-match for {fn} - fn version: {check_version} version: {version}\"",
")",
"versions",
"[",
"version",
"]",
"=",
"filename",
"with",
"open",
"(",
"f\"{spec_dir}/versions.json\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"list",
"(",
"set",
"(",
"versions",
")",
")",
",",
"f",
",",
"indent",
"=",
"4",
")",
"# Convert YAML file to EBNF and then parser module",
"create_ebnf_parser",
"(",
"files",
")"
] | Update BEL specifications
Collect BEL specifications from Github BELBio BEL Specification folder
and store in local directory specified in belbio_conf.yaml
Process all BEL Specifications in YAML into an enhanced JSON version
and capture all BEL versions in a separate file for quick access. | [
"Update",
"BEL",
"specifications"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L146-L194 |
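Refresh sketch. The token is optional and only raises the Github API rate limit; the value shown is a placeholder.

import os

os.environ["GITHUB_ACCESS_TOKEN"] = "<your-token>"  # placeholder
update_specifications(force=True)  # re-download even if local copies are < 1 day old
print(get_bel_versions())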
belbio/bel | bel/lang/bel_specification.py | github_belspec_files | def github_belspec_files(spec_dir, force: bool = False):
"""Get belspec files from Github repo
Args:
spec_dir: directory to store the BEL Specification and derived files
force: force update of BEL Specifications from Github - skipped if local files are less than 1 day old
"""
if not force:
dtnow = datetime.datetime.utcnow()
delta = datetime.timedelta(1)
yesterday = dtnow - delta
for fn in glob.glob(f"{spec_dir}/bel*yaml"):
if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:
log.info("Skipping BEL Specification update - specs less than 1 day old")
return
repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications"
params = {}
github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if github_access_token:
params = {"access_token": github_access_token}
r = requests.get(repo_url, params=params)
if r.status_code == 200:
results = r.json()
for f in results:
url = f["download_url"]
fn = os.path.basename(url)
if "yaml" not in fn and "yml" in fn:
fn = fn.replace("yml", "yaml")
r = requests.get(url, params=params, allow_redirects=True)
if r.status_code == 200:
open(f"{spec_dir}/{fn}", "wb").write(r.content)
else:
sys.exit(
f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}"
)
else:
sys.exit(
f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}"
) | python | def github_belspec_files(spec_dir, force: bool = False):
"""Get belspec files from Github repo
Args:
spec_dir: directory to store the BEL Specification and derived files
force: force update of BEL Specifications from Github - skipped if local files are less than 1 day old
"""
if not force:
dtnow = datetime.datetime.utcnow()
delta = datetime.timedelta(1)
yesterday = dtnow - delta
for fn in glob.glob(f"{spec_dir}/bel*yaml"):
if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:
log.info("Skipping BEL Specification update - specs less than 1 day old")
return
repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications"
params = {}
github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if github_access_token:
params = {"access_token": github_access_token}
r = requests.get(repo_url, params=params)
if r.status_code == 200:
results = r.json()
for f in results:
url = f["download_url"]
fn = os.path.basename(url)
if "yaml" not in fn and "yml" in fn:
fn = fn.replace("yml", "yaml")
r = requests.get(url, params=params, allow_redirects=True)
if r.status_code == 200:
open(f"{spec_dir}/{fn}", "wb").write(r.content)
else:
sys.exit(
f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}"
)
else:
sys.exit(
f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}"
) | [
"def",
"github_belspec_files",
"(",
"spec_dir",
",",
"force",
":",
"bool",
"=",
"False",
")",
":",
"if",
"not",
"force",
":",
"dtnow",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"delta",
"=",
"datetime",
".",
"timedelta",
"(",
"1",
")",
"yesterday",
"=",
"dtnow",
"-",
"delta",
"for",
"fn",
"in",
"glob",
".",
"glob",
"(",
"f\"{spec_dir}/bel*yaml\"",
")",
":",
"if",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"fn",
")",
")",
">",
"yesterday",
":",
"log",
".",
"info",
"(",
"\"Skipping BEL Specification update - specs less than 1 day old\"",
")",
"return",
"repo_url",
"=",
"\"https://api.github.com/repos/belbio/bel_specifications/contents/specifications\"",
"params",
"=",
"{",
"}",
"github_access_token",
"=",
"os",
".",
"getenv",
"(",
"\"GITHUB_ACCESS_TOKEN\"",
",",
"\"\"",
")",
"if",
"github_access_token",
":",
"params",
"=",
"{",
"\"access_token\"",
":",
"github_access_token",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"repo_url",
",",
"params",
"=",
"params",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"results",
"=",
"r",
".",
"json",
"(",
")",
"for",
"f",
"in",
"results",
":",
"url",
"=",
"f",
"[",
"\"download_url\"",
"]",
"fn",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"url",
")",
"if",
"\"yaml\"",
"not",
"in",
"fn",
"and",
"\"yml\"",
"in",
"fn",
":",
"fn",
"=",
"fn",
".",
"replace",
"(",
"\"yml\"",
",",
"\"yaml\"",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"allow_redirects",
"=",
"True",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"open",
"(",
"f\"{spec_dir}/{fn}\"",
",",
"\"wb\"",
")",
".",
"write",
"(",
"r",
".",
"content",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"f\"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}\"",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"f\"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}\"",
")"
] | Get belspec files from Github repo
Args:
spec_dir: directory to store the BEL Specification and derived files
force: force update of BEL Specifications from Github - skipped if local files are less than 1 day old | [
"Get",
"belspec",
"files",
"from",
"Github",
"repo"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L197-L242 |
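Direct-download sketch with a hypothetical target directory. Because of the modification-time check above, a second call within 24 hours is a no-op unless force=True.

github_belspec_files("/tmp/bel_specs")              # fetches if local copies are stale
github_belspec_files("/tmp/bel_specs", force=True)  # always re-downloads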
belbio/bel | bel/lang/bel_specification.py | belspec_yaml2json | def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str:
"""Enhance BEL specification and save as JSON file
Load a single BEL Specification YAML file and convert it to an
enhanced JSON file. The versions.json index of all available BEL
versions is written separately by update_specifications.
Args:
yaml_fn: original YAML version of BEL Spec
json_fn: enhanced JSON version of BEL Spec
Returns:
str: version of BEL Spec
"""
try:
spec_dict = yaml.load(open(yaml_fn, "r").read(), Loader=yaml.SafeLoader)
# admin-related keys
spec_dict["admin"] = {}
spec_dict["admin"]["version_underscored"] = spec_dict["version"].replace(".", "_")
spec_dict["admin"]["parser_fn"] = yaml_fn.replace(".yaml", "_parser.py")
# add relation keys list, to_short, to_long
add_relations(spec_dict)
# add function keys list, to_short, to_long
add_functions(spec_dict)
# add namespace keys list, list_short, list_long, to_short, to_long
add_namespaces(spec_dict)
enhance_function_signatures(spec_dict)
add_function_signature_help(spec_dict)
with open(json_fn, "w") as f:
json.dump(spec_dict, f)
except Exception as e:
log.error(
"Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.".format(
yaml_fn
)
)
sys.exit()
return spec_dict["version"] | python | def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str:
"""Enhance BEL specification and save as JSON file
Load a single BEL Specification YAML file and convert it to an
enhanced JSON file. The versions.json index of all available BEL
versions is written separately by update_specifications.
Args:
yaml_fn: original YAML version of BEL Spec
json_fn: enhanced JSON version of BEL Spec
Returns:
str: version of BEL Spec
"""
try:
spec_dict = yaml.load(open(yaml_fn, "r").read(), Loader=yaml.SafeLoader)
# admin-related keys
spec_dict["admin"] = {}
spec_dict["admin"]["version_underscored"] = spec_dict["version"].replace(".", "_")
spec_dict["admin"]["parser_fn"] = yaml_fn.replace(".yaml", "_parser.py")
# add relation keys list, to_short, to_long
add_relations(spec_dict)
# add function keys list, to_short, to_long
add_functions(spec_dict)
# add namespace keys list, list_short, list_long, to_short, to_long
add_namespaces(spec_dict)
enhance_function_signatures(spec_dict)
add_function_signature_help(spec_dict)
with open(json_fn, "w") as f:
json.dump(spec_dict, f)
except Exception as e:
log.error(
"Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.".format(
                yaml_fn=yaml_fn
)
)
sys.exit()
return spec_dict["version"] | [
"def",
"belspec_yaml2json",
"(",
"yaml_fn",
":",
"str",
",",
"json_fn",
":",
"str",
")",
"->",
"str",
":",
"try",
":",
"spec_dict",
"=",
"yaml",
".",
"load",
"(",
"open",
"(",
"yaml_fn",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
",",
"Loader",
"=",
"yaml",
".",
"SafeLoader",
")",
"# admin-related keys",
"spec_dict",
"[",
"\"admin\"",
"]",
"=",
"{",
"}",
"spec_dict",
"[",
"\"admin\"",
"]",
"[",
"\"version_underscored\"",
"]",
"=",
"spec_dict",
"[",
"\"version\"",
"]",
".",
"replace",
"(",
"\".\"",
",",
"\"_\"",
")",
"spec_dict",
"[",
"\"admin\"",
"]",
"[",
"\"parser_fn\"",
"]",
"=",
"yaml_fn",
".",
"replace",
"(",
"\".yaml\"",
",",
"\"_parser.py\"",
")",
"# add relation keys list, to_short, to_long",
"add_relations",
"(",
"spec_dict",
")",
"# add function keys list, to_short, to_long",
"add_functions",
"(",
"spec_dict",
")",
"# add namespace keys list, list_short, list_long, to_short, to_long",
"add_namespaces",
"(",
"spec_dict",
")",
"enhance_function_signatures",
"(",
"spec_dict",
")",
"add_function_signature_help",
"(",
"spec_dict",
")",
"with",
"open",
"(",
"json_fn",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"spec_dict",
",",
"f",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.\"",
".",
"format",
"(",
"yaml_fn",
")",
")",
"sys",
".",
"exit",
"(",
")",
"return",
"spec_dict",
"[",
"\"version\"",
"]"
] | Enhance BEL specification and save as JSON file
Load all BEL Specification YAML files and convert to JSON files
after enhancing them. Also create a bel_versions.json file with
all available BEL versions for fast loading.
Args:
yaml_fn: original YAML version of BEL Spec
json_fn: enhanced JSON version of BEL Spec
Returns:
str: version of BEL Spec | [
"Enhance",
"BEL",
"specification",
"and",
"save",
"as",
"JSON",
"file"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L245-L289 |
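A hypothetical call to the function above; the import path follows the record's file path, and the spec filenames are placeholders.

from bel.lang.bel_specification import belspec_yaml2json

# Enhance one YAML spec and write the JSON next to it; returns the spec version.
version = belspec_yaml2json("specs/bel_v2_0_0.yaml", "specs/bel_v2_0_0.json")
print(f"Enhanced BEL Spec version {version}")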
belbio/bel | bel/lang/bel_specification.py | add_function_signature_help | def add_function_signature_help(spec_dict: dict) -> dict:
"""Add function signature help
Simplify the function signatures for presentation to BEL Editor users
"""
for f in spec_dict["functions"]["signatures"]:
for argset_idx, argset in enumerate(spec_dict["functions"]["signatures"][f]["signatures"]):
args_summary = ""
args_list = []
arg_idx = 0
for arg_idx, arg in enumerate(
spec_dict["functions"]["signatures"][f]["signatures"][argset_idx]["arguments"]
):
if arg["type"] in ["Function", "Modifier"]:
vals = [
spec_dict["functions"]["to_short"].get(
val, spec_dict["functions"]["to_short"].get(val)
)
for val in arg["values"]
]
args_summary += "|".join(vals) + "()"
arg_idx += 1
if arg.get("optional", False) and arg.get("multiple", False) is False:
args_summary += "?"
text = f'Zero or one of each function(s): {", ".join([val for val in arg["values"]])}'
elif arg.get("optional", False):
args_summary += "*"
text = f'Zero or more of each function(s): {", ".join([val for val in arg["values"]])}'
else:
text = f'One of following function(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] in ["NSArg", "StrArg", "StrArgNSArg"]:
args_summary += f'{arg["type"]}'
if arg.get("optional", False) and arg.get("multiple", False) is False:
args_summary += "?"
if arg["type"] in ["NSArg"]:
text = f'Zero or one namespace argument of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] == "StrArgNSArg":
                        text = f'Zero or one namespace argument or default namespace argument (without prefix) of following type(s): {", ".join([val for val in arg["values"]])}'
else:
text = f'Zero or one string argument of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg.get("optional", False):
args_summary += "*"
if arg["type"] in ["NSArg"]:
text = f'Zero or more namespace arguments of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] == "StrArgNSArg":
text = f'Zero or more namespace arguments or default namespace arguments (without prefix) of following type(s): {", ".join([val for val in arg["values"]])}'
else:
                        text = f'Zero or more string arguments of following type(s): {", ".join([val for val in arg["values"]])}'
else:
if arg["type"] in ["NSArg"]:
text = f'Namespace argument of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] == "StrArgNSArg":
text = f'Namespace argument or default namespace argument (without prefix) of following type(s): {", ".join([val for val in arg["values"]])}'
else:
text = f'String argument of following type(s): {", ".join([val for val in arg["values"]])}'
args_summary += ", "
args_list.append(text)
args_summary = re.sub(", $", "", args_summary)
spec_dict["functions"]["signatures"][f]["signatures"][argset_idx][
"argument_summary"
] = f"{f}({args_summary})"
spec_dict["functions"]["signatures"][f]["signatures"][argset_idx][
"argument_help_listing"
] = args_list
# print(f'{f}({args_summary})')
# print(args_list)
return spec_dict | python | def add_function_signature_help(spec_dict: dict) -> dict:
"""Add function signature help
Simplify the function signatures for presentation to BEL Editor users
"""
for f in spec_dict["functions"]["signatures"]:
for argset_idx, argset in enumerate(spec_dict["functions"]["signatures"][f]["signatures"]):
args_summary = ""
args_list = []
arg_idx = 0
for arg_idx, arg in enumerate(
spec_dict["functions"]["signatures"][f]["signatures"][argset_idx]["arguments"]
):
if arg["type"] in ["Function", "Modifier"]:
vals = [
spec_dict["functions"]["to_short"].get(
val, spec_dict["functions"]["to_short"].get(val)
)
for val in arg["values"]
]
args_summary += "|".join(vals) + "()"
arg_idx += 1
if arg.get("optional", False) and arg.get("multiple", False) is False:
args_summary += "?"
text = f'Zero or one of each function(s): {", ".join([val for val in arg["values"]])}'
elif arg.get("optional", False):
args_summary += "*"
text = f'Zero or more of each function(s): {", ".join([val for val in arg["values"]])}'
else:
text = f'One of following function(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] in ["NSArg", "StrArg", "StrArgNSArg"]:
args_summary += f'{arg["type"]}'
if arg.get("optional", False) and arg.get("multiple", False) is False:
args_summary += "?"
if arg["type"] in ["NSArg"]:
text = f'Zero or one namespace argument of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] == "StrArgNSArg":
                        text = f'Zero or one namespace argument or default namespace argument (without prefix) of following type(s): {", ".join([val for val in arg["values"]])}'
else:
text = f'Zero or one string argument of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg.get("optional", False):
args_summary += "*"
if arg["type"] in ["NSArg"]:
text = f'Zero or more namespace arguments of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] == "StrArgNSArg":
text = f'Zero or more namespace arguments or default namespace arguments (without prefix) of following type(s): {", ".join([val for val in arg["values"]])}'
else:
                        text = f'Zero or more string arguments of following type(s): {", ".join([val for val in arg["values"]])}'
else:
if arg["type"] in ["NSArg"]:
text = f'Namespace argument of following type(s): {", ".join([val for val in arg["values"]])}'
elif arg["type"] == "StrArgNSArg":
text = f'Namespace argument or default namespace argument (without prefix) of following type(s): {", ".join([val for val in arg["values"]])}'
else:
text = f'String argument of following type(s): {", ".join([val for val in arg["values"]])}'
args_summary += ", "
args_list.append(text)
args_summary = re.sub(", $", "", args_summary)
spec_dict["functions"]["signatures"][f]["signatures"][argset_idx][
"argument_summary"
] = f"{f}({args_summary})"
spec_dict["functions"]["signatures"][f]["signatures"][argset_idx][
"argument_help_listing"
] = args_list
# print(f'{f}({args_summary})')
# print(args_list)
return spec_dict | [
"def",
"add_function_signature_help",
"(",
"spec_dict",
":",
"dict",
")",
"->",
"dict",
":",
"for",
"f",
"in",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
":",
"for",
"argset_idx",
",",
"argset",
"in",
"enumerate",
"(",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"f",
"]",
"[",
"\"signatures\"",
"]",
")",
":",
"args_summary",
"=",
"\"\"",
"args_list",
"=",
"[",
"]",
"arg_idx",
"=",
"0",
"for",
"arg_idx",
",",
"arg",
"in",
"enumerate",
"(",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"f",
"]",
"[",
"\"signatures\"",
"]",
"[",
"argset_idx",
"]",
"[",
"\"arguments\"",
"]",
")",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"vals",
"=",
"[",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_short\"",
"]",
".",
"get",
"(",
"val",
",",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_short\"",
"]",
".",
"get",
"(",
"val",
")",
")",
"for",
"val",
"in",
"arg",
"[",
"\"values\"",
"]",
"]",
"args_summary",
"+=",
"\"|\"",
".",
"join",
"(",
"vals",
")",
"+",
"\"()\"",
"arg_idx",
"+=",
"1",
"if",
"arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
"and",
"arg",
".",
"get",
"(",
"\"multiple\"",
",",
"False",
")",
"is",
"False",
":",
"args_summary",
"+=",
"\"?\"",
"text",
"=",
"f'Zero or one of each function(s): {\", \".join([val for val in arg[\"values\"]])}'",
"elif",
"arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
":",
"args_summary",
"+=",
"\"*\"",
"text",
"=",
"f'Zero or more of each function(s): {\", \".join([val for val in arg[\"values\"]])}'",
"else",
":",
"text",
"=",
"f'One of following function(s): {\", \".join([val for val in arg[\"values\"]])}'",
"elif",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"NSArg\"",
",",
"\"StrArg\"",
",",
"\"StrArgNSArg\"",
"]",
":",
"args_summary",
"+=",
"f'{arg[\"type\"]}'",
"if",
"arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
"and",
"arg",
".",
"get",
"(",
"\"multiple\"",
",",
"False",
")",
"is",
"False",
":",
"args_summary",
"+=",
"\"?\"",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"NSArg\"",
"]",
":",
"text",
"=",
"f'Zero or one namespace argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArgNSArg\"",
":",
"text",
"=",
"f'Zero or one amespace argument or default namespace argument (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"else",
":",
"text",
"=",
"f'Zero or one string argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"elif",
"arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
":",
"args_summary",
"+=",
"\"*\"",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"NSArg\"",
"]",
":",
"text",
"=",
"f'Zero or more namespace arguments of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArgNSArg\"",
":",
"text",
"=",
"f'Zero or more namespace arguments or default namespace arguments (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"else",
":",
"text",
"=",
"f'Zero or more of string arguments of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"else",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"NSArg\"",
"]",
":",
"text",
"=",
"f'Namespace argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArgNSArg\"",
":",
"text",
"=",
"f'Namespace argument or default namespace argument (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"else",
":",
"text",
"=",
"f'String argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'",
"args_summary",
"+=",
"\", \"",
"args_list",
".",
"append",
"(",
"text",
")",
"args_summary",
"=",
"re",
".",
"sub",
"(",
"\", $\"",
",",
"\"\"",
",",
"args_summary",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"f",
"]",
"[",
"\"signatures\"",
"]",
"[",
"argset_idx",
"]",
"[",
"\"argument_summary\"",
"]",
"=",
"f\"{f}({args_summary})\"",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"f",
"]",
"[",
"\"signatures\"",
"]",
"[",
"argset_idx",
"]",
"[",
"\"argument_help_listing\"",
"]",
"=",
"args_list",
"# print(f'{f}({args_summary})')",
"# print(args_list)",
"return",
"spec_dict"
] | Add function signature help
Simplify the function signatures for presentation to BEL Editor users | [
"Add",
"function",
"signature",
"help"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L292-L364 |
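A sketch of consuming the fields this function adds, assuming an enhanced spec JSON produced by belspec_yaml2json; the key "proteinAbundance" is an illustrative guess at a function name, not taken from the record.

import json

with open("specs/bel_v2_0_0.json") as f:  # written by belspec_yaml2json above
    spec = json.load(f)

for sig in spec["functions"]["signatures"]["proteinAbundance"]["signatures"]:
    print(sig["argument_summary"])        # one-line signature shown to editor users
    for line in sig["argument_help_listing"]:
        print("  " + line)                # per-argument help text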
belbio/bel | bel/lang/bel_specification.py | add_relations | def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Add relation keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added relation keys
"""
# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances
spec_dict["relations"]["list"] = []
spec_dict["relations"]["list_short"] = []
spec_dict["relations"]["list_long"] = []
spec_dict["relations"]["to_short"] = {}
spec_dict["relations"]["to_long"] = {}
for relation_name in spec_dict["relations"]["info"]:
abbreviated_name = spec_dict["relations"]["info"][relation_name]["abbreviation"]
spec_dict["relations"]["list"].extend((relation_name, abbreviated_name))
spec_dict["relations"]["list_long"].append(relation_name)
spec_dict["relations"]["list_short"].append(abbreviated_name)
spec_dict["relations"]["to_short"][relation_name] = abbreviated_name
spec_dict["relations"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["relations"]["to_long"][abbreviated_name] = relation_name
spec_dict["relations"]["to_long"][relation_name] = relation_name
return spec_dict | python | def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Add relation keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added relation keys
"""
# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances
spec_dict["relations"]["list"] = []
spec_dict["relations"]["list_short"] = []
spec_dict["relations"]["list_long"] = []
spec_dict["relations"]["to_short"] = {}
spec_dict["relations"]["to_long"] = {}
for relation_name in spec_dict["relations"]["info"]:
abbreviated_name = spec_dict["relations"]["info"][relation_name]["abbreviation"]
spec_dict["relations"]["list"].extend((relation_name, abbreviated_name))
spec_dict["relations"]["list_long"].append(relation_name)
spec_dict["relations"]["list_short"].append(abbreviated_name)
spec_dict["relations"]["to_short"][relation_name] = abbreviated_name
spec_dict["relations"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["relations"]["to_long"][abbreviated_name] = relation_name
spec_dict["relations"]["to_long"][relation_name] = relation_name
return spec_dict | [
"def",
"add_relations",
"(",
"spec_dict",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"list\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"list_short\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"list_long\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"to_short\"",
"]",
"=",
"{",
"}",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"to_long\"",
"]",
"=",
"{",
"}",
"for",
"relation_name",
"in",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"info\"",
"]",
":",
"abbreviated_name",
"=",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"info\"",
"]",
"[",
"relation_name",
"]",
"[",
"\"abbreviation\"",
"]",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"list\"",
"]",
".",
"extend",
"(",
"(",
"relation_name",
",",
"abbreviated_name",
")",
")",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"list_long\"",
"]",
".",
"append",
"(",
"relation_name",
")",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"list_short\"",
"]",
".",
"append",
"(",
"abbreviated_name",
")",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"to_short\"",
"]",
"[",
"relation_name",
"]",
"=",
"abbreviated_name",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"to_short\"",
"]",
"[",
"abbreviated_name",
"]",
"=",
"abbreviated_name",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"to_long\"",
"]",
"[",
"abbreviated_name",
"]",
"=",
"relation_name",
"spec_dict",
"[",
"\"relations\"",
"]",
"[",
"\"to_long\"",
"]",
"[",
"relation_name",
"]",
"=",
"relation_name",
"return",
"spec_dict"
] | Add relation keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added relation keys | [
"Add",
"relation",
"keys",
"to",
"spec_dict"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L367-L397 |
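A minimal sketch exercising the lookups this function builds; the relation names and abbreviations below are illustrative, not drawn from a real BEL spec. Because both forms map to themselves, a single to_short or to_long lookup normalizes either spelling.

spec = {"relations": {"info": {
    "increases": {"abbreviation": "->"},
    "decreases": {"abbreviation": "-|"},
}}}
add_relations(spec)

assert spec["relations"]["to_short"]["increases"] == "->"
assert spec["relations"]["to_short"]["->"] == "->"          # idempotent lookup
assert spec["relations"]["to_long"]["-|"] == "decreases"
assert sorted(spec["relations"]["list_long"]) == ["decreases", "increases"]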
belbio/bel | bel/lang/bel_specification.py | add_functions | def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Add function keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added function keys
"""
# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances
spec_dict["functions"]["list"] = []
spec_dict["functions"]["list_long"] = []
spec_dict["functions"]["list_short"] = []
spec_dict["functions"]["primary"] = {}
spec_dict["functions"]["primary"]["list_long"] = []
spec_dict["functions"]["primary"]["list_short"] = []
spec_dict["functions"]["modifier"] = {}
spec_dict["functions"]["modifier"]["list_long"] = []
spec_dict["functions"]["modifier"]["list_short"] = []
spec_dict["functions"]["to_short"] = {}
spec_dict["functions"]["to_long"] = {}
for func_name in spec_dict["functions"]["info"]:
abbreviated_name = spec_dict["functions"]["info"][func_name]["abbreviation"]
spec_dict["functions"]["list"].extend((func_name, abbreviated_name))
spec_dict["functions"]["list_long"].append(func_name)
spec_dict["functions"]["list_short"].append(abbreviated_name)
if spec_dict["functions"]["info"][func_name]["type"] == "primary":
spec_dict["functions"]["primary"]["list_long"].append(func_name)
spec_dict["functions"]["primary"]["list_short"].append(abbreviated_name)
else:
spec_dict["functions"]["modifier"]["list_long"].append(func_name)
spec_dict["functions"]["modifier"]["list_short"].append(abbreviated_name)
spec_dict["functions"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["functions"]["to_short"][func_name] = abbreviated_name
spec_dict["functions"]["to_long"][abbreviated_name] = func_name
spec_dict["functions"]["to_long"][func_name] = func_name
return spec_dict | python | def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Add function keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added function keys
"""
# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances
spec_dict["functions"]["list"] = []
spec_dict["functions"]["list_long"] = []
spec_dict["functions"]["list_short"] = []
spec_dict["functions"]["primary"] = {}
spec_dict["functions"]["primary"]["list_long"] = []
spec_dict["functions"]["primary"]["list_short"] = []
spec_dict["functions"]["modifier"] = {}
spec_dict["functions"]["modifier"]["list_long"] = []
spec_dict["functions"]["modifier"]["list_short"] = []
spec_dict["functions"]["to_short"] = {}
spec_dict["functions"]["to_long"] = {}
for func_name in spec_dict["functions"]["info"]:
abbreviated_name = spec_dict["functions"]["info"][func_name]["abbreviation"]
spec_dict["functions"]["list"].extend((func_name, abbreviated_name))
spec_dict["functions"]["list_long"].append(func_name)
spec_dict["functions"]["list_short"].append(abbreviated_name)
if spec_dict["functions"]["info"][func_name]["type"] == "primary":
spec_dict["functions"]["primary"]["list_long"].append(func_name)
spec_dict["functions"]["primary"]["list_short"].append(abbreviated_name)
else:
spec_dict["functions"]["modifier"]["list_long"].append(func_name)
spec_dict["functions"]["modifier"]["list_short"].append(abbreviated_name)
spec_dict["functions"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["functions"]["to_short"][func_name] = abbreviated_name
spec_dict["functions"]["to_long"][abbreviated_name] = func_name
spec_dict["functions"]["to_long"][func_name] = func_name
return spec_dict | [
"def",
"add_functions",
"(",
"spec_dict",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"list\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"list_long\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"list_short\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"=",
"{",
"}",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"[",
"\"list_long\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"[",
"\"list_short\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"modifier\"",
"]",
"=",
"{",
"}",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"modifier\"",
"]",
"[",
"\"list_long\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"modifier\"",
"]",
"[",
"\"list_short\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_short\"",
"]",
"=",
"{",
"}",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_long\"",
"]",
"=",
"{",
"}",
"for",
"func_name",
"in",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
":",
"abbreviated_name",
"=",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"func_name",
"]",
"[",
"\"abbreviation\"",
"]",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"list\"",
"]",
".",
"extend",
"(",
"(",
"func_name",
",",
"abbreviated_name",
")",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"list_long\"",
"]",
".",
"append",
"(",
"func_name",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"list_short\"",
"]",
".",
"append",
"(",
"abbreviated_name",
")",
"if",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"func_name",
"]",
"[",
"\"type\"",
"]",
"==",
"\"primary\"",
":",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"[",
"\"list_long\"",
"]",
".",
"append",
"(",
"func_name",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"[",
"\"list_short\"",
"]",
".",
"append",
"(",
"abbreviated_name",
")",
"else",
":",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"modifier\"",
"]",
"[",
"\"list_long\"",
"]",
".",
"append",
"(",
"func_name",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"modifier\"",
"]",
"[",
"\"list_short\"",
"]",
".",
"append",
"(",
"abbreviated_name",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_short\"",
"]",
"[",
"abbreviated_name",
"]",
"=",
"abbreviated_name",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_short\"",
"]",
"[",
"func_name",
"]",
"=",
"abbreviated_name",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_long\"",
"]",
"[",
"abbreviated_name",
"]",
"=",
"func_name",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"to_long\"",
"]",
"[",
"func_name",
"]",
"=",
"func_name",
"return",
"spec_dict"
] | Add function keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added function keys | [
"Add",
"function",
"keys",
"to",
"spec_dict"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L400-L448 |
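The same pattern for functions, with primary and modifier forms split out; a toy spec whose names are chosen for illustration only.

spec = {"functions": {"info": {
    "proteinAbundance": {"abbreviation": "p",   "type": "primary"},
    "location":         {"abbreviation": "loc", "type": "modifier"},
}}}
add_functions(spec)

assert spec["functions"]["to_short"]["proteinAbundance"] == "p"
assert spec["functions"]["to_long"]["loc"] == "location"
assert "p" in spec["functions"]["primary"]["list_short"]
assert "loc" in spec["functions"]["modifier"]["list_short"]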
belbio/bel | bel/lang/bel_specification.py | add_namespaces | def add_namespaces(spec_dict):
"""Add namespace convenience keys, list, list_{short|long}, to_{short|long}"""
for ns in spec_dict["namespaces"]:
spec_dict["namespaces"][ns]["list"] = []
spec_dict["namespaces"][ns]["list_long"] = []
spec_dict["namespaces"][ns]["list_short"] = []
spec_dict["namespaces"][ns]["to_short"] = {}
spec_dict["namespaces"][ns]["to_long"] = {}
for obj in spec_dict["namespaces"][ns]["info"]:
spec_dict["namespaces"][ns]["list"].extend([obj["name"], obj["abbreviation"]])
spec_dict["namespaces"][ns]["list_short"].append(obj["abbreviation"])
spec_dict["namespaces"][ns]["list_long"].append(obj["name"])
spec_dict["namespaces"][ns]["to_short"][obj["abbreviation"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_short"][obj["name"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_long"][obj["abbreviation"]] = obj["name"]
spec_dict["namespaces"][ns]["to_long"][obj["name"]] = obj["name"]
# For AminoAcid namespace
if "abbrev1" in obj:
spec_dict["namespaces"][ns]["to_short"][obj["abbrev1"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_long"][obj["abbrev1"]] = obj["name"] | python | def add_namespaces(spec_dict):
"""Add namespace convenience keys, list, list_{short|long}, to_{short|long}"""
for ns in spec_dict["namespaces"]:
spec_dict["namespaces"][ns]["list"] = []
spec_dict["namespaces"][ns]["list_long"] = []
spec_dict["namespaces"][ns]["list_short"] = []
spec_dict["namespaces"][ns]["to_short"] = {}
spec_dict["namespaces"][ns]["to_long"] = {}
for obj in spec_dict["namespaces"][ns]["info"]:
spec_dict["namespaces"][ns]["list"].extend([obj["name"], obj["abbreviation"]])
spec_dict["namespaces"][ns]["list_short"].append(obj["abbreviation"])
spec_dict["namespaces"][ns]["list_long"].append(obj["name"])
spec_dict["namespaces"][ns]["to_short"][obj["abbreviation"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_short"][obj["name"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_long"][obj["abbreviation"]] = obj["name"]
spec_dict["namespaces"][ns]["to_long"][obj["name"]] = obj["name"]
# For AminoAcid namespace
if "abbrev1" in obj:
spec_dict["namespaces"][ns]["to_short"][obj["abbrev1"]] = obj["abbreviation"]
spec_dict["namespaces"][ns]["to_long"][obj["abbrev1"]] = obj["name"] | [
"def",
"add_namespaces",
"(",
"spec_dict",
")",
":",
"for",
"ns",
"in",
"spec_dict",
"[",
"\"namespaces\"",
"]",
":",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"list\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"list_long\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"list_short\"",
"]",
"=",
"[",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_short\"",
"]",
"=",
"{",
"}",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_long\"",
"]",
"=",
"{",
"}",
"for",
"obj",
"in",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"info\"",
"]",
":",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"list\"",
"]",
".",
"extend",
"(",
"[",
"obj",
"[",
"\"name\"",
"]",
",",
"obj",
"[",
"\"abbreviation\"",
"]",
"]",
")",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"list_short\"",
"]",
".",
"append",
"(",
"obj",
"[",
"\"abbreviation\"",
"]",
")",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"list_long\"",
"]",
".",
"append",
"(",
"obj",
"[",
"\"name\"",
"]",
")",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_short\"",
"]",
"[",
"obj",
"[",
"\"abbreviation\"",
"]",
"]",
"=",
"obj",
"[",
"\"abbreviation\"",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_short\"",
"]",
"[",
"obj",
"[",
"\"name\"",
"]",
"]",
"=",
"obj",
"[",
"\"abbreviation\"",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_long\"",
"]",
"[",
"obj",
"[",
"\"abbreviation\"",
"]",
"]",
"=",
"obj",
"[",
"\"name\"",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_long\"",
"]",
"[",
"obj",
"[",
"\"name\"",
"]",
"]",
"=",
"obj",
"[",
"\"name\"",
"]",
"# For AminoAcid namespace",
"if",
"\"abbrev1\"",
"in",
"obj",
":",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_short\"",
"]",
"[",
"obj",
"[",
"\"abbrev1\"",
"]",
"]",
"=",
"obj",
"[",
"\"abbreviation\"",
"]",
"spec_dict",
"[",
"\"namespaces\"",
"]",
"[",
"ns",
"]",
"[",
"\"to_long\"",
"]",
"[",
"obj",
"[",
"\"abbrev1\"",
"]",
"]",
"=",
"obj",
"[",
"\"name\"",
"]"
] | Add namespace convenience keys, list, list_{short|long}, to_{short|long} | [
"Add",
"namespace",
"convenience",
"keys",
"list",
"list_",
"{",
"short|long",
"}",
"to_",
"{",
"short|long",
"}"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L451-L476 |
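A sketch of the AminoAcid special case, where a one-letter abbrev1 also resolves; the values are illustrative.

spec = {"namespaces": {"AminoAcid": {"info": [
    {"name": "Alanine", "abbreviation": "Ala", "abbrev1": "A"},
]}}}
add_namespaces(spec)

ns = spec["namespaces"]["AminoAcid"]
assert ns["to_short"]["Alanine"] == "Ala"
assert ns["to_long"]["A"] == "Alanine"   # one-letter code maps via abbrev1
assert ns["list"] == ["Alanine", "Ala"]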
belbio/bel | bel/lang/bel_specification.py | enhance_function_signatures | def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict
"""
for func in spec_dict["functions"]["signatures"]:
for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]):
args = sig["arguments"]
req_args = []
pos_args = []
opt_args = []
mult_args = []
for arg in args:
# Multiple argument types
if arg.get("multiple", False):
if arg["type"] in ["Function", "Modifier"]:
mult_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
# Complex signature has this
mult_args.append(arg["type"])
# Optional, position dependent - will be added after req_args based on order in bel_specification
elif arg.get("optional", False) and arg.get("position", False):
if arg["type"] in ["Function", "Modifier"]:
pos_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
pos_args.append(arg["type"])
# Optional, position independent
elif arg.get("optional", False):
if arg["type"] in ["Function", "Modifier"]:
opt_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
opt_args.append(arg["type"])
# Required arguments, position dependent
else:
if arg["type"] in ["Function", "Modifier"]:
req_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
req_args.append(arg["type"])
spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy(
req_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy(
pos_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy(
opt_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i][
"mult_args"
] = copy.deepcopy(mult_args)
return spec_dict | python | def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict
"""
for func in spec_dict["functions"]["signatures"]:
for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]):
args = sig["arguments"]
req_args = []
pos_args = []
opt_args = []
mult_args = []
for arg in args:
# Multiple argument types
if arg.get("multiple", False):
if arg["type"] in ["Function", "Modifier"]:
mult_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
# Complex signature has this
mult_args.append(arg["type"])
# Optional, position dependent - will be added after req_args based on order in bel_specification
elif arg.get("optional", False) and arg.get("position", False):
if arg["type"] in ["Function", "Modifier"]:
pos_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
pos_args.append(arg["type"])
# Optional, position independent
elif arg.get("optional", False):
if arg["type"] in ["Function", "Modifier"]:
opt_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
opt_args.append(arg["type"])
# Required arguments, position dependent
else:
if arg["type"] in ["Function", "Modifier"]:
req_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
req_args.append(arg["type"])
spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy(
req_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy(
pos_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy(
opt_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i][
"mult_args"
] = copy.deepcopy(mult_args)
return spec_dict | [
"def",
"enhance_function_signatures",
"(",
"spec_dict",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"for",
"func",
"in",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
":",
"for",
"i",
",",
"sig",
"in",
"enumerate",
"(",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"func",
"]",
"[",
"\"signatures\"",
"]",
")",
":",
"args",
"=",
"sig",
"[",
"\"arguments\"",
"]",
"req_args",
"=",
"[",
"]",
"pos_args",
"=",
"[",
"]",
"opt_args",
"=",
"[",
"]",
"mult_args",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"# Multiple argument types",
"if",
"arg",
".",
"get",
"(",
"\"multiple\"",
",",
"False",
")",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"mult_args",
".",
"extend",
"(",
"arg",
".",
"get",
"(",
"\"values\"",
",",
"[",
"]",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"StrArgNSArg\"",
",",
"\"NSArg\"",
",",
"\"StrArg\"",
"]",
":",
"# Complex signature has this",
"mult_args",
".",
"append",
"(",
"arg",
"[",
"\"type\"",
"]",
")",
"# Optional, position dependent - will be added after req_args based on order in bel_specification",
"elif",
"arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
"and",
"arg",
".",
"get",
"(",
"\"position\"",
",",
"False",
")",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"pos_args",
".",
"append",
"(",
"arg",
".",
"get",
"(",
"\"values\"",
",",
"[",
"]",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"StrArgNSArg\"",
",",
"\"NSArg\"",
",",
"\"StrArg\"",
"]",
":",
"pos_args",
".",
"append",
"(",
"arg",
"[",
"\"type\"",
"]",
")",
"# Optional, position independent",
"elif",
"arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"opt_args",
".",
"extend",
"(",
"arg",
".",
"get",
"(",
"\"values\"",
",",
"[",
"]",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"StrArgNSArg\"",
",",
"\"NSArg\"",
",",
"\"StrArg\"",
"]",
":",
"opt_args",
".",
"append",
"(",
"arg",
"[",
"\"type\"",
"]",
")",
"# Required arguments, position dependent",
"else",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"req_args",
".",
"append",
"(",
"arg",
".",
"get",
"(",
"\"values\"",
",",
"[",
"]",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"StrArgNSArg\"",
",",
"\"NSArg\"",
",",
"\"StrArg\"",
"]",
":",
"req_args",
".",
"append",
"(",
"arg",
"[",
"\"type\"",
"]",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"func",
"]",
"[",
"\"signatures\"",
"]",
"[",
"i",
"]",
"[",
"\"req_args\"",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"req_args",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"func",
"]",
"[",
"\"signatures\"",
"]",
"[",
"i",
"]",
"[",
"\"pos_args\"",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"pos_args",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"func",
"]",
"[",
"\"signatures\"",
"]",
"[",
"i",
"]",
"[",
"\"opt_args\"",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"opt_args",
")",
"spec_dict",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"func",
"]",
"[",
"\"signatures\"",
"]",
"[",
"i",
"]",
"[",
"\"mult_args\"",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"mult_args",
")",
"return",
"spec_dict"
] | Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict | [
"Enhance",
"function",
"signatures"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L479-L542 |
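A toy signature showing how arguments sort into the four buckets; the argument values are invented for illustration.

spec = {"functions": {"signatures": {"p": {"signatures": [{
    "arguments": [
        {"type": "NSArg",    "values": ["Protein"]},                  # required, positional
        {"type": "Modifier", "values": ["loc"], "optional": True},    # optional, position independent
        {"type": "StrArg",   "values": ["text"], "multiple": True},   # repeatable
    ],
}]}}}}
enhance_function_signatures(spec)

sig = spec["functions"]["signatures"]["p"]["signatures"][0]
assert sig["req_args"]  == ["NSArg"]
assert sig["opt_args"]  == ["loc"]
assert sig["mult_args"] == ["StrArg"]
assert sig["pos_args"]  == []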
belbio/bel | bel/lang/bel_specification.py | get_ebnf_template | def get_ebnf_template():
"""Get EBNF template from Github belbio/bel_specifications repo"""
spec_dir = config["bel"]["lang"]["specifications"]
local_fp = f"{spec_dir}/bel.ebnf.j2"
repo_url = (
"https://api.github.com/repos/belbio/bel_specifications/contents/resources/bel.ebnf.j2"
)
params = {}
github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if github_access_token:
params = {"access_token": github_access_token}
try:
# Get download url for template file
r = requests.get(repo_url, params=params)
if r.status_code == 200:
template_url = r.json()["download_url"]
else:
log.warning("Could not get EBNF file download url from Github")
# Get template file
try:
r = requests.get(template_url, params=params, allow_redirects=True)
if r.status_code == 200:
open(local_fp, "wt").write(r.text)
else:
log.warning(
f"Could not download EBNF file from Github -- Status: {r.status_code} Msg: {r.text}"
)
except Exception as e:
log.warning(
f"Could not download EBNF file from Github -- Status: {r.status_code} Msg: {e}"
)
except Exception as e:
log.warning("Could not download BEL EBNF template file")
if not os.path.exists(f"{spec_dir}/local_fp"):
log.error("No BEL EBNF template file available")
return local_fp | python | def get_ebnf_template():
"""Get EBNF template from Github belbio/bel_specifications repo"""
spec_dir = config["bel"]["lang"]["specifications"]
local_fp = f"{spec_dir}/bel.ebnf.j2"
repo_url = (
"https://api.github.com/repos/belbio/bel_specifications/contents/resources/bel.ebnf.j2"
)
params = {}
github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if github_access_token:
params = {"access_token": github_access_token}
try:
# Get download url for template file
r = requests.get(repo_url, params=params)
if r.status_code == 200:
template_url = r.json()["download_url"]
else:
log.warning("Could not get EBNF file download url from Github")
# Get template file
try:
r = requests.get(template_url, params=params, allow_redirects=True)
if r.status_code == 200:
open(local_fp, "wt").write(r.text)
else:
log.warning(
f"Could not download EBNF file from Github -- Status: {r.status_code} Msg: {r.text}"
)
except Exception as e:
log.warning(
f"Could not download EBNF file from Github -- Status: {r.status_code} Msg: {e}"
)
except Exception as e:
log.warning("Could not download BEL EBNF template file")
if not os.path.exists(f"{spec_dir}/local_fp"):
log.error("No BEL EBNF template file available")
return local_fp | [
"def",
"get_ebnf_template",
"(",
")",
":",
"spec_dir",
"=",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"specifications\"",
"]",
"local_fp",
"=",
"f\"{spec_dir}/bel.ebnf.j2\"",
"repo_url",
"=",
"(",
"\"https://api.github.com/repos/belbio/bel_specifications/contents/resources/bel.ebnf.j2\"",
")",
"params",
"=",
"{",
"}",
"github_access_token",
"=",
"os",
".",
"getenv",
"(",
"\"GITHUB_ACCESS_TOKEN\"",
",",
"\"\"",
")",
"if",
"github_access_token",
":",
"params",
"=",
"{",
"\"access_token\"",
":",
"github_access_token",
"}",
"try",
":",
"# Get download url for template file",
"r",
"=",
"requests",
".",
"get",
"(",
"repo_url",
",",
"params",
"=",
"params",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"template_url",
"=",
"r",
".",
"json",
"(",
")",
"[",
"\"download_url\"",
"]",
"else",
":",
"log",
".",
"warning",
"(",
"\"Could not get EBNF file download url from Github\"",
")",
"# Get template file",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"template_url",
",",
"params",
"=",
"params",
",",
"allow_redirects",
"=",
"True",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"open",
"(",
"local_fp",
",",
"\"wt\"",
")",
".",
"write",
"(",
"r",
".",
"text",
")",
"else",
":",
"log",
".",
"warning",
"(",
"f\"Could not download EBNF file from Github -- Status: {r.status_code} Msg: {r.text}\"",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"f\"Could not download EBNF file from Github -- Status: {r.status_code} Msg: {e}\"",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"\"Could not download BEL EBNF template file\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"f\"{spec_dir}/local_fp\"",
")",
":",
"log",
".",
"error",
"(",
"\"No BEL EBNF template file available\"",
")",
"return",
"local_fp"
] | Get EBNF template from Github belbio/bel_specifications repo | [
"Get",
"EBNF",
"template",
"from",
"Github",
"belbio",
"/",
"bel_specifications",
"repo"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L545-L589 |
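Two caveats worth noting in the fetch above: `template_url` is only bound when the first request succeeds, and GitHub has since dropped support for `access_token` as a query parameter. A header-based variant of the same lookup, offered as a sketch rather than the project's actual code:

import os
import requests

headers = {}
token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if token:
    headers["Authorization"] = f"token {token}"  # query-param tokens are rejected by GitHub now

r = requests.get(
    "https://api.github.com/repos/belbio/bel_specifications/contents/resources/bel.ebnf.j2",
    headers=headers,
)
template_url = r.json()["download_url"] if r.status_code == 200 else None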
belbio/bel | bel/lang/bel_specification.py | create_ebnf_parser | def create_ebnf_parser(files):
"""Create EBNF files and EBNF-based parsers"""
flag = False
for belspec_fn in files:
# Get EBNF Jinja template from Github if enabled
if config["bel"]["lang"]["specification_github_repo"]:
tmpl_fn = get_ebnf_template()
# Check if EBNF file is more recent than belspec_fn
ebnf_fn = belspec_fn.replace(".yaml", ".ebnf")
if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn):
with open(belspec_fn, "r") as f:
belspec = yaml.load(f, Loader=yaml.SafeLoader)
tmpl_dir = os.path.dirname(tmpl_fn)
tmpl_basename = os.path.basename(tmpl_fn)
bel_major_version = belspec["version"].split(".")[0]
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(tmpl_dir)
) # create environment for template
template = env.get_template(tmpl_basename) # get the template
# replace template placeholders with appropriate variables
relations_list = [
(relation, belspec["relations"]["info"][relation]["abbreviation"])
for relation in belspec["relations"]["info"]
]
relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True)
functions_list = [
(function, belspec["functions"]["info"][function]["abbreviation"])
for function in belspec["functions"]["info"]
if belspec["functions"]["info"][function]["type"] == "primary"
]
functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True)
modifiers_list = [
(function, belspec["functions"]["info"][function]["abbreviation"])
for function in belspec["functions"]["info"]
if belspec["functions"]["info"][function]["type"] == "modifier"
]
modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True)
created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p")
ebnf = template.render(
functions=functions_list,
m_functions=modifiers_list,
relations=relations_list,
bel_version=belspec["version"],
bel_major_version=bel_major_version,
created_time=created_time,
)
with open(ebnf_fn, "w") as f:
f.write(ebnf)
parser_fn = ebnf_fn.replace(".ebnf", "_parser.py")
parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn)
flag = True
with open(parser_fn, "wt") as f:
f.write(parser)
if flag:
# In case we created new parser modules
importlib.invalidate_caches() | python | def create_ebnf_parser(files):
"""Create EBNF files and EBNF-based parsers"""
flag = False
for belspec_fn in files:
# Get EBNF Jinja template from Github if enabled
if config["bel"]["lang"]["specification_github_repo"]:
tmpl_fn = get_ebnf_template()
# Check if EBNF file is more recent than belspec_fn
ebnf_fn = belspec_fn.replace(".yaml", ".ebnf")
if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn):
with open(belspec_fn, "r") as f:
belspec = yaml.load(f, Loader=yaml.SafeLoader)
tmpl_dir = os.path.dirname(tmpl_fn)
tmpl_basename = os.path.basename(tmpl_fn)
bel_major_version = belspec["version"].split(".")[0]
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(tmpl_dir)
) # create environment for template
template = env.get_template(tmpl_basename) # get the template
# replace template placeholders with appropriate variables
relations_list = [
(relation, belspec["relations"]["info"][relation]["abbreviation"])
for relation in belspec["relations"]["info"]
]
relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True)
functions_list = [
(function, belspec["functions"]["info"][function]["abbreviation"])
for function in belspec["functions"]["info"]
if belspec["functions"]["info"][function]["type"] == "primary"
]
functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True)
modifiers_list = [
(function, belspec["functions"]["info"][function]["abbreviation"])
for function in belspec["functions"]["info"]
if belspec["functions"]["info"][function]["type"] == "modifier"
]
modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True)
created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p")
ebnf = template.render(
functions=functions_list,
m_functions=modifiers_list,
relations=relations_list,
bel_version=belspec["version"],
bel_major_version=bel_major_version,
created_time=created_time,
)
with open(ebnf_fn, "w") as f:
f.write(ebnf)
parser_fn = ebnf_fn.replace(".ebnf", "_parser.py")
parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn)
flag = True
with open(parser_fn, "wt") as f:
f.write(parser)
if flag:
# In case we created new parser modules
importlib.invalidate_caches() | [
"def",
"create_ebnf_parser",
"(",
"files",
")",
":",
"flag",
"=",
"False",
"for",
"belspec_fn",
"in",
"files",
":",
"# Get EBNF Jinja template from Github if enabled",
"if",
"config",
"[",
"\"bel\"",
"]",
"[",
"\"lang\"",
"]",
"[",
"\"specification_github_repo\"",
"]",
":",
"tmpl_fn",
"=",
"get_ebnf_template",
"(",
")",
"# Check if EBNF file is more recent than belspec_fn",
"ebnf_fn",
"=",
"belspec_fn",
".",
"replace",
"(",
"\".yaml\"",
",",
"\".ebnf\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"ebnf_fn",
")",
"or",
"os",
".",
"path",
".",
"getmtime",
"(",
"belspec_fn",
")",
">",
"os",
".",
"path",
".",
"getmtime",
"(",
"ebnf_fn",
")",
":",
"with",
"open",
"(",
"belspec_fn",
",",
"\"r\"",
")",
"as",
"f",
":",
"belspec",
"=",
"yaml",
".",
"load",
"(",
"f",
",",
"Loader",
"=",
"yaml",
".",
"SafeLoader",
")",
"tmpl_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"tmpl_fn",
")",
"tmpl_basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"tmpl_fn",
")",
"bel_major_version",
"=",
"belspec",
"[",
"\"version\"",
"]",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"env",
"=",
"jinja2",
".",
"Environment",
"(",
"loader",
"=",
"jinja2",
".",
"FileSystemLoader",
"(",
"tmpl_dir",
")",
")",
"# create environment for template",
"template",
"=",
"env",
".",
"get_template",
"(",
"tmpl_basename",
")",
"# get the template",
"# replace template placeholders with appropriate variables",
"relations_list",
"=",
"[",
"(",
"relation",
",",
"belspec",
"[",
"\"relations\"",
"]",
"[",
"\"info\"",
"]",
"[",
"relation",
"]",
"[",
"\"abbreviation\"",
"]",
")",
"for",
"relation",
"in",
"belspec",
"[",
"\"relations\"",
"]",
"[",
"\"info\"",
"]",
"]",
"relations_list",
"=",
"sorted",
"(",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"relations_list",
")",
")",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"functions_list",
"=",
"[",
"(",
"function",
",",
"belspec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"function",
"]",
"[",
"\"abbreviation\"",
"]",
")",
"for",
"function",
"in",
"belspec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"if",
"belspec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"function",
"]",
"[",
"\"type\"",
"]",
"==",
"\"primary\"",
"]",
"functions_list",
"=",
"sorted",
"(",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"functions_list",
")",
")",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"modifiers_list",
"=",
"[",
"(",
"function",
",",
"belspec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"function",
"]",
"[",
"\"abbreviation\"",
"]",
")",
"for",
"function",
"in",
"belspec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"if",
"belspec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"function",
"]",
"[",
"\"type\"",
"]",
"==",
"\"modifier\"",
"]",
"modifiers_list",
"=",
"sorted",
"(",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"modifiers_list",
")",
")",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"created_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%B %d, %Y - %I:%M:%S%p\"",
")",
"ebnf",
"=",
"template",
".",
"render",
"(",
"functions",
"=",
"functions_list",
",",
"m_functions",
"=",
"modifiers_list",
",",
"relations",
"=",
"relations_list",
",",
"bel_version",
"=",
"belspec",
"[",
"\"version\"",
"]",
",",
"bel_major_version",
"=",
"bel_major_version",
",",
"created_time",
"=",
"created_time",
",",
")",
"with",
"open",
"(",
"ebnf_fn",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"ebnf",
")",
"parser_fn",
"=",
"ebnf_fn",
".",
"replace",
"(",
"\".ebnf\"",
",",
"\"_parser.py\"",
")",
"parser",
"=",
"tatsu",
".",
"to_python_sourcecode",
"(",
"ebnf",
",",
"filename",
"=",
"parser_fn",
")",
"flag",
"=",
"True",
"with",
"open",
"(",
"parser_fn",
",",
"\"wt\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"parser",
")",
"if",
"flag",
":",
"# In case we created new parser modules",
"importlib",
".",
"invalidate_caches",
"(",
")"
] | Create EBNF files and EBNF-based parsers | [
"Create",
"EBNF",
"files",
"and",
"EBNF",
"-",
"based",
"parsers"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L592-L661 |
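Note that `tmpl_fn` above is only assigned when `specification_github_repo` is enabled, so a cached local template must already exist otherwise. For quick experiments, the rendered grammar can also be compiled in memory with tatsu instead of writing a `_parser.py` module -- a sketch, assuming `ebnf` holds the rendered grammar text; the sample statement is illustrative:

import tatsu

model = tatsu.compile(ebnf)                       # build the parser in memory
ast = model.parse("p(HGNC:AKT1) increases p(HGNC:EGF)")
print(ast)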
belbio/bel | bel/lang/bel_specification.py | get_function_help | def get_function_help(function: str, bel_spec: BELSpec):
"""Get function_help given function name
This will get the function summary template (argument summary in signature)
and the argument help listing.
"""
function_long = bel_spec["functions"]["to_long"].get(function)
function_help = []
if function_long:
for signature in bel_spec["functions"]["signatures"][function_long]["signatures"]:
function_help.append(
{
"function_summary": signature["argument_summary"],
"argument_help": signature["argument_help_listing"],
"description": bel_spec["functions"]["info"][function_long]["description"],
}
)
return function_help | python | def get_function_help(function: str, bel_spec: BELSpec):
"""Get function_help given function name
This will get the function summary template (argument summary in signature)
and the argument help listing.
"""
function_long = bel_spec["functions"]["to_long"].get(function)
function_help = []
if function_long:
for signature in bel_spec["functions"]["signatures"][function_long]["signatures"]:
function_help.append(
{
"function_summary": signature["argument_summary"],
"argument_help": signature["argument_help_listing"],
"description": bel_spec["functions"]["info"][function_long]["description"],
}
)
return function_help | [
"def",
"get_function_help",
"(",
"function",
":",
"str",
",",
"bel_spec",
":",
"BELSpec",
")",
":",
"function_long",
"=",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"to_long\"",
"]",
".",
"get",
"(",
"function",
")",
"function_help",
"=",
"[",
"]",
"if",
"function_long",
":",
"for",
"signature",
"in",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"function_long",
"]",
"[",
"\"signatures\"",
"]",
":",
"function_help",
".",
"append",
"(",
"{",
"\"function_summary\"",
":",
"signature",
"[",
"\"argument_summary\"",
"]",
",",
"\"argument_help\"",
":",
"signature",
"[",
"\"argument_help_listing\"",
"]",
",",
"\"description\"",
":",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"info\"",
"]",
"[",
"function_long",
"]",
"[",
"\"description\"",
"]",
",",
"}",
")",
"return",
"function_help"
] | Get function_help given function name
This will get the function summary template (argument summary in signature)
and the argument help listing. | [
"Get",
"function_help",
"given",
"function",
"name"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_specification.py#L664-L684 |
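A usage sketch, assuming `bel_spec` is an enhanced spec dict (see belspec_yaml2json above); either the short or long function name works, since the lookup goes through to_long.

for entry in get_function_help("p", bel_spec):
    print(entry["function_summary"])
    print(entry["description"])
    for line in entry["argument_help"]:
        print("  " + line)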
belbio/bel | bel/lang/completion.py | in_span | def in_span(loc: int, span: Span) -> bool:
"""Checks if loc is inside span"""
if loc >= span[0] and loc <= span[1]:
return True
else:
return False | python | def in_span(loc: int, span: Span) -> bool:
"""Checks if loc is inside span"""
if loc >= span[0] and loc <= span[1]:
return True
else:
return False | [
"def",
"in_span",
"(",
"loc",
":",
"int",
",",
"span",
":",
"Span",
")",
"->",
"bool",
":",
"if",
"loc",
">=",
"span",
"[",
"0",
"]",
"and",
"loc",
"<=",
"span",
"[",
"1",
"]",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Checks if loc is inside span | [
"Checks",
"if",
"loc",
"is",
"inside",
"span"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L35-L41 |
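Both span endpoints are inclusive, which matches the block-cursor convention described in cursor() below; a quick check:

assert in_span(1, (1, 3))
assert in_span(3, (1, 3))       # the end index is inside the span too
assert not in_span(4, (1, 3))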
belbio/bel | bel/lang/completion.py | cursor | def cursor(
belstr: str, ast: AST, cursor_loc: int, result: Mapping[str, Any] = None
) -> Mapping[str, Any]:
"""Find BEL function or argument at cursor location
Args:
belstr: BEL String used to create the completion_text
ast (Mapping[str, Any]): AST (dict) of BEL String
cursor_loc (int): given cursor location from input field
cursor_loc starts at 0, think of it like a block cursor covering each char
result: used to recursively return the result
Returns:
result dict
"""
log.debug(f"SubAST: {json.dumps(ast, indent=4)}")
# Recurse down through subject, object, nested to functions
log.debug(f"Cursor keys {ast.keys()}, BELStr: {belstr}")
if len(belstr) == 0:
return {"type": "Function", "replace_span": (0, 0), "completion_text": ""}
if "relation" in ast and in_span(cursor_loc, ast["relation"]["span"]):
log.debug("In relation")
completion_text = belstr[ast["relation"]["span"][0] : cursor_loc + 1]
return {
"type": "Relation",
"replace_span": ast["relation"]["span"],
"completion_text": completion_text,
}
# Handle subject, object and nested keys in tree
elif "span" not in ast and isinstance(ast, dict):
for key in ast:
if key in ["subject", "object", "nested"]:
log.debug(f"Recursing Keys {key}")
result = cursor(belstr, ast[key], cursor_loc, result=result)
if result:
return result
# Matches Functions, NSArgs and StrArgs/StrArgNSArg
if "span" in ast and in_span(cursor_loc, ast["span"]):
log.debug("Inside subject/object subAST")
if "function" in ast:
name_span = ast["function"]["name_span"]
if in_span(cursor_loc, name_span):
return {
"type": "Function",
"replace_span": name_span,
"completion_text": belstr[name_span[0] : cursor_loc + 1],
}
for idx, arg in enumerate(ast["args"]):
if (
cursor_loc == ast["function"]["parens_span"][0]
and ast["function"]["parens_span"][1] == -1
):
return {
"type": "StrArg", # default type if unknown
"arg_idx": idx,
"replace_span": arg["span"], # replace entire strarg
"parent_function": ast["function"]["name"],
"completion_text": "",
}
elif in_span(cursor_loc, arg["span"]):
log.debug(
f'In argument span {arg["span"]} Cursor_loc: {cursor_loc}'
)
if arg["type"] == "Function":
if in_span(cursor_loc, arg["function"]["name_span"]):
log.debug("Found replace_span in args: Function type")
return {
"type": "Function",
"replace_span": arg["function"][
"name_span"
], # replace function name only
"arg_idx": idx,
"args": copy.deepcopy(ast["args"]),
"parent_function": ast["function"]["name"],
"completion_text": belstr[
arg["function"]["name_span"][0] : cursor_loc + 1
],
}
else:
log.debug(f'Recursing Function {arg["span"]}')
result = cursor(belstr, arg, cursor_loc, result=result)
elif arg["type"] == "NSArg":
result = {
"type": "NSArg",
"replace_span": arg["span"], # replace entire nsarg
"arg_idx": idx,
"args": copy.deepcopy(ast["args"]),
"parent_function": ast["function"]["name"],
}
# Filter on namespace and query on ns_val chars up to cursor_loc
if in_span(cursor_loc, arg["nsarg"]["ns_val_span"]):
result["namespace"] = arg["nsarg"][
"ns"
] # provide namespace for nsarg filtering
result["completion_text"] = belstr[
arg["nsarg"]["ns_val_span"][0] : cursor_loc + 1
]
# Query on nsarg chars up to cursor_loc
else:
result["completion_text"] = belstr[
arg["nsarg"]["ns_span"][0] : cursor_loc + 1
]
log.debug(f"Found replace_span in args: NSArg {result}")
return result
elif (
arg["type"] == "StrArg"
): # in case this is a default namespace StrArg
if (
arg["span"][0] == arg["span"][1]
): # handle case like p() cursor=2
completion_text = arg["arg"]
else:
completion_text = belstr[arg["span"][0] : cursor_loc + 1]
return {
"type": "StrArg",
"arg_idx": idx,
"replace_span": arg["span"], # replace entire strarg
"parent_function": ast["function"]["name"],
"completion_text": completion_text.lstrip(),
}
return result | python | def cursor(
belstr: str, ast: AST, cursor_loc: int, result: Mapping[str, Any] = None
) -> Mapping[str, Any]:
"""Find BEL function or argument at cursor location
Args:
belstr: BEL String used to create the completion_text
ast (Mapping[str, Any]): AST (dict) of BEL String
cursor_loc (int): given cursor location from input field
cursor_loc starts at 0, think of it like a block cursor covering each char
result: used to recursively return the result
Returns:
result dict
"""
log.debug(f"SubAST: {json.dumps(ast, indent=4)}")
# Recurse down through subject, object, nested to functions
log.debug(f"Cursor keys {ast.keys()}, BELStr: {belstr}")
if len(belstr) == 0:
return {"type": "Function", "replace_span": (0, 0), "completion_text": ""}
if "relation" in ast and in_span(cursor_loc, ast["relation"]["span"]):
log.debug("In relation")
completion_text = belstr[ast["relation"]["span"][0] : cursor_loc + 1]
return {
"type": "Relation",
"replace_span": ast["relation"]["span"],
"completion_text": completion_text,
}
# Handle subject, object and nested keys in tree
elif "span" not in ast and isinstance(ast, dict):
for key in ast:
if key in ["subject", "object", "nested"]:
log.debug(f"Recursing Keys {key}")
result = cursor(belstr, ast[key], cursor_loc, result=result)
if result:
return result
# Matches Functions, NSArgs and StrArgs/StrArgNSArg
if "span" in ast and in_span(cursor_loc, ast["span"]):
log.debug("Inside subject/object subAST")
if "function" in ast:
name_span = ast["function"]["name_span"]
if in_span(cursor_loc, name_span):
return {
"type": "Function",
"replace_span": name_span,
"completion_text": belstr[name_span[0] : cursor_loc + 1],
}
for idx, arg in enumerate(ast["args"]):
if (
cursor_loc == ast["function"]["parens_span"][0]
and ast["function"]["parens_span"][1] == -1
):
return {
"type": "StrArg", # default type if unknown
"arg_idx": idx,
"replace_span": arg["span"], # replace entire strarg
"parent_function": ast["function"]["name"],
"completion_text": "",
}
elif in_span(cursor_loc, arg["span"]):
log.debug(
f'In argument span {arg["span"]} Cursor_loc: {cursor_loc}'
)
if arg["type"] == "Function":
if in_span(cursor_loc, arg["function"]["name_span"]):
log.debug("Found replace_span in args: Function type")
return {
"type": "Function",
"replace_span": arg["function"][
"name_span"
], # replace function name only
"arg_idx": idx,
"args": copy.deepcopy(ast["args"]),
"parent_function": ast["function"]["name"],
"completion_text": belstr[
arg["function"]["name_span"][0] : cursor_loc + 1
],
}
else:
log.debug(f'Recursing Function {arg["span"]}')
result = cursor(belstr, arg, cursor_loc, result=result)
elif arg["type"] == "NSArg":
result = {
"type": "NSArg",
"replace_span": arg["span"], # replace entire nsarg
"arg_idx": idx,
"args": copy.deepcopy(ast["args"]),
"parent_function": ast["function"]["name"],
}
# Filter on namespace and query on ns_val chars up to cursor_loc
if in_span(cursor_loc, arg["nsarg"]["ns_val_span"]):
result["namespace"] = arg["nsarg"][
"ns"
] # provide namespace for nsarg filtering
result["completion_text"] = belstr[
arg["nsarg"]["ns_val_span"][0] : cursor_loc + 1
]
# Query on nsarg chars up to cursor_loc
else:
result["completion_text"] = belstr[
arg["nsarg"]["ns_span"][0] : cursor_loc + 1
]
log.debug(f"Found replace_span in args: NSArg {result}")
return result
elif (
arg["type"] == "StrArg"
): # in case this is a default namespace StrArg
if (
arg["span"][0] == arg["span"][1]
): # handle case like p() cursor=2
completion_text = arg["arg"]
else:
completion_text = belstr[arg["span"][0] : cursor_loc + 1]
return {
"type": "StrArg",
"arg_idx": idx,
"replace_span": arg["span"], # replace entire strarg
"parent_function": ast["function"]["name"],
"completion_text": completion_text.lstrip(),
}
return result | [
"def",
"cursor",
"(",
"belstr",
":",
"str",
",",
"ast",
":",
"AST",
",",
"cursor_loc",
":",
"int",
",",
"result",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"log",
".",
"debug",
"(",
"f\"SubAST: {json.dumps(ast, indent=4)}\"",
")",
"# Recurse down through subject, object, nested to functions",
"log",
".",
"debug",
"(",
"f\"Cursor keys {ast.keys()}, BELStr: {belstr}\"",
")",
"if",
"len",
"(",
"belstr",
")",
"==",
"0",
":",
"return",
"{",
"\"type\"",
":",
"\"Function\"",
",",
"\"replace_span\"",
":",
"(",
"0",
",",
"0",
")",
",",
"\"completion_text\"",
":",
"\"\"",
"}",
"if",
"\"relation\"",
"in",
"ast",
"and",
"in_span",
"(",
"cursor_loc",
",",
"ast",
"[",
"\"relation\"",
"]",
"[",
"\"span\"",
"]",
")",
":",
"log",
".",
"debug",
"(",
"\"In relation\"",
")",
"completion_text",
"=",
"belstr",
"[",
"ast",
"[",
"\"relation\"",
"]",
"[",
"\"span\"",
"]",
"[",
"0",
"]",
":",
"cursor_loc",
"+",
"1",
"]",
"return",
"{",
"\"type\"",
":",
"\"Relation\"",
",",
"\"replace_span\"",
":",
"ast",
"[",
"\"relation\"",
"]",
"[",
"\"span\"",
"]",
",",
"\"completion_text\"",
":",
"completion_text",
",",
"}",
"# Handle subject, object and nested keys in tree",
"elif",
"\"span\"",
"not",
"in",
"ast",
"and",
"isinstance",
"(",
"ast",
",",
"dict",
")",
":",
"for",
"key",
"in",
"ast",
":",
"if",
"key",
"in",
"[",
"\"subject\"",
",",
"\"object\"",
",",
"\"nested\"",
"]",
":",
"log",
".",
"debug",
"(",
"f\"Recursing Keys {key}\"",
")",
"result",
"=",
"cursor",
"(",
"belstr",
",",
"ast",
"[",
"key",
"]",
",",
"cursor_loc",
",",
"result",
"=",
"result",
")",
"if",
"result",
":",
"return",
"result",
"# Matches Functions, NSArgs and StrArgs/StrArgNSArg",
"if",
"\"span\"",
"in",
"ast",
"and",
"in_span",
"(",
"cursor_loc",
",",
"ast",
"[",
"\"span\"",
"]",
")",
":",
"log",
".",
"debug",
"(",
"\"Inside subject/object subAST\"",
")",
"if",
"\"function\"",
"in",
"ast",
":",
"name_span",
"=",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"name_span\"",
"]",
"if",
"in_span",
"(",
"cursor_loc",
",",
"name_span",
")",
":",
"return",
"{",
"\"type\"",
":",
"\"Function\"",
",",
"\"replace_span\"",
":",
"name_span",
",",
"\"completion_text\"",
":",
"belstr",
"[",
"name_span",
"[",
"0",
"]",
":",
"cursor_loc",
"+",
"1",
"]",
",",
"}",
"for",
"idx",
",",
"arg",
"in",
"enumerate",
"(",
"ast",
"[",
"\"args\"",
"]",
")",
":",
"if",
"(",
"cursor_loc",
"==",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"parens_span\"",
"]",
"[",
"0",
"]",
"and",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"parens_span\"",
"]",
"[",
"1",
"]",
"==",
"-",
"1",
")",
":",
"return",
"{",
"\"type\"",
":",
"\"StrArg\"",
",",
"# default type if unknown",
"\"arg_idx\"",
":",
"idx",
",",
"\"replace_span\"",
":",
"arg",
"[",
"\"span\"",
"]",
",",
"# replace entire strarg",
"\"parent_function\"",
":",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"name\"",
"]",
",",
"\"completion_text\"",
":",
"\"\"",
",",
"}",
"elif",
"in_span",
"(",
"cursor_loc",
",",
"arg",
"[",
"\"span\"",
"]",
")",
":",
"log",
".",
"debug",
"(",
"f'In argument span {arg[\"span\"]} Cursor_loc: {cursor_loc}'",
")",
"if",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
":",
"if",
"in_span",
"(",
"cursor_loc",
",",
"arg",
"[",
"\"function\"",
"]",
"[",
"\"name_span\"",
"]",
")",
":",
"log",
".",
"debug",
"(",
"\"Found replace_span in args: Function type\"",
")",
"return",
"{",
"\"type\"",
":",
"\"Function\"",
",",
"\"replace_span\"",
":",
"arg",
"[",
"\"function\"",
"]",
"[",
"\"name_span\"",
"]",
",",
"# replace function name only",
"\"arg_idx\"",
":",
"idx",
",",
"\"args\"",
":",
"copy",
".",
"deepcopy",
"(",
"ast",
"[",
"\"args\"",
"]",
")",
",",
"\"parent_function\"",
":",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"name\"",
"]",
",",
"\"completion_text\"",
":",
"belstr",
"[",
"arg",
"[",
"\"function\"",
"]",
"[",
"\"name_span\"",
"]",
"[",
"0",
"]",
":",
"cursor_loc",
"+",
"1",
"]",
",",
"}",
"else",
":",
"log",
".",
"debug",
"(",
"f'Recursing Function {arg[\"span\"]}'",
")",
"result",
"=",
"cursor",
"(",
"belstr",
",",
"arg",
",",
"cursor_loc",
",",
"result",
"=",
"result",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"NSArg\"",
":",
"result",
"=",
"{",
"\"type\"",
":",
"\"NSArg\"",
",",
"\"replace_span\"",
":",
"arg",
"[",
"\"span\"",
"]",
",",
"# replace entire nsarg",
"\"arg_idx\"",
":",
"idx",
",",
"\"args\"",
":",
"copy",
".",
"deepcopy",
"(",
"ast",
"[",
"\"args\"",
"]",
")",
",",
"\"parent_function\"",
":",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"name\"",
"]",
",",
"}",
"# Filter on namespace and query on ns_val chars up to cursor_loc",
"if",
"in_span",
"(",
"cursor_loc",
",",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns_val_span\"",
"]",
")",
":",
"result",
"[",
"\"namespace\"",
"]",
"=",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns\"",
"]",
"# provide namespace for nsarg filtering",
"result",
"[",
"\"completion_text\"",
"]",
"=",
"belstr",
"[",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns_val_span\"",
"]",
"[",
"0",
"]",
":",
"cursor_loc",
"+",
"1",
"]",
"# Query on nsarg chars up to cursor_loc",
"else",
":",
"result",
"[",
"\"completion_text\"",
"]",
"=",
"belstr",
"[",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns_span\"",
"]",
"[",
"0",
"]",
":",
"cursor_loc",
"+",
"1",
"]",
"log",
".",
"debug",
"(",
"f\"Found replace_span in args: NSArg {result}\"",
")",
"return",
"result",
"elif",
"(",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArg\"",
")",
":",
"# in case this is a default namespace StrArg",
"if",
"(",
"arg",
"[",
"\"span\"",
"]",
"[",
"0",
"]",
"==",
"arg",
"[",
"\"span\"",
"]",
"[",
"1",
"]",
")",
":",
"# handle case like p() cursor=2",
"completion_text",
"=",
"arg",
"[",
"\"arg\"",
"]",
"else",
":",
"completion_text",
"=",
"belstr",
"[",
"arg",
"[",
"\"span\"",
"]",
"[",
"0",
"]",
":",
"cursor_loc",
"+",
"1",
"]",
"return",
"{",
"\"type\"",
":",
"\"StrArg\"",
",",
"\"arg_idx\"",
":",
"idx",
",",
"\"replace_span\"",
":",
"arg",
"[",
"\"span\"",
"]",
",",
"# replace entire strarg",
"\"parent_function\"",
":",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"name\"",
"]",
",",
"\"completion_text\"",
":",
"completion_text",
".",
"lstrip",
"(",
")",
",",
"}",
"return",
"result"
] | Find BEL function or argument at cursor location
Args:
belstr: BEL String used to create the completion_text
ast (Mapping[str, Any]): AST (dict) of BEL String
cursor_loc (int): given cursor location from input field
cursor_loc starts at 0, think of it like a block cursor covering each char
result: used to recursively return the result
Returns:
result dict | [
"Find",
"BEL",
"function",
"or",
"argument",
"at",
"cursor",
"location"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L44-L175 |
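
For orientation, the dicts `cursor` returns take one of a few shapes depending on what sits under the cursor. The spans and strings below are illustrative stand-ins, not output from a real parse:

```python
# Illustrative return shapes only; spans and texts are invented for this sketch.
function_hit = {"type": "Function", "replace_span": (0, 0), "completion_text": "p"}
relation_hit = {"type": "Relation", "replace_span": (13, 21), "completion_text": "increases"}
nsarg_hit = {
    "type": "NSArg",
    "replace_span": (2, 10),     # the whole nsarg gets replaced
    "arg_idx": 0,
    "args": [],                  # deep copy of the sibling args in the real result
    "parent_function": "p",
    "namespace": "HGNC",         # only present when the cursor is past the ns prefix
    "completion_text": "HGNC:AK",
}
```
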
belbio/bel | bel/lang/completion.py | nsarg_completions | def nsarg_completions(
completion_text: str,
entity_types: list,
bel_spec: BELSpec,
namespace: str,
species_id: str,
bel_fmt: str,
size: int,
):
"""Namespace completions
Args:
completion_text: completion prefix string
entity_types: used to filter namespace search results
bel_spec: used to search default namespaces
namespace: used to filter namespace search results
species_id: used to filter namespace search results
bel_fmt: used to select full name or abbrev for default namespaces
size: how many completions to return
Results:
list of replacement text objects
"""
minimal_nsarg_completion_len = 1
species = [species_id]
namespaces = [namespace]
replace_list = []
if len(completion_text) >= minimal_nsarg_completion_len:
# Use BEL.bio API module if running bel module in BEL.bio API, otherwise call BEL.bio API endpoint
# is there a better way to handle this?
url = f'{config["bel_api"]["servers"]["api_url"]}/terms/completions/{url_path_param_quoting(completion_text)}'
params = {
"size": size,
"entity_types": entity_types,
"namespaces": namespaces,
"species": species,
}
r = get_url(url, params=params)
if r.status_code == 200:
ns_completions = r.json()
else:
log.error(f"Status code of {r.status_code} for {url}")
ns_completions = {}
for complete in ns_completions.get("completions", []):
replace_list.append(
{
"replacement": complete["id"],
"label": f"{complete['id']} ({complete['label']})",
"highlight": complete["highlight"][-1],
"type": "NSArg",
}
)
# Check default namespaces
for entity_type in entity_types:
default_namespace = bel_spec["namespaces"].get(entity_type, [])
if default_namespace:
for obj in default_namespace["info"]:
replacement = None
if bel_fmt == "long" and re.match(
completion_text, obj["name"], re.IGNORECASE
):
replacement = obj["name"]
elif bel_fmt in ["short", "medium"] and re.match(
completion_text, obj["abbreviation"], re.IGNORECASE
):
replacement = obj["abbreviation"]
if replacement:
highlight = replacement.replace(
completion_text, f"<em>{completion_text}</em>"
)
replace_list.insert(
0,
{
"replacement": replacement,
"label": replacement,
"highlight": highlight,
"type": "NSArg",
},
)
return replace_list[:size] | python | def nsarg_completions(
completion_text: str,
entity_types: list,
bel_spec: BELSpec,
namespace: str,
species_id: str,
bel_fmt: str,
size: int,
):
"""Namespace completions
Args:
completion_text: completion prefix string
entity_types: used to filter namespace search results
bel_spec: used to search default namespaces
namespace: used to filter namespace search results
species_id: used to filter namespace search results
bel_fmt: used to select full name or abbrev for default namespaces
size: how many completions to return
Results:
list of replacement text objects
"""
minimal_nsarg_completion_len = 1
species = [species_id]
namespaces = [namespace]
replace_list = []
if len(completion_text) >= minimal_nsarg_completion_len:
# Use BEL.bio API module if running bel module in BEL.bio API, otherwise call BEL.bio API endpoint
# is there a better way to handle this?
url = f'{config["bel_api"]["servers"]["api_url"]}/terms/completions/{url_path_param_quoting(completion_text)}'
params = {
"size": size,
"entity_types": entity_types,
"namespaces": namespaces,
"species": species,
}
r = get_url(url, params=params)
if r.status_code == 200:
ns_completions = r.json()
else:
log.error(f"Status code of {r.status_code} for {url}")
ns_completions = {}
for complete in ns_completions.get("completions", []):
replace_list.append(
{
"replacement": complete["id"],
"label": f"{complete['id']} ({complete['label']})",
"highlight": complete["highlight"][-1],
"type": "NSArg",
}
)
# Check default namespaces
for entity_type in entity_types:
default_namespace = bel_spec["namespaces"].get(entity_type, [])
if default_namespace:
for obj in default_namespace["info"]:
replacement = None
if bel_fmt == "long" and re.match(
completion_text, obj["name"], re.IGNORECASE
):
replacement = obj["name"]
elif bel_fmt in ["short", "medium"] and re.match(
completion_text, obj["abbreviation"], re.IGNORECASE
):
replacement = obj["abbreviation"]
if replacement:
highlight = replacement.replace(
completion_text, f"<em>{completion_text}</em>"
)
replace_list.insert(
0,
{
"replacement": replacement,
"label": replacement,
"highlight": highlight,
"type": "NSArg",
},
)
return replace_list[:size] | [
"def",
"nsarg_completions",
"(",
"completion_text",
":",
"str",
",",
"entity_types",
":",
"list",
",",
"bel_spec",
":",
"BELSpec",
",",
"namespace",
":",
"str",
",",
"species_id",
":",
"str",
",",
"bel_fmt",
":",
"str",
",",
"size",
":",
"int",
",",
")",
":",
"minimal_nsarg_completion_len",
"=",
"1",
"species",
"=",
"[",
"species_id",
"]",
"namespaces",
"=",
"[",
"namespace",
"]",
"replace_list",
"=",
"[",
"]",
"if",
"len",
"(",
"completion_text",
")",
">=",
"minimal_nsarg_completion_len",
":",
"# Use BEL.bio API module if running bel module in BEL.bio API, otherwise call BEL.bio API endpoint",
"# is there a better way to handle this?",
"url",
"=",
"f'{config[\"bel_api\"][\"servers\"][\"api_url\"]}/terms/completions/{url_path_param_quoting(completion_text)}'",
"params",
"=",
"{",
"\"size\"",
":",
"size",
",",
"\"entity_types\"",
":",
"entity_types",
",",
"\"namespaces\"",
":",
"namespaces",
",",
"\"species\"",
":",
"species",
",",
"}",
"r",
"=",
"get_url",
"(",
"url",
",",
"params",
"=",
"params",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"ns_completions",
"=",
"r",
".",
"json",
"(",
")",
"else",
":",
"log",
".",
"error",
"(",
"f\"Status code of {r.status_code} for {url}\"",
")",
"ns_completions",
"=",
"{",
"}",
"for",
"complete",
"in",
"ns_completions",
".",
"get",
"(",
"\"completions\"",
",",
"[",
"]",
")",
":",
"replace_list",
".",
"append",
"(",
"{",
"\"replacement\"",
":",
"complete",
"[",
"\"id\"",
"]",
",",
"\"label\"",
":",
"f\"{complete['id']} ({complete['label']})\"",
",",
"\"highlight\"",
":",
"complete",
"[",
"\"highlight\"",
"]",
"[",
"-",
"1",
"]",
",",
"\"type\"",
":",
"\"NSArg\"",
",",
"}",
")",
"# Check default namespaces",
"for",
"entity_type",
"in",
"entity_types",
":",
"default_namespace",
"=",
"bel_spec",
"[",
"\"namespaces\"",
"]",
".",
"get",
"(",
"entity_type",
",",
"[",
"]",
")",
"if",
"default_namespace",
":",
"for",
"obj",
"in",
"default_namespace",
"[",
"\"info\"",
"]",
":",
"replacement",
"=",
"None",
"if",
"bel_fmt",
"==",
"\"long\"",
"and",
"re",
".",
"match",
"(",
"completion_text",
",",
"obj",
"[",
"\"name\"",
"]",
",",
"re",
".",
"IGNORECASE",
")",
":",
"replacement",
"=",
"obj",
"[",
"\"name\"",
"]",
"elif",
"bel_fmt",
"in",
"[",
"\"short\"",
",",
"\"medium\"",
"]",
"and",
"re",
".",
"match",
"(",
"completion_text",
",",
"obj",
"[",
"\"abbreviation\"",
"]",
",",
"re",
".",
"IGNORECASE",
")",
":",
"replacement",
"=",
"obj",
"[",
"\"abbreviation\"",
"]",
"if",
"replacement",
":",
"highlight",
"=",
"replacement",
".",
"replace",
"(",
"completion_text",
",",
"f\"<em>{completion_text}</em>\"",
")",
"replace_list",
".",
"insert",
"(",
"0",
",",
"{",
"\"replacement\"",
":",
"replacement",
",",
"\"label\"",
":",
"replacement",
",",
"\"highlight\"",
":",
"highlight",
",",
"\"type\"",
":",
"\"NSArg\"",
",",
"}",
",",
")",
"return",
"replace_list",
"[",
":",
"size",
"]"
] | Namespace completions
Args:
completion_text: completion prefix string
entity_types: used to filter namespace search results
bel_spec: used to search default namespaces
namespace: used to filter namespace search results
species_id: used to filter namespace search results
bel_fmt: used to select full name or abbrev for default namespaces
size: how many completions to return
Results:
list of replacement text objects | [
"Namespace",
"completions"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L178-L266 |
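
The default-namespace branch reduces to an anchored, case-insensitive prefix match against either the long name or the abbreviation. A self-contained re-creation with a hypothetical namespace entry:

```python
import re

# Hypothetical default-namespace entry; the real ones come from bel_spec["namespaces"].
default_namespace_info = [{"name": "Phosphorylation", "abbreviation": "Ph"}]
completion_text = "phos"
matches = [obj["name"] for obj in default_namespace_info
           if re.match(completion_text, obj["name"], re.IGNORECASE)]
assert matches == ["Phosphorylation"]
```
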
belbio/bel | bel/lang/completion.py | relation_completions | def relation_completions(
completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int
) -> list:
"""Filter BEL relations by prefix
Args:
completion_text: completion prefix string
bel_spec: BEL specification
bel_fmt: short, medium, long BEL formats
size: how many completions to return
Returns:
list: list of BEL relations that match prefix
"""
if bel_fmt == "short":
relation_list = bel_spec["relations"]["list_short"]
else:
relation_list = bel_spec["relations"]["list_long"]
matches = []
for r in relation_list:
if re.match(completion_text, r):
matches.append(r)
replace_list = []
for match in matches:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
replace_list.append(
{
"replacement": match,
"label": match,
"highlight": highlight,
"type": "Relation",
}
)
return replace_list[:size] | python | def relation_completions(
completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int
) -> list:
"""Filter BEL relations by prefix
Args:
completion_text: completion prefix string
bel_spec: BEL specification
bel_fmt: short, medium, long BEL formats
size: how many completions to return
Returns:
list: list of BEL relations that match prefix
"""
if bel_fmt == "short":
relation_list = bel_spec["relations"]["list_short"]
else:
relation_list = bel_spec["relations"]["list_long"]
matches = []
for r in relation_list:
if re.match(completion_text, r):
matches.append(r)
replace_list = []
for match in matches:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
replace_list.append(
{
"replacement": match,
"label": match,
"highlight": highlight,
"type": "Relation",
}
)
return replace_list[:size] | [
"def",
"relation_completions",
"(",
"completion_text",
":",
"str",
",",
"bel_spec",
":",
"BELSpec",
",",
"bel_fmt",
":",
"str",
",",
"size",
":",
"int",
")",
"->",
"list",
":",
"if",
"bel_fmt",
"==",
"\"short\"",
":",
"relation_list",
"=",
"bel_spec",
"[",
"\"relations\"",
"]",
"[",
"\"list_short\"",
"]",
"else",
":",
"relation_list",
"=",
"bel_spec",
"[",
"\"relations\"",
"]",
"[",
"\"list_long\"",
"]",
"matches",
"=",
"[",
"]",
"for",
"r",
"in",
"relation_list",
":",
"if",
"re",
".",
"match",
"(",
"completion_text",
",",
"r",
")",
":",
"matches",
".",
"append",
"(",
"r",
")",
"replace_list",
"=",
"[",
"]",
"for",
"match",
"in",
"matches",
":",
"highlight",
"=",
"match",
".",
"replace",
"(",
"completion_text",
",",
"f\"<em>{completion_text}</em>\"",
")",
"replace_list",
".",
"append",
"(",
"{",
"\"replacement\"",
":",
"match",
",",
"\"label\"",
":",
"match",
",",
"\"highlight\"",
":",
"highlight",
",",
"\"type\"",
":",
"\"Relation\"",
",",
"}",
")",
"return",
"replace_list",
"[",
":",
"size",
"]"
] | Filter BEL relations by prefix
Args:
completion_text: completion prefix string
bel_spec: BEL specification
bel_fmt: short, medium, long BEL formats
size: how many completions to return
Returns:
list: list of BEL relations that match prefix | [
"Filter",
"BEL",
"relations",
"by",
"prefix"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L269-L305 |
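
A minimal re-creation of the prefix filter and `<em>` highlighting, with a hand-picked relation list instead of the spec. Because `re.match` anchors at the start of the string, `directlyIncreases` is excluded even though it contains the substring:

```python
import re

relations = ["increases", "decreases", "directlyIncreases"]  # hand-picked, not from a spec
completion_text = "incr"
replace_list = [
    {"replacement": r, "label": r, "type": "Relation",
     "highlight": r.replace(completion_text, f"<em>{completion_text}</em>")}
    for r in relations if re.match(completion_text, r)
]
assert [r["replacement"] for r in replace_list] == ["increases"]
assert replace_list[0]["highlight"] == "<em>incr</em>eases"
```
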
belbio/bel | bel/lang/completion.py | function_completions | def function_completions(
completion_text: str,
bel_spec: BELSpec,
function_list: list,
bel_fmt: str,
size: int,
) -> list:
"""Filter BEL functions by prefix
Args:
prefix: completion string
bel_fmt: short, medium, long BEL formats
spec: BEL specification
Returns:
list: list of BEL functions that match prefix
"""
# Convert provided function list to correct bel_fmt
if isinstance(function_list, list):
if bel_fmt in ["short", "medium"]:
function_list = [
bel_spec["functions"]["to_short"][fn] for fn in function_list
]
else:
function_list = [
bel_spec["functions"]["to_long"][fn] for fn in function_list
]
elif bel_fmt in ["short", "medium"]:
function_list = bel_spec["functions"]["primary"]["list_short"]
else:
function_list = bel_spec["functions"]["primary"]["list_long"]
matches = []
for f in function_list:
escaped_completion_text = completion_text.replace(r"(", r"\(").replace(
r")", r"\)"
)
log.debug(f"Completion match: {escaped_completion_text} F: {f}")
if re.match(escaped_completion_text, f):
matches.append(f)
replace_list = []
for match in matches:
if completion_text:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
else:
highlight = completion_text
replace_list.append(
{
"replacement": match,
"label": f"{match}()",
"highlight": highlight,
"type": "Function",
}
)
return replace_list[:size] | python | def function_completions(
completion_text: str,
bel_spec: BELSpec,
function_list: list,
bel_fmt: str,
size: int,
) -> list:
"""Filter BEL functions by prefix
Args:
completion_text: completion prefix string
bel_spec: BEL specification
function_list: optional list of allowed functions
bel_fmt: short, medium, long BEL formats
size: how many completions to return
Returns:
list: list of BEL functions that match prefix
"""
# Convert provided function list to correct bel_fmt
if isinstance(function_list, list):
if bel_fmt in ["short", "medium"]:
function_list = [
bel_spec["functions"]["to_short"][fn] for fn in function_list
]
else:
function_list = [
bel_spec["functions"]["to_long"][fn] for fn in function_list
]
elif bel_fmt in ["short", "medium"]:
function_list = bel_spec["functions"]["primary"]["list_short"]
else:
function_list = bel_spec["functions"]["primary"]["list_long"]
matches = []
for f in function_list:
escaped_completion_text = completion_text.replace(r"(", r"\(").replace(
r")", r"\)"
)
log.debug(f"Completion match: {escaped_completion_text} F: {f}")
if re.match(escaped_completion_text, f):
matches.append(f)
replace_list = []
for match in matches:
if completion_text:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
else:
highlight = completion_text
replace_list.append(
{
"replacement": match,
"label": f"{match}()",
"highlight": highlight,
"type": "Function",
}
)
return replace_list[:size] | [
"def",
"function_completions",
"(",
"completion_text",
":",
"str",
",",
"bel_spec",
":",
"BELSpec",
",",
"function_list",
":",
"list",
",",
"bel_fmt",
":",
"str",
",",
"size",
":",
"int",
",",
")",
"->",
"list",
":",
"# Convert provided function list to correct bel_fmt",
"if",
"isinstance",
"(",
"function_list",
",",
"list",
")",
":",
"if",
"bel_fmt",
"in",
"[",
"\"short\"",
",",
"\"medium\"",
"]",
":",
"function_list",
"=",
"[",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"to_short\"",
"]",
"[",
"fn",
"]",
"for",
"fn",
"in",
"function_list",
"]",
"else",
":",
"function_list",
"=",
"[",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"to_long\"",
"]",
"[",
"fn",
"]",
"for",
"fn",
"in",
"function_list",
"]",
"elif",
"bel_fmt",
"in",
"[",
"\"short\"",
",",
"\"medium\"",
"]",
":",
"function_list",
"=",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"[",
"\"list_short\"",
"]",
"else",
":",
"function_list",
"=",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"primary\"",
"]",
"[",
"\"list_long\"",
"]",
"matches",
"=",
"[",
"]",
"for",
"f",
"in",
"function_list",
":",
"escaped_completion_text",
"=",
"completion_text",
".",
"replace",
"(",
"r\"(\"",
",",
"r\"\\(\"",
")",
".",
"replace",
"(",
"r\")\"",
",",
"r\"\\)\"",
")",
"log",
".",
"debug",
"(",
"f\"Completion match: {escaped_completion_text} F: {f}\"",
")",
"if",
"re",
".",
"match",
"(",
"escaped_completion_text",
",",
"f",
")",
":",
"matches",
".",
"append",
"(",
"f",
")",
"replace_list",
"=",
"[",
"]",
"for",
"match",
"in",
"matches",
":",
"if",
"completion_text",
":",
"highlight",
"=",
"match",
".",
"replace",
"(",
"completion_text",
",",
"f\"<em>{completion_text}</em>\"",
")",
"else",
":",
"highlight",
"=",
"completion_text",
"replace_list",
".",
"append",
"(",
"{",
"\"replacement\"",
":",
"match",
",",
"\"label\"",
":",
"f\"{match}()\"",
",",
"\"highlight\"",
":",
"highlight",
",",
"\"type\"",
":",
"\"Function\"",
",",
"}",
")",
"return",
"replace_list",
"[",
":",
"size",
"]"
] | Filter BEL functions by prefix
Args:
completion_text: completion prefix string
bel_spec: BEL specification
function_list: optional list of allowed functions
bel_fmt: short, medium, long BEL formats
size: how many completions to return
Returns:
list: list of BEL functions that match prefix | [
"Filter",
"BEL",
"functions",
"by",
"prefix"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L308-L366 |
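
The parenthesis escaping matters because the completion text is fed straight into `re.match` as a pattern; an unbalanced "(" would raise `re.error`. A small demonstration:

```python
import re

completion_text = "p("  # the user has typed an open paren
escaped = completion_text.replace("(", r"\(").replace(")", r"\)")
assert escaped == r"p\("
assert re.match(escaped, "p(") is not None
# re.match("p(", "p(") would instead raise re.error (unbalanced parenthesis)
```
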
belbio/bel | bel/lang/completion.py | arg_completions | def arg_completions(
completion_text: str,
parent_function: str,
args: list,
arg_idx: int,
bel_spec: BELSpec,
bel_fmt: str,
species_id: str,
namespace: str,
size: int,
):
"""Function argument completion
Only allow legal options for completion given function name, arguments and index of argument
to replace.
Args:
completion_text: text to use for completion - used for creating highlight
parent_function: BEL function containing these args
args: arguments of BEL function
arg_idx: completing on this argument identified by this index
bel_spec: BEL Specification
bel_fmt: short, medium, long BEL function/relation formats
species_id: filter on this species id, e.g. TAX:9606 if available
namespace: filter on this namespace if available
size: number of completions to return
Return:
list of replacements
"""
function_long = bel_spec["functions"]["to_long"].get(parent_function)
if not function_long:
return []
signatures = bel_spec["functions"]["signatures"][function_long]["signatures"]
# Position based argument ###################################
function_list = []
entity_types = []
fn_replace_list, ns_arg_replace_list = [], []
position_flag = False # Signature matches position-based argument
# Check for position based argument
for signature in signatures:
sig_arg = signature["arguments"][arg_idx]
sig_type = sig_arg["type"]
if sig_arg.get("position", False) and arg_idx == sig_arg["position"] - 1:
position_flag = True
if sig_type in ["Function", "Modifier"]:
function_list.extend(sig_arg["values"])
elif sig_type in ["NSArg", "StrArgNSArg"]:
entity_types.extend(sig_arg["values"])
if not position_flag:
# Collect optional and multiple signature arguments for completion
opt_fn_sig_args = []
opt_nsarg_sig_args = []
mult_fn_sig_args = []
mult_nsarg_sig_args = []
for signature in signatures:
signature_opt_fn_sig_args = []
signature_opt_nsarg_sig_args = []
signature_mult_fn_sig_args = []
signature_mult_nsarg_sig_args = []
max_position = -1
for sig_arg in signature["arguments"]:
if "position" in sig_arg:
max_position = sig_arg["position"]
continue # Skip position based signature arguments
if (
sig_arg.get("optional", False) is True
and sig_arg.get("multiple", False) is False
):
if sig_arg["type"] in ["Function", "Modifier"]:
signature_opt_fn_sig_args.extend(sig_arg["values"])
elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
signature_opt_nsarg_sig_args.extend(sig_arg["values"])
elif sig_arg.get("multiple", False) is True:
if sig_arg["type"] in ["Function", "Modifier"]:
signature_mult_fn_sig_args.extend(sig_arg["values"])
elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
signature_mult_nsarg_sig_args.extend(sig_arg["values"])
# Remove signature non-multiple, optional arguments that are already in args list
for idx, arg in enumerate(args):
if idx <= max_position - 1: # Skip positional arguments
continue
if idx == arg_idx: # Skip argument to be completed
continue
log.debug(f"Remove Optional Args {arg} {signature_opt_fn_sig_args}")
opt_fn_sig_args.extend(signature_opt_fn_sig_args)
opt_nsarg_sig_args.extend(signature_opt_nsarg_sig_args)
mult_fn_sig_args.extend(signature_mult_fn_sig_args)
mult_nsarg_sig_args.extend(signature_mult_nsarg_sig_args)
function_list.extend(list(set(opt_fn_sig_args + mult_fn_sig_args)))
entity_types.extend(list(set(opt_nsarg_sig_args + mult_nsarg_sig_args)))
if function_list:
log.debug(f"ArgComp - position-based Function list: {function_list}")
fn_replace_list = function_completions(
completion_text, bel_spec, function_list, bel_fmt, size
)
if entity_types:
log.debug(f"ArgComp - position-based Entity types: {entity_types}")
ns_arg_replace_list = nsarg_completions(
completion_text,
entity_types,
bel_spec,
namespace,
species_id,
bel_fmt,
size,
)
replace_list = fn_replace_list + ns_arg_replace_list
return replace_list | python | def arg_completions(
completion_text: str,
parent_function: str,
args: list,
arg_idx: int,
bel_spec: BELSpec,
bel_fmt: str,
species_id: str,
namespace: str,
size: int,
):
"""Function argument completion
Only allow legal options for completion given function name, arguments and index of argument
to replace.
Args:
completion_text: text to use for completion - used for creating highlight
parent_function: BEL function containing these args
args: arguments of BEL function
arg_idx: completing on this argument identified by this index
bel_spec: BEL Specification
bel_fmt: short, medium, long BEL function/relation formats
species_id: filter on this species id, e.g. TAX:9606 if available
namespace: filter on this namespace if available
size: number of completions to return
Return:
list of replacements
"""
function_long = bel_spec["functions"]["to_long"].get(parent_function)
if not function_long:
return []
signatures = bel_spec["functions"]["signatures"][function_long]["signatures"]
# Position based argument ###################################
function_list = []
entity_types = []
fn_replace_list, ns_arg_replace_list = [], []
position_flag = False # Signature matches position-based argument
# Check for position based argument
for signature in signatures:
sig_arg = signature["arguments"][arg_idx]
sig_type = sig_arg["type"]
if sig_arg.get("position", False) and arg_idx == sig_arg["position"] - 1:
position_flag = True
if sig_type in ["Function", "Modifier"]:
function_list.extend(sig_arg["values"])
elif sig_type in ["NSArg", "StrArgNSArg"]:
entity_types.extend(sig_arg["values"])
if not position_flag:
# Collect optional and multiple signature arguments for completion
opt_fn_sig_args = []
opt_nsarg_sig_args = []
mult_fn_sig_args = []
mult_nsarg_sig_args = []
for signature in signatures:
signature_opt_fn_sig_args = []
signature_opt_nsarg_sig_args = []
signature_mult_fn_sig_args = []
signature_mult_nsarg_sig_args = []
max_position = -1
for sig_arg in signature["arguments"]:
if "position" in sig_arg:
max_position = sig_arg["position"]
continue # Skip position based signature arguments
if (
sig_arg.get("optional", False) is True
and sig_arg.get("multiple", False) is False
):
if sig_arg["type"] in ["Function", "Modifier"]:
signature_opt_fn_sig_args.extend(sig_arg["values"])
elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
signature_opt_nsarg_sig_args.extend(sig_arg["values"])
elif sig_arg.get("multiple", False) is True:
if sig_arg["type"] in ["Function", "Modifier"]:
signature_mult_fn_sig_args.extend(sig_arg["values"])
elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
signature_mult_nsarg_sig_args.extend(sig_arg["values"])
# Remove signature non-multiple, optional arguments that are already in args list
for idx, arg in enumerate(args):
if idx <= max_position - 1: # Skip positional arguments
continue
if idx == arg_idx: # Skip argument to be completed
continue
log.debug(f"Remove Optional Args {arg} {signature_opt_fn_sig_args}")
opt_fn_sig_args.extend(signature_opt_fn_sig_args)
opt_nsarg_sig_args.extend(signature_opt_nsarg_sig_args)
mult_fn_sig_args.extend(signature_mult_fn_sig_args)
mult_nsarg_sig_args.extend(signature_mult_nsarg_sig_args)
function_list.extend(list(set(opt_fn_sig_args + mult_fn_sig_args)))
entity_types.extend(list(set(opt_nsarg_sig_args + mult_nsarg_sig_args)))
if function_list:
log.debug(f"ArgComp - position-based Function list: {function_list}")
fn_replace_list = function_completions(
completion_text, bel_spec, function_list, bel_fmt, size
)
if entity_types:
log.debug(f"ArgComp - position-based Entity types: {entity_types}")
ns_arg_replace_list = nsarg_completions(
completion_text,
entity_types,
bel_spec,
namespace,
species_id,
bel_fmt,
size,
)
replace_list = fn_replace_list + ns_arg_replace_list
return replace_list | [
"def",
"arg_completions",
"(",
"completion_text",
":",
"str",
",",
"parent_function",
":",
"str",
",",
"args",
":",
"list",
",",
"arg_idx",
":",
"int",
",",
"bel_spec",
":",
"BELSpec",
",",
"bel_fmt",
":",
"str",
",",
"species_id",
":",
"str",
",",
"namespace",
":",
"str",
",",
"size",
":",
"int",
",",
")",
":",
"function_long",
"=",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"to_long\"",
"]",
".",
"get",
"(",
"parent_function",
")",
"if",
"not",
"function_long",
":",
"return",
"[",
"]",
"signatures",
"=",
"bel_spec",
"[",
"\"functions\"",
"]",
"[",
"\"signatures\"",
"]",
"[",
"function_long",
"]",
"[",
"\"signatures\"",
"]",
"# Position based argument ###################################",
"function_list",
"=",
"[",
"]",
"entity_types",
"=",
"[",
"]",
"fn_replace_list",
",",
"ns_arg_replace_list",
"=",
"[",
"]",
",",
"[",
"]",
"position_flag",
"=",
"False",
"# Signature matches position-based argument",
"# Check for position based argument",
"for",
"signature",
"in",
"signatures",
":",
"sig_arg",
"=",
"signature",
"[",
"\"arguments\"",
"]",
"[",
"arg_idx",
"]",
"sig_type",
"=",
"sig_arg",
"[",
"\"type\"",
"]",
"if",
"sig_arg",
".",
"get",
"(",
"\"position\"",
",",
"False",
")",
"and",
"arg_idx",
"==",
"sig_arg",
"[",
"\"position\"",
"]",
"-",
"1",
":",
"position_flag",
"=",
"True",
"if",
"sig_type",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"function_list",
".",
"extend",
"(",
"sig_arg",
"[",
"\"values\"",
"]",
")",
"elif",
"sig_type",
"in",
"[",
"\"NSArg\"",
",",
"\"StrArgNSArg\"",
"]",
":",
"entity_types",
".",
"extend",
"(",
"sig_arg",
"[",
"\"values\"",
"]",
")",
"if",
"not",
"position_flag",
":",
"# Collect optional and multiple signature arguments for completion",
"opt_fn_sig_args",
"=",
"[",
"]",
"opt_nsarg_sig_args",
"=",
"[",
"]",
"mult_fn_sig_args",
"=",
"[",
"]",
"mult_nsarg_sig_args",
"=",
"[",
"]",
"for",
"signature",
"in",
"signatures",
":",
"signature_opt_fn_sig_args",
"=",
"[",
"]",
"signature_opt_nsarg_sig_args",
"=",
"[",
"]",
"signature_mult_fn_sig_args",
"=",
"[",
"]",
"signature_mult_nsarg_sig_args",
"=",
"[",
"]",
"max_position",
"=",
"-",
"1",
"for",
"sig_arg",
"in",
"signature",
"[",
"\"arguments\"",
"]",
":",
"if",
"\"position\"",
"in",
"sig_arg",
":",
"max_position",
"=",
"sig_arg",
"[",
"\"position\"",
"]",
"continue",
"# Skip position based signature arguments",
"if",
"(",
"sig_arg",
".",
"get",
"(",
"\"optional\"",
",",
"False",
")",
"is",
"True",
"and",
"sig_arg",
".",
"get",
"(",
"\"multiple\"",
",",
"False",
")",
"is",
"False",
")",
":",
"if",
"sig_arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"signature_opt_fn_sig_args",
".",
"extend",
"(",
"sig_arg",
"[",
"\"values\"",
"]",
")",
"elif",
"sig_arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"NSArg\"",
",",
"\"StrArgNSArg\"",
"]",
":",
"signature_opt_nsarg_sig_args",
".",
"extend",
"(",
"sig_arg",
"[",
"\"values\"",
"]",
")",
"elif",
"sig_arg",
".",
"get",
"(",
"\"multiple\"",
",",
"False",
")",
"is",
"True",
":",
"if",
"sig_arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"Function\"",
",",
"\"Modifier\"",
"]",
":",
"signature_mult_fn_sig_args",
".",
"extend",
"(",
"sig_arg",
"[",
"\"values\"",
"]",
")",
"elif",
"sig_arg",
"[",
"\"type\"",
"]",
"in",
"[",
"\"NSArg\"",
",",
"\"StrArgNSArg\"",
"]",
":",
"signature_mult_nsarg_sig_args",
".",
"extend",
"(",
"sig_arg",
"[",
"\"values\"",
"]",
")",
"# Remove signature non-multiple, optional arguments that are already in args list",
"for",
"idx",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"idx",
"<=",
"max_position",
"-",
"1",
":",
"# Skip positional arguments",
"continue",
"if",
"idx",
"==",
"arg_idx",
":",
"# Skip argument to be completed",
"continue",
"log",
".",
"debug",
"(",
"f\"Remove Optional Args {arg} {signature_opt_fn_sig_args}\"",
")",
"opt_fn_sig_args",
".",
"extend",
"(",
"signature_opt_fn_sig_args",
")",
"opt_nsarg_sig_args",
".",
"extend",
"(",
"signature_opt_nsarg_sig_args",
")",
"mult_fn_sig_args",
".",
"extend",
"(",
"signature_mult_fn_sig_args",
")",
"mult_nsarg_sig_args",
".",
"extend",
"(",
"signature_mult_nsarg_sig_args",
")",
"function_list",
".",
"extend",
"(",
"list",
"(",
"set",
"(",
"opt_fn_sig_args",
"+",
"mult_fn_sig_args",
")",
")",
")",
"entity_types",
".",
"extend",
"(",
"list",
"(",
"set",
"(",
"opt_nsarg_sig_args",
"+",
"mult_nsarg_sig_args",
")",
")",
")",
"if",
"function_list",
":",
"log",
".",
"debug",
"(",
"f\"ArgComp - position-based Function list: {function_list}\"",
")",
"fn_replace_list",
"=",
"function_completions",
"(",
"completion_text",
",",
"bel_spec",
",",
"function_list",
",",
"bel_fmt",
",",
"size",
")",
"if",
"entity_types",
":",
"log",
".",
"debug",
"(",
"f\"ArgComp - position-based Entity types: {entity_types}\"",
")",
"ns_arg_replace_list",
"=",
"nsarg_completions",
"(",
"completion_text",
",",
"entity_types",
",",
"bel_spec",
",",
"namespace",
",",
"species_id",
",",
"bel_fmt",
",",
"size",
",",
")",
"replace_list",
"=",
"fn_replace_list",
"+",
"ns_arg_replace_list",
"return",
"replace_list"
] | Function argument completion
Only allow legal options for completion given function name, arguments and index of argument
to replace.
Args:
completion_text: text to use for completion - used for creating highlight
parent_function: BEL function containing these args
args: arguments of BEL function
arg_idx: completing on this argument identified by this index
bel_spec: BEL Specification
bel_fmt: short, medium, long BEL function/relation formats
species_id: filter on this species id, e.g. TAX:9606 if available
namespace: filter on this namespace if available
size: number of completions to return
Return:
list of replacements | [
"Function",
"argument",
"completion"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L369-L491 |
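
The position-based branch in isolation, run against a hand-built signature fragment. The key names follow the code above; the values are invented for the sketch:

```python
signature = {"arguments": [
    {"type": "NSArg", "position": 1, "values": ["Protein"]},  # invented values
]}
arg_idx = 0
entity_types = []
sig_arg = signature["arguments"][arg_idx]
# position is 1-based in the signature, arg_idx is 0-based, hence the -1
if sig_arg.get("position", False) and arg_idx == sig_arg["position"] - 1:
    if sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
        entity_types.extend(sig_arg["values"])
assert entity_types == ["Protein"]
```
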
belbio/bel | bel/lang/completion.py | add_completions | def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
"""Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
"""
completions = []
for r in replace_list:
# if '(' not in belstr:
# replacement = f'{r["replacement"]}()'
# cursor_loc = len(replacement) - 1 # inside parenthesis
# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
# Put a space between comma and following function arg
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
# Put a space between comma and following NSArg or StrArg
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
# Add function to end of belstr
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1 # inside parenthesis
log.debug(f"Replacement: {replacement}")
# Insert replacement in beginning or middle of belstr
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
) # move cursor just past replacement
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions | python | def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
"""Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
"""
completions = []
for r in replace_list:
# if '(' not in belstr:
# replacement = f'{r["replacement"]}()'
# cursor_loc = len(replacement) - 1 # inside parenthesis
# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
# Put a space between comma and following function arg
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
# Put a space between comma and following NSArg or StrArg
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
# Add function to end of belstr
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1 # inside parenthesis
log.debug(f"Replacement: {replacement}")
# Insert replacement in beginning or middle of belstr
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
) # move cursor just past replacement
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions | [
"def",
"add_completions",
"(",
"replace_list",
":",
"list",
",",
"belstr",
":",
"str",
",",
"replace_span",
":",
"Span",
",",
"completion_text",
":",
"str",
")",
"->",
"List",
"[",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"completions",
"=",
"[",
"]",
"for",
"r",
"in",
"replace_list",
":",
"# if '(' not in belstr:",
"# replacement = f'{r[\"replacement\"]}()'",
"# cursor_loc = len(replacement) - 1 # inside parenthesis",
"# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:",
"if",
"len",
"(",
"belstr",
")",
">",
"0",
":",
"belstr_end",
"=",
"len",
"(",
"belstr",
")",
"-",
"1",
"else",
":",
"belstr_end",
"=",
"0",
"log",
".",
"debug",
"(",
"f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r[\"type\"] == \"Function\"} Test2 {replace_span[1] + 1 == len(belstr)}'",
")",
"# Put a space between comma and following function arg",
"if",
"(",
"r",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
"and",
"replace_span",
"[",
"0",
"]",
">",
"0",
"and",
"belstr",
"[",
"replace_span",
"[",
"0",
"]",
"-",
"1",
"]",
"==",
"\",\"",
")",
":",
"log",
".",
"debug",
"(",
"\"prior char is a comma\"",
")",
"replacement",
"=",
"(",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"f\"{r['replacement']}()\"",
"+",
"belstr",
"[",
"replace_span",
"[",
"1",
"]",
"+",
"1",
":",
"]",
")",
"cursor_loc",
"=",
"len",
"(",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"f\"{r['replacement']}()\"",
")",
"# Put a space between comman and following NSArg or StrArg",
"elif",
"replace_span",
"[",
"0",
"]",
">",
"0",
"and",
"belstr",
"[",
"replace_span",
"[",
"0",
"]",
"-",
"1",
"]",
"==",
"\",\"",
":",
"log",
".",
"debug",
"(",
"\"prior char is a comma\"",
")",
"replacement",
"=",
"(",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"r",
"[",
"\"replacement\"",
"]",
"+",
"belstr",
"[",
"replace_span",
"[",
"1",
"]",
"+",
"1",
":",
"]",
")",
"cursor_loc",
"=",
"len",
"(",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"\" \"",
"+",
"r",
"[",
"\"replacement\"",
"]",
")",
"# Add function to end of belstr",
"elif",
"r",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
"and",
"replace_span",
"[",
"1",
"]",
">=",
"belstr_end",
":",
"replacement",
"=",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"f\"{r['replacement']}()\"",
"cursor_loc",
"=",
"len",
"(",
"replacement",
")",
"-",
"1",
"# inside parenthesis",
"log",
".",
"debug",
"(",
"f\"Replacement: {replacement}\"",
")",
"# Insert replacement in beginning or middle of belstr",
"else",
":",
"replacement",
"=",
"(",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"r",
"[",
"\"replacement\"",
"]",
"+",
"belstr",
"[",
"replace_span",
"[",
"1",
"]",
"+",
"1",
":",
"]",
")",
"cursor_loc",
"=",
"len",
"(",
"belstr",
"[",
"0",
":",
"replace_span",
"[",
"0",
"]",
"]",
"+",
"r",
"[",
"\"replacement\"",
"]",
")",
"# move cursor just past replacement",
"completions",
".",
"append",
"(",
"{",
"\"replacement\"",
":",
"replacement",
",",
"\"cursor_loc\"",
":",
"cursor_loc",
",",
"\"highlight\"",
":",
"r",
"[",
"\"highlight\"",
"]",
",",
"\"label\"",
":",
"r",
"[",
"\"label\"",
"]",
",",
"}",
")",
"return",
"completions"
] | Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}] | [
"Create",
"completions",
"to",
"return",
"given",
"replacement",
"list"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L494-L582 |
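
Tracing the "prior char is a comma" branch by hand for an NSArg replacement; the BEL string and spans are illustrative:

```python
belstr = "complex(HGNC:AKT1,HGNC"   # illustrative partial BEL string
replace_span = (18, 21)             # span of the trailing "HGNC"
replacement_value = "HGNC:TP53"

assert belstr[replace_span[0] - 1] == ","  # this is what triggers the comma branch
replacement = (belstr[0:replace_span[0]] + " " + replacement_value
               + belstr[replace_span[1] + 1:])
cursor_loc = len(belstr[0:replace_span[0]] + " " + replacement_value)

assert replacement == "complex(HGNC:AKT1, HGNC:TP53"
assert cursor_loc == len(replacement)  # cursor lands just past the insertion
```
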
belbio/bel | bel/lang/completion.py | get_completions | def get_completions(
belstr: str,
cursor_loc: int,
bel_spec: BELSpec,
bel_comp: str,
bel_fmt: str,
species_id: str,
size: int,
):
"""Get BEL Assertion completions
Args:
belstr: BEL String to provide completion for
cursor_loc: cursor location within belstr
bel_spec: BEL Specification
bel_comp: completion type (subject, object, full or None)
bel_fmt: short, medium, long BEL function/relation formats
species_id: optional species id used to filter namespace values
size: how many completions to return
Returns:
(completion_text, completions, function_help, spans)
"""
ast, errors = pparse.get_ast_dict(belstr)
spans = pparse.collect_spans(ast)
completion_text = ""
completions = []
function_help = []
log.debug(f"Cursor location BELstr: {belstr} Cursor idx: {cursor_loc}")
cursor_results = cursor(belstr, ast, cursor_loc)
log.debug(f"Cursor results: {cursor_results}")
if not cursor_results:
log.debug("Cursor results is empty")
return (completion_text, completions, function_help, spans)
completion_text = cursor_results.get("completion_text", "")
replace_span = cursor_results["replace_span"]
namespace = cursor_results.get("namespace", None)
if "parent_function" in cursor_results:
parent_function = cursor_results["parent_function"]
function_help = bel_specification.get_function_help(
cursor_results["parent_function"], bel_spec
)
args = cursor_results.get("args", [])
arg_idx = cursor_results.get("arg_idx")
replace_list = arg_completions(
completion_text,
parent_function,
args,
arg_idx,
bel_spec,
bel_fmt,
species_id,
namespace,
size,
)
elif cursor_results["type"] == "Function":
function_list = None
replace_list = function_completions(
completion_text, bel_spec, function_list, bel_fmt, size
)
elif cursor_results["type"] == "Relation":
replace_list = relation_completions(completion_text, bel_spec, bel_fmt, size)
completions.extend(
add_completions(replace_list, belstr, replace_span, completion_text)
)
return completion_text, completions, function_help, spans | python | def get_completions(
belstr: str,
cursor_loc: int,
bel_spec: BELSpec,
bel_comp: str,
bel_fmt: str,
species_id: str,
size: int,
):
"""Get BEL Assertion completions
Args:
belstr: BEL String to provide completion for
cursor_loc: cursor location within belstr
bel_spec: BEL Specification
bel_comp: completion type (subject, object, full or None)
bel_fmt: short, medium, long BEL function/relation formats
species_id: optional species id used to filter namespace values
size: how many completions to return
Returns:
(completion_text, completions, function_help, spans)
"""
ast, errors = pparse.get_ast_dict(belstr)
spans = pparse.collect_spans(ast)
completion_text = ""
completions = []
function_help = []
log.debug(f"Cursor location BELstr: {belstr} Cursor idx: {cursor_loc}")
cursor_results = cursor(belstr, ast, cursor_loc)
log.debug(f"Cursor results: {cursor_results}")
if not cursor_results:
log.debug("Cursor results is empty")
return (completion_text, completions, function_help, spans)
completion_text = cursor_results.get("completion_text", "")
replace_span = cursor_results["replace_span"]
namespace = cursor_results.get("namespace", None)
if "parent_function" in cursor_results:
parent_function = cursor_results["parent_function"]
function_help = bel_specification.get_function_help(
cursor_results["parent_function"], bel_spec
)
args = cursor_results.get("args", [])
arg_idx = cursor_results.get("arg_idx")
replace_list = arg_completions(
completion_text,
parent_function,
args,
arg_idx,
bel_spec,
bel_fmt,
species_id,
namespace,
size,
)
elif cursor_results["type"] == "Function":
function_list = None
replace_list = function_completions(
completion_text, bel_spec, function_list, bel_fmt, size
)
elif cursor_results["type"] == "Relation":
replace_list = relation_completions(completion_text, bel_spec, bel_fmt, size)
completions.extend(
add_completions(replace_list, belstr, replace_span, completion_text)
)
return completion_text, completions, function_help, spans | [
"def",
"get_completions",
"(",
"belstr",
":",
"str",
",",
"cursor_loc",
":",
"int",
",",
"bel_spec",
":",
"BELSpec",
",",
"bel_comp",
":",
"str",
",",
"bel_fmt",
":",
"str",
",",
"species_id",
":",
"str",
",",
"size",
":",
"int",
",",
")",
":",
"ast",
",",
"errors",
"=",
"pparse",
".",
"get_ast_dict",
"(",
"belstr",
")",
"spans",
"=",
"pparse",
".",
"collect_spans",
"(",
"ast",
")",
"completion_text",
"=",
"\"\"",
"completions",
"=",
"[",
"]",
"function_help",
"=",
"[",
"]",
"log",
".",
"debug",
"(",
"f\"Cursor location BELstr: {belstr} Cursor idx: {cursor_loc}\"",
")",
"cursor_results",
"=",
"cursor",
"(",
"belstr",
",",
"ast",
",",
"cursor_loc",
")",
"log",
".",
"debug",
"(",
"f\"Cursor results: {cursor_results}\"",
")",
"if",
"not",
"cursor_results",
":",
"log",
".",
"debug",
"(",
"\"Cursor results is empty\"",
")",
"return",
"(",
"completion_text",
",",
"completions",
",",
"function_help",
",",
"spans",
")",
"completion_text",
"=",
"cursor_results",
".",
"get",
"(",
"\"completion_text\"",
",",
"\"\"",
")",
"replace_span",
"=",
"cursor_results",
"[",
"\"replace_span\"",
"]",
"namespace",
"=",
"cursor_results",
".",
"get",
"(",
"\"namespace\"",
",",
"None",
")",
"if",
"\"parent_function\"",
"in",
"cursor_results",
":",
"parent_function",
"=",
"cursor_results",
"[",
"\"parent_function\"",
"]",
"function_help",
"=",
"bel_specification",
".",
"get_function_help",
"(",
"cursor_results",
"[",
"\"parent_function\"",
"]",
",",
"bel_spec",
")",
"args",
"=",
"cursor_results",
".",
"get",
"(",
"\"args\"",
",",
"[",
"]",
")",
"arg_idx",
"=",
"cursor_results",
".",
"get",
"(",
"\"arg_idx\"",
")",
"replace_list",
"=",
"arg_completions",
"(",
"completion_text",
",",
"parent_function",
",",
"args",
",",
"arg_idx",
",",
"bel_spec",
",",
"bel_fmt",
",",
"species_id",
",",
"namespace",
",",
"size",
",",
")",
"elif",
"cursor_results",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
":",
"function_list",
"=",
"None",
"replace_list",
"=",
"function_completions",
"(",
"completion_text",
",",
"bel_spec",
",",
"function_list",
",",
"bel_fmt",
",",
"size",
")",
"elif",
"cursor_results",
"[",
"\"type\"",
"]",
"==",
"\"Relation\"",
":",
"replace_list",
"=",
"relation_completions",
"(",
"completion_text",
",",
"bel_spec",
",",
"bel_fmt",
",",
"size",
")",
"completions",
".",
"extend",
"(",
"add_completions",
"(",
"replace_list",
",",
"belstr",
",",
"replace_span",
",",
"completion_text",
")",
")",
"return",
"completion_text",
",",
"completions",
",",
"function_help",
",",
"spans"
] | Get BEL Assertion completions
Args:
belstr: BEL String to provide completion for
cursor_loc: cursor location within belstr
bel_spec: BEL Specification
bel_comp: completion type (subject, object, full or None)
bel_fmt: short, medium, long BEL function/relation formats
species_id: optional species id used to filter namespace values
size: how many completions to return
Returns:
(completion_text, completions, function_help, spans) | [
"Get",
"BEL",
"Assertion",
"completions"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L585-L655 |
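
The 4-tuple it returns, with illustrative values; `function_help` is only populated when the cursor sits inside a function's arguments:

```python
# Illustrative values only.
completion_text = "incr"
completions = [{
    "replacement": "p(HGNC:AKT1) increases",
    "cursor_loc": 22,
    "highlight": "<em>incr</em>eases",
    "label": "increases",
}]
function_help = []  # filled via bel_specification.get_function_help inside a function
spans = []          # entity/function spans collected by pparse.collect_spans
result = (completion_text, completions, function_help, spans)
```
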
belbio/bel | bel/lang/completion.py | bel_completion | def bel_completion(
belstr: str,
cursor_loc: int = -1,
bel_version: str = default_bel_version,
bel_comp: str = None,
bel_fmt: str = "medium",
species_id: str = None,
size: int = 20,
) -> Mapping[str, Any]:
"""BEL Completion
Args:
belstr (str): BEL String to provide completion for
cursor_loc (int): cursor location - default of -1 means end of string
bel_version (str): BEL Language version to use for completion
bel_comp (str): ['subject', 'object', 'full', None] - a nested statement has to be found in object or full statement
bel_fmt (str): ['short', 'medium', 'long'] BEL function/relation format
species_id (str): optional, species id is used to filter namespace values if applicable (e.g. Gene, RNA, ... entity_types)
size: how many completions to return, defaults to 20
Returns:
Mapping[str, Any]:
{
'completions': completions,
'function_help': function_help,
'entity_spans': spans
}
"""
"""
Completion object: {
completions: [
{
'replacement': <replacement text field string,
'cursor_loc': <new cursor location>
'highlight': <highlighted match>
'label': <label for completion>
},
],
function_help: [{
"function_summary": <template>,
"argument_help": [<argument help>],
"description": <desc>
}],
"entity_spans": {<span info>}
}
"""
bel_spec = bel_specification.get_specification(bel_version)
belstrlen = len(belstr)
if cursor_loc == -1:
cursor_loc = belstrlen - 1
elif cursor_loc >= belstrlen:
cursor_loc = belstrlen - 1
# with timy.Timer() as timer:
# (completion_text, completions, function_help, spans) = get_completions(belstr, cursor_loc, bel_spec, bel_comp, bel_fmt, species_id, size)
(completion_text, completions, function_help, spans) = get_completions(
belstr, cursor_loc, bel_spec, bel_comp, bel_fmt, species_id, size
)
return {
"completion_text": completion_text,
"completions": completions,
"function_help": function_help,
"entity_spans": spans,
} | python | def bel_completion(
belstr: str,
cursor_loc: int = -1,
bel_version: str = default_bel_version,
bel_comp: str = None,
bel_fmt: str = "medium",
species_id: str = None,
size: int = 20,
) -> Mapping[str, Any]:
"""BEL Completion
Args:
belstr (str): BEL String to provide completion for
cursor_loc (int): cursor location - default of -1 means end of string
bel_version (str): BEL Language version to use for completion
bel_comp (str): ['subject', 'object', 'full', None] - a nested statement can only occur in an object or full statement
bel_fmt (str): ['short', 'medium', 'long'] BEL function/relation format
species_id (str): optional, species id is used to filter namespace values if applicable (e.g. Gene, RNA, ... entity_types)
size: how many completions to return, defaults to 20
Returns:
Mapping[str, Any]:
{
'completions': completions,
'function_help': function_help,
'entity_spans': spans
}
"""
"""
Completion object: {
completions: [
{
'replacement': <replacement text field string>,
'cursor_loc': <new cursor location>
'highlight': <highlighted match>
'label': <label for completion>
},
],
function_help: [{
"function_summary": <template>,
"argument_help": [<argument help>],
"description": <desc>
}],
"entity_spans": {<span info>}
}
"""
bel_spec = bel_specification.get_specification(bel_version)
belstrlen = len(belstr)
if cursor_loc == -1 or cursor_loc >= belstrlen:
cursor_loc = belstrlen - 1
# with timy.Timer() as timer:
# (completion_text, completions, function_help, spans) = get_completions(belstr, cursor_loc, bel_spec, bel_comp, bel_fmt, species_id, size)
(completion_text, completions, function_help, spans) = get_completions(
belstr, cursor_loc, bel_spec, bel_comp, bel_fmt, species_id, size
)
return {
"completion_text": completion_text,
"completions": completions,
"function_help": function_help,
"entity_spans": spans,
} | [
"def",
"bel_completion",
"(",
"belstr",
":",
"str",
",",
"cursor_loc",
":",
"int",
"=",
"-",
"1",
",",
"bel_version",
":",
"str",
"=",
"default_bel_version",
",",
"bel_comp",
":",
"str",
"=",
"None",
",",
"bel_fmt",
":",
"str",
"=",
"\"medium\"",
",",
"species_id",
":",
"str",
"=",
"None",
",",
"size",
":",
"int",
"=",
"20",
",",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"\"\"\"\n Completion object: {\n completions: [\n {\n 'replacement': <replacement text field string,\n 'cursor_loc': <new cursor location>\n 'highlight': <highlighted match>\n 'label': <label for completion>\n },\n ],\n function_help: [{\n \"function_summary\": <template>,\n \"argument_help\": [<argument help>],\n \"description\": <desc>\n }],\n \"entity_spans\": {<span info>}\n }\n\n \"\"\"",
"bel_spec",
"=",
"bel_specification",
".",
"get_specification",
"(",
"bel_version",
")",
"belstrlen",
"=",
"len",
"(",
"belstr",
")",
"if",
"cursor_loc",
"==",
"-",
"1",
":",
"cursor_loc",
"=",
"belstrlen",
"-",
"1",
"elif",
"cursor_loc",
">=",
"belstrlen",
":",
"cursor_loc",
"=",
"belstrlen",
"-",
"1",
"# with timy.Timer() as timer:",
"# (completion_text, completions, function_help, spans) = get_completions(belstr, cursor_loc, bel_spec, bel_comp, bel_fmt, species_id, size)",
"(",
"completion_text",
",",
"completions",
",",
"function_help",
",",
"spans",
")",
"=",
"get_completions",
"(",
"belstr",
",",
"cursor_loc",
",",
"bel_spec",
",",
"bel_comp",
",",
"bel_fmt",
",",
"species_id",
",",
"size",
")",
"return",
"{",
"\"completion_text\"",
":",
"completion_text",
",",
"\"completions\"",
":",
"completions",
",",
"\"function_help\"",
":",
"function_help",
",",
"\"entity_spans\"",
":",
"spans",
",",
"}"
] | BEL Completion
Args:
belstr (str): BEL String to provide completion for
cursor_loc (int): cursor location - default of -1 means end of string
bel_version (str): BEL Language version to use for completion
bel_comp (str): ['subject', 'object', 'full', None] - a nested statement can only occur in an object or full statement
bel_fmt (str): ['short', 'medium', 'long'] BEL function/relation format
species_id (str): optional, species id is used to filter namespace values if applicable (e.g. Gene, RNA, ... entity_types)
size: how many completions to return, defaults to 20
Returns:
Mapping[str, Any]:
{
'completions': completions,
'function_help': function_help,
'entity_spans': spans
} | [
"BEL",
"Completion"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L658-L726 |
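A hedged example of calling bel_completion() with its documented defaults; only the input string is made up, everything else follows the signature and return shape shown above.

from bel.lang.completion import bel_completion

result = bel_completion("p(HGNC:AK", bel_fmt="medium", size=5)
print(result["completion_text"])
for c in result["completions"]:
    # per the completion-object sketch in the docstring: replacement,
    # cursor_loc, highlight and label fields
    print(c.get("replacement"), c.get("label"))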
belbio/bel | bel/lang/partialparse.py | parse_chars | def parse_chars(bels: list, errors: Errors) -> Tuple[CharLocs, Errors]:
"""Scan BEL string to map parens, quotes, commas
Args:
bels: bel string as an array of characters
errors: list of error tuples ('<type>', '<msg>')
Returns:
(char_locs, errors): character locations and errors
"""
pstack, qstack, nested_pstack = [], [], []
parens, nested_parens, quotes, commas = {}, {}, {}, {}
notquoted_flag = True
for i, c in enumerate(bels):
prior_char = i - 1
# print('BEL', prior_char, bels[prior_char])
# Find starting quote
if c == '"' and bels[prior_char] != "\\" and len(qstack) == 0:
qstack.append(i)
notquoted_flag = False
# Find closing quote
elif c == '"' and bels[prior_char] != "\\":
quotes[qstack.pop()] = i
notquoted_flag = True
# Find all escaped quotes outside of quoted string
elif c == '"' and bels[prior_char] == "\\" and len(qstack) == 0:
errors.append(
(
"ERROR",
f"Escaped quote outside of quoted string at location: {i - 1}",
(i - 1, i - 1),
)
)
# Find all nested object opening parens
elif notquoted_flag and c == "(" and bels[prior_char] == " ":
if len(nested_pstack) > 1:
errors.append(
(
"ERROR",
f"More than one nested parenthesis or left parenthesis following a space character",
(i, i),
)
)
nested_pstack.append(i)
# Find all opening parens
elif notquoted_flag and c == "(" and bels[prior_char] not in ["\\"]:
pstack.append(i)
# Find all closing parens
elif notquoted_flag and c == ")" and bels[prior_char] != "\\":
if len(pstack):
if len(pstack) > 1:
parens[pstack.pop()] = (i, "child")
else:
parens[pstack.pop()] = (i, "top")
elif len(nested_pstack):
nested_parens[nested_pstack.pop()] = (i, "top")
else:
errors.append(
(
"ERROR",
f"Missing left parenthesis for right parenthesis at location {i}",
(i, i),
)
)
# Find comma outside of quoted string
elif notquoted_flag and c == "," and len(qstack) == 0:
sparen = pstack[-1]
if sparen not in commas:
commas[sparen] = [i]
else:
commas[sparen].append(i)
while len(pstack):
errors.append(
(
"ERROR",
f"Missing right parenthesis for left parenthesis at location {pstack[-1]}",
(pstack[-1], pstack[-1]),
)
)
if len(pstack) > 1:
parens[pstack.pop()] = (-1, "child")
else:
parens[pstack.pop()] = (-1, "top")
while len(nested_pstack):
errors.append(
(
"ERROR",
f"Missing right parenthesis for nested object left parenthesis at location {nested_pstack[-1]}",
(nested_pstack[-1], nested_pstack[-1]),
)
)
nested_parens[nested_pstack.pop()] = (-1, "top")
if len(qstack):
missing_quote = qstack.pop()
errors.append(
(
"ERROR",
f"Missing right quote for left quote at location {missing_quote}",
(missing_quote, missing_quote),
)
)
return (
{
"parens": parens,
"nested_parens": nested_parens,
"quotes": quotes,
"commas": commas,
},
errors,
) | python | def parse_chars(bels: list, errors: Errors) -> Tuple[CharLocs, Errors]:
"""Scan BEL string to map parens, quotes, commas
Args:
bels: bel string as an array of characters
errors: list of error tuples ('<type>', '<msg>')
Returns:
(char_locs, errors): character locations and errors
"""
pstack, qstack, nested_pstack = [], [], []
parens, nested_parens, quotes, commas = {}, {}, {}, {}
notquoted_flag = True
for i, c in enumerate(bels):
prior_char = i - 1
# print('BEL', prior_char, bels[prior_char])
# Find starting quote
if c == '"' and bels[prior_char] != "\\" and len(qstack) == 0:
qstack.append(i)
notquoted_flag = False
# Find closing quote
elif c == '"' and bels[prior_char] != "\\":
quotes[qstack.pop()] = i
notquoted_flag = True
# Find all escaped quotes outside of quoted string
elif c == '"' and bels[prior_char] == "\\" and len(qstack) == 0:
errors.append(
(
"ERROR",
f"Escaped quote outside of quoted string at location: {i - 1}",
(i - 1, i - 1),
)
)
# Find all nested object opening parens
elif notquoted_flag and c == "(" and bels[prior_char] == " ":
if len(nested_pstack) > 1:
errors.append(
(
"ERROR",
f"More than one nested parenthesis or left parenthesis following a space character",
(i, i),
)
)
nested_pstack.append(i)
# Find all opening parens
elif notquoted_flag and c == "(" and bels[prior_char] not in ["\\"]:
pstack.append(i)
# Find all closing parens
elif notquoted_flag and c == ")" and bels[prior_char] != "\\":
if len(pstack):
if len(pstack) > 1:
parens[pstack.pop()] = (i, "child")
else:
parens[pstack.pop()] = (i, "top")
elif len(nested_pstack):
nested_parens[nested_pstack.pop()] = (i, "top")
else:
errors.append(
(
"ERROR",
f"Missing left parenthesis for right parenthesis at location {i}",
(i, i),
)
)
# Find comma outside of quoted string
elif notquoted_flag and c == "," and len(qstack) == 0:
sparen = pstack[-1]
if sparen not in commas:
commas[sparen] = [i]
else:
commas[sparen].append(i)
while len(pstack):
errors.append(
(
"ERROR",
f"Missing right parenthesis for left parenthesis at location {pstack[-1]}",
(pstack[-1], pstack[-1]),
)
)
if len(pstack) > 1:
parens[pstack.pop()] = (-1, "child")
else:
parens[pstack.pop()] = (-1, "top")
while len(nested_pstack):
errors.append(
(
"ERROR",
f"Missing right parenthesis for nested object left parenthesis at location {nested_pstack[-1]}",
(nested_pstack[-1], nested_pstack[-1]),
)
)
nested_parens[nested_pstack.pop()] = (-1, "top")
if len(qstack):
missing_quote = qstack.pop()
errors.append(
(
"ERROR",
f"Missing right quote for left quote at location {missing_quote}",
(missing_quote, missing_quote),
)
)
return (
{
"parens": parens,
"nested_parens": nested_parens,
"quotes": quotes,
"commas": commas,
},
errors,
) | [
"def",
"parse_chars",
"(",
"bels",
":",
"list",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"CharLocs",
",",
"Errors",
"]",
":",
"pstack",
",",
"qstack",
",",
"nested_pstack",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"parens",
",",
"nested_parens",
",",
"quotes",
",",
"commas",
"=",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"notquoted_flag",
"=",
"True",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"bels",
")",
":",
"prior_char",
"=",
"i",
"-",
"1",
"# print('BEL', prior_char, b[prior_char])",
"# Find starting quote",
"if",
"c",
"==",
"'\"'",
"and",
"bels",
"[",
"prior_char",
"]",
"!=",
"\"\\\\\"",
"and",
"len",
"(",
"qstack",
")",
"==",
"0",
":",
"qstack",
".",
"append",
"(",
"i",
")",
"notquoted_flag",
"=",
"False",
"# Find closing quote",
"elif",
"c",
"==",
"'\"'",
"and",
"bels",
"[",
"prior_char",
"]",
"!=",
"\"\\\\\"",
":",
"quotes",
"[",
"qstack",
".",
"pop",
"(",
")",
"]",
"=",
"i",
"notquoted_flag",
"=",
"True",
"# Find all escaped quotes outside of quoted string",
"elif",
"c",
"==",
"'\"'",
"and",
"bels",
"[",
"prior_char",
"]",
"==",
"\"\\\\\"",
"and",
"len",
"(",
"qstack",
")",
"==",
"0",
":",
"errors",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"f\"Escaped quote outside of quoted string at location: {i - 1}\"",
",",
"(",
"i",
"-",
"1",
",",
"i",
"-",
"1",
")",
",",
")",
")",
"# Find all nested object opening parens",
"elif",
"notquoted_flag",
"and",
"c",
"==",
"\"(\"",
"and",
"bels",
"[",
"prior_char",
"]",
"==",
"\" \"",
":",
"if",
"len",
"(",
"nested_pstack",
")",
">",
"1",
":",
"errors",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"f\"More than one nested parenthesis or left parenthesis following a space character\"",
",",
"(",
"i",
",",
"i",
")",
",",
")",
")",
"nested_pstack",
".",
"append",
"(",
"i",
")",
"# Find all opening parens",
"elif",
"notquoted_flag",
"and",
"c",
"==",
"\"(\"",
"and",
"bels",
"[",
"prior_char",
"]",
"not",
"in",
"[",
"\"\\\\\"",
"]",
":",
"pstack",
".",
"append",
"(",
"i",
")",
"# Find all closing parens",
"elif",
"notquoted_flag",
"and",
"c",
"==",
"\")\"",
"and",
"bels",
"[",
"prior_char",
"]",
"!=",
"\"\\\\\"",
":",
"if",
"len",
"(",
"pstack",
")",
":",
"if",
"len",
"(",
"pstack",
")",
">",
"1",
":",
"parens",
"[",
"pstack",
".",
"pop",
"(",
")",
"]",
"=",
"(",
"i",
",",
"\"child\"",
")",
"else",
":",
"parens",
"[",
"pstack",
".",
"pop",
"(",
")",
"]",
"=",
"(",
"i",
",",
"\"top\"",
")",
"elif",
"len",
"(",
"nested_pstack",
")",
":",
"nested_parens",
"[",
"nested_pstack",
".",
"pop",
"(",
")",
"]",
"=",
"(",
"i",
",",
"\"top\"",
")",
"else",
":",
"errors",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"f\"Missing left parenthesis for right parenthesis at location {i}\"",
",",
"(",
"i",
",",
"i",
")",
",",
")",
")",
"# Find comma outside of quoted string",
"elif",
"notquoted_flag",
"and",
"c",
"==",
"\",\"",
"and",
"len",
"(",
"qstack",
")",
"==",
"0",
":",
"sparen",
"=",
"pstack",
"[",
"-",
"1",
"]",
"if",
"sparen",
"not",
"in",
"commas",
":",
"commas",
"[",
"sparen",
"]",
"=",
"[",
"i",
"]",
"else",
":",
"commas",
"[",
"sparen",
"]",
".",
"append",
"(",
"i",
")",
"while",
"len",
"(",
"pstack",
")",
":",
"errors",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"f\"Missing right parenthesis for left parenthesis at location {pstack[-1]}\"",
",",
"(",
"pstack",
"[",
"-",
"1",
"]",
",",
"pstack",
"[",
"-",
"1",
"]",
")",
",",
")",
")",
"if",
"len",
"(",
"pstack",
")",
">",
"1",
":",
"parens",
"[",
"pstack",
".",
"pop",
"(",
")",
"]",
"=",
"(",
"-",
"1",
",",
"\"child\"",
")",
"else",
":",
"parens",
"[",
"pstack",
".",
"pop",
"(",
")",
"]",
"=",
"(",
"-",
"1",
",",
"\"top\"",
")",
"while",
"len",
"(",
"nested_pstack",
")",
":",
"errors",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"f\"Missing right parenthesis for nested object left parenthesis at location {nested_pstack[-1]}\"",
",",
"(",
"nested_pstack",
"[",
"-",
"1",
"]",
",",
"nested_pstack",
"[",
"-",
"1",
"]",
")",
",",
")",
")",
"nested_parens",
"[",
"nested_pstack",
".",
"pop",
"(",
")",
"]",
"=",
"(",
"-",
"1",
",",
"\"top\"",
")",
"if",
"len",
"(",
"qstack",
")",
":",
"missing_quote",
"=",
"qstack",
".",
"pop",
"(",
")",
"errors",
".",
"append",
"(",
"(",
"\"ERROR\"",
",",
"f\"Missing right quote for left quote at location {missing_quote}\"",
",",
"(",
"missing_quote",
",",
"missing_quote",
")",
",",
")",
")",
"return",
"(",
"{",
"\"parens\"",
":",
"parens",
",",
"\"nested_parens\"",
":",
"nested_parens",
",",
"\"quotes\"",
":",
"quotes",
",",
"\"commas\"",
":",
"commas",
",",
"}",
",",
"errors",
",",
")"
] | Scan BEL string to map parens, quotes, commas
Args:
bels: bel string as an array of characters
errors: list of error tuples ('<type>', '<msg>')
Returns:
(char_locs, errors): character locations and errors | [
"Scan",
"BEL",
"string",
"to",
"map",
"parens",
"quotes",
"commas"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L110-L232 |
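A short sketch of parse_chars() on a well-formed Assertion, assuming the function is importable from bel.lang.partialparse (the module in the URL above). It takes the BEL string as a list of characters plus a shared error list and returns the character-location maps consumed by the later parsing stages.

from bel.lang.partialparse import parse_chars

errors = []
char_locs, errors = parse_chars(list('complex(p(HGNC:AKT1), p(HGNC:EGF))'), errors)
print(char_locs["parens"])  # opening-paren index -> (closing index, 'top' or 'child')
print(char_locs["commas"])  # opening-paren index -> comma indexes inside that paren pair
print(errors)               # empty for a balanced, fully quoted string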
belbio/bel | bel/lang/partialparse.py | parse_functions | def parse_functions(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse functions from BEL using paren, comma, quote character locations
Args:
bels: BEL string as list of chars
char_locs: paren, comma, quote character locations
parsed: function locations parsed so far
errors: Any error messages generated during the parse
Returns:
(functions, errors): function names and locations and error messages
"""
parens = char_locs["parens"]
# Handle partial top-level function name
if not parens:
bels_len = len(bels) - 1
span = (0, bels_len)
parsed[span] = {
"name": "".join(bels),
"type": "Function",
"span": span,
"name_span": (span),
"function_level": "top",
}
return parsed, errors
for sp in sorted(parens): # sp = starting paren, ep = ending_paren
ep, function_level = parens[sp]
# Functions can't have a space between function name and left paren
if bels[sp - 1] == " ":
continue
# look in front of start paren for function name
for i in range(sp - 1, 0, -1):
if bels[i] in [" ", ",", "("]: # function name upstream boundary chars
if i < sp - 1:
if ep == -1:
span = (i + 1, len(bels) - 1)
else:
span = (i + 1, ep)
parsed[span] = {
"name": "".join(bels[i + 1 : sp]),
"type": "Function",
"span": span,
"name_span": (i + 1, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
break
else:
if ep == -1:
span = (0, len(bels) - 1)
else:
span = (0, ep)
parsed[span] = {
"name": "".join(bels[0:sp]),
"type": "Function",
"span": span,
"name_span": (0, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
return parsed, errors | python | def parse_functions(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse functions from BEL using paren, comma, quote character locations
Args:
bels: BEL string as list of chars
char_locs: paren, comma, quote character locations
parsed: function locations parsed so far
errors: Any error messages generated during the parse
Returns:
(functions, errors): function names and locations and error messages
"""
parens = char_locs["parens"]
# Handle partial top-level function name
if not parens:
bels_len = len(bels) - 1
span = (0, bels_len)
parsed[span] = {
"name": "".join(bels),
"type": "Function",
"span": span,
"name_span": (span),
"function_level": "top",
}
return parsed, errors
for sp in sorted(parens): # sp = starting paren, ep = ending_paren
ep, function_level = parens[sp]
# Functions can't have a space between function name and left paren
if bels[sp - 1] == " ":
continue
# look in front of start paren for function name
for i in range(sp - 1, 0, -1):
if bels[i] in [" ", ",", "("]: # function name upstream boundary chars
if i < sp - 1:
if ep == -1:
span = (i + 1, len(bels) - 1)
else:
span = (i + 1, ep)
parsed[span] = {
"name": "".join(bels[i + 1 : sp]),
"type": "Function",
"span": span,
"name_span": (i + 1, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
break
else:
if ep == -1:
span = (0, len(bels) - 1)
else:
span = (0, ep)
parsed[span] = {
"name": "".join(bels[0:sp]),
"type": "Function",
"span": span,
"name_span": (0, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
return parsed, errors | [
"def",
"parse_functions",
"(",
"bels",
":",
"list",
",",
"char_locs",
":",
"CharLocs",
",",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"Parsed",
",",
"Errors",
"]",
":",
"parens",
"=",
"char_locs",
"[",
"\"parens\"",
"]",
"# Handle partial top-level function name",
"if",
"not",
"parens",
":",
"bels_len",
"=",
"len",
"(",
"bels",
")",
"-",
"1",
"span",
"=",
"(",
"0",
",",
"bels_len",
")",
"parsed",
"[",
"span",
"]",
"=",
"{",
"\"name\"",
":",
"\"\"",
".",
"join",
"(",
"bels",
")",
",",
"\"type\"",
":",
"\"Function\"",
",",
"\"span\"",
":",
"span",
",",
"\"name_span\"",
":",
"(",
"span",
")",
",",
"\"function_level\"",
":",
"\"top\"",
",",
"}",
"return",
"parsed",
",",
"errors",
"for",
"sp",
"in",
"sorted",
"(",
"parens",
")",
":",
"# sp = starting paren, ep = ending_paren",
"ep",
",",
"function_level",
"=",
"parens",
"[",
"sp",
"]",
"# Functions can't have a space between function name and left paren",
"if",
"bels",
"[",
"sp",
"-",
"1",
"]",
"==",
"\" \"",
":",
"continue",
"# look in front of start paren for function name",
"for",
"i",
"in",
"range",
"(",
"sp",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"if",
"bels",
"[",
"i",
"]",
"in",
"[",
"\" \"",
",",
"\",\"",
",",
"\"(\"",
"]",
":",
"# function name upstream boundary chars",
"if",
"i",
"<",
"sp",
"-",
"1",
":",
"if",
"ep",
"==",
"-",
"1",
":",
"span",
"=",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"bels",
")",
"-",
"1",
")",
"else",
":",
"span",
"=",
"(",
"i",
"+",
"1",
",",
"ep",
")",
"parsed",
"[",
"span",
"]",
"=",
"{",
"\"name\"",
":",
"\"\"",
".",
"join",
"(",
"bels",
"[",
"i",
"+",
"1",
":",
"sp",
"]",
")",
",",
"\"type\"",
":",
"\"Function\"",
",",
"\"span\"",
":",
"span",
",",
"\"name_span\"",
":",
"(",
"i",
"+",
"1",
",",
"sp",
"-",
"1",
")",
",",
"\"parens_span\"",
":",
"(",
"sp",
",",
"ep",
")",
",",
"\"function_level\"",
":",
"function_level",
",",
"}",
"break",
"else",
":",
"if",
"ep",
"==",
"-",
"1",
":",
"span",
"=",
"(",
"0",
",",
"len",
"(",
"bels",
")",
"-",
"1",
")",
"else",
":",
"span",
"=",
"(",
"0",
",",
"ep",
")",
"parsed",
"[",
"span",
"]",
"=",
"{",
"\"name\"",
":",
"\"\"",
".",
"join",
"(",
"bels",
"[",
"0",
":",
"sp",
"]",
")",
",",
"\"type\"",
":",
"\"Function\"",
",",
"\"span\"",
":",
"span",
",",
"\"name_span\"",
":",
"(",
"0",
",",
"sp",
"-",
"1",
")",
",",
"\"parens_span\"",
":",
"(",
"sp",
",",
"ep",
")",
",",
"\"function_level\"",
":",
"function_level",
",",
"}",
"return",
"parsed",
",",
"errors"
] | Parse functions from BEL using paren, comma, quote character locations
Args:
bels: BEL string as list of chars
char_locs: paren, comma, quote character locations
parsed: function locations parsed so far
errors: Any error messages generated during the parse
Returns:
(functions, errors): function names and locations and error messages | [
"Parse",
"functions",
"from",
"BEL",
"using",
"paren",
"comma",
"quote",
"character",
"locations"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L235-L303 |
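The sketch below chains parse_chars() into parse_functions(), again assuming both are importable from bel.lang.partialparse. Note that a dangling left parenthesis is tolerated: the missing right paren is reported in errors, and the open function still gets a parsed entry, which is what enables completion on partial input.

from bel.lang.partialparse import parse_chars, parse_functions

belstr = "act(p(HGNC:AKT1)"            # right paren for act() intentionally missing
bels, errors, parsed = list(belstr), [], {}
char_locs, errors = parse_chars(bels, errors)
parsed, errors = parse_functions(bels, char_locs, parsed, errors)
for span, info in sorted(parsed.items()):
    print(span, info["name"], info["function_level"])  # act at top level, p as child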
belbio/bel | bel/lang/partialparse.py | parse_args | def parse_args(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse arguments from functions
Args:
bels: BEL string as list of chars
char_locs: char locations for parens, commas and quotes
parsed: function locations
errors: error messages
Returns:
(functions, errors): function and arg locations plus error messages
"""
commas = char_locs["commas"]
# Process each span key in parsed from beginning
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue # Skip non-functions and bare function names without parens (no args to parse)
sp, ep = parsed[span]["parens_span"]
# calculate args_end position
if ep == -1: # supports bel completion
args_end = len(bels) - 1
else:
args_end = ep - 1
# Parse arguments
args = []
arg_start = sp + 1
each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end])
for arg_end in each_arg_end_list:
# log.debug(f'Arg_start: {arg_start} Arg_end: {arg_end}')
# Skip blanks at beginning of argument
while arg_start < args_end and bels[arg_start] == " ":
arg_start += 1
# Trim arg_end (e.g. HGNC:AKT1 , HGNC:EGF) - if there are spaces before comma
trimmed_arg_end = arg_end
while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == " ":
trimmed_arg_end -= 1
if trimmed_arg_end < arg_start:
trimmed_arg_end = arg_start
arg = "".join(bels[arg_start : trimmed_arg_end + 1])
# log.debug(f'Adding arg to args: {arg_start} {trimmed_arg_end}')
args.append({"arg": arg, "span": (arg_start, trimmed_arg_end)})
arg_start = arg_end + 2
parsed[span]["args"] = args
return parsed, errors | python | def parse_args(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse arguments from functions
Args:
bels: BEL string as list of chars
char_locs: char locations for parens, commas and quotes
parsed: function locations
errors: error messages
Returns:
(functions, errors): function and arg locations plus error messages
"""
commas = char_locs["commas"]
# Process each span key in parsed from beginning
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue # Skip non-functions and bare function names without parens (no args to parse)
sp, ep = parsed[span]["parens_span"]
# calculate args_end position
if ep == -1: # supports bel completion
args_end = len(bels) - 1
else:
args_end = ep - 1
# Parse arguments
args = []
arg_start = sp + 1
each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end])
for arg_end in each_arg_end_list:
# log.debug(f'Arg_start: {arg_start} Arg_end: {arg_end}')
# Skip blanks at beginning of argument
while arg_start < args_end and bels[arg_start] == " ":
arg_start += 1
# Trim arg_end (e.g. HGNC:AKT1 , HGNC:EGF) - if there are spaces before comma
trimmed_arg_end = arg_end
while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == " ":
trimmed_arg_end -= 1
if trimmed_arg_end < arg_start:
trimmed_arg_end = arg_start
arg = "".join(bels[arg_start : trimmed_arg_end + 1])
# log.debug(f'Adding arg to args: {arg_start} {trimmed_arg_end}')
args.append({"arg": arg, "span": (arg_start, trimmed_arg_end)})
arg_start = arg_end + 2
parsed[span]["args"] = args
return parsed, errors | [
"def",
"parse_args",
"(",
"bels",
":",
"list",
",",
"char_locs",
":",
"CharLocs",
",",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"Parsed",
",",
"Errors",
"]",
":",
"commas",
"=",
"char_locs",
"[",
"\"commas\"",
"]",
"# Process each span key in parsed from beginning",
"for",
"span",
"in",
"parsed",
":",
"if",
"parsed",
"[",
"span",
"]",
"[",
"\"type\"",
"]",
"!=",
"\"Function\"",
"or",
"\"parens_span\"",
"not",
"in",
"parsed",
"[",
"span",
"]",
":",
"continue",
"# Skip if not argument-less",
"sp",
",",
"ep",
"=",
"parsed",
"[",
"span",
"]",
"[",
"\"parens_span\"",
"]",
"# calculate args_end position",
"if",
"ep",
"==",
"-",
"1",
":",
"# supports bel completion",
"args_end",
"=",
"len",
"(",
"bels",
")",
"-",
"1",
"# 1",
"else",
":",
"args_end",
"=",
"ep",
"-",
"1",
"# 1",
"# Parse arguments",
"args",
"=",
"[",
"]",
"arg_start",
"=",
"sp",
"+",
"1",
"each_arg_end_list",
"=",
"sorted",
"(",
"[",
"end",
"-",
"1",
"for",
"end",
"in",
"commas",
".",
"get",
"(",
"sp",
",",
"[",
"]",
")",
"]",
"+",
"[",
"args_end",
"]",
")",
"for",
"arg_end",
"in",
"each_arg_end_list",
":",
"# log.debug(f'Arg_start: {arg_start} Arg_end: {arg_end}')",
"# Skip blanks at beginning of argument",
"while",
"arg_start",
"<",
"args_end",
"and",
"bels",
"[",
"arg_start",
"]",
"==",
"\" \"",
":",
"arg_start",
"+=",
"1",
"# Trim arg_end (e.g. HGNC:AKT1 , HGNC:EGF) - if there are spaces before comma",
"trimmed_arg_end",
"=",
"arg_end",
"while",
"trimmed_arg_end",
">",
"arg_start",
"and",
"bels",
"[",
"trimmed_arg_end",
"]",
"==",
"\" \"",
":",
"trimmed_arg_end",
"-=",
"1",
"if",
"trimmed_arg_end",
"<",
"arg_start",
":",
"trimmed_arg_end",
"=",
"arg_start",
"arg",
"=",
"\"\"",
".",
"join",
"(",
"bels",
"[",
"arg_start",
":",
"trimmed_arg_end",
"+",
"1",
"]",
")",
"# log.debug(f'Adding arg to args: {arg_start} {trimmed_arg_end}')",
"args",
".",
"append",
"(",
"{",
"\"arg\"",
":",
"arg",
",",
"\"span\"",
":",
"(",
"arg_start",
",",
"trimmed_arg_end",
")",
"}",
")",
"arg_start",
"=",
"arg_end",
"+",
"2",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"=",
"args",
"return",
"parsed",
",",
"errors"
] | Parse arguments from functions
Args:
bels: BEL string as list of chars
char_locs: char locations for parens, commas and quotes
parsed: function locations
errors: error messages
Returns:
(functions, errors): function and arg locations plus error messages | [
"Parse",
"arguments",
"from",
"functions"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L306-L362 |
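Continuing the pipeline with parse_args(), which splits each function's argument list on the commas recorded for its opening paren; the imports are assumed as before.

from bel.lang.partialparse import parse_chars, parse_functions, parse_args

belstr = "complex(p(HGNC:AKT1), p(HGNC:EGF))"
bels, errors, parsed = list(belstr), [], {}
char_locs, errors = parse_chars(bels, errors)
parsed, errors = parse_functions(bels, char_locs, parsed, errors)
parsed, errors = parse_args(bels, char_locs, parsed, errors)
top = parsed[(0, 33)]                      # complex() spans the whole 34-char string
print([a["arg"] for a in top["args"]])     # ['p(HGNC:AKT1)', 'p(HGNC:EGF)']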
belbio/bel | bel/lang/partialparse.py | arg_types | def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]:
"""Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
(parsed, errors): parsed, arguments with arg types plus error messages
"""
func_pattern = re.compile(r"\s*[a-zA-Z]+\(")
nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$")
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue
for i, arg in enumerate(parsed[span]["args"]):
nsarg_matches = nsarg_pattern.match(arg["arg"])
if func_pattern.match(arg["arg"]):
parsed[span]["args"][i].update({"type": "Function"})
elif nsarg_matches:
(start, end) = arg["span"]
ns = nsarg_matches.group(1)
ns_val = nsarg_matches.group(2)
ns_span = nsarg_matches.span(1)
ns_span = (ns_span[0] + start, ns_span[1] + start - 1)
ns_val_span = nsarg_matches.span(2)
ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1)
parsed[span]["args"][i].update(
{
"type": "NSArg",
"ns": ns,
"ns_span": ns_span,
"ns_val": ns_val,
"ns_val_span": ns_val_span,
}
)
else:
parsed[span]["args"][i].update({"type": "StrArg"})
return parsed, errors | python | def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]:
"""Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
(parsed, errors): parsed, arguments with arg types plus error messages
"""
func_pattern = re.compile(r"\s*[a-zA-Z]+\(")
nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$")
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue
for i, arg in enumerate(parsed[span]["args"]):
nsarg_matches = nsarg_pattern.match(arg["arg"])
if func_pattern.match(arg["arg"]):
parsed[span]["args"][i].update({"type": "Function"})
elif nsarg_matches:
(start, end) = arg["span"]
ns = nsarg_matches.group(1)
ns_val = nsarg_matches.group(2)
ns_span = nsarg_matches.span(1)
ns_span = (ns_span[0] + start, ns_span[1] + start - 1)
ns_val_span = nsarg_matches.span(2)
ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1)
parsed[span]["args"][i].update(
{
"type": "NSArg",
"ns": ns,
"ns_span": ns_span,
"ns_val": ns_val,
"ns_val_span": ns_val_span,
}
)
else:
parsed[span]["args"][i].update({"type": "StrArg"})
return parsed, errors | [
"def",
"arg_types",
"(",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"Parsed",
",",
"Errors",
"]",
":",
"func_pattern",
"=",
"re",
".",
"compile",
"(",
"r\"\\s*[a-zA-Z]+\\(\"",
")",
"nsarg_pattern",
"=",
"re",
".",
"compile",
"(",
"r\"^\\s*([A-Z]+):(.*?)\\s*$\"",
")",
"for",
"span",
"in",
"parsed",
":",
"if",
"parsed",
"[",
"span",
"]",
"[",
"\"type\"",
"]",
"!=",
"\"Function\"",
"or",
"\"parens_span\"",
"not",
"in",
"parsed",
"[",
"span",
"]",
":",
"continue",
"for",
"i",
",",
"arg",
"in",
"enumerate",
"(",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
")",
":",
"nsarg_matches",
"=",
"nsarg_pattern",
".",
"match",
"(",
"arg",
"[",
"\"arg\"",
"]",
")",
"if",
"func_pattern",
".",
"match",
"(",
"arg",
"[",
"\"arg\"",
"]",
")",
":",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"[",
"i",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"Function\"",
"}",
")",
"elif",
"nsarg_matches",
":",
"(",
"start",
",",
"end",
")",
"=",
"arg",
"[",
"\"span\"",
"]",
"ns",
"=",
"nsarg_matches",
".",
"group",
"(",
"1",
")",
"ns_val",
"=",
"nsarg_matches",
".",
"group",
"(",
"2",
")",
"ns_span",
"=",
"nsarg_matches",
".",
"span",
"(",
"1",
")",
"ns_span",
"=",
"(",
"ns_span",
"[",
"0",
"]",
"+",
"start",
",",
"ns_span",
"[",
"1",
"]",
"+",
"start",
"-",
"1",
")",
"ns_val_span",
"=",
"nsarg_matches",
".",
"span",
"(",
"2",
")",
"ns_val_span",
"=",
"(",
"ns_val_span",
"[",
"0",
"]",
"+",
"start",
",",
"ns_val_span",
"[",
"1",
"]",
"+",
"start",
"-",
"1",
")",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"[",
"i",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"NSArg\"",
",",
"\"ns\"",
":",
"ns",
",",
"\"ns_span\"",
":",
"ns_span",
",",
"\"ns_val\"",
":",
"ns_val",
",",
"\"ns_val_span\"",
":",
"ns_val_span",
",",
"}",
")",
"else",
":",
"parsed",
"[",
"span",
"]",
"[",
"\"args\"",
"]",
"[",
"i",
"]",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"StrArg\"",
"}",
")",
"return",
"parsed",
",",
"errors"
] | Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
(parsed, errors): parsed, arguments with arg types plus error messages | [
"Add",
"argument",
"types",
"to",
"parsed",
"function",
"data",
"structure"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L365-L408 |
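The two regular expressions are the whole of the classification logic in arg_types(), so they can be demonstrated standalone; the patterns below are copied verbatim from the function.

import re

func_pattern = re.compile(r"\s*[a-zA-Z]+\(")
nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$")

for arg in ["p(HGNC:AKT1)", "HGNC:AKT1", '"free text"']:
    if func_pattern.match(arg):
        kind = "Function"   # argument is itself a function call
    elif nsarg_pattern.match(arg):
        kind = "NSArg"      # namespace-prefixed entity
    else:
        kind = "StrArg"     # plain string argument
    print(arg, "->", kind)
# p(HGNC:AKT1) -> Function, HGNC:AKT1 -> NSArg, "free text" -> StrArg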
belbio/bel | bel/lang/partialparse.py | parse_relations | def parse_relations(
belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse relations from BEL string
Args:
belstr: BEL string as one single string (not list of chars)
char_locs: paren, comma and quote char locations
parsed: data structure for parsed functions, relations, nested
errors: error messages
Returns:
(parsed, errors):
"""
quotes = char_locs["quotes"]
quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])
for match in relations_pattern_middle.finditer(belstr):
(start, end) = match.span(1)
# log.debug(f'Relation-middle {match}')
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
test_range = set(start)
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
for match in relations_pattern_end.finditer(belstr):
(start, end) = match.span(1)
log.debug(f"Relation-end {match}")
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
test_range = set(start)
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
return parsed, errors | python | def parse_relations(
belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse relations from BEL string
Args:
belstr: BEL string as one single string (not list of chars)
char_locs: paren, comma and quote char locations
parsed: data structure for parsed functions, relations, nested
errors: error messages
Returns:
(parsed, errors):
"""
quotes = char_locs["quotes"]
quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])
for match in relations_pattern_middle.finditer(belstr):
(start, end) = match.span(1)
# log.debug(f'Relation-middle {match}')
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
test_range = set(start)
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
for match in relations_pattern_end.finditer(belstr):
(start, end) = match.span(1)
log.debug(f"Relation-end {match}")
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
test_range = set(start)
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
return parsed, errors | [
"def",
"parse_relations",
"(",
"belstr",
":",
"str",
",",
"char_locs",
":",
"CharLocs",
",",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"Parsed",
",",
"Errors",
"]",
":",
"quotes",
"=",
"char_locs",
"[",
"\"quotes\"",
"]",
"quoted_range",
"=",
"set",
"(",
"[",
"i",
"for",
"start",
",",
"end",
"in",
"quotes",
".",
"items",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"end",
")",
"]",
")",
"for",
"match",
"in",
"relations_pattern_middle",
".",
"finditer",
"(",
"belstr",
")",
":",
"(",
"start",
",",
"end",
")",
"=",
"match",
".",
"span",
"(",
"1",
")",
"# log.debug(f'Relation-middle {match}')",
"end",
"=",
"end",
"-",
"1",
"# adjust end to match actual end character index",
"if",
"start",
"!=",
"end",
":",
"test_range",
"=",
"set",
"(",
"range",
"(",
"start",
",",
"end",
")",
")",
"else",
":",
"test_range",
"=",
"set",
"(",
"start",
")",
"# Skip if relation overlaps with quoted string",
"if",
"test_range",
".",
"intersection",
"(",
"quoted_range",
")",
":",
"continue",
"span_key",
"=",
"(",
"start",
",",
"end",
")",
"parsed",
"[",
"span_key",
"]",
"=",
"{",
"\"type\"",
":",
"\"Relation\"",
",",
"\"name\"",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"\"span\"",
":",
"(",
"start",
",",
"end",
")",
",",
"}",
"for",
"match",
"in",
"relations_pattern_end",
".",
"finditer",
"(",
"belstr",
")",
":",
"(",
"start",
",",
"end",
")",
"=",
"match",
".",
"span",
"(",
"1",
")",
"log",
".",
"debug",
"(",
"f\"Relation-end {match}\"",
")",
"end",
"=",
"end",
"-",
"1",
"# adjust end to match actual end character index",
"if",
"start",
"!=",
"end",
":",
"test_range",
"=",
"set",
"(",
"range",
"(",
"start",
",",
"end",
")",
")",
"else",
":",
"test_range",
"=",
"set",
"(",
"start",
")",
"# Skip if relation overlaps with quoted string",
"if",
"test_range",
".",
"intersection",
"(",
"quoted_range",
")",
":",
"continue",
"span_key",
"=",
"(",
"start",
",",
"end",
")",
"parsed",
"[",
"span_key",
"]",
"=",
"{",
"\"type\"",
":",
"\"Relation\"",
",",
"\"name\"",
":",
"match",
".",
"group",
"(",
"1",
")",
",",
"\"span\"",
":",
"(",
"start",
",",
"end",
")",
",",
"}",
"return",
"parsed",
",",
"errors"
] | Parse relations from BEL string
Args:
belstr: BEL string as one single string (not list of chars)
char_locs: paren, comma and quote char locations
parsed: data structure for parsed functions, relations, nested
errors: error messages
Returns:
(parsed, errors): | [
"Parse",
"relations",
"from",
"BEL",
"string"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L411-L468 |
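The relation regexes (relations_pattern_middle / relations_pattern_end) are defined at module level and are not shown here, but the quoted-range guard can be illustrated on its own: a relation keyword that falls inside a quoted span is ignored. The index values below are hypothetical.

quotes = {10: 30}  # left-quote index -> right-quote index, as built by parse_chars
quoted_range = set(i for start, end in quotes.items() for i in range(start, end))

match_span = (15, 24)  # pretend the relation regex matched at these indexes
test_range = set(range(match_span[0], match_span[1]))
print(bool(test_range.intersection(quoted_range)))  # True -> the match is skipped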
belbio/bel | bel/lang/partialparse.py | parse_nested | def parse_nested(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
""" Parse nested BEL object """
for sp in char_locs[
"nested_parens"
]: # sp = start parenthesis, ep = end parenthesis
ep, level = char_locs["nested_parens"][sp]
if ep == -1:
ep = len(bels) + 1
parsed[(sp, ep)] = {"type": "Nested", "span": (sp, ep)}
return parsed, errors | python | def parse_nested(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
""" Parse nested BEL object """
for sp in char_locs[
"nested_parens"
]: # sp = start parenthesis, ep = end parenthesis
ep, level = char_locs["nested_parens"][sp]
if ep == -1:
ep = len(bels) + 1
parsed[(sp, ep)] = {"type": "Nested", "span": (sp, ep)}
return parsed, errors | [
"def",
"parse_nested",
"(",
"bels",
":",
"list",
",",
"char_locs",
":",
"CharLocs",
",",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
")",
"->",
"Tuple",
"[",
"Parsed",
",",
"Errors",
"]",
":",
"for",
"sp",
"in",
"char_locs",
"[",
"\"nested_parens\"",
"]",
":",
"# sp = start parenthesis, ep = end parenthesis",
"ep",
",",
"level",
"=",
"char_locs",
"[",
"\"nested_parens\"",
"]",
"[",
"sp",
"]",
"if",
"ep",
"==",
"-",
"1",
":",
"ep",
"=",
"len",
"(",
"bels",
")",
"+",
"1",
"parsed",
"[",
"(",
"sp",
",",
"ep",
")",
"]",
"=",
"{",
"\"type\"",
":",
"\"Nested\"",
",",
"\"span\"",
":",
"(",
"sp",
",",
"ep",
")",
"}",
"return",
"parsed",
",",
"errors"
] | Parse nested BEL object | [
"Parse",
"nested",
"BEL",
"object"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L471-L484 |
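A small sketch of parse_nested(), which relies on parse_chars() having classified a left parenthesis that follows a space as the opening of a nested statement; imports assumed as above.

from bel.lang.partialparse import parse_chars, parse_nested

belstr = "p(HGNC:AKT1) increases (p(HGNC:EGF) decreases p(HGNC:EGFR))"
bels, errors, parsed = list(belstr), [], {}
char_locs, errors = parse_chars(bels, errors)
parsed, errors = parse_nested(bels, char_locs, parsed, errors)
print([v for v in parsed.values() if v["type"] == "Nested"])  # one Nested span entry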
belbio/bel | bel/lang/partialparse.py | dump_json | def dump_json(d: dict) -> str:
"""Dump JSON when using tuples for dictionary keys
Tuple keys have to be converted to strings because JSON object keys must be strings
"""
import json
k = d.keys()
v = d.values()
k1 = [str(i) for i in k]
return json.dumps(dict(zip(*[k1, v])), indent=4) | python | def dump_json(d: dict) -> str:
"""Dump JSON when using tuples for dictionary keys
Tuple keys have to be converted to strings because JSON object keys must be strings
"""
import json
k = d.keys()
v = d.values()
k1 = [str(i) for i in k]
return json.dumps(dict(zip(*[k1, v])), indent=4) | [
"def",
"dump_json",
"(",
"d",
":",
"dict",
")",
"->",
"None",
":",
"import",
"json",
"k",
"=",
"d",
".",
"keys",
"(",
")",
"v",
"=",
"d",
".",
"values",
"(",
")",
"k1",
"=",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"k",
"]",
"return",
"json",
".",
"dumps",
"(",
"dict",
"(",
"zip",
"(",
"*",
"[",
"k1",
",",
"v",
"]",
")",
")",
",",
"indent",
"=",
"4",
")"
] | Dump json when using tuples for dictionary keys
Tuple keys have to be converted to strings to dump out as JSON | [
"Dump",
"json",
"when",
"using",
"tuples",
"for",
"dictionary",
"keys"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L487-L499 |
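dump_json() exists because json.dumps() raises TypeError on tuple keys, and the span-keyed parsed dict uses tuples throughout. The demo below is self-contained and mirrors the function's logic on illustrative data.

import json

parsed_like = {(0, 11): {"type": "Function"}, (13, 21): {"type": "Relation"}}
keys = [str(k) for k in parsed_like.keys()]           # '(0, 11)', '(13, 21)'
print(json.dumps(dict(zip(keys, parsed_like.values())), indent=4))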
belbio/bel | bel/lang/partialparse.py | collect_spans | def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]:
"""Collect flattened list of spans of BEL syntax types
Provide simple list of BEL syntax type spans for highlighting.
Function names, NSargs, NS prefix, NS value and StrArgs will be
tagged.
Args:
ast: AST of BEL assertion
Returns:
List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>))
"""
spans = []
if ast.get("subject", False):
spans.extend(collect_spans(ast["subject"]))
if ast.get("object", False):
spans.extend(collect_spans(ast["object"]))
if ast.get("nested", False):
spans.extend(collect_spans(ast["nested"]))
if ast.get("function", False):
log.debug(f"Processing function")
spans.append(("Function", ast["function"]["name_span"]))
log.debug(f"Spans: {spans}")
if ast.get("args", False):
for idx, arg in enumerate(ast["args"]):
log.debug(f"Arg {arg}")
if arg.get("function", False):
log.debug(f"Recursing on arg function")
results = collect_spans(arg)
log.debug(f"Results {results}")
spans.extend(results) # Recurse arg function
elif arg.get("nsarg", False):
log.debug(f"Processing NSArg Arg {arg}")
spans.append(("NSArg", arg["span"]))
spans.append(("NSPrefix", arg["nsarg"]["ns_span"]))
spans.append(("NSVal", arg["nsarg"]["ns_val_span"]))
elif arg["type"] == "StrArg":
spans.append(("StrArg", arg["span"]))
log.debug(f"Spans: {spans}")
return spans | python | def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]:
"""Collect flattened list of spans of BEL syntax types
Provide simple list of BEL syntax type spans for highlighting.
Function names, NSargs, NS prefix, NS value and StrArgs will be
tagged.
Args:
ast: AST of BEL assertion
Returns:
List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>))
"""
spans = []
if ast.get("subject", False):
spans.extend(collect_spans(ast["subject"]))
if ast.get("object", False):
spans.extend(collect_spans(ast["object"]))
if ast.get("nested", False):
spans.extend(collect_spans(ast["nested"]))
if ast.get("function", False):
log.debug(f"Processing function")
spans.append(("Function", ast["function"]["name_span"]))
log.debug(f"Spans: {spans}")
if ast.get("args", False):
for idx, arg in enumerate(ast["args"]):
log.debug(f"Arg {arg}")
if arg.get("function", False):
log.debug(f"Recursing on arg function")
results = collect_spans(arg)
log.debug(f"Results {results}")
spans.extend(results) # Recurse arg function
elif arg.get("nsarg", False):
log.debug(f"Processing NSArg Arg {arg}")
spans.append(("NSArg", arg["span"]))
spans.append(("NSPrefix", arg["nsarg"]["ns_span"]))
spans.append(("NSVal", arg["nsarg"]["ns_val_span"]))
elif arg["type"] == "StrArg":
spans.append(("StrArg", arg["span"]))
log.debug(f"Spans: {spans}")
return spans | [
"def",
"collect_spans",
"(",
"ast",
":",
"AST",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
"]",
":",
"spans",
"=",
"[",
"]",
"if",
"ast",
".",
"get",
"(",
"\"subject\"",
",",
"False",
")",
":",
"spans",
".",
"extend",
"(",
"collect_spans",
"(",
"ast",
"[",
"\"subject\"",
"]",
")",
")",
"if",
"ast",
".",
"get",
"(",
"\"object\"",
",",
"False",
")",
":",
"spans",
".",
"extend",
"(",
"collect_spans",
"(",
"ast",
"[",
"\"object\"",
"]",
")",
")",
"if",
"ast",
".",
"get",
"(",
"\"nested\"",
",",
"False",
")",
":",
"spans",
".",
"extend",
"(",
"collect_spans",
"(",
"ast",
"[",
"\"nested\"",
"]",
")",
")",
"if",
"ast",
".",
"get",
"(",
"\"function\"",
",",
"False",
")",
":",
"log",
".",
"debug",
"(",
"f\"Processing function\"",
")",
"spans",
".",
"append",
"(",
"(",
"\"Function\"",
",",
"ast",
"[",
"\"function\"",
"]",
"[",
"\"name_span\"",
"]",
")",
")",
"log",
".",
"debug",
"(",
"f\"Spans: {spans}\"",
")",
"if",
"ast",
".",
"get",
"(",
"\"args\"",
",",
"False",
")",
":",
"for",
"idx",
",",
"arg",
"in",
"enumerate",
"(",
"ast",
"[",
"\"args\"",
"]",
")",
":",
"log",
".",
"debug",
"(",
"f\"Arg {arg}\"",
")",
"if",
"arg",
".",
"get",
"(",
"\"function\"",
",",
"False",
")",
":",
"log",
".",
"debug",
"(",
"f\"Recursing on arg function\"",
")",
"results",
"=",
"collect_spans",
"(",
"arg",
")",
"log",
".",
"debug",
"(",
"f\"Results {results}\"",
")",
"spans",
".",
"extend",
"(",
"results",
")",
"# Recurse arg function",
"elif",
"arg",
".",
"get",
"(",
"\"nsarg\"",
",",
"False",
")",
":",
"log",
".",
"debug",
"(",
"f\"Processing NSArg Arg {arg}\"",
")",
"spans",
".",
"append",
"(",
"(",
"\"NSArg\"",
",",
"arg",
"[",
"\"span\"",
"]",
")",
")",
"spans",
".",
"append",
"(",
"(",
"\"NSPrefix\"",
",",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns_span\"",
"]",
")",
")",
"spans",
".",
"append",
"(",
"(",
"\"NSVal\"",
",",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns_val_span\"",
"]",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArg\"",
":",
"spans",
".",
"append",
"(",
"(",
"\"StrArg\"",
",",
"arg",
"[",
"\"span\"",
"]",
")",
")",
"log",
".",
"debug",
"(",
"f\"Spans: {spans}\"",
")",
"return",
"spans"
] | Collect flattened list of spans of BEL syntax types
Provide simple list of BEL syntax type spans for highlighting.
Function names, NSargs, NS prefix, NS value and StrArgs will be
tagged.
Args:
ast: AST of BEL assertion
Returns:
List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>)) | [
"Collect",
"flattened",
"list",
"of",
"spans",
"of",
"BEL",
"syntax",
"types"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L502-L550 |
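A hand-built AST fragment for p(HGNC:AKT1) run through collect_spans(); the dict shape follows parsed_function_to_ast() further below, and the import from bel.lang.partialparse is assumed.

from bel.lang.partialparse import collect_spans

ast = {
    "subject": {
        "type": "Function",
        "span": (0, 11),
        "function": {"name": "p", "name_span": (0, 0), "parens_span": (1, 11)},
        "args": [
            {
                "arg": "HGNC:AKT1", "type": "NSArg", "span": (2, 10),
                "nsarg": {"ns": "HGNC", "ns_val": "AKT1",
                          "ns_span": (2, 5), "ns_val_span": (7, 10)},
            }
        ],
    }
}
print(collect_spans(ast))
# [('Function', (0, 0)), ('NSArg', (2, 10)), ('NSPrefix', (2, 5)), ('NSVal', (7, 10))]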
belbio/bel | bel/lang/partialparse.py | print_spans | def print_spans(spans, max_idx: int) -> None:
"""Quick test to show how character spans match original BEL String
Mostly for debugging purposes
"""
bel_spans = [" "] * (max_idx + 3)
for val, span in spans:
if val in ["Nested", "NSArg"]:
continue
for i in range(span[0], span[1] + 1):
bel_spans[i] = val[0]
print("".join(bel_spans))
# Add second layer for Nested Objects if available
bel_spans = [" "] * (max_idx + 3)
for val, span in spans:
if val not in ["Nested"]:
continue
for i in range(span[0], span[1] + 1):
bel_spans[i] = val[0]
print("".join(bel_spans)) | python | def print_spans(spans, max_idx: int) -> None:
"""Quick test to show how character spans match original BEL String
Mostly for debugging purposes
"""
bel_spans = [" "] * (max_idx + 3)
for val, span in spans:
if val in ["Nested", "NSArg"]:
continue
for i in range(span[0], span[1] + 1):
bel_spans[i] = val[0]
print("".join(bel_spans))
# Add second layer for Nested Objects if available
bel_spans = [" "] * (max_idx + 3)
for val, span in spans:
if val not in ["Nested"]:
continue
for i in range(span[0], span[1] + 1):
bel_spans[i] = val[0]
print("".join(bel_spans)) | [
"def",
"print_spans",
"(",
"spans",
",",
"max_idx",
":",
"int",
")",
"->",
"None",
":",
"bel_spans",
"=",
"[",
"\" \"",
"]",
"*",
"(",
"max_idx",
"+",
"3",
")",
"for",
"val",
",",
"span",
"in",
"spans",
":",
"if",
"val",
"in",
"[",
"\"Nested\"",
",",
"\"NSArg\"",
"]",
":",
"continue",
"for",
"i",
"in",
"range",
"(",
"span",
"[",
"0",
"]",
",",
"span",
"[",
"1",
"]",
"+",
"1",
")",
":",
"bel_spans",
"[",
"i",
"]",
"=",
"val",
"[",
"0",
"]",
"# print(''.join(bel_spans))",
"# Add second layer for Nested Objects if available",
"bel_spans",
"=",
"[",
"\" \"",
"]",
"*",
"(",
"max_idx",
"+",
"3",
")",
"for",
"val",
",",
"span",
"in",
"spans",
":",
"if",
"val",
"not",
"in",
"[",
"\"Nested\"",
"]",
":",
"continue",
"for",
"i",
"in",
"range",
"(",
"span",
"[",
"0",
"]",
",",
"span",
"[",
"1",
"]",
"+",
"1",
")",
":",
"bel_spans",
"[",
"i",
"]",
"=",
"val",
"[",
"0",
"]"
] | Quick test to show how character spans match original BEL String
Mostly for debugging purposes | [
"Quick",
"test",
"to",
"show",
"how",
"character",
"spans",
"match",
"original",
"BEL",
"String"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L577-L598 |
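The overlay trick in print_spans() can be reproduced standalone: tag each character position with the first letter of its span type and print the tag line under the BEL string.

belstr = "p(HGNC:AKT1)"
spans = [("Function", (0, 0)), ("NSPrefix", (2, 5)), ("NSVal", (7, 10))]

overlay = [" "] * len(belstr)
for val, (start, end) in spans:
    for i in range(start, end + 1):
        overlay[i] = val[0]          # first letter of the type tags each character
print(belstr)
print("".join(overlay))              # 'F NNNN NNNN ' lines up under 'p(HGNC:AKT1)'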
belbio/bel | bel/lang/partialparse.py | parsed_function_to_ast | def parsed_function_to_ast(parsed: Parsed, parsed_key):
"""Create AST for top-level functions"""
sub = parsed[parsed_key]
subtree = {
"type": "Function",
"span": sub["span"],
"function": {
"name": sub["name"],
"name_span": sub["name_span"],
"parens_span": sub.get("parens_span", []),
},
}
args = []
for arg in parsed[parsed_key].get("args", []):
# pdb.set_trace()
if arg["type"] == "Function":
args.append(parsed_function_to_ast(parsed, arg["span"]))
elif arg["type"] == "NSArg":
args.append(
{
"arg": arg["arg"],
"type": arg["type"],
"span": arg["span"],
"nsarg": {
"ns": arg["ns"],
"ns_val": arg["ns_val"],
"ns_span": arg["ns_span"],
"ns_val_span": arg["ns_val_span"],
},
}
)
elif arg["type"] == "StrArg":
args.append({"arg": arg["arg"], "type": arg["type"], "span": arg["span"]})
subtree["args"] = copy.deepcopy(args)
return subtree | python | def parsed_function_to_ast(parsed: Parsed, parsed_key):
"""Create AST for top-level functions"""
sub = parsed[parsed_key]
subtree = {
"type": "Function",
"span": sub["span"],
"function": {
"name": sub["name"],
"name_span": sub["name_span"],
"parens_span": sub.get("parens_span", []),
},
}
args = []
for arg in parsed[parsed_key].get("args", []):
# pdb.set_trace()
if arg["type"] == "Function":
args.append(parsed_function_to_ast(parsed, arg["span"]))
elif arg["type"] == "NSArg":
args.append(
{
"arg": arg["arg"],
"type": arg["type"],
"span": arg["span"],
"nsarg": {
"ns": arg["ns"],
"ns_val": arg["ns_val"],
"ns_span": arg["ns_span"],
"ns_val_span": arg["ns_val_span"],
},
}
)
elif arg["type"] == "StrArg":
args.append({"arg": arg["arg"], "type": arg["type"], "span": arg["span"]})
subtree["args"] = copy.deepcopy(args)
return subtree | [
"def",
"parsed_function_to_ast",
"(",
"parsed",
":",
"Parsed",
",",
"parsed_key",
")",
":",
"sub",
"=",
"parsed",
"[",
"parsed_key",
"]",
"subtree",
"=",
"{",
"\"type\"",
":",
"\"Function\"",
",",
"\"span\"",
":",
"sub",
"[",
"\"span\"",
"]",
",",
"\"function\"",
":",
"{",
"\"name\"",
":",
"sub",
"[",
"\"name\"",
"]",
",",
"\"name_span\"",
":",
"sub",
"[",
"\"name_span\"",
"]",
",",
"\"parens_span\"",
":",
"sub",
".",
"get",
"(",
"\"parens_span\"",
",",
"[",
"]",
")",
",",
"}",
",",
"}",
"args",
"=",
"[",
"]",
"for",
"arg",
"in",
"parsed",
"[",
"parsed_key",
"]",
".",
"get",
"(",
"\"args\"",
",",
"[",
"]",
")",
":",
"# pdb.set_trace()",
"if",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
":",
"args",
".",
"append",
"(",
"parsed_function_to_ast",
"(",
"parsed",
",",
"arg",
"[",
"\"span\"",
"]",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"NSArg\"",
":",
"args",
".",
"append",
"(",
"{",
"\"arg\"",
":",
"arg",
"[",
"\"arg\"",
"]",
",",
"\"type\"",
":",
"arg",
"[",
"\"type\"",
"]",
",",
"\"span\"",
":",
"arg",
"[",
"\"span\"",
"]",
",",
"\"nsarg\"",
":",
"{",
"\"ns\"",
":",
"arg",
"[",
"\"ns\"",
"]",
",",
"\"ns_val\"",
":",
"arg",
"[",
"\"ns_val\"",
"]",
",",
"\"ns_span\"",
":",
"arg",
"[",
"\"ns_span\"",
"]",
",",
"\"ns_val_span\"",
":",
"arg",
"[",
"\"ns_val_span\"",
"]",
",",
"}",
",",
"}",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArg\"",
":",
"args",
".",
"append",
"(",
"{",
"\"arg\"",
":",
"arg",
"[",
"\"arg\"",
"]",
",",
"\"type\"",
":",
"arg",
"[",
"\"type\"",
"]",
",",
"\"span\"",
":",
"arg",
"[",
"\"span\"",
"]",
"}",
")",
"subtree",
"[",
"\"args\"",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"args",
")",
"return",
"subtree"
] | Create AST for top-level functions | [
"Create",
"AST",
"for",
"top",
"-",
"level",
"functions"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L603-L644 |
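An end-to-end sketch from raw string to AST via parsed_function_to_ast(); all functions assumed importable from bel.lang.partialparse, and the input string is illustrative.

from bel.lang.partialparse import (
    parse_chars, parse_functions, parse_args, arg_types, parsed_function_to_ast,
)

belstr = "p(HGNC:AKT1)"
bels, errors, parsed = list(belstr), [], {}
char_locs, errors = parse_chars(bels, errors)
parsed, errors = parse_functions(bels, char_locs, parsed, errors)
parsed, errors = parse_args(bels, char_locs, parsed, errors)
parsed, errors = arg_types(parsed, errors)

top_span = sorted(parsed.keys())[0]                    # (0, 11) for this input
ast = parsed_function_to_ast(parsed, top_span)
print(ast["function"]["name"], [a["type"] for a in ast["args"]])  # p ['NSArg']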
belbio/bel | bel/lang/partialparse.py | parsed_top_level_errors | def parsed_top_level_errors(parsed, errors, component_type: str = "") -> Errors:
"""Check full parse for errors
Args:
parsed:
errors:
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
"""
# Error check
fn_cnt = 0
rel_cnt = 0
nested_cnt = 0
for key in parsed:
if parsed[key]["type"] == "Function":
fn_cnt += 1
if parsed[key]["type"] == "Relation":
rel_cnt += 1
if parsed[key]["type"] == "Nested":
nested_cnt += 1
if not component_type:
if nested_cnt > 1:
errors.append(
(
"Error",
"Too many nested objects - can only have one per BEL Assertion",
)
)
if nested_cnt:
if rel_cnt > 2:
errors.append(
(
"Error",
"Too many relations - can only have two in a nested BEL Assertion",
)
)
elif fn_cnt > 4:
errors.append(("Error", "Too many BEL subject and object candidates"))
else:
if rel_cnt > 1:
errors.append(
(
"Error",
"Too many relations - can only have one in a BEL Assertion",
)
)
elif fn_cnt > 2:
errors.append(("Error", "Too many BEL subject and object candidates"))
elif component_type == "subject":
if rel_cnt > 0:
errors.append(
("Error", "Too many relations - cannot have any in a BEL Subject")
)
elif fn_cnt > 1:
errors.append(
("Error", "Too many BEL subject candidates - can only have one")
)
elif component_type == "object":
if nested_cnt:
if rel_cnt > 1:
errors.append(
(
"Error",
"Too many relations - can only have one in a nested BEL object",
)
)
elif fn_cnt > 2:
errors.append(
(
"Error",
"Too many BEL subject and object candidates in a nested BEL object",
)
)
else:
if rel_cnt > 0:
errors.append(
("Error", "Too many relations - cannot have any in a BEL Subject")
)
elif fn_cnt > 1:
errors.append(
("Error", "Too many BEL subject candidates - can only have one")
)
return errors | python | def parsed_top_level_errors(parsed, errors, component_type: str = "") -> Errors:
"""Check full parse for errors
Args:
parsed:
errors:
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
"""
# Error check
fn_cnt = 0
rel_cnt = 0
nested_cnt = 0
for key in parsed:
if parsed[key]["type"] == "Function":
fn_cnt += 1
if parsed[key]["type"] == "Relation":
rel_cnt += 1
if parsed[key]["type"] == "Nested":
nested_cnt += 1
if not component_type:
if nested_cnt > 1:
errors.append(
(
"Error",
"Too many nested objects - can only have one per BEL Assertion",
)
)
if nested_cnt:
if rel_cnt > 2:
errors.append(
(
"Error",
"Too many relations - can only have two in a nested BEL Assertion",
)
)
elif fn_cnt > 4:
errors.append(("Error", "Too many BEL subject and object candidates"))
else:
if rel_cnt > 1:
errors.append(
(
"Error",
"Too many relations - can only have one in a BEL Assertion",
)
)
elif fn_cnt > 2:
errors.append(("Error", "Too many BEL subject and object candidates"))
elif component_type == "subject":
if rel_cnt > 0:
errors.append(
("Error", "Too many relations - cannot have any in a BEL Subject")
)
elif fn_cnt > 1:
errors.append(
("Error", "Too many BEL subject candidates - can only have one")
)
elif component_type == "object":
if nested_cnt:
if rel_cnt > 1:
errors.append(
(
"Error",
"Too many relations - can only have one in a nested BEL object",
)
)
elif fn_cnt > 2:
errors.append(
(
"Error",
"Too many BEL subject and object candidates in a nested BEL object",
)
)
else:
if rel_cnt > 0:
errors.append(
("Error", "Too many relations - cannot have any in a BEL Subject")
)
elif fn_cnt > 1:
errors.append(
("Error", "Too many BEL subject candidates - can only have one")
)
return errors | [
"def",
"parsed_top_level_errors",
"(",
"parsed",
",",
"errors",
",",
"component_type",
":",
"str",
"=",
"\"\"",
")",
"->",
"Errors",
":",
"# Error check",
"fn_cnt",
"=",
"0",
"rel_cnt",
"=",
"0",
"nested_cnt",
"=",
"0",
"for",
"key",
"in",
"parsed",
":",
"if",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
":",
"fn_cnt",
"+=",
"1",
"if",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Relation\"",
":",
"rel_cnt",
"+=",
"1",
"if",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Nested\"",
":",
"nested_cnt",
"+=",
"1",
"if",
"not",
"component_type",
":",
"if",
"nested_cnt",
">",
"1",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many nested objects - can only have one per BEL Assertion\"",
",",
")",
")",
"if",
"nested_cnt",
":",
"if",
"rel_cnt",
">",
"2",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many relations - can only have two in a nested BEL Assertion\"",
",",
")",
")",
"elif",
"fn_cnt",
">",
"4",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many BEL subject and object candidates\"",
")",
")",
"else",
":",
"if",
"rel_cnt",
">",
"1",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many relations - can only have one in a BEL Assertion\"",
",",
")",
")",
"elif",
"fn_cnt",
">",
"2",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many BEL subject and object candidates\"",
")",
")",
"elif",
"component_type",
"==",
"\"subject\"",
":",
"if",
"rel_cnt",
">",
"0",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many relations - cannot have any in a BEL Subject\"",
")",
")",
"elif",
"fn_cnt",
">",
"1",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many BEL subject candidates - can only have one\"",
")",
")",
"elif",
"component_type",
"==",
"\"object\"",
":",
"if",
"nested_cnt",
":",
"if",
"rel_cnt",
">",
"1",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many relations - can only have one in a nested BEL object\"",
",",
")",
")",
"elif",
"fn_cnt",
">",
"2",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many BEL subject and object candidates in a nested BEL object\"",
",",
")",
")",
"else",
":",
"if",
"rel_cnt",
">",
"0",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many relations - cannot have any in a BEL Subject\"",
")",
")",
"elif",
"fn_cnt",
">",
"1",
":",
"errors",
".",
"append",
"(",
"(",
"\"Error\"",
",",
"\"Too many BEL subject candidates - can only have one\"",
")",
")",
"return",
"errors"
] | Check full parse for errors
Args:
parsed: parsed BEL components keyed by character span
errors: accumulated list of (severity, message) tuples
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input | [
"Check",
"full",
"parse",
"for",
"errors"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L647-L736 |
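A minimal usage sketch for parsed_top_level_errors. The span-keyed shape of the parsed dict below is an assumption inferred from the counting loop above, not a documented API.

# Hypothetical usage sketch (parsed-dict shape assumed from the code above)
from bel.lang.partialparse import parsed_top_level_errors

parsed = {
    (0, 12): {"type": "Function", "function_level": "top"},
    (13, 22): {"type": "Relation", "name": "increases"},
    (23, 40): {"type": "Function", "function_level": "top"},
}
errors = parsed_top_level_errors(parsed, [], component_type="subject")
# rel_cnt == 1 here, so errors becomes:
# [('Error', 'Too many relations - cannot have any in a BEL Subject')]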
belbio/bel | bel/lang/partialparse.py | parsed_to_ast | def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str = ""):
"""Convert parsed data struct to AST dictionary
Args:
parsed: parsed BEL components keyed by character span
errors: accumulated list of (severity, message) tuples
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
"""
ast = {}
sorted_keys = sorted(parsed.keys())
# Setup top-level tree
for key in sorted_keys:
if parsed[key]["type"] == "Nested":
nested_component_stack = ["subject", "object"]
if component_type:
component_stack = [component_type]
else:
component_stack = ["subject", "object"]
for key in sorted_keys:
if parsed[key]["type"] == "Function" and parsed[key]["function_level"] == "top":
ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key)
elif parsed[key]["type"] == "Relation" and "relation" not in ast:
ast["relation"] = {
"name": parsed[key]["name"],
"type": "Relation",
"span": key,
}
elif parsed[key]["type"] == "Nested":
ast["nested"] = {}
for nested_key in sorted_keys:
if nested_key <= key:
continue
if (
parsed[nested_key]["type"] == "Function"
and parsed[nested_key]["function_level"] == "top"
):
ast["nested"][
nested_component_stack.pop(0)
] = parsed_function_to_ast(parsed, nested_key)
elif (
parsed[nested_key]["type"] == "Relation"
and "relation" not in ast["nested"]
):
ast["nested"]["relation"] = {
"name": parsed[nested_key]["name"],
"type": "Relation",
"span": parsed[nested_key]["span"],
}
return ast, errors
return ast, errors | python | def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str = ""):
"""Convert parsed data struct to AST dictionary
Args:
parsed: parsed BEL components keyed by character span
errors: accumulated list of (severity, message) tuples
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
"""
ast = {}
sorted_keys = sorted(parsed.keys())
# Setup top-level tree
for key in sorted_keys:
if parsed[key]["type"] == "Nested":
nested_component_stack = ["subject", "object"]
if component_type:
component_stack = [component_type]
else:
component_stack = ["subject", "object"]
for key in sorted_keys:
if parsed[key]["type"] == "Function" and parsed[key]["function_level"] == "top":
ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key)
elif parsed[key]["type"] == "Relation" and "relation" not in ast:
ast["relation"] = {
"name": parsed[key]["name"],
"type": "Relation",
"span": key,
}
elif parsed[key]["type"] == "Nested":
ast["nested"] = {}
for nested_key in sorted_keys:
if nested_key <= key:
continue
if (
parsed[nested_key]["type"] == "Function"
and parsed[nested_key]["function_level"] == "top"
):
ast["nested"][
nested_component_stack.pop(0)
] = parsed_function_to_ast(parsed, nested_key)
elif (
parsed[nested_key]["type"] == "Relation"
and "relation" not in ast["nested"]
):
ast["nested"]["relation"] = {
"name": parsed[nested_key]["name"],
"type": "Relation",
"span": parsed[nested_key]["span"],
}
return ast, errors
return ast, errors | [
"def",
"parsed_to_ast",
"(",
"parsed",
":",
"Parsed",
",",
"errors",
":",
"Errors",
",",
"component_type",
":",
"str",
"=",
"\"\"",
")",
":",
"ast",
"=",
"{",
"}",
"sorted_keys",
"=",
"sorted",
"(",
"parsed",
".",
"keys",
"(",
")",
")",
"# Setup top-level tree",
"for",
"key",
"in",
"sorted_keys",
":",
"if",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Nested\"",
":",
"nested_component_stack",
"=",
"[",
"\"subject\"",
",",
"\"object\"",
"]",
"if",
"component_type",
":",
"component_stack",
"=",
"[",
"component_type",
"]",
"else",
":",
"component_stack",
"=",
"[",
"\"subject\"",
",",
"\"object\"",
"]",
"for",
"key",
"in",
"sorted_keys",
":",
"if",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
"and",
"parsed",
"[",
"key",
"]",
"[",
"\"function_level\"",
"]",
"==",
"\"top\"",
":",
"ast",
"[",
"component_stack",
".",
"pop",
"(",
"0",
")",
"]",
"=",
"parsed_function_to_ast",
"(",
"parsed",
",",
"key",
")",
"elif",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Relation\"",
"and",
"\"relation\"",
"not",
"in",
"ast",
":",
"ast",
"[",
"\"relation\"",
"]",
"=",
"{",
"\"name\"",
":",
"parsed",
"[",
"key",
"]",
"[",
"\"name\"",
"]",
",",
"\"type\"",
":",
"\"Relation\"",
",",
"\"span\"",
":",
"key",
",",
"}",
"elif",
"parsed",
"[",
"key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Nested\"",
":",
"ast",
"[",
"\"nested\"",
"]",
"=",
"{",
"}",
"for",
"nested_key",
"in",
"sorted_keys",
":",
"if",
"nested_key",
"<=",
"key",
":",
"continue",
"if",
"(",
"parsed",
"[",
"nested_key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
"and",
"parsed",
"[",
"nested_key",
"]",
"[",
"\"function_level\"",
"]",
"==",
"\"top\"",
")",
":",
"ast",
"[",
"\"nested\"",
"]",
"[",
"nested_component_stack",
".",
"pop",
"(",
"0",
")",
"]",
"=",
"parsed_function_to_ast",
"(",
"parsed",
",",
"nested_key",
")",
"elif",
"(",
"parsed",
"[",
"nested_key",
"]",
"[",
"\"type\"",
"]",
"==",
"\"Relation\"",
"and",
"\"relation\"",
"not",
"in",
"ast",
"[",
"\"nested\"",
"]",
")",
":",
"ast",
"[",
"\"nested\"",
"]",
"[",
"\"relation\"",
"]",
"=",
"{",
"\"name\"",
":",
"parsed",
"[",
"nested_key",
"]",
"[",
"\"name\"",
"]",
",",
"\"type\"",
":",
"\"Relation\"",
",",
"\"span\"",
":",
"parsed",
"[",
"nested_key",
"]",
"[",
"\"span\"",
"]",
",",
"}",
"return",
"ast",
",",
"errors",
"return",
"ast",
",",
"errors"
] | Convert parsed data struct to AST dictionary
Args:
parsed: parsed BEL components keyed by character span
errors: accumulated list of (severity, message) tuples
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input | [
"Convert",
"parsed",
"data",
"struct",
"to",
"AST",
"dictionary"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L739-L796 |
belbio/bel | bel/lang/partialparse.py | get_ast_dict | def get_ast_dict(belstr, component_type: str = ""):
"""Convert BEL string to AST dictionary
Args:
belstr: BEL string
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
"""
errors = []
parsed = {}
bels = list(belstr)
char_locs, errors = parse_chars(bels, errors)
parsed, errors = parse_functions(belstr, char_locs, parsed, errors)
parsed, errors = parse_args(bels, char_locs, parsed, errors)
parsed, errors = arg_types(parsed, errors)
parsed, errors = parse_relations(belstr, char_locs, parsed, errors)
parsed, errors = parse_nested(bels, char_locs, parsed, errors)
errors = parsed_top_level_errors(parsed, errors)
ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)
return ast, errors | python | def get_ast_dict(belstr, component_type: str = ""):
"""Convert BEL string to AST dictionary
Args:
belstr: BEL string
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
"""
errors = []
parsed = {}
bels = list(belstr)
char_locs, errors = parse_chars(bels, errors)
parsed, errors = parse_functions(belstr, char_locs, parsed, errors)
parsed, errors = parse_args(bels, char_locs, parsed, errors)
parsed, errors = arg_types(parsed, errors)
parsed, errors = parse_relations(belstr, char_locs, parsed, errors)
parsed, errors = parse_nested(bels, char_locs, parsed, errors)
errors = parsed_top_level_errors(parsed, errors)
ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)
return ast, errors | [
"def",
"get_ast_dict",
"(",
"belstr",
",",
"component_type",
":",
"str",
"=",
"\"\"",
")",
":",
"errors",
"=",
"[",
"]",
"parsed",
"=",
"{",
"}",
"bels",
"=",
"list",
"(",
"belstr",
")",
"char_locs",
",",
"errors",
"=",
"parse_chars",
"(",
"bels",
",",
"errors",
")",
"parsed",
",",
"errors",
"=",
"parse_functions",
"(",
"belstr",
",",
"char_locs",
",",
"parsed",
",",
"errors",
")",
"parsed",
",",
"errors",
"=",
"parse_args",
"(",
"bels",
",",
"char_locs",
",",
"parsed",
",",
"errors",
")",
"parsed",
",",
"errors",
"=",
"arg_types",
"(",
"parsed",
",",
"errors",
")",
"parsed",
",",
"errors",
"=",
"parse_relations",
"(",
"belstr",
",",
"char_locs",
",",
"parsed",
",",
"errors",
")",
"parsed",
",",
"errors",
"=",
"parse_nested",
"(",
"bels",
",",
"char_locs",
",",
"parsed",
",",
"errors",
")",
"errors",
"=",
"parsed_top_level_errors",
"(",
"parsed",
",",
"errors",
")",
"ast",
",",
"errors",
"=",
"parsed_to_ast",
"(",
"parsed",
",",
"errors",
",",
"component_type",
"=",
"component_type",
")",
"return",
"ast",
",",
"errors"
] | Convert BEL string to AST dictionary
Args:
belstr: BEL string
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input | [
"Convert",
"BEL",
"string",
"to",
"AST",
"dictionary"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L799-L821 |
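A hedged end-to-end sketch for get_ast_dict. The example BEL string is illustrative, and the keys read from the returned dict follow the layout built by parsed_to_ast above.

from bel.lang.partialparse import get_ast_dict

ast, errors = get_ast_dict('p(HGNC:AKT1) increases act(p(HGNC:EGFR))')
if not errors:
    print(ast["relation"]["name"])             # 'increases'
    print(ast["subject"]["function"]["name"])  # 'p'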
belbio/bel | bel/lang/partialparse.py | get_ast_obj | def get_ast_obj(belstr, bel_version, component_type: str = ""):
"""Convert AST partialparse dict to BELAst"""
ast_dict, errors = get_ast_dict(belstr, component_type)
spec = bel_specification.get_specification(bel_version)
subj = ast_dict["subject"]
subj_ast = add_ast_fn(subj, spec)
relation = None
obj = None
if "relation" in ast_dict:
relation = ast_dict["relation"]["name"]
if "object" in ast_dict:
obj = ast_dict["object"]
obj_ast = add_ast_fn(obj, spec)
return BELAst(subj_ast, relation, obj_ast, spec)
elif "nested" in ast_dict:
nested_subj = ast_dict["nested"]["subject"]
nested_subj_ast = add_ast_fn(nested_subj, spec)
nested_relation = ast_dict["nested"]["relation"]["name"]
nested_obj = ast_dict["nested"]["object"]
nested_obj_ast = add_ast_fn(nested_obj, spec)
return BELAst(
subj_ast,
relation,
BELAst(nested_subj_ast, nested_relation, nested_obj_ast, spec),
spec,
)
return BELAst(subj_ast, None, None, spec) | python | def get_ast_obj(belstr, bel_version, component_type: str = ""):
"""Convert AST partialparse dict to BELAst"""
ast_dict, errors = get_ast_dict(belstr, component_type)
spec = bel_specification.get_specification(bel_version)
subj = ast_dict["subject"]
subj_ast = add_ast_fn(subj, spec)
relation = None
obj = None
if "relation" in ast_dict:
relation = ast_dict["relation"]["name"]
if "object" in ast_dict:
obj = ast_dict["object"]
obj_ast = add_ast_fn(obj, spec)
return BELAst(subj_ast, relation, obj_ast, spec)
elif "nested" in ast_dict:
nested_subj = ast_dict["nested"]["subject"]
nested_subj_ast = add_ast_fn(nested_subj, spec)
nested_relation = ast_dict["nested"]["relation"]["name"]
nested_obj = ast_dict["nested"]["object"]
nested_obj_ast = add_ast_fn(nested_obj, spec)
return BELAst(
subj_ast,
relation,
BELAst(nested_subj_ast, nested_relation, nested_obj_ast, spec),
spec,
)
return BELAst(subj_ast, None, None, spec) | [
"def",
"get_ast_obj",
"(",
"belstr",
",",
"bel_version",
",",
"component_type",
":",
"str",
"=",
"\"\"",
")",
":",
"ast_dict",
",",
"errors",
"=",
"get_ast_dict",
"(",
"belstr",
",",
"component_type",
")",
"spec",
"=",
"bel_specification",
".",
"get_specification",
"(",
"bel_version",
")",
"subj",
"=",
"ast_dict",
"[",
"\"subject\"",
"]",
"subj_ast",
"=",
"add_ast_fn",
"(",
"subj",
",",
"spec",
")",
"relation",
"=",
"None",
"obj",
"=",
"None",
"if",
"\"relation\"",
"in",
"ast_dict",
":",
"relation",
"=",
"ast_dict",
"[",
"\"relation\"",
"]",
"[",
"\"name\"",
"]",
"if",
"\"object\"",
"in",
"ast_dict",
":",
"obj",
"=",
"ast_dict",
"[",
"\"object\"",
"]",
"obj_ast",
"=",
"add_ast_fn",
"(",
"obj",
",",
"spec",
")",
"return",
"BELAst",
"(",
"subj_ast",
",",
"relation",
",",
"obj_ast",
",",
"spec",
")",
"elif",
"\"nested\"",
"in",
"ast_dict",
":",
"nested_subj",
"=",
"ast_dict",
"[",
"\"nested\"",
"]",
"[",
"\"subject\"",
"]",
"nested_subj_ast",
"=",
"add_ast_fn",
"(",
"nested_subj",
",",
"spec",
")",
"nested_relation",
"=",
"ast_dict",
"[",
"\"nested\"",
"]",
"[",
"\"relation\"",
"]",
"[",
"\"name\"",
"]",
"nested_obj",
"=",
"ast_dict",
"[",
"\"nested\"",
"]",
"[",
"\"object\"",
"]",
"nested_obj_ast",
"=",
"add_ast_fn",
"(",
"nested_obj",
",",
"spec",
")",
"return",
"BELAst",
"(",
"subj_ast",
",",
"relation",
",",
"BELAst",
"(",
"nested_subj_ast",
",",
"nested_relation",
",",
"nested_obj_ast",
",",
"spec",
")",
",",
"spec",
",",
")",
"return",
"BELAst",
"(",
"subj_ast",
",",
"None",
",",
"None",
",",
"spec",
")"
] | Convert AST partialparse dict to BELAst | [
"Convert",
"AST",
"partialparse",
"dict",
"to",
"BELAst"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L824-L859 |
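A sketch of get_ast_obj in use; '2.1.0' as an available BEL specification version and the to_string() method on BELAst are both assumptions.

from bel.lang.partialparse import get_ast_obj

ast = get_ast_obj('p(HGNC:AKT1) increases p(HGNC:EGF)', '2.1.0')
print(ast.to_string())  # round-trips the assertion through the object AST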
belbio/bel | bel/lang/partialparse.py | add_ast_fn | def add_ast_fn(d, spec, parent_function=None):
"""Convert dict AST to object AST Function
Args:
ast_fn: AST object Function
d: AST as dictionary
spec: BEL Specification
Return:
ast_fn
"""
if d["type"] == "Function":
ast_fn = Function(d["function"]["name"], spec, parent_function=parent_function)
for arg in d["args"]:
if arg["type"] == "Function":
ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn))
elif arg["type"] == "NSArg":
ast_fn.add_argument(
NSArg(arg["nsarg"]["ns"], arg["nsarg"]["ns_val"], ast_fn)
)
elif arg["type"] == "StrArg":
ast_fn.add_argument(StrArg(arg["arg"], ast_fn))
return ast_fn | python | def add_ast_fn(d, spec, parent_function=None):
"""Convert dict AST to object AST Function
Args:
ast_fn: AST object Function
d: AST as dictionary
spec: BEL Specification
Return:
ast_fn
"""
if d["type"] == "Function":
ast_fn = Function(d["function"]["name"], spec, parent_function=parent_function)
for arg in d["args"]:
if arg["type"] == "Function":
ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn))
elif arg["type"] == "NSArg":
ast_fn.add_argument(
NSArg(arg["nsarg"]["ns"], arg["nsarg"]["ns_val"], ast_fn)
)
elif arg["type"] == "StrArg":
ast_fn.add_argument(StrArg(arg["arg"], ast_fn))
return ast_fn | [
"def",
"add_ast_fn",
"(",
"d",
",",
"spec",
",",
"parent_function",
"=",
"None",
")",
":",
"if",
"d",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
":",
"ast_fn",
"=",
"Function",
"(",
"d",
"[",
"\"function\"",
"]",
"[",
"\"name\"",
"]",
",",
"spec",
",",
"parent_function",
"=",
"parent_function",
")",
"for",
"arg",
"in",
"d",
"[",
"\"args\"",
"]",
":",
"if",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"Function\"",
":",
"ast_fn",
".",
"add_argument",
"(",
"add_ast_fn",
"(",
"arg",
",",
"spec",
",",
"parent_function",
"=",
"ast_fn",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"NSArg\"",
":",
"ast_fn",
".",
"add_argument",
"(",
"NSArg",
"(",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns\"",
"]",
",",
"arg",
"[",
"\"nsarg\"",
"]",
"[",
"\"ns_val\"",
"]",
",",
"ast_fn",
")",
")",
"elif",
"arg",
"[",
"\"type\"",
"]",
"==",
"\"StrArg\"",
":",
"ast_fn",
".",
"add_argument",
"(",
"StrArg",
"(",
"arg",
"[",
"\"arg\"",
"]",
",",
"ast_fn",
")",
")",
"return",
"ast_fn"
] | Convert dict AST to object AST Function
Args:
d: AST as dictionary
spec: BEL Specification
parent_function: optional parent Function object
Return:
ast_fn | [
"Convert",
"dict",
"AST",
"to",
"object",
"AST",
"Function"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/partialparse.py#L862-L885 |
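The dict shape add_ast_fn consumes, reconstructed from its branches above. This is a sketch of the implied schema, not a documented one.

# assumed input shape for add_ast_fn (inferred from the code above)
d = {
    "type": "Function",
    "function": {"name": "p"},
    "args": [
        {"type": "NSArg", "nsarg": {"ns": "HGNC", "ns_val": "AKT1"}},
    ],
}
# add_ast_fn(d, spec) builds Function('p', spec) carrying one NSArg;
# nested Function dicts recurse with parent_function set to the parent.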
belbio/bel | bel/lang/bel_utils.py | convert_nsarg | def convert_nsarg(
nsarg: str,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
) -> str:
"""[De]Canonicalize NSArg
Args:
nsarg (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
str: converted NSArg
"""
if not api_url:
api_url = config["bel_api"]["servers"]["api_url"]
if not api_url:
log.error("Missing api url - cannot convert namespace")
return None
params = None
if namespace_targets:
namespace_targets_str = json.dumps(namespace_targets)
params = {"namespace_targets": namespace_targets_str}
if not namespace_targets:
if canonicalize:
api_url = api_url + "/terms/{}/canonicalized"
elif decanonicalize:
api_url = api_url + "/terms/{}/decanonicalized"
else:
log.warning("Missing (de)canonical flag - cannot convert namespaces")
return nsarg
else:
api_url = (
api_url + "/terms/{}/canonicalized"
) # overriding with namespace_targets
request_url = api_url.format(url_path_param_quoting(nsarg))
r = get_url(request_url, params=params, timeout=10)
if r and r.status_code == 200:
nsarg = r.json().get("term_id", nsarg)
elif not r or r.status_code == 404:
log.error(f"[de]Canonicalization endpoint missing: {request_url}")
return nsarg | python | def convert_nsarg(
nsarg: str,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
) -> str:
"""[De]Canonicalize NSArg
Args:
nsarg (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
str: converted NSArg
"""
if not api_url:
api_url = config["bel_api"]["servers"]["api_url"]
if not api_url:
log.error("Missing api url - cannot convert namespace")
return None
params = None
if namespace_targets:
namespace_targets_str = json.dumps(namespace_targets)
params = {"namespace_targets": namespace_targets_str}
if not namespace_targets:
if canonicalize:
api_url = api_url + "/terms/{}/canonicalized"
elif decanonicalize:
api_url = api_url + "/terms/{}/decanonicalized"
else:
log.warning("Missing (de)canonical flag - cannot convert namespaces")
return nsarg
else:
api_url = (
api_url + "/terms/{}/canonicalized"
) # overriding with namespace_targets
request_url = api_url.format(url_path_param_quoting(nsarg))
r = get_url(request_url, params=params, timeout=10)
if r and r.status_code == 200:
nsarg = r.json().get("term_id", nsarg)
elif not r or r.status_code == 404:
log.error(f"[de]Canonicalization endpoint missing: {request_url}")
return nsarg | [
"def",
"convert_nsarg",
"(",
"nsarg",
":",
"str",
",",
"api_url",
":",
"str",
"=",
"None",
",",
"namespace_targets",
":",
"Mapping",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"canonicalize",
":",
"bool",
"=",
"False",
",",
"decanonicalize",
":",
"bool",
"=",
"False",
",",
")",
"->",
"str",
":",
"if",
"not",
"api_url",
":",
"api_url",
"=",
"config",
"[",
"\"bel_api\"",
"]",
"[",
"\"servers\"",
"]",
"[",
"\"api_url\"",
"]",
"if",
"not",
"api_url",
":",
"log",
".",
"error",
"(",
"\"Missing api url - cannot convert namespace\"",
")",
"return",
"None",
"params",
"=",
"None",
"if",
"namespace_targets",
":",
"namespace_targets_str",
"=",
"json",
".",
"dumps",
"(",
"namespace_targets",
")",
"params",
"=",
"{",
"\"namespace_targets\"",
":",
"namespace_targets_str",
"}",
"if",
"not",
"namespace_targets",
":",
"if",
"canonicalize",
":",
"api_url",
"=",
"api_url",
"+",
"\"/terms/{}/canonicalized\"",
"elif",
"decanonicalize",
":",
"api_url",
"=",
"api_url",
"+",
"\"/terms/{}/decanonicalized\"",
"else",
":",
"log",
".",
"warning",
"(",
"\"Missing (de)canonical flag - cannot convert namespaces\"",
")",
"return",
"nsarg",
"else",
":",
"api_url",
"=",
"(",
"api_url",
"+",
"\"/terms/{}/canonicalized\"",
")",
"# overriding with namespace_targets",
"request_url",
"=",
"api_url",
".",
"format",
"(",
"url_path_param_quoting",
"(",
"nsarg",
")",
")",
"r",
"=",
"get_url",
"(",
"request_url",
",",
"params",
"=",
"params",
",",
"timeout",
"=",
"10",
")",
"if",
"r",
"and",
"r",
".",
"status_code",
"==",
"200",
":",
"nsarg",
"=",
"r",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"term_id\"",
",",
"nsarg",
")",
"elif",
"not",
"r",
"or",
"r",
".",
"status_code",
"==",
"404",
":",
"log",
".",
"error",
"(",
"f\"[de]Canonicalization endpoint missing: {request_url}\"",
")",
"return",
"nsarg"
] | [De]Canonicalize NSArg
Args:
nsarg (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
str: converted NSArg | [
"[",
"De",
"]",
"Canonicalize",
"NSArg"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L28-L82 |
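A hedged call sketch for convert_nsarg; the api_url value and the canonical id in the comment are illustrative assumptions.

from bel.lang.bel_utils import convert_nsarg

canonical = convert_nsarg(
    'HGNC:AKT1',
    api_url='https://api.bel.bio/v1',
    canonicalize=True,
)
# e.g. 'EG:207' if the terms endpoint canonicalizes HGNC symbols to Entrez Gene ids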
belbio/bel | bel/lang/bel_utils.py | convert_namespaces_str | def convert_namespaces_str(
bel_str: str,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
) -> str:
"""Convert namespace in string
Uses a regex expression to extract all NSArgs and replace them with the
updated NSArg from the BEL.bio API terms endpoint.
Args:
bel_str (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
str: bel statement with namespaces converted
"""
# pattern - look for capitalized namespace followed by colon
# and either a quoted string or a string that
# can include any char other than space, comma or ')'
matches = re.findall(r'([A-Z]+:"(?:\\.|[^"\\])*"|[A-Z]+:(?:[^\),\s]+))', bel_str)
for nsarg in matches:
if "DEFAULT:" in nsarg: # skip default namespaces
continue
updated_nsarg = convert_nsarg(
nsarg,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
if updated_nsarg != nsarg:
bel_str = bel_str.replace(nsarg, updated_nsarg)
return bel_str | python | def convert_namespaces_str(
bel_str: str,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
) -> str:
"""Convert namespace in string
Uses a regex expression to extract all NSArgs and replace them with the
updated NSArg from the BEL.bio API terms endpoint.
Args:
bel_str (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
str: bel statement with namespaces converted
"""
# pattern - look for capitalized namespace followed by colon
# and either a quoted string or a string that
# can include any char other than space, comma or ')'
matches = re.findall(r'([A-Z]+:"(?:\\.|[^"\\])*"|[A-Z]+:(?:[^\),\s]+))', bel_str)
for nsarg in matches:
if "DEFAULT:" in nsarg: # skip default namespaces
continue
updated_nsarg = convert_nsarg(
nsarg,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
if updated_nsarg != nsarg:
bel_str = bel_str.replace(nsarg, updated_nsarg)
return bel_str | [
"def",
"convert_namespaces_str",
"(",
"bel_str",
":",
"str",
",",
"api_url",
":",
"str",
"=",
"None",
",",
"namespace_targets",
":",
"Mapping",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"canonicalize",
":",
"bool",
"=",
"False",
",",
"decanonicalize",
":",
"bool",
"=",
"False",
",",
")",
"->",
"str",
":",
"# pattern - look for capitalized namespace followed by colon",
"# and either a quoted string or a string that",
"# can include any char other than space, comma or ')'",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'([A-Z]+:\"(?:\\\\.|[^\"\\\\])*\"|[A-Z]+:(?:[^\\),\\s]+))'",
",",
"bel_str",
")",
"for",
"nsarg",
"in",
"matches",
":",
"if",
"\"DEFAULT:\"",
"in",
"nsarg",
":",
"# skip default namespaces",
"continue",
"updated_nsarg",
"=",
"convert_nsarg",
"(",
"nsarg",
",",
"api_url",
"=",
"api_url",
",",
"namespace_targets",
"=",
"namespace_targets",
",",
"canonicalize",
"=",
"canonicalize",
",",
"decanonicalize",
"=",
"decanonicalize",
",",
")",
"if",
"updated_nsarg",
"!=",
"nsarg",
":",
"bel_str",
"=",
"bel_str",
".",
"replace",
"(",
"nsarg",
",",
"updated_nsarg",
")",
"return",
"bel_str"
] | Convert namespace in string
Uses a regex expression to extract all NSArgs and replace them with the
updated NSArg from the BEL.bio API terms endpoint.
Args:
bel_str (str): bel statement string or partial string (e.g. subject or object)
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
str: bel statement with namespaces converted | [
"Convert",
"namespace",
"in",
"string"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L85-L126 |
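The NSArg-extraction regex is the load-bearing piece of this function; a self-contained, stdlib-only check of what it matches:

import re

NSARG_RE = r'([A-Z]+:"(?:\\.|[^"\\])*"|[A-Z]+:(?:[^\),\s]+))'
bel = 'complex(p(HGNC:AKT1), p(HGNC:"EGF family"))'
print(re.findall(NSARG_RE, bel))
# ['HGNC:AKT1', 'HGNC:"EGF family"'] - quoted values may contain spaces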
belbio/bel | bel/lang/bel_utils.py | convert_namespaces_ast | def convert_namespaces_ast(
ast,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
):
"""Recursively convert namespaces of BEL Entities in BEL AST using API endpoint
Canonicalization and decanonicalization are determined by the endpoint used and namespace_targets.
Args:
ast (BEL): BEL AST
api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized)
namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities
Returns:
BEL: BEL AST
"""
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
# Get normalized term if necessary
if (canonicalize and not ast.canonical) or (
decanonicalize and not ast.decanonical
):
normalized_term = convert_nsarg(
given_term_id,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
if canonicalize:
ast.canonical = normalized_term
elif decanonicalize:
ast.decanonical = normalized_term
# Update normalized term
if canonicalize:
ns, value = ast.canonical.split(":")
ast.change_nsvalue(ns, value)
elif decanonicalize:
ns, value = ast.decanonical.split(":")
ast.change_nsvalue(ns, value)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
convert_namespaces_ast(
arg,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
return ast | python | def convert_namespaces_ast(
ast,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
):
"""Recursively convert namespaces of BEL Entities in BEL AST using API endpoint
Canonicalization and decanonicalization are determined by the endpoint used and namespace_targets.
Args:
ast (BEL): BEL AST
api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized)
namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities
Returns:
BEL: BEL AST
"""
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
# Get normalized term if necessary
if (canonicalize and not ast.canonical) or (
decanonicalize and not ast.decanonical
):
normalized_term = convert_nsarg(
given_term_id,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
if canonicalize:
ast.canonical = normalized_term
elif decanonicalize:
ast.decanonical = normalized_term
# Update normalized term
if canonicalize:
ns, value = ast.canonical.split(":")
ast.change_nsvalue(ns, value)
elif decanonicalize:
ns, value = ast.decanonical.split(":")
ast.change_nsvalue(ns, value)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
convert_namespaces_ast(
arg,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
return ast | [
"def",
"convert_namespaces_ast",
"(",
"ast",
",",
"api_url",
":",
"str",
"=",
"None",
",",
"namespace_targets",
":",
"Mapping",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"canonicalize",
":",
"bool",
"=",
"False",
",",
"decanonicalize",
":",
"bool",
"=",
"False",
",",
")",
":",
"if",
"isinstance",
"(",
"ast",
",",
"NSArg",
")",
":",
"given_term_id",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"ast",
".",
"namespace",
",",
"ast",
".",
"value",
")",
"# Get normalized term if necessary",
"if",
"(",
"canonicalize",
"and",
"not",
"ast",
".",
"canonical",
")",
"or",
"(",
"decanonicalize",
"and",
"not",
"ast",
".",
"decanonical",
")",
":",
"normalized_term",
"=",
"convert_nsarg",
"(",
"given_term_id",
",",
"api_url",
"=",
"api_url",
",",
"namespace_targets",
"=",
"namespace_targets",
",",
"canonicalize",
"=",
"canonicalize",
",",
"decanonicalize",
"=",
"decanonicalize",
",",
")",
"if",
"canonicalize",
":",
"ast",
".",
"canonical",
"=",
"normalized_term",
"elif",
"decanonicalize",
":",
"ast",
".",
"decanonical",
"=",
"normalized_term",
"# Update normalized term",
"if",
"canonicalize",
":",
"ns",
",",
"value",
"=",
"ast",
".",
"canonical",
".",
"split",
"(",
"\":\"",
")",
"ast",
".",
"change_nsvalue",
"(",
"ns",
",",
"value",
")",
"elif",
"decanonicalize",
":",
"ns",
",",
"value",
"=",
"ast",
".",
"canonical",
".",
"split",
"(",
"\":\"",
")",
"ast",
".",
"change_nsvalue",
"(",
"ns",
",",
"value",
")",
"# Recursively process every NSArg by processing BELAst and Functions",
"if",
"hasattr",
"(",
"ast",
",",
"\"args\"",
")",
":",
"for",
"arg",
"in",
"ast",
".",
"args",
":",
"convert_namespaces_ast",
"(",
"arg",
",",
"api_url",
"=",
"api_url",
",",
"namespace_targets",
"=",
"namespace_targets",
",",
"canonicalize",
"=",
"canonicalize",
",",
"decanonicalize",
"=",
"decanonicalize",
",",
")",
"return",
"ast"
] | Recursively convert namespaces of BEL Entities in BEL AST using API endpoint
Canonicalization and decanonicalization are determined by the endpoint used and namespace_targets.
Args:
ast (BEL): BEL AST
api_url (str): endpoint url with a placeholder for the term_id (either /terms/<term_id>/canonicalized or /terms/<term_id>/decanonicalized)
namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities
Returns:
BEL: BEL AST | [
"Recursively",
"convert",
"namespaces",
"of",
"BEL",
"Entities",
"in",
"BEL",
"AST",
"using",
"API",
"endpoint"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L129-L187 |
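A hedged sketch of the namespace_targets mapping the docstring references; the specific namespace choices below are assumptions patterned on the configuration-file example.

namespace_targets = {'HGNC': ['EG'], 'MGI': ['EG'], 'RGD': ['EG']}
ast = convert_namespaces_ast(ast, namespace_targets=namespace_targets,
                             canonicalize=True)
# every NSArg whose namespace has a target list is rewritten in place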
belbio/bel | bel/lang/bel_utils.py | populate_ast_nsarg_defaults | def populate_ast_nsarg_defaults(ast, belast, species_id=None):
"""Recursively populate NSArg AST entries for default (de)canonical values
This was added specifically for the BEL Pipeline. It is designed to
run directly against ArangoDB and not through the BELAPI.
Args:
ast (BEL): BEL AST
belast: top-level BEL AST used to collect species across the statement
species_id: species id seen so far in this branch (consistency check)
Returns:
BEL: BEL AST
"""
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
r = bel.terms.terms.get_normalized_terms(given_term_id)
ast.canonical = r["canonical"]
ast.decanonical = r["decanonical"]
r = bel.terms.terms.get_terms(ast.canonical)
if len(r) > 0:
ast.species_id = r[0].get("species_id", False)
ast.species_label = r[0].get("species_label", False)
# Check to see if species is set and if it's consistent
# if species is not consistent for the entire AST - set species_id/label
# on belast to False (instead of None)
if ast.species_id and species_id is None:
species_id = ast.species_id
belast.species.add((ast.species_id, ast.species_label))
elif ast.species_id and species_id and species_id != ast.species_id:
belast.species_id = False
belast.species_label = False
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_defaults(arg, belast, species_id)
return ast | python | def populate_ast_nsarg_defaults(ast, belast, species_id=None):
"""Recursively populate NSArg AST entries for default (de)canonical values
This was added specifically for the BEL Pipeline. It is designed to
run directly against ArangoDB and not through the BELAPI.
Args:
ast (BEL): BEL AST
belast: top-level BEL AST used to collect species across the statement
species_id: species id seen so far in this branch (consistency check)
Returns:
BEL: BEL AST
"""
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
r = bel.terms.terms.get_normalized_terms(given_term_id)
ast.canonical = r["canonical"]
ast.decanonical = r["decanonical"]
r = bel.terms.terms.get_terms(ast.canonical)
if len(r) > 0:
ast.species_id = r[0].get("species_id", False)
ast.species_label = r[0].get("species_label", False)
# Check to see if species is set and if it's consistent
# if species is not consistent for the entire AST - set species_id/label
# on belast to False (instead of None)
if ast.species_id and species_id is None:
species_id = ast.species_id
belast.species.add((ast.species_id, ast.species_label))
elif ast.species_id and species_id and species_id != ast.species_id:
belast.species_id = False
belast.species_label = False
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_defaults(arg, belast, species_id)
return ast | [
"def",
"populate_ast_nsarg_defaults",
"(",
"ast",
",",
"belast",
",",
"species_id",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"ast",
",",
"NSArg",
")",
":",
"given_term_id",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"ast",
".",
"namespace",
",",
"ast",
".",
"value",
")",
"r",
"=",
"bel",
".",
"terms",
".",
"terms",
".",
"get_normalized_terms",
"(",
"given_term_id",
")",
"ast",
".",
"canonical",
"=",
"r",
"[",
"\"canonical\"",
"]",
"ast",
".",
"decanonical",
"=",
"r",
"[",
"\"decanonical\"",
"]",
"r",
"=",
"bel",
".",
"terms",
".",
"terms",
".",
"get_terms",
"(",
"ast",
".",
"canonical",
")",
"if",
"len",
"(",
"r",
")",
">",
"0",
":",
"ast",
".",
"species_id",
"=",
"r",
"[",
"0",
"]",
".",
"get",
"(",
"\"species_id\"",
",",
"False",
")",
"ast",
".",
"species_label",
"=",
"r",
"[",
"0",
"]",
".",
"get",
"(",
"\"species_label\"",
",",
"False",
")",
"# Check to see if species is set and if it's consistent",
"# if species is not consistent for the entire AST - set species_id/label",
"# on belast to False (instead of None)",
"if",
"ast",
".",
"species_id",
"and",
"species_id",
"is",
"None",
":",
"species_id",
"=",
"ast",
".",
"species_id",
"belast",
".",
"species",
".",
"add",
"(",
"(",
"ast",
".",
"species_id",
",",
"ast",
".",
"species_label",
")",
")",
"elif",
"ast",
".",
"species_id",
"and",
"species_id",
"and",
"species_id",
"!=",
"ast",
".",
"species_id",
":",
"belast",
".",
"species_id",
"=",
"False",
"belast",
".",
"species_label",
"=",
"False",
"# Recursively process every NSArg by processing BELAst and Functions",
"if",
"hasattr",
"(",
"ast",
",",
"\"args\"",
")",
":",
"for",
"arg",
"in",
"ast",
".",
"args",
":",
"populate_ast_nsarg_defaults",
"(",
"arg",
",",
"belast",
",",
"species_id",
")",
"return",
"ast"
] | Recursively populate NSArg AST entries for default (de)canonical values
This was added specifically for the BEL Pipeline. It is designed to
run directly against ArangoDB and not through the BELAPI.
Args:
ast (BEL): BEL AST
belast: top-level BEL AST used to collect species across the statement
species_id: species id seen so far in this branch (consistency check)
Returns:
BEL: BEL AST | [
"Recursively",
"populate",
"NSArg",
"AST",
"entries",
"for",
"default",
"(",
"de",
")",
"canonical",
"values"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L190-L232 |
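A hedged usage sketch; bo is assumed to be a bel.lang.belobj BEL object with a parsed AST, running where the ArangoDB-backed term tables are reachable.

ast = populate_ast_nsarg_defaults(bo.ast, bo.ast)
print(bo.ast.species)   # set of (species_id, species_label) tuples
# bo.ast.species_id is forced to False when NSArgs disagree on species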
belbio/bel | bel/lang/bel_utils.py | orthologize | def orthologize(ast, bo, species_id: str):
"""Recursively orthologize BEL Entities in BEL AST using API endpoint
NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog)
Args:
ast (BEL): BEL AST
bo: BEL object carrying the AST and validation messages
species_id (str): species id (e.g. TAX:10090) to orthologize to
Returns:
BEL: BEL AST
"""
# if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1':
# import pdb; pdb.set_trace()
if not species_id:
bo.validation_messages.append(
("WARNING", "No species id was provided for orthologization")
)
return ast
if isinstance(ast, NSArg):
if ast.orthologs:
# log.debug(f'AST: {ast.to_string()} species_id: {species_id} orthologs: {ast.orthologs}')
if ast.orthologs.get(species_id, None):
orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"]
ns, value = orthologized_nsarg_val.split(":")
ast.change_nsvalue(ns, value)
ast.canonical = ast.orthologs[species_id]["canonical"]
ast.decanonical = ast.orthologs[species_id]["decanonical"]
ast.orthologized = True
bo.ast.species.add(
(species_id, ast.orthologs[species_id]["species_label"])
)
else:
bo.ast.species.add((ast.species_id, ast.species_label))
bo.validation_messages.append(
("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}")
)
elif ast.species_id:
bo.ast.species.add((ast.species_id, ast.species_label))
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
orthologize(arg, bo, species_id)
return ast | python | def orthologize(ast, bo, species_id: str):
"""Recursively orthologize BEL Entities in BEL AST using API endpoint
NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog)
Args:
ast (BEL): BEL AST
bo: BEL object carrying the AST and validation messages
species_id (str): species id (e.g. TAX:10090) to orthologize to
Returns:
BEL: BEL AST
"""
# if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1':
# import pdb; pdb.set_trace()
if not species_id:
bo.validation_messages.append(
("WARNING", "No species id was provided for orthologization")
)
return ast
if isinstance(ast, NSArg):
if ast.orthologs:
# log.debug(f'AST: {ast.to_string()} species_id: {species_id} orthologs: {ast.orthologs}')
if ast.orthologs.get(species_id, None):
orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"]
ns, value = orthologized_nsarg_val.split(":")
ast.change_nsvalue(ns, value)
ast.canonical = ast.orthologs[species_id]["canonical"]
ast.decanonical = ast.orthologs[species_id]["decanonical"]
ast.orthologized = True
bo.ast.species.add(
(species_id, ast.orthologs[species_id]["species_label"])
)
else:
bo.ast.species.add((ast.species_id, ast.species_label))
bo.validation_messages.append(
("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}")
)
elif ast.species_id:
bo.ast.species.add((ast.species_id, ast.species_label))
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
orthologize(arg, bo, species_id)
return ast | [
"def",
"orthologize",
"(",
"ast",
",",
"bo",
",",
"species_id",
":",
"str",
")",
":",
"# if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1':",
"# import pdb; pdb.set_trace()",
"if",
"not",
"species_id",
":",
"bo",
".",
"validation_messages",
".",
"append",
"(",
"(",
"\"WARNING\"",
",",
"\"No species id was provided for orthologization\"",
")",
")",
"return",
"ast",
"if",
"isinstance",
"(",
"ast",
",",
"NSArg",
")",
":",
"if",
"ast",
".",
"orthologs",
":",
"# log.debug(f'AST: {ast.to_string()} species_id: {species_id} orthologs: {ast.orthologs}')",
"if",
"ast",
".",
"orthologs",
".",
"get",
"(",
"species_id",
",",
"None",
")",
":",
"orthologized_nsarg_val",
"=",
"ast",
".",
"orthologs",
"[",
"species_id",
"]",
"[",
"\"decanonical\"",
"]",
"ns",
",",
"value",
"=",
"orthologized_nsarg_val",
".",
"split",
"(",
"\":\"",
")",
"ast",
".",
"change_nsvalue",
"(",
"ns",
",",
"value",
")",
"ast",
".",
"canonical",
"=",
"ast",
".",
"orthologs",
"[",
"species_id",
"]",
"[",
"\"canonical\"",
"]",
"ast",
".",
"decanonical",
"=",
"ast",
".",
"orthologs",
"[",
"species_id",
"]",
"[",
"\"decanonical\"",
"]",
"ast",
".",
"orthologized",
"=",
"True",
"bo",
".",
"ast",
".",
"species",
".",
"add",
"(",
"(",
"species_id",
",",
"ast",
".",
"orthologs",
"[",
"species_id",
"]",
"[",
"\"species_label\"",
"]",
")",
")",
"else",
":",
"bo",
".",
"ast",
".",
"species",
".",
"add",
"(",
"(",
"ast",
".",
"species_id",
",",
"ast",
".",
"species_label",
")",
")",
"bo",
".",
"validation_messages",
".",
"append",
"(",
"(",
"\"WARNING\"",
",",
"f\"No ortholog found for {ast.namespace}:{ast.value}\"",
")",
")",
"elif",
"ast",
".",
"species_id",
":",
"bo",
".",
"ast",
".",
"species",
".",
"add",
"(",
"(",
"ast",
".",
"species_id",
",",
"ast",
".",
"species_label",
")",
")",
"# Recursively process every NSArg by processing BELAst and Functions",
"if",
"hasattr",
"(",
"ast",
",",
"\"args\"",
")",
":",
"for",
"arg",
"in",
"ast",
".",
"args",
":",
"orthologize",
"(",
"arg",
",",
"bo",
",",
"species_id",
")",
"return",
"ast"
] | Recursively orthologize BEL Entities in BEL AST using API endpoint
NOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog)
Args:
ast (BEL): BEL AST
bo: BEL object carrying the AST and validation messages
species_id (str): species id (e.g. TAX:10090) to orthologize to
Returns:
BEL: BEL AST | [
"Recursively",
"orthologize",
"BEL",
"Entities",
"in",
"BEL",
"AST",
"using",
"API",
"endpoint"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L235-L283 |
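A hedged usage sketch; bo is assumed to be a bel.lang.belobj BEL object whose NSArgs already carry ortholog data (see populate_ast_nsarg_orthologs below).

ast = orthologize(bo.ast, bo, 'TAX:10090')   # orthologize to mouse
for level, msg in bo.validation_messages:
    print(level, msg)   # surfaces 'No ortholog found for ...' warnings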
belbio/bel | bel/lang/bel_utils.py | populate_ast_nsarg_orthologs | def populate_ast_nsarg_orthologs(ast, species):
"""Recursively collect NSArg orthologs for BEL AST
This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available
Args:
ast: AST at recursive point in belobj
species: dictionary of species ids vs labels for or
"""
ortholog_namespace = "EG"
if isinstance(ast, NSArg):
if re.match(ortholog_namespace, ast.canonical):
orthologs = bel.terms.orthologs.get_orthologs(
ast.canonical, list(species.keys())
)
for species_id in species:
if species_id in orthologs:
orthologs[species_id]["species_label"] = species[species_id]
ast.orthologs = copy.deepcopy(orthologs)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_orthologs(arg, species)
return ast | python | def populate_ast_nsarg_orthologs(ast, species):
"""Recursively collect NSArg orthologs for BEL AST
This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available
Args:
ast: AST at recursive point in belobj
species: dictionary of species ids vs labels for ortholog lookup
"""
ortholog_namespace = "EG"
if isinstance(ast, NSArg):
if re.match(ortholog_namespace, ast.canonical):
orthologs = bel.terms.orthologs.get_orthologs(
ast.canonical, list(species.keys())
)
for species_id in species:
if species_id in orthologs:
orthologs[species_id]["species_label"] = species[species_id]
ast.orthologs = copy.deepcopy(orthologs)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_orthologs(arg, species)
return ast | [
"def",
"populate_ast_nsarg_orthologs",
"(",
"ast",
",",
"species",
")",
":",
"ortholog_namespace",
"=",
"\"EG\"",
"if",
"isinstance",
"(",
"ast",
",",
"NSArg",
")",
":",
"if",
"re",
".",
"match",
"(",
"ortholog_namespace",
",",
"ast",
".",
"canonical",
")",
":",
"orthologs",
"=",
"bel",
".",
"terms",
".",
"orthologs",
".",
"get_orthologs",
"(",
"ast",
".",
"canonical",
",",
"list",
"(",
"species",
".",
"keys",
"(",
")",
")",
")",
"for",
"species_id",
"in",
"species",
":",
"if",
"species_id",
"in",
"orthologs",
":",
"orthologs",
"[",
"species_id",
"]",
"[",
"\"species_label\"",
"]",
"=",
"species",
"[",
"species_id",
"]",
"ast",
".",
"orthologs",
"=",
"copy",
".",
"deepcopy",
"(",
"orthologs",
")",
"# Recursively process every NSArg by processing BELAst and Functions",
"if",
"hasattr",
"(",
"ast",
",",
"\"args\"",
")",
":",
"for",
"arg",
"in",
"ast",
".",
"args",
":",
"populate_ast_nsarg_orthologs",
"(",
"arg",
",",
"species",
")",
"return",
"ast"
] | Recursively collect NSArg orthologs for BEL AST
This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available
Args:
ast: AST at recursive point in belobj
species: dictionary of species ids vs labels for ortholog lookup | [
"Recursively",
"collect",
"NSArg",
"orthologs",
"for",
"BEL",
"AST"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L286-L314 |
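A hedged sketch of the species mapping this expects; the taxonomy ids and labels are illustrative assumptions.

species = {'TAX:9606': 'human', 'TAX:10090': 'mouse'}
ast = populate_ast_nsarg_orthologs(bo.ast, species)
# only NSArgs whose canonical id starts with the EG namespace are queried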
belbio/bel | bel/lang/bel_utils.py | preprocess_bel_stmt | def preprocess_bel_stmt(stmt: str) -> str:
"""Clean up basic formatting of BEL statement
Args:
stmt: BEL statement as single string
Returns:
cleaned BEL statement
"""
stmt = stmt.strip() # remove newline at end of stmt
stmt = re.sub(r",+", ",", stmt) # remove multiple commas
stmt = re.sub(r",", ", ", stmt) # add space after each comma
stmt = re.sub(r" +", " ", stmt) # remove multiple spaces
return stmt | python | def preprocess_bel_stmt(stmt: str) -> str:
"""Clean up basic formatting of BEL statement
Args:
stmt: BEL statement as single string
Returns:
cleaned BEL statement
"""
stmt = stmt.strip() # remove newline at end of stmt
stmt = re.sub(r",+", ",", stmt) # remove multiple commas
stmt = re.sub(r",", ", ", stmt) # add space after each comma
stmt = re.sub(r" +", " ", stmt) # remove multiple spaces
return stmt | [
"def",
"preprocess_bel_stmt",
"(",
"stmt",
":",
"str",
")",
"->",
"str",
":",
"stmt",
"=",
"stmt",
".",
"strip",
"(",
")",
"# remove newline at end of stmt",
"stmt",
"=",
"re",
".",
"sub",
"(",
"r\",+\"",
",",
"\",\"",
",",
"stmt",
")",
"# remove multiple commas",
"stmt",
"=",
"re",
".",
"sub",
"(",
"r\",\"",
",",
"\", \"",
",",
"stmt",
")",
"# add space after each comma",
"stmt",
"=",
"re",
".",
"sub",
"(",
"r\" +\"",
",",
"\" \"",
",",
"stmt",
")",
"# remove multiple spaces",
"return",
"stmt"
] | Clean up basic formatting of BEL statement
Args:
stmt: BEL statement as single string
Returns:
cleaned BEL statement | [
"Clean",
"up",
"basic",
"formatting",
"of",
"BEL",
"statement"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L317-L332 |
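The cleanup rules are pure stdlib, so they are easy to verify in isolation; a runnable copy with a worked example:

import re

def preprocess_bel_stmt(stmt):
    stmt = stmt.strip()
    stmt = re.sub(r",+", ",", stmt)   # collapse repeated commas
    stmt = re.sub(r",", ", ", stmt)   # one space after each comma
    stmt = re.sub(r" +", " ", stmt)   # collapse repeated spaces
    return stmt

print(preprocess_bel_stmt('p(HGNC:AKT1),,p(HGNC:EGF)\n'))
# -> 'p(HGNC:AKT1), p(HGNC:EGF)'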
belbio/bel | bel/lang/bel_utils.py | quoting_nsarg | def quoting_nsarg(nsarg_value):
"""Quoting nsargs
If the value needs quotes (only when it contains whitespace, a comma, or ')'),
make sure it is quoted; otherwise strip any surrounding quotes.
"""
quoted = re.findall(r'^"(.*)"$', nsarg_value)
if re.search(
r"[),\s]", nsarg_value
): # quote only if it contains whitespace, comma or ')'
if quoted:
return nsarg_value
else:
return f'"{nsarg_value}"'
else:
if quoted:
return quoted[0]
else:
return nsarg_value | python | def quoting_nsarg(nsarg_value):
"""Quoting nsargs
If the value needs quotes (only when it contains whitespace, a comma, or ')'),
make sure it is quoted; otherwise strip any surrounding quotes.
"""
quoted = re.findall(r'^"(.*)"$', nsarg_value)
if re.search(
r"[),\s]", nsarg_value
): # quote only if it contains whitespace, comma or ')'
if quoted:
return nsarg_value
else:
return f'"{nsarg_value}"'
else:
if quoted:
return quoted[0]
else:
return nsarg_value | [
"def",
"quoting_nsarg",
"(",
"nsarg_value",
")",
":",
"quoted",
"=",
"re",
".",
"findall",
"(",
"r'^\"(.*)\"$'",
",",
"nsarg_value",
")",
"if",
"re",
".",
"search",
"(",
"r\"[),\\s]\"",
",",
"nsarg_value",
")",
":",
"# quote only if it contains whitespace, comma or ')'",
"if",
"quoted",
":",
"return",
"nsarg_value",
"else",
":",
"return",
"f'\"{nsarg_value}\"'",
"else",
":",
"if",
"quoted",
":",
"return",
"quoted",
"[",
"0",
"]",
"else",
":",
"return",
"nsarg_value"
] | Quoting nsargs
If the value needs quotes (only when it contains whitespace, a comma, or ')'),
make sure it is quoted; otherwise strip any surrounding quotes. | [
"Quoting",
"nsargs"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L336-L357 |
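The three quoting cases, demonstrated with a runnable condensed copy of the function:

import re

def quoting_nsarg(nsarg_value):
    quoted = re.findall(r'^"(.*)"$', nsarg_value)
    if re.search(r"[),\s]", nsarg_value):          # needs quoting
        return nsarg_value if quoted else f'"{nsarg_value}"'
    return quoted[0] if quoted else nsarg_value    # quotes unnecessary

print(quoting_nsarg('AKT1'))        # AKT1
print(quoting_nsarg('"AKT1"'))      # AKT1          (quotes stripped)
print(quoting_nsarg('EGF family'))  # "EGF family"  (whitespace forces quotes)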
belbio/bel | bel/lang/bel_utils.py | _dump_spec | def _dump_spec(spec):
"""Dump bel specification dictionary using YAML
Formats this with an extra indentation for lists to make it easier to
use code folding on the YAML version of the spec dictionary.
"""
with open("spec.yaml", "w") as f:
yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False) | python | def _dump_spec(spec):
"""Dump bel specification dictionary using YAML
Formats this with an extra indentation for lists to make it easier to
use code folding on the YAML version of the spec dictionary.
"""
with open("spec.yaml", "w") as f:
yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False) | [
"def",
"_dump_spec",
"(",
"spec",
")",
":",
"with",
"open",
"(",
"\"spec.yaml\"",
",",
"\"w\"",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"spec",
",",
"f",
",",
"Dumper",
"=",
"MyDumper",
",",
"default_flow_style",
"=",
"False",
")"
] | Dump bel specification dictionary using YAML
Formats this with an extra indentation for lists to make it easier to
use code folding on the YAML version of the spec dictionary. | [
"Dump",
"bel",
"specification",
"dictionary",
"using",
"YAML"
] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L450-L457 |
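MyDumper is defined elsewhere in bel_utils; the usual PyYAML recipe for the extra list indentation it provides looks like this (a sketch - the actual class may differ):

import yaml

class MyDumper(yaml.Dumper):
    # indent list items under their parent key, which is what makes
    # editor code folding work on the dumped spec
    def increase_indent(self, flow=False, indentless=False):
        return super().increase_indent(flow, False)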