Dataset schema (column / dtype / observed range):

repository_name             stringlengths    7 - 55
func_path_in_repository     stringlengths    4 - 223
func_name                   stringlengths    1 - 134
whole_func_string           stringlengths    75 - 104k
language                    stringclasses    1 value
func_code_string            stringlengths    75 - 104k (identical to whole_func_string in the rows below)
func_code_tokens            sequencelengths  19 - 28.4k
func_documentation_string   stringlengths    1 - 46.9k
func_documentation_tokens   sequencelengths  1 - 1.97k
split_name                  stringclasses    1 value
func_code_url               stringlengths    87 - 315
belbio/bel
bel/edge/computed.py
compute_edges
def compute_edges(ast: BELAst, spec: BELSpec) -> Edges:
    """Compute edges"""

    edges = []
    if ast.bel_object.__class__.__name__ == "BELAst":
        edges.append(ast.bel_object)

    process_ast(edges, ast, spec)
    return edges
python
[ "def", "compute_edges", "(", "ast", ":", "BELAst", ",", "spec", ":", "BELSpec", ")", "->", "Edges", ":", "edges", "=", "[", "]", "if", "ast", ".", "bel_object", ".", "__class__", ".", "__name__", "==", "\"BELAst\"", ":", "edges", ".", "append", "(", "ast", ".", "bel_object", ")", "process_ast", "(", "edges", ",", "ast", ",", "spec", ")", "return", "edges" ]
Compute edges
[ "Compute", "edges" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/computed.py#L23-L31
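A hedged usage sketch for compute_edges, reusing the bel.lang.belobj.BEL wrapper that appears later in this section (the BEL version, endpoint, and assertion values are placeholders, not taken from the repo):

import bel.lang.belobj
from bel.edge.computed import compute_edges

bo = bel.lang.belobj.BEL("2.0.0", "https://api.bel.bio/v1")  # placeholder version/endpoint
bo.parse({"subject": "p(HGNC:AKT1)", "relation": "increases", "object": "act(p(HGNC:EGFR))"})
edges = compute_edges(bo.ast, bo.spec)  # list of computed BELAst edges for the assertion

Elsewhere in this section the wrapper method bo.compute_edges(ast_result=True) is used instead of calling this function directly.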
belbio/bel
bel/edge/computed.py
process_rule
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec):
    """Process computed edge rule

    Recursively processes BELAst versus a single computed edge rule

    Args:
        edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
        ast (Function): BEL Function AST
        rule (Mapping[str, Any]): computed edge rule
    """

    ast_type = ast.__class__.__name__
    trigger_functions = rule.get("trigger_function", [])
    trigger_types = rule.get("trigger_type", [])

    rule_subject = rule.get("subject")
    rule_relation = rule.get("relation")
    rule_object = rule.get("object")

    log.debug(f"Running {rule_relation} Type: {ast_type}")

    if isinstance(ast, Function):
        function_name = ast.name
        args = ast.args
        parent_function = ast.parent_function

        if function_name in trigger_functions:
            if rule_subject == "trigger_value":
                subject = ast

            if rule_object == "args":
                for arg in args:
                    log.debug(f"1: {subject} {arg}")
                    edge_ast = BELAst(subject, rule_relation, arg, spec)
                    edges.append(edge_ast)
            elif rule_object == "parent_function" and parent_function:
                log.debug(f"2: {subject} {parent_function}")
                edge_ast = BELAst(subject, rule_relation, parent_function, spec)
                edges.append(edge_ast)

        elif ast_type in trigger_types:
            if rule_subject == "trigger_value":
                subject = ast

            if rule_object == "args":
                for arg in args:
                    log.debug(f"3: {subject} {arg}")
                    edge_ast = BELAst(subject, rule_relation, arg, spec)
                    edges.append(edge_ast)
            elif rule_object == "parent_function" and parent_function:
                log.debug(f"4: {subject} {parent_function}")
                edge_ast = BELAst(subject, rule_relation, parent_function, spec)
                edges.append(edge_ast)

    if isinstance(ast, NSArg):
        term = "{}:{}".format(ast.namespace, ast.value)
        parent_function = ast.parent_function
        args = getattr(ast, "args", [])  # NSArg has no args attribute; guards a NameError in the original

        if ast_type in trigger_types:
            if rule_subject == "trigger_value":
                subject = term

            if rule_object == "args":
                for arg in args:
                    log.debug(f"5: {subject} {arg}")
                    edge_ast = BELAst(subject, rule_relation, arg, spec)
                    edges.append(edge_ast)
            elif rule_object == "parent_function" and parent_function:
                log.debug(f"6: {subject} {parent_function}")
                edge_ast = BELAst(subject, rule_relation, parent_function, spec)
                edges.append(edge_ast)

    # Recursively process every element by processing BELAst and Functions
    if hasattr(ast, "args"):
        for arg in ast.args:
            process_rule(edges, arg, rule, spec)
python
[ "def", "process_rule", "(", "edges", ":", "Edges", ",", "ast", ":", "Function", ",", "rule", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "spec", ":", "BELSpec", ")", ":", "ast_type", "=", "ast", ".", "__class__", ".", "__name__", "trigger_functions", "=", "rule", ".", "get", "(", "\"trigger_function\"", ",", "[", "]", ")", "trigger_types", "=", "rule", ".", "get", "(", "\"trigger_type\"", ",", "[", "]", ")", "rule_subject", "=", "rule", ".", "get", "(", "\"subject\"", ")", "rule_relation", "=", "rule", ".", "get", "(", "\"relation\"", ")", "rule_object", "=", "rule", ".", "get", "(", "\"object\"", ")", "log", ".", "debug", "(", "f\"Running {rule_relation} Type: {ast_type}\"", ")", "if", "isinstance", "(", "ast", ",", "Function", ")", ":", "function_name", "=", "ast", ".", "name", "args", "=", "ast", ".", "args", "parent_function", "=", "ast", ".", "parent_function", "if", "function_name", "in", "trigger_functions", ":", "if", "rule_subject", "==", "\"trigger_value\"", ":", "subject", "=", "ast", "if", "rule_object", "==", "\"args\"", ":", "for", "arg", "in", "args", ":", "log", ".", "debug", "(", "f\"1: {subject} {arg}\"", ")", "edge_ast", "=", "BELAst", "(", "subject", ",", "rule_relation", ",", "arg", ",", "spec", ")", "edges", ".", "append", "(", "edge_ast", ")", "elif", "rule_object", "==", "\"parent_function\"", "and", "parent_function", ":", "log", ".", "debug", "(", "f\"2: {subject} {parent_function}\"", ")", "edge_ast", "=", "BELAst", "(", "subject", ",", "rule_relation", ",", "parent_function", ",", "spec", ")", "edges", ".", "append", "(", "edge_ast", ")", "elif", "ast_type", "in", "trigger_types", ":", "if", "rule_subject", "==", "\"trigger_value\"", ":", "subject", "=", "ast", "if", "rule_object", "==", "\"args\"", ":", "for", "arg", "in", "args", ":", "log", ".", "debug", "(", "f\"3: {subject} {arg}\"", ")", "edge_ast", "=", "BELAst", "(", "subject", ",", "rule_relation", ",", "arg", ",", "spec", ")", "edges", ".", "append", "(", "edge_ast", ")", "elif", "rule_object", "==", "\"parent_function\"", "and", "parent_function", ":", "log", ".", "debug", "(", "f\"4: {subject} {parent_function}\"", ")", "edge_ast", "=", "BELAst", "(", "subject", ",", "rule_relation", ",", "parent_function", ",", "spec", ")", "edges", ".", "append", "(", "edge_ast", ")", "if", "isinstance", "(", "ast", ",", "NSArg", ")", ":", "term", "=", "\"{}:{}\"", ".", "format", "(", "ast", ".", "namespace", ",", "ast", ".", "value", ")", "parent_function", "=", "ast", ".", "parent_function", "if", "ast_type", "in", "trigger_types", ":", "if", "rule_subject", "==", "\"trigger_value\"", ":", "subject", "=", "term", "if", "rule_object", "==", "\"args\"", ":", "for", "arg", "in", "args", ":", "log", ".", "debug", "(", "f\"5: {subject} {arg}\"", ")", "edge_ast", "=", "BELAst", "(", "subject", ",", "rule_relation", ",", "arg", ",", "spec", ")", "edges", ".", "append", "(", "edge_ast", ")", "elif", "rule_object", "==", "\"parent_function\"", "and", "parent_function", ":", "log", ".", "debug", "(", "f\"6: {subject} {parent_function}\"", ")", "edge_ast", "=", "BELAst", "(", "subject", ",", "rule_relation", ",", "parent_function", ",", "spec", ")", "edges", ".", "append", "(", "edge_ast", ")", "# Recursively process every element by processing BELAst and Functions", "if", "hasattr", "(", "ast", ",", "\"args\"", ")", ":", "for", "arg", "in", "ast", ".", "args", ":", "process_rule", "(", "edges", ",", "arg", ",", "rule", ",", "spec", ")" ]
Process computed edge rule

Recursively processes BELAst versus a single computed edge rule

Args:
    edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
    ast (Function): BEL Function AST
    rule (Mapping[str, Any]): computed edge rule
[ "Process", "computed", "edge", "rule" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/computed.py#L151-L224
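process_rule is driven by rule mappings read via rule.get("trigger_function"), rule.get("trigger_type"), and the subject/relation/object keys. A hypothetical rule of that shape, continuing the sketch above (the function and relation names here are illustrative, not taken from a BEL specification):

from bel.edge.computed import process_rule

rule = {
    "trigger_function": ["complexAbundance"],  # illustrative function name to match on
    "trigger_type": [],                        # alternatively, match on AST node class names
    "subject": "trigger_value",                # the matched node becomes the edge subject
    "relation": "hasComponent",                # illustrative relation for the computed edge
    "object": "args",                          # one edge per argument; "parent_function" is also supported
}
edges = []
process_rule(edges, bo.ast, rule, bo.spec)     # appends matching BELAst edges to `edges`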
belbio/bel
bel/resources/ortholog.py
load_orthologs
def load_orthologs(fo: IO, metadata: dict):
    """Load orthologs into ArangoDB

    Args:
        fo: file obj - orthologs file
        metadata: dict containing the metadata for orthologs
    """

    version = metadata["metadata"]["version"]

    # LOAD ORTHOLOGS INTO ArangoDB
    with timy.Timer("Load Orthologs") as timer:
        arango_client = arangodb.get_client()
        belns_db = arangodb.get_belns_handle(arango_client)
        arangodb.batch_load_docs(
            belns_db, orthologs_iterator(fo, version), on_duplicate="update"
        )

        log.info(
            "Load orthologs",
            elapsed=timer.elapsed,
            source=metadata["metadata"]["source"],
        )

        # Clean up old entries
        remove_old_ortholog_edges = f"""
            FOR edge in ortholog_edges
                FILTER edge.source == "{metadata["metadata"]["source"]}"
                FILTER edge.version != "{version}"
                REMOVE edge IN ortholog_edges
        """

        remove_old_ortholog_nodes = f"""
            FOR node in ortholog_nodes
                FILTER node.source == "{metadata["metadata"]["source"]}"
                FILTER node.version != "{version}"
                REMOVE node IN ortholog_nodes
        """

        arangodb.aql_query(belns_db, remove_old_ortholog_edges)
        arangodb.aql_query(belns_db, remove_old_ortholog_nodes)

    # Add metadata to resource metadata collection
    metadata["_key"] = f"Orthologs_{metadata['metadata']['source']}"
    try:
        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
    except ArangoError as ae:
        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
python
[ "def", "load_orthologs", "(", "fo", ":", "IO", ",", "metadata", ":", "dict", ")", ":", "version", "=", "metadata", "[", "\"metadata\"", "]", "[", "\"version\"", "]", "# LOAD ORTHOLOGS INTO ArangoDB", "with", "timy", ".", "Timer", "(", "\"Load Orthologs\"", ")", "as", "timer", ":", "arango_client", "=", "arangodb", ".", "get_client", "(", ")", "belns_db", "=", "arangodb", ".", "get_belns_handle", "(", "arango_client", ")", "arangodb", ".", "batch_load_docs", "(", "belns_db", ",", "orthologs_iterator", "(", "fo", ",", "version", ")", ",", "on_duplicate", "=", "\"update\"", ")", "log", ".", "info", "(", "\"Load orthologs\"", ",", "elapsed", "=", "timer", ".", "elapsed", ",", "source", "=", "metadata", "[", "\"metadata\"", "]", "[", "\"source\"", "]", ",", ")", "# Clean up old entries", "remove_old_ortholog_edges", "=", "f\"\"\"\n FOR edge in ortholog_edges\n FILTER edge.source == \"{metadata[\"metadata\"][\"source\"]}\"\n FILTER edge.version != \"{version}\"\n REMOVE edge IN ortholog_edges\n \"\"\"", "remove_old_ortholog_nodes", "=", "f\"\"\"\n FOR node in ortholog_nodes\n FILTER node.source == \"{metadata[\"metadata\"][\"source\"]}\"\n FILTER node.version != \"{version}\"\n REMOVE node IN ortholog_nodes\n \"\"\"", "arangodb", ".", "aql_query", "(", "belns_db", ",", "remove_old_ortholog_edges", ")", "arangodb", ".", "aql_query", "(", "belns_db", ",", "remove_old_ortholog_nodes", ")", "# Add metadata to resource metadata collection", "metadata", "[", "\"_key\"", "]", "=", "f\"Orthologs_{metadata['metadata']['source']}\"", "try", ":", "belns_db", ".", "collection", "(", "arangodb", ".", "belns_metadata_name", ")", ".", "insert", "(", "metadata", ")", "except", "ArangoError", "as", "ae", ":", "belns_db", ".", "collection", "(", "arangodb", ".", "belns_metadata_name", ")", ".", "replace", "(", "metadata", ")" ]
Load orthologs into ArangoDB

Args:
    fo: file obj - orthologs file
    metadata: dict containing the metadata for orthologs
[ "Load", "orthologs", "into", "ArangoDB" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/ortholog.py#L19-L64
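load_orthologs only reads metadata["metadata"]["version"] and ["source"]. A sketch of a call with the minimal metadata shape it relies on (file name and field values are illustrative):

from bel.resources.ortholog import load_orthologs

metadata = {
    "metadata": {
        "source": "EntrezGene",    # illustrative source name
        "version": "2019-01-01",   # illustrative version string
    }
}
with open("orthologs.jsonl.gz", "rb") as fo:   # hypothetical gzipped JSON-lines file
    load_orthologs(fo, metadata)               # loads nodes/edges, then prunes older versions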
belbio/bel
bel/resources/ortholog.py
orthologs_iterator
def orthologs_iterator(fo, version):
    """Ortholog node and edge iterator"""

    species_list = config["bel_resources"].get("species_list", [])

    fo.seek(0)
    with gzip.open(fo, "rt") as f:
        source = None  # set by the metadata record, which is expected before any ortholog records
        for line in f:
            edge = json.loads(line)
            if "metadata" in edge:
                source = edge["metadata"]["source"]
                continue
            if "ortholog" in edge:
                edge = edge["ortholog"]
                subj_tax_id = edge["subject"]["tax_id"]
                obj_tax_id = edge["object"]["tax_id"]

                # Skip if species not listed in species_list
                if species_list and subj_tax_id and subj_tax_id not in species_list:
                    continue
                if species_list and obj_tax_id and obj_tax_id not in species_list:
                    continue

                # Converted to ArangoDB legal chars for _key
                subj_key = arangodb.arango_id_to_key(edge["subject"]["id"])
                subj_id = edge["subject"]["id"]

                # Converted to ArangoDB legal chars for _key
                obj_key = arangodb.arango_id_to_key(edge["object"]["id"])
                obj_id = edge["object"]["id"]

                # Subject node
                yield (
                    arangodb.ortholog_nodes_name,
                    {
                        "_key": subj_key,
                        "name": subj_id,
                        "tax_id": edge["subject"]["tax_id"],
                        "source": source,
                        "version": version,
                    },
                )
                # Object node
                yield (
                    arangodb.ortholog_nodes_name,
                    {
                        "_key": obj_key,
                        "name": obj_id,
                        "tax_id": edge["object"]["tax_id"],
                        "source": source,
                        "version": version,
                    },
                )

                arango_edge = {
                    "_from": f"{arangodb.ortholog_nodes_name}/{subj_key}",
                    "_to": f"{arangodb.ortholog_nodes_name}/{obj_key}",
                    "_key": bel.utils._create_hash(f"{subj_id}>>{obj_id}"),
                    "type": "ortholog_to",
                    "source": source,
                    "version": version,
                }

                yield (arangodb.ortholog_edges_name, arango_edge)
python
[ "def", "orthologs_iterator", "(", "fo", ",", "version", ")", ":", "species_list", "=", "config", "[", "\"bel_resources\"", "]", ".", "get", "(", "\"species_list\"", ",", "[", "]", ")", "fo", ".", "seek", "(", "0", ")", "with", "gzip", ".", "open", "(", "fo", ",", "\"rt\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "edge", "=", "json", ".", "loads", "(", "line", ")", "if", "\"metadata\"", "in", "edge", ":", "source", "=", "edge", "[", "\"metadata\"", "]", "[", "\"source\"", "]", "continue", "if", "\"ortholog\"", "in", "edge", ":", "edge", "=", "edge", "[", "\"ortholog\"", "]", "subj_tax_id", "=", "edge", "[", "\"subject\"", "]", "[", "\"tax_id\"", "]", "obj_tax_id", "=", "edge", "[", "\"object\"", "]", "[", "\"tax_id\"", "]", "# Skip if species not listed in species_list", "if", "species_list", "and", "subj_tax_id", "and", "subj_tax_id", "not", "in", "species_list", ":", "continue", "if", "species_list", "and", "obj_tax_id", "and", "obj_tax_id", "not", "in", "species_list", ":", "continue", "# Converted to ArangoDB legal chars for _key", "subj_key", "=", "arangodb", ".", "arango_id_to_key", "(", "edge", "[", "\"subject\"", "]", "[", "\"id\"", "]", ")", "subj_id", "=", "edge", "[", "\"subject\"", "]", "[", "\"id\"", "]", "# Converted to ArangoDB legal chars for _key", "obj_key", "=", "arangodb", ".", "arango_id_to_key", "(", "edge", "[", "\"object\"", "]", "[", "\"id\"", "]", ")", "obj_id", "=", "edge", "[", "\"object\"", "]", "[", "\"id\"", "]", "# Subject node", "yield", "(", "arangodb", ".", "ortholog_nodes_name", ",", "{", "\"_key\"", ":", "subj_key", ",", "\"name\"", ":", "subj_id", ",", "\"tax_id\"", ":", "edge", "[", "\"subject\"", "]", "[", "\"tax_id\"", "]", ",", "\"source\"", ":", "source", ",", "\"version\"", ":", "version", ",", "}", ",", ")", "# Object node", "yield", "(", "arangodb", ".", "ortholog_nodes_name", ",", "{", "\"_key\"", ":", "obj_key", ",", "\"name\"", ":", "obj_id", ",", "\"tax_id\"", ":", "edge", "[", "\"object\"", "]", "[", "\"tax_id\"", "]", ",", "\"source\"", ":", "source", ",", "\"version\"", ":", "version", ",", "}", ",", ")", "arango_edge", "=", "{", "\"_from\"", ":", "f\"{arangodb.ortholog_nodes_name}/{subj_key}\"", ",", "\"_to\"", ":", "f\"{arangodb.ortholog_nodes_name}/{obj_key}\"", ",", "\"_key\"", ":", "bel", ".", "utils", ".", "_create_hash", "(", "f\"{subj_id}>>{obj_id}\"", ")", ",", "\"type\"", ":", "\"ortholog_to\"", ",", "\"source\"", ":", "source", ",", "\"version\"", ":", "version", ",", "}", "yield", "(", "arangodb", ".", "ortholog_edges_name", ",", "arango_edge", ")" ]
Ortholog node and edge iterator
[ "Ortholog", "node", "and", "edge", "iterator" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/ortholog.py#L67-L131
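orthologs_iterator consumes a gzipped JSON-lines stream whose first record carries the source metadata, followed by ortholog records. A hypothetical two-record input, shown as Python data (the gene identifiers are illustrative):

records = [
    {"metadata": {"source": "EntrezGene"}},  # must come first: it sets `source` for later records
    {"ortholog": {
        "subject": {"id": "EG:207", "tax_id": "TAX:9606"},     # illustrative human gene
        "object": {"id": "EG:11651", "tax_id": "TAX:10090"},   # illustrative mouse ortholog
    }},
]
# Each ortholog record yields two ortholog_nodes docs and one "ortholog_to" edge doc,
# keyed for ArangoDB and filtered against the configured species_list.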
belbio/bel
bel/edge/edges.py
nanopub_to_edges
def nanopub_to_edges(nanopub: dict = {}, rules: List[str] = [], orthologize_targets: list = []):
    """Process nanopub into edges and load into EdgeStore

    Args:
        nanopub: BEL Nanopub
        rules: list of compute rules to process
        orthologize_targets: list of species in TAX:<int> format

    Returns:
        list: of edges

    Edge object:
        {
            "edge": {
                "subject": {
                    "name": subj_canon,
                    "name_lc": subj_canon.lower(),
                    "label": subj_lbl,
                    "label_lc": subj_lbl.lower(),
                    "components": subj_components,
                },
                "relation": {  # relation _key is based on a hash
                    "relation": edge_ast.bel_relation,
                    "edge_hash": edge_hash,
                    "edge_dt": edge_dt,
                    "nanopub_url": nanopub_url,
                    "nanopub_id": nanopub_id,
                    "citation": citation,
                    "subject_canon": subj_canon,
                    "subject": subj_lbl,
                    "object_canon": obj_canon,
                    "object": obj_lbl,
                    "annotations": nanopub['annotations'],
                    "metadata": nanopub['metadata'],
                    "public_flag": True,  # will be added when groups/permissions feature is finished,
                    "edge_types": edge_types,
                },
                'object': {
                    "name": obj_canon,
                    "name_lc": obj_canon.lower(),
                    "label": obj_lbl,
                    "label_lc": obj_lbl.lower(),
                    "components": obj_components,
                }
            }
        }
    """

    # Collect input values ####################################################
    nanopub_url = nanopub.get("source_url", "")

    edge_dt = utils.dt_utc_formatted()  # don't want this in relation_id

    # Extract BEL Version and make sure we can process this
    if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
        bel_version = nanopub["nanopub"]["type"]["version"]
        versions = bel.lang.bel_specification.get_bel_versions()
        if bel_version not in versions:
            log.error(
                f"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}"
            )
            return []
    else:
        log.error(
            f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}"
        )
        return []

    # Required for BEL parsing/canonicalization/orthologization
    api_url = config["bel_api"]["servers"]["api_url"]

    try:
        citation_string = normalize_nanopub_citation(nanopub)
    except Exception as e:
        log.error(f"Could not create citation string for {nanopub_url}")
        citation_string = ""

    if orthologize_targets == []:
        if config["bel_api"].get("edges", None):
            orthologize_targets = config["bel_api"]["edges"].get("orthologize_targets", [])

    # orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species']
    # if orig_species_id:
    #     orig_species_id = orig_species_id[0]

    master_annotations = copy.deepcopy(nanopub["nanopub"]["annotations"])
    master_metadata = copy.deepcopy(nanopub["nanopub"]["metadata"])
    master_metadata.pop("gd_abstract", None)

    nanopub_type = nanopub["nanopub"]["metadata"].get("nanopub_type")

    # Create Edge Assertion Info ##############################################
    # r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type)
    r = generate_assertion_edge_info(
        nanopub["nanopub"]["assertions"], orthologize_targets, bel_version, api_url, nanopub_type
    )
    edge_info_list = r["edge_info_list"]

    # Build Edges #############################################################
    edges = []
    errors = []
    for edge_info in edge_info_list:
        annotations = copy.deepcopy(master_annotations)
        metadata = copy.deepcopy(master_metadata)

        errors.extend(edge_info["errors"])

        if not edge_info.get("canonical"):
            continue

        # TODO - remove this
        # if edge_info.get('species_id', False):
        #     annotations = orthologize_context(edge_info['species_id'], annotations)

        edge_hash = utils._create_hash(
            f'{edge_info["canonical"]["subject"]} {edge_info["canonical"]["relation"]} {edge_info["canonical"]["object"]}'
        )

        edge = {
            "edge": {
                "subject": {
                    "name": edge_info["canonical"]["subject"],
                    "name_lc": edge_info["canonical"]["subject"].lower(),
                    "label": edge_info["decanonical"]["subject"],
                    "label_lc": edge_info["decanonical"]["subject"].lower(),
                    "components": edge_info["subject_comp"],
                },
                "relation": {
                    "relation": edge_info["canonical"]["relation"],
                    "edge_hash": edge_hash,
                    "edge_dt": edge_dt,
                    "nanopub_url": nanopub_url,
                    "nanopub_id": nanopub["nanopub"]["id"],
                    "citation": citation_string,
                    "subject_canon": edge_info["canonical"]["subject"],
                    "subject": edge_info["decanonical"]["subject"],
                    "object_canon": edge_info["canonical"]["object"],
                    "object": edge_info["decanonical"]["object"],
                    "annotations": copy.deepcopy(annotations),
                    "metadata": copy.deepcopy(metadata),
                    "public_flag": True,
                    "edge_types": edge_info["edge_types"],
                    "species_id": edge_info["species_id"],
                    "species_label": edge_info["species_label"],
                },
                "object": {
                    "name": edge_info["canonical"]["object"],
                    "name_lc": edge_info["canonical"]["object"].lower(),
                    "label": edge_info["decanonical"]["object"],
                    "label_lc": edge_info["decanonical"]["object"].lower(),
                    "components": edge_info["object_comp"],
                },
            }
        }

        edges.append(copy.deepcopy(edge))

    return {
        "edges": edges,
        "nanopub_id": nanopub["nanopub"]["id"],
        "nanopub_url": nanopub_url,
        "success": True,
        "errors": errors,
    }
python
[ "def", "nanopub_to_edges", "(", "nanopub", ":", "dict", "=", "{", "}", ",", "rules", ":", "List", "[", "str", "]", "=", "[", "]", ",", "orthologize_targets", ":", "list", "=", "[", "]", ")", ":", "# Collect input values ####################################################", "nanopub_url", "=", "nanopub", ".", "get", "(", "\"source_url\"", ",", "\"\"", ")", "edge_dt", "=", "utils", ".", "dt_utc_formatted", "(", ")", "# don't want this in relation_id", "# Extract BEL Version and make sure we can process this", "if", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", "[", "\"name\"", "]", ".", "upper", "(", ")", "==", "\"BEL\"", ":", "bel_version", "=", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", "[", "\"version\"", "]", "versions", "=", "bel", ".", "lang", ".", "bel_specification", ".", "get_bel_versions", "(", ")", "if", "bel_version", "not", "in", "versions", ":", "log", ".", "error", "(", "f\"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}\"", ")", "return", "[", "]", "else", ":", "log", ".", "error", "(", "f\"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}\"", ")", "return", "[", "]", "# Required for BEL parsing/canonicalization/orthologization", "api_url", "=", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"api_url\"", "]", "try", ":", "citation_string", "=", "normalize_nanopub_citation", "(", "nanopub", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not create citation string for {nanopub_url}\"", ")", "citation_string", "=", "\"\"", "if", "orthologize_targets", "==", "[", "]", ":", "if", "config", "[", "\"bel_api\"", "]", ".", "get", "(", "\"edges\"", ",", "None", ")", ":", "orthologize_targets", "=", "config", "[", "\"bel_api\"", "]", "[", "\"edges\"", "]", ".", "get", "(", "\"orthologize_targets\"", ",", "[", "]", ")", "# orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species']", "# if orig_species_id:", "# orig_species_id = orig_species_id[0]", "master_annotations", "=", "copy", ".", "deepcopy", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"annotations\"", "]", ")", "master_metadata", "=", "copy", ".", "deepcopy", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"metadata\"", "]", ")", "master_metadata", ".", "pop", "(", "\"gd_abstract\"", ",", "None", ")", "nanopub_type", "=", "nanopub", "[", "\"nanopub\"", "]", "[", "\"metadata\"", "]", ".", "get", "(", "\"nanopub_type\"", ")", "# Create Edge Assertion Info ##############################################", "# r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type)", "r", "=", "generate_assertion_edge_info", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"assertions\"", "]", ",", "orthologize_targets", ",", "bel_version", ",", "api_url", ",", "nanopub_type", ")", "edge_info_list", "=", "r", "[", "\"edge_info_list\"", "]", "# Build Edges #############################################################", "edges", "=", "[", "]", "errors", "=", "[", "]", "for", "edge_info", "in", "edge_info_list", ":", "annotations", "=", "copy", ".", "deepcopy", "(", "master_annotations", ")", "metadata", "=", "copy", ".", "deepcopy", "(", "master_metadata", ")", "errors", ".", "extend", "(", "edge_info", "[", "\"errors\"", "]", ")", "if", "not", "edge_info", ".", "get", "(", "\"canonical\"", ")", ":", "continue", "# TODO - remove this", "# if 
edge_info.get('species_id', False):", "# annotations = orthologize_context(edge_info['species_id'], annotations)", "edge_hash", "=", "utils", ".", "_create_hash", "(", "f'{edge_info[\"canonical\"][\"subject\"]} {edge_info[\"canonical\"][\"relation\"]} {edge_info[\"canonical\"][\"object\"]}'", ")", "edge", "=", "{", "\"edge\"", ":", "{", "\"subject\"", ":", "{", "\"name\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"subject\"", "]", ",", "\"name_lc\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"subject\"", "]", ".", "lower", "(", ")", ",", "\"label\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"subject\"", "]", ",", "\"label_lc\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"subject\"", "]", ".", "lower", "(", ")", ",", "\"components\"", ":", "edge_info", "[", "\"subject_comp\"", "]", ",", "}", ",", "\"relation\"", ":", "{", "\"relation\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"relation\"", "]", ",", "\"edge_hash\"", ":", "edge_hash", ",", "\"edge_dt\"", ":", "edge_dt", ",", "\"nanopub_url\"", ":", "nanopub_url", ",", "\"nanopub_id\"", ":", "nanopub", "[", "\"nanopub\"", "]", "[", "\"id\"", "]", ",", "\"citation\"", ":", "citation_string", ",", "\"subject_canon\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"subject\"", "]", ",", "\"subject\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"subject\"", "]", ",", "\"object_canon\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"object\"", "]", ",", "\"object\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"object\"", "]", ",", "\"annotations\"", ":", "copy", ".", "deepcopy", "(", "annotations", ")", ",", "\"metadata\"", ":", "copy", ".", "deepcopy", "(", "metadata", ")", ",", "\"public_flag\"", ":", "True", ",", "\"edge_types\"", ":", "edge_info", "[", "\"edge_types\"", "]", ",", "\"species_id\"", ":", "edge_info", "[", "\"species_id\"", "]", ",", "\"species_label\"", ":", "edge_info", "[", "\"species_label\"", "]", ",", "}", ",", "\"object\"", ":", "{", "\"name\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"object\"", "]", ",", "\"name_lc\"", ":", "edge_info", "[", "\"canonical\"", "]", "[", "\"object\"", "]", ".", "lower", "(", ")", ",", "\"label\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"object\"", "]", ",", "\"label_lc\"", ":", "edge_info", "[", "\"decanonical\"", "]", "[", "\"object\"", "]", ".", "lower", "(", ")", ",", "\"components\"", ":", "edge_info", "[", "\"object_comp\"", "]", ",", "}", ",", "}", "}", "edges", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge", ")", ")", "return", "{", "\"edges\"", ":", "edges", ",", "\"nanopub_id\"", ":", "nanopub", "[", "\"nanopub\"", "]", "[", "\"id\"", "]", ",", "\"nanopub_url\"", ":", "nanopub_url", ",", "\"success\"", ":", "True", ",", "\"errors\"", ":", "errors", ",", "}" ]
Process nanopub into edges and load into EdgeStore

Args:
    nanopub: BEL Nanopub
    rules: list of compute rules to process
    orthologize_targets: list of species in TAX:<int> format

Returns:
    list: of edges

Edge object:
    {
        "edge": {
            "subject": {
                "name": subj_canon,
                "name_lc": subj_canon.lower(),
                "label": subj_lbl,
                "label_lc": subj_lbl.lower(),
                "components": subj_components,
            },
            "relation": {  # relation _key is based on a hash
                "relation": edge_ast.bel_relation,
                "edge_hash": edge_hash,
                "edge_dt": edge_dt,
                "nanopub_url": nanopub_url,
                "nanopub_id": nanopub_id,
                "citation": citation,
                "subject_canon": subj_canon,
                "subject": subj_lbl,
                "object_canon": obj_canon,
                "object": obj_lbl,
                "annotations": nanopub['annotations'],
                "metadata": nanopub['metadata'],
                "public_flag": True,  # will be added when groups/permissions feature is finished,
                "edge_types": edge_types,
            },
            'object': {
                "name": obj_canon,
                "name_lc": obj_canon.lower(),
                "label": obj_lbl,
                "label_lc": obj_lbl.lower(),
                "components": obj_components,
            }
        }
    }
[ "Process", "nanopub", "into", "edges", "and", "load", "into", "EdgeStore" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/edges.py#L33-L196
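The nanopub fields this function actually touches are type, annotations, metadata, assertions, id, and the optional top-level source_url. A hypothetical minimal input of that shape (the SRO assertion keys follow the usage in generate_assertion_edge_info below; all values are placeholders):

from bel.edge.edges import nanopub_to_edges

nanopub = {
    "source_url": "https://example.org/nanopubs/np1",  # optional; defaults to ""
    "nanopub": {
        "id": "np1",
        "type": {"name": "BEL", "version": "2.0.0"},   # version must be in get_bel_versions()
        "annotations": [{"type": "Species", "id": "TAX:9606", "label": "human"}],
        "metadata": {"nanopub_type": ""},              # "backbone" suppresses orthologized edges
        "assertions": [
            {"subject": "p(HGNC:AKT1)", "relation": "increases", "object": "act(p(HGNC:EGFR))"}
        ],
    },
}
result = nanopub_to_edges(nanopub)
# -> {"edges": [...], "nanopub_id": "np1", "nanopub_url": ..., "success": True, "errors": [...]}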
belbio/bel
bel/edge/edges.py
extract_ast_species
def extract_ast_species(ast):
    """Extract species from ast.species set of tuples (id, label)"""

    species_id = "None"
    species_label = "None"

    species = [
        (species_id, species_label) for (species_id, species_label) in ast.species if species_id
    ]

    if len(species) == 1:
        (species_id, species_label) = species[0]

    if not species_id:
        species_id = "None"
        species_label = "None"

    log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}")

    return (species_id, species_label)
python
[ "def", "extract_ast_species", "(", "ast", ")", ":", "species_id", "=", "\"None\"", "species_label", "=", "\"None\"", "species", "=", "[", "(", "species_id", ",", "species_label", ")", "for", "(", "species_id", ",", "species_label", ")", "in", "ast", ".", "species", "if", "species_id", "]", "if", "len", "(", "species", ")", "==", "1", ":", "(", "species_id", ",", "species_label", ")", "=", "species", "[", "0", "]", "if", "not", "species_id", ":", "species_id", "=", "\"None\"", "species_label", "=", "\"None\"", "log", ".", "debug", "(", "f\"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}\"", ")", "return", "(", "species_id", ",", "species_label", ")" ]
Extract species from ast.species set of tuples (id, label)
[ "Extract", "species", "from", "ast", ".", "species", "set", "of", "tuples", "(", "id", "label", ")" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/edges.py#L199-L217
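extract_ast_species collapses ast.species to a single (id, label) pair and falls back to the literal strings ("None", "None") when zero or several species are present. A quick behavior sketch using a stand-in object (assumes the bel.edge.edges module's logger is importable as-is):

from types import SimpleNamespace
from bel.edge.edges import extract_ast_species

one = SimpleNamespace(species={("TAX:9606", "human")})
extract_ast_species(one)    # -> ("TAX:9606", "human")

many = SimpleNamespace(species={("TAX:9606", "human"), ("TAX:10090", "mouse")})
extract_ast_species(many)   # -> ("None", "None") -- ambiguous, so the "None" strings are kept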
belbio/bel
bel/edge/edges.py
generate_assertion_edge_info
def generate_assertion_edge_info(
    assertions: List[dict],
    orthologize_targets: List[str],
    bel_version: str,
    api_url: str,
    nanopub_type: str = "",
) -> dict:
    """Create edges (SRO) for assertions given orthologization targets

    Args:
        assertions: list of BEL statements (SRO object)
        orthologize_targets: list of species in TAX:<int> format
        bel_version: to be used for processing assertions
        api_url: BEL API url endpoint to use for terminologies and orthologies
    """

    bo = bel.lang.belobj.BEL(bel_version, api_url)
    bo_computed = bel.lang.belobj.BEL(bel_version, api_url)

    edge_info_list = []
    with utils.Timer() as t:
        for assertion in assertions:
            # if not assertion.get('relation', False):
            #     continue  # Skip any subject only statements

            start_time = t.elapsed

            bo.parse(assertion)
            if not bo.ast:
                errors = [
                    f"{error[0]} {error[1]}"
                    for error in bo.validation_messages
                    if error[0] == "ERROR"
                ]
                edge_info = {"errors": copy.deepcopy(errors)}
                edge_info_list.append(copy.deepcopy(edge_info))
                continue

            # populate canonical terms and orthologs for assertion
            bo.collect_nsarg_norms()
            bo.collect_orthologs(orthologize_targets)

            log.info(
                "Timing - time to collect nsargs and orthologs", delta_ms=(t.elapsed - start_time)
            )

            (edge_species_id, edge_species_label) = extract_ast_species(bo.ast)
            orig_species_id = edge_species_id

            canon = bo.canonicalize().to_triple()
            components = get_node_subcomponents(bo.ast)  # needs to be run after canonicalization
            computed_asts = bo.compute_edges(
                ast_result=True
            )  # needs to be run after canonicalization
            decanon = bo.decanonicalize().to_triple()

            if nanopub_type == "backbone":
                edge_types = ["backbone"]
            else:
                edge_types = ["original", "primary"]

            if assertion.get("relation", False):
                causal_edge_type = []
                if "causal" in bo.spec["relations"]["info"][assertion["relation"]]["categories"]:
                    edge_types.append("causal")

                edge_info = {
                    "edge_types": edge_types,
                    "species_id": edge_species_id,
                    "species_label": edge_species_label,
                    "canonical": canon,
                    "decanonical": decanon,
                    "subject_comp": components["subject_comp"],
                    "object_comp": components["object_comp"],
                    "errors": [],
                }
                edge_info_list.append(copy.deepcopy(edge_info))

            # Loop through primary computed asts
            for computed_ast in computed_asts:
                bo_computed.ast = computed_ast
                bo_computed.collect_nsarg_norms()

                canon = bo_computed.canonicalize().to_triple()
                components = get_node_subcomponents(
                    bo_computed.ast
                )  # needs to be run after canonicalization
                decanon = bo_computed.decanonicalize().to_triple()
                (edge_species_id, edge_species_label) = extract_ast_species(bo_computed.ast)

                edge_info = {
                    "edge_types": ["computed"],
                    "species_id": edge_species_id,
                    "species_label": edge_species_label,
                    "canonical": canon,
                    "decanonical": decanon,
                    "subject_comp": components["subject_comp"],
                    "object_comp": components["object_comp"],
                    "errors": [],
                }

                if [edge for edge in edge_info_list if edge.get("canonical", {}) == canon]:
                    continue  # skip if edge is already included (i.e. the primary is same as computed edge)

                edge_info_list.append(copy.deepcopy(edge_info))

            # Skip orthologs if backbone nanopub
            if nanopub_type == "backbone":
                continue

            # only process orthologs if there are species-specific NSArgs
            if len(bo.ast.species) > 0:
                # Loop through orthologs
                for species_id in orthologize_targets:
                    log.debug(f"Orig species: {orig_species_id} Target species: {species_id}")
                    if species_id == orig_species_id:
                        continue

                    bo.orthologize(species_id)
                    (edge_species_id, edge_species_label) = extract_ast_species(bo.ast)

                    if edge_species_id == "None" or edge_species_id == orig_species_id:
                        log.debug(
                            f'Skipping orthologization- species == "None" or {orig_species_id} ASTspecies: {bo.ast.species} for {bo}'
                        )
                        continue

                    ortho_decanon = bo.orthologize(
                        species_id
                    ).to_triple()  # defaults to decanonicalized orthologized form
                    ortho_canon = bo.canonicalize().to_triple()
                    computed_asts = bo.compute_edges(
                        ast_result=True
                    )  # needs to be run after canonicalization
                    components = get_node_subcomponents(
                        bo.ast
                    )  # needs to be run after canonicalization

                    if assertion.get("relation", False):
                        edge_info = {
                            "edge_types": ["orthologized", "primary"] + causal_edge_type,
                            "species_id": edge_species_id,
                            "species_label": edge_species_label,
                            "canonical": ortho_canon,
                            "decanonical": ortho_decanon,
                            "subject_comp": components["subject_comp"],
                            "object_comp": components["object_comp"],
                            "errors": [],
                        }
                        edge_info_list.append(copy.deepcopy(edge_info))

                    # Loop through orthologized computed asts
                    for computed_ast in computed_asts:
                        bo_computed.ast = computed_ast
                        bo_computed.collect_nsarg_norms()

                        canon = bo_computed.canonicalize().to_triple()
                        components = get_node_subcomponents(
                            bo_computed.ast
                        )  # needs to be run after canonicalization
                        decanon = bo_computed.decanonicalize().to_triple()
                        (edge_species_id, edge_species_label) = extract_ast_species(bo_computed.ast)

                        edge_info = {
                            "edge_types": ["computed", "orthologized"],
                            "species_id": edge_species_id,
                            "species_label": edge_species_label,
                            "canonical": canon,
                            "decanonical": decanon,
                            "subject_comp": components["subject_comp"],
                            "object_comp": components["object_comp"],
                            "errors": [],
                        }

                        if [edge for edge in edge_info_list if edge.get("canonical", {}) == canon]:
                            continue  # skip if edge is already included (i.e. the primary is same as computed edge)

                        edge_info_list.append(copy.deepcopy(edge_info))

    log.info("Timing - Generated all edge info for all nanopub assertions", delta_ms=t.elapsed)

    return {"edge_info_list": edge_info_list}
python
[ "def", "generate_assertion_edge_info", "(", "assertions", ":", "List", "[", "dict", "]", ",", "orthologize_targets", ":", "List", "[", "str", "]", ",", "bel_version", ":", "str", ",", "api_url", ":", "str", ",", "nanopub_type", ":", "str", "=", "\"\"", ",", ")", "->", "dict", ":", "bo", "=", "bel", ".", "lang", ".", "belobj", ".", "BEL", "(", "bel_version", ",", "api_url", ")", "bo_computed", "=", "bel", ".", "lang", ".", "belobj", ".", "BEL", "(", "bel_version", ",", "api_url", ")", "edge_info_list", "=", "[", "]", "with", "utils", ".", "Timer", "(", ")", "as", "t", ":", "for", "assertion", "in", "assertions", ":", "# if not assertion.get('relation', False):", "# continue # Skip any subject only statements", "start_time", "=", "t", ".", "elapsed", "bo", ".", "parse", "(", "assertion", ")", "if", "not", "bo", ".", "ast", ":", "errors", "=", "[", "f\"{error[0]} {error[1]}\"", "for", "error", "in", "bo", ".", "validation_messages", "if", "error", "[", "0", "]", "==", "\"ERROR\"", "]", "edge_info", "=", "{", "\"errors\"", ":", "copy", ".", "deepcopy", "(", "errors", ")", "}", "edge_info_list", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge_info", ")", ")", "continue", "# populate canonical terms and orthologs for assertion", "bo", ".", "collect_nsarg_norms", "(", ")", "bo", ".", "collect_orthologs", "(", "orthologize_targets", ")", "log", ".", "info", "(", "\"Timing - time to collect nsargs and orthologs\"", ",", "delta_ms", "=", "(", "t", ".", "elapsed", "-", "start_time", ")", ")", "(", "edge_species_id", ",", "edge_species_label", ")", "=", "extract_ast_species", "(", "bo", ".", "ast", ")", "orig_species_id", "=", "edge_species_id", "canon", "=", "bo", ".", "canonicalize", "(", ")", ".", "to_triple", "(", ")", "components", "=", "get_node_subcomponents", "(", "bo", ".", "ast", ")", "# needs to be run after canonicalization", "computed_asts", "=", "bo", ".", "compute_edges", "(", "ast_result", "=", "True", ")", "# needs to be run after canonicalization", "decanon", "=", "bo", ".", "decanonicalize", "(", ")", ".", "to_triple", "(", ")", "if", "nanopub_type", "==", "\"backbone\"", ":", "edge_types", "=", "[", "\"backbone\"", "]", "else", ":", "edge_types", "=", "[", "\"original\"", ",", "\"primary\"", "]", "if", "assertion", ".", "get", "(", "\"relation\"", ",", "False", ")", ":", "causal_edge_type", "=", "[", "]", "if", "\"causal\"", "in", "bo", ".", "spec", "[", "\"relations\"", "]", "[", "\"info\"", "]", "[", "assertion", "[", "\"relation\"", "]", "]", "[", "\"categories\"", "]", ":", "edge_types", ".", "append", "(", "\"causal\"", ")", "edge_info", "=", "{", "\"edge_types\"", ":", "edge_types", ",", "\"species_id\"", ":", "edge_species_id", ",", "\"species_label\"", ":", "edge_species_label", ",", "\"canonical\"", ":", "canon", ",", "\"decanonical\"", ":", "decanon", ",", "\"subject_comp\"", ":", "components", "[", "\"subject_comp\"", "]", ",", "\"object_comp\"", ":", "components", "[", "\"object_comp\"", "]", ",", "\"errors\"", ":", "[", "]", ",", "}", "edge_info_list", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge_info", ")", ")", "# Loop through primary computed asts", "for", "computed_ast", "in", "computed_asts", ":", "bo_computed", ".", "ast", "=", "computed_ast", "bo_computed", ".", "collect_nsarg_norms", "(", ")", "canon", "=", "bo_computed", ".", "canonicalize", "(", ")", ".", "to_triple", "(", ")", "components", "=", "get_node_subcomponents", "(", "bo_computed", ".", "ast", ")", "# needs to be run after canonicalization", "decanon", "=", "bo_computed", ".", 
"decanonicalize", "(", ")", ".", "to_triple", "(", ")", "(", "edge_species_id", ",", "edge_species_label", ")", "=", "extract_ast_species", "(", "bo_computed", ".", "ast", ")", "edge_info", "=", "{", "\"edge_types\"", ":", "[", "\"computed\"", "]", ",", "\"species_id\"", ":", "edge_species_id", ",", "\"species_label\"", ":", "edge_species_label", ",", "\"canonical\"", ":", "canon", ",", "\"decanonical\"", ":", "decanon", ",", "\"subject_comp\"", ":", "components", "[", "\"subject_comp\"", "]", ",", "\"object_comp\"", ":", "components", "[", "\"object_comp\"", "]", ",", "\"errors\"", ":", "[", "]", ",", "}", "if", "[", "edge", "for", "edge", "in", "edge_info_list", "if", "edge", ".", "get", "(", "\"canonical\"", ",", "{", "}", ")", "==", "canon", "]", ":", "continue", "# skip if edge is already included (i.e. the primary is same as computed edge)", "edge_info_list", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge_info", ")", ")", "# Skip orthologs if backbone nanopub", "if", "nanopub_type", "==", "\"backbone\"", ":", "continue", "# only process orthologs if there are species-specific NSArgs", "if", "len", "(", "bo", ".", "ast", ".", "species", ")", ">", "0", ":", "# Loop through orthologs", "for", "species_id", "in", "orthologize_targets", ":", "log", ".", "debug", "(", "f\"Orig species: {orig_species_id} Target species: {species_id}\"", ")", "if", "species_id", "==", "orig_species_id", ":", "continue", "bo", ".", "orthologize", "(", "species_id", ")", "(", "edge_species_id", ",", "edge_species_label", ")", "=", "extract_ast_species", "(", "bo", ".", "ast", ")", "if", "edge_species_id", "==", "\"None\"", "or", "edge_species_id", "==", "orig_species_id", ":", "log", ".", "debug", "(", "f'Skipping orthologization- species == \"None\" or {orig_species_id} ASTspecies: {bo.ast.species} for {bo}'", ")", "continue", "ortho_decanon", "=", "bo", ".", "orthologize", "(", "species_id", ")", ".", "to_triple", "(", ")", "# defaults to decanonicalized orthologized form", "ortho_canon", "=", "bo", ".", "canonicalize", "(", ")", ".", "to_triple", "(", ")", "computed_asts", "=", "bo", ".", "compute_edges", "(", "ast_result", "=", "True", ")", "# needs to be run after canonicalization", "components", "=", "get_node_subcomponents", "(", "bo", ".", "ast", ")", "# needs to be run after canonicalization", "if", "assertion", ".", "get", "(", "\"relation\"", ",", "False", ")", ":", "edge_info", "=", "{", "\"edge_types\"", ":", "[", "\"orthologized\"", ",", "\"primary\"", "]", "+", "causal_edge_type", ",", "\"species_id\"", ":", "edge_species_id", ",", "\"species_label\"", ":", "edge_species_label", ",", "\"canonical\"", ":", "ortho_canon", ",", "\"decanonical\"", ":", "ortho_decanon", ",", "\"subject_comp\"", ":", "components", "[", "\"subject_comp\"", "]", ",", "\"object_comp\"", ":", "components", "[", "\"object_comp\"", "]", ",", "\"errors\"", ":", "[", "]", ",", "}", "edge_info_list", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge_info", ")", ")", "# Loop through orthologized computed asts", "for", "computed_ast", "in", "computed_asts", ":", "bo_computed", ".", "ast", "=", "computed_ast", "bo_computed", ".", "collect_nsarg_norms", "(", ")", "canon", "=", "bo_computed", ".", "canonicalize", "(", ")", ".", "to_triple", "(", ")", "components", "=", "get_node_subcomponents", "(", "bo_computed", ".", "ast", ")", "# needs to be run after canonicalization", "decanon", "=", "bo_computed", ".", "decanonicalize", "(", ")", ".", "to_triple", "(", ")", "(", "edge_species_id", ",", "edge_species_label", 
")", "=", "extract_ast_species", "(", "bo_computed", ".", "ast", ")", "edge_info", "=", "{", "\"edge_types\"", ":", "[", "\"computed\"", ",", "\"orthologized\"", "]", ",", "\"species_id\"", ":", "edge_species_id", ",", "\"species_label\"", ":", "edge_species_label", ",", "\"canonical\"", ":", "canon", ",", "\"decanonical\"", ":", "decanon", ",", "\"subject_comp\"", ":", "components", "[", "\"subject_comp\"", "]", ",", "\"object_comp\"", ":", "components", "[", "\"object_comp\"", "]", ",", "\"errors\"", ":", "[", "]", ",", "}", "if", "[", "edge", "for", "edge", "in", "edge_info_list", "if", "edge", ".", "get", "(", "\"canonical\"", ",", "{", "}", ")", "==", "canon", "]", ":", "continue", "# skip if edge is already included (i.e. the primary is same as computed edge)", "edge_info_list", ".", "append", "(", "copy", ".", "deepcopy", "(", "edge_info", ")", ")", "log", ".", "info", "(", "\"Timing - Generated all edge info for all nanopub assertions\"", ",", "delta_ms", "=", "t", ".", "elapsed", ")", "return", "{", "\"edge_info_list\"", ":", "edge_info_list", "}" ]
Create edges (SRO) for assertions given orthologization targets

Args:
    assertions: list of BEL statements (SRO objects)
    orthologize_targets: list of species in TAX:<int> format
    bel_version: BEL version to use for processing assertions
    api_url: BEL API url endpoint to use for terminologies and orthologies
[ "Create", "edges", "(", "SRO", ")", "for", "assertions", "given", "orthologization", "targets" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/edges.py#L221-L403
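For orientation, a minimal usage sketch of generate_assertion_edge_info. The assertion shape and the TAX-prefixed species ids follow the docstring above; the bel_version string and api_url endpoint are placeholders, not values confirmed by this record.

# Hedged usage sketch (version string and endpoint are assumptions)
import bel.edge.edges

assertions = [
    {"subject": "p(HGNC:AKT1)", "relation": "increases", "object": "p(HGNC:EGF)"}
]
result = bel.edge.edges.generate_assertion_edge_info(
    assertions,
    orthologize_targets=["TAX:10090", "TAX:10116"],  # e.g. mouse, rat
    bel_version="2.1.0",
    api_url="https://api.bel.bio/v1",
)
for edge_info in result["edge_info_list"]:
    print(edge_info["edge_types"], edge_info.get("canonical"))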
belbio/bel
bel/edge/edges.py
orthologize_context
def orthologize_context(
    orthologize_target: str, annotations: Mapping[str, Any]
) -> Mapping[str, Any]:
    """Orthologize context

    Replace Species context with new orthologize target and add an
    annotation of type OrigSpecies
    """

    url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{orthologize_target}'
    r = utils.get_url(url)
    species_label = r.json().get("label", "unlabeled")

    orthologized_from = {}
    for idx, annotation in enumerate(annotations):
        if annotation["type"] == "Species":
            orthologized_from = {"id": annotation["id"], "label": annotation["label"]}
            annotations[idx] = {"type": "Species", "id": orthologize_target, "label": species_label}

    if "id" in orthologized_from:
        annotations.append(
            {
                "type": "OrigSpecies",
                "id": f'Orig-{orthologized_from["id"]}',
                "label": f'Orig-{orthologized_from["label"]}',
            }
        )

    return annotations
python
def orthologize_context(
    orthologize_target: str, annotations: Mapping[str, Any]
) -> Mapping[str, Any]:
    """Orthologize context

    Replace Species context with new orthologize target and add an
    annotation of type OrigSpecies
    """

    url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{orthologize_target}'
    r = utils.get_url(url)
    species_label = r.json().get("label", "unlabeled")

    orthologized_from = {}
    for idx, annotation in enumerate(annotations):
        if annotation["type"] == "Species":
            orthologized_from = {"id": annotation["id"], "label": annotation["label"]}
            annotations[idx] = {"type": "Species", "id": orthologize_target, "label": species_label}

    if "id" in orthologized_from:
        annotations.append(
            {
                "type": "OrigSpecies",
                "id": f'Orig-{orthologized_from["id"]}',
                "label": f'Orig-{orthologized_from["label"]}',
            }
        )

    return annotations
[ "def", "orthologize_context", "(", "orthologize_target", ":", "str", ",", "annotations", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "url", "=", "f'{config[\"bel_api\"][\"servers\"][\"api_url\"]}/terms/{orthologize_target}'", "r", "=", "utils", ".", "get_url", "(", "url", ")", "species_label", "=", "r", ".", "json", "(", ")", ".", "get", "(", "\"label\"", ",", "\"unlabeled\"", ")", "orthologized_from", "=", "{", "}", "for", "idx", ",", "annotation", "in", "enumerate", "(", "annotations", ")", ":", "if", "annotation", "[", "\"type\"", "]", "==", "\"Species\"", ":", "orthologized_from", "=", "{", "\"id\"", ":", "annotation", "[", "\"id\"", "]", ",", "\"label\"", ":", "annotation", "[", "\"label\"", "]", "}", "annotations", "[", "idx", "]", "=", "{", "\"type\"", ":", "\"Species\"", ",", "\"id\"", ":", "orthologize_target", ",", "\"label\"", ":", "species_label", "}", "if", "\"id\"", "in", "orthologized_from", ":", "annotations", ".", "append", "(", "{", "\"type\"", ":", "\"OrigSpecies\"", ",", "\"id\"", ":", "f'Orig-{orthologized_from[\"id\"]}'", ",", "\"label\"", ":", "f'Orig-{orthologized_from[\"label\"]}'", ",", "}", ")", "return", "annotations" ]
Orthologize context

Replace Species context with new orthologize target and add an annotation of type OrigSpecies
[ "Orthologize", "context" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/edges.py#L429-L456
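A small worked example of the annotation rewrite above. The species label comes from a live terminology lookup, so the "mouse" label here is an assumption.

annotations = [
    {"type": "Species", "id": "TAX:9606", "label": "human"},
    {"type": "Disease", "id": "MESH:D001943", "label": "Breast Neoplasms"},
]
annotations = orthologize_context("TAX:10090", annotations)
# The Species annotation is replaced in place and the original is preserved:
# [{"type": "Species", "id": "TAX:10090", "label": "mouse"},
#  {"type": "Disease", "id": "MESH:D001943", "label": "Breast Neoplasms"},
#  {"type": "OrigSpecies", "id": "Orig-TAX:9606", "label": "Orig-human"}]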
belbio/bel
bel/lang/migrate_1_2.py
migrate
def migrate(belstr: str) -> str:
    """Migrate BEL 1 to 2.0.0

    Args:
        belstr: BEL 1 assertion

    Returns:
        bel: BEL 2 assertion
    """

    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")

    return migrate_ast(bo.ast).to_string()
python
def migrate(belstr: str) -> str:
    """Migrate BEL 1 to 2.0.0

    Args:
        belstr: BEL 1 assertion

    Returns:
        bel: BEL 2 assertion
    """

    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")

    return migrate_ast(bo.ast).to_string()
[ "def", "migrate", "(", "belstr", ":", "str", ")", "->", "str", ":", "bo", ".", "ast", "=", "bel", ".", "lang", ".", "partialparse", ".", "get_ast_obj", "(", "belstr", ",", "\"2.0.0\"", ")", "return", "migrate_ast", "(", "bo", ".", "ast", ")", ".", "to_string", "(", ")" ]
Migrate BEL 1 to 2.0.0

Args:
    belstr: BEL 1 assertion

Returns:
    bel: BEL 2 assertion
[ "Migrate", "BEL", "1", "to", "2", ".", "0", ".", "0" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L26-L38
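A quick illustration of the migration entry point; the example statement and the expected output shape are assumptions (kin() is a BEL 1 activity function that becomes act(..., ma(kin)) in BEL 2).

from bel.lang.migrate_1_2 import migrate

# Hedged example: BEL 1 kinase-activity syntax migrated to BEL 2.0.0
bel2 = migrate("kin(p(HGNC:BRAF)) increases p(HGNC:MAP2K1)")
print(bel2)  # expected shape: act(p(HGNC:BRAF), ma(kin)) increases p(HGNC:MAP2K1)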
belbio/bel
bel/lang/migrate_1_2.py
migrate_into_triple
def migrate_into_triple(belstr: str) -> str:
    """Migrate BEL1 assertion into BEL 2.0.0 SRO triple"""

    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")

    return migrate_ast(bo.ast).to_triple()
python
def migrate_into_triple(belstr: str) -> str:
    """Migrate BEL1 assertion into BEL 2.0.0 SRO triple"""

    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")

    return migrate_ast(bo.ast).to_triple()
[ "def", "migrate_into_triple", "(", "belstr", ":", "str", ")", "->", "str", ":", "bo", ".", "ast", "=", "bel", ".", "lang", ".", "partialparse", ".", "get_ast_obj", "(", "belstr", ",", "\"2.0.0\"", ")", "return", "migrate_ast", "(", "bo", ".", "ast", ")", ".", "to_triple", "(", ")" ]
Migrate BEL1 assertion into BEL 2.0.0 SRO triple
[ "Migrate", "BEL1", "assertion", "into", "BEL", "2", ".", "0", ".", "0", "SRO", "triple" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L41-L46
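The triple-returning variant of the same migration, as a short hedged sketch:

from bel.lang.migrate_1_2 import migrate_into_triple

triple = migrate_into_triple("kin(p(HGNC:BRAF)) increases p(HGNC:MAP2K1)")
# to_triple() renders the migrated AST in its subject/relation/object
# triple form; the exact representation is not shown in this record.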
belbio/bel
bel/lang/migrate_1_2.py
convert
def convert(ast):
    """Convert BEL1 AST Function to BEL2 AST Function"""

    if ast and ast.type == "Function":

        # Activity function conversion
        if (
            ast.name != "molecularActivity"
            and ast.name in spec["namespaces"]["Activity"]["list"]
        ):
            print("name", ast.name, "type", ast.type)
            ast = convert_activity(ast)
            return ast

        # Otherwise - this will trigger on the BEL2 molecularActivity
        # translocation conversion
        elif ast.name in ["tloc", "translocation"]:
            ast = convert_tloc(ast)

        fus_flag = False
        for idx, arg in enumerate(ast.args):
            if arg.__class__.__name__ == "Function":

                # Fix substitution -> variation()
                if arg.name in ["sub", "substitution"]:
                    ast.args[idx] = convert_sub(arg)

                elif arg.name in ["trunc", "truncation"]:
                    ast.args[idx] = convert_trunc(arg)

                elif arg.name in ["pmod", "proteinModification"]:
                    ast.args[idx] = convert_pmod(arg)

                elif arg.name in ["fus", "fusion"]:
                    fus_flag = True

                # Recursively process Functions
                ast.args[idx] = convert(ast.args[idx])

        if fus_flag:
            ast = convert_fus(ast)

    return ast
python
def convert(ast):
    """Convert BEL1 AST Function to BEL2 AST Function"""

    if ast and ast.type == "Function":

        # Activity function conversion
        if (
            ast.name != "molecularActivity"
            and ast.name in spec["namespaces"]["Activity"]["list"]
        ):
            print("name", ast.name, "type", ast.type)
            ast = convert_activity(ast)
            return ast

        # Otherwise - this will trigger on the BEL2 molecularActivity
        # translocation conversion
        elif ast.name in ["tloc", "translocation"]:
            ast = convert_tloc(ast)

        fus_flag = False
        for idx, arg in enumerate(ast.args):
            if arg.__class__.__name__ == "Function":

                # Fix substitution -> variation()
                if arg.name in ["sub", "substitution"]:
                    ast.args[idx] = convert_sub(arg)

                elif arg.name in ["trunc", "truncation"]:
                    ast.args[idx] = convert_trunc(arg)

                elif arg.name in ["pmod", "proteinModification"]:
                    ast.args[idx] = convert_pmod(arg)

                elif arg.name in ["fus", "fusion"]:
                    fus_flag = True

                # Recursively process Functions
                ast.args[idx] = convert(ast.args[idx])

        if fus_flag:
            ast = convert_fus(ast)

    return ast
[ "def", "convert", "(", "ast", ")", ":", "if", "ast", "and", "ast", ".", "type", "==", "\"Function\"", ":", "# Activity function conversion", "if", "(", "ast", ".", "name", "!=", "\"molecularActivity\"", "and", "ast", ".", "name", "in", "spec", "[", "\"namespaces\"", "]", "[", "\"Activity\"", "]", "[", "\"list\"", "]", ")", ":", "print", "(", "\"name\"", ",", "ast", ".", "name", ",", "\"type\"", ",", "ast", ".", "type", ")", "ast", "=", "convert_activity", "(", "ast", ")", "return", "ast", "# Otherwise - this will trigger on the BEL2 molecularActivity", "# translocation conversion", "elif", "ast", ".", "name", "in", "[", "\"tloc\"", ",", "\"translocation\"", "]", ":", "ast", "=", "convert_tloc", "(", "ast", ")", "fus_flag", "=", "False", "for", "idx", ",", "arg", "in", "enumerate", "(", "ast", ".", "args", ")", ":", "if", "arg", ".", "__class__", ".", "__name__", "==", "\"Function\"", ":", "# Fix substitution -> variation()", "if", "arg", ".", "name", "in", "[", "\"sub\"", ",", "\"substitution\"", "]", ":", "ast", ".", "args", "[", "idx", "]", "=", "convert_sub", "(", "arg", ")", "elif", "arg", ".", "name", "in", "[", "\"trunc\"", ",", "\"truncation\"", "]", ":", "ast", ".", "args", "[", "idx", "]", "=", "convert_trunc", "(", "arg", ")", "elif", "arg", ".", "name", "in", "[", "\"pmod\"", ",", "\"proteinModification\"", "]", ":", "ast", ".", "args", "[", "idx", "]", "=", "convert_pmod", "(", "arg", ")", "elif", "arg", ".", "name", "in", "[", "\"fus\"", ",", "\"fusion\"", "]", ":", "fus_flag", "=", "True", "# Recursively process Functions", "ast", ".", "args", "[", "idx", "]", "=", "convert", "(", "ast", ".", "args", "[", "idx", "]", ")", "if", "fus_flag", ":", "ast", "=", "convert_fus", "(", "ast", ")", "return", "ast" ]
Convert BEL1 AST Function to BEL2 AST Function
[ "Convert", "BEL1", "AST", "Function", "to", "BEL2", "AST", "Function" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L65-L105
belbio/bel
bel/lang/migrate_1_2.py
convert_tloc
def convert_tloc(ast):
    """Convert BEL1 tloc() to BEL2"""

    from_loc_arg = ast.args[1]
    to_loc_arg = ast.args[2]
    from_loc = Function("fromLoc", spec, parent_function=ast)
    from_loc.add_argument(
        NSArg(from_loc_arg.namespace, from_loc_arg.value, parent_function=from_loc)
    )
    to_loc = Function("toLoc", spec, parent_function=ast)
    to_loc.add_argument(
        NSArg(to_loc_arg.namespace, to_loc_arg.value, parent_function=to_loc)
    )

    ast.args[1] = from_loc
    ast.args[2] = to_loc

    return ast
python
def convert_tloc(ast):
    """Convert BEL1 tloc() to BEL2"""

    from_loc_arg = ast.args[1]
    to_loc_arg = ast.args[2]
    from_loc = Function("fromLoc", spec, parent_function=ast)
    from_loc.add_argument(
        NSArg(from_loc_arg.namespace, from_loc_arg.value, parent_function=from_loc)
    )
    to_loc = Function("toLoc", spec, parent_function=ast)
    to_loc.add_argument(
        NSArg(to_loc_arg.namespace, to_loc_arg.value, parent_function=to_loc)
    )

    ast.args[1] = from_loc
    ast.args[2] = to_loc

    return ast
[ "def", "convert_tloc", "(", "ast", ")", ":", "from_loc_arg", "=", "ast", ".", "args", "[", "1", "]", "to_loc_arg", "=", "ast", ".", "args", "[", "2", "]", "from_loc", "=", "Function", "(", "\"fromLoc\"", ",", "spec", ",", "parent_function", "=", "ast", ")", "from_loc", ".", "add_argument", "(", "NSArg", "(", "from_loc_arg", ".", "namespace", ",", "from_loc_arg", ".", "value", ",", "parent_function", "=", "from_loc", ")", ")", "to_loc", "=", "Function", "(", "\"toLoc\"", ",", "spec", ",", "parent_function", "=", "ast", ")", "to_loc", ".", "add_argument", "(", "NSArg", "(", "to_loc_arg", ".", "namespace", ",", "to_loc_arg", ".", "value", ",", "parent_function", "=", "to_loc", ")", ")", "ast", ".", "args", "[", "1", "]", "=", "from_loc", "ast", ".", "args", "[", "2", "]", "=", "to_loc", "return", "ast" ]
Convert BEL1 tloc() to BEL2
[ "Convert", "BEL1", "tloc", "()", "to", "BEL2" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L108-L125
belbio/bel
bel/lang/migrate_1_2.py
convert_activity
def convert_activity(ast):
    """Convert BEL1 activities to BEL2 act()"""

    if len(ast.args) > 1:
        log.error(f"Activity should not have more than 1 argument {ast.to_string()}")

    p_arg = ast.args[0]  # protein argument
    print("p_arg", p_arg)
    ma_arg = Function("ma", bo.spec)
    ma_arg.add_argument(StrArg(ast.name, ma_arg))
    p_arg.change_parent_fn(ma_arg)
    ast = Function("activity", bo.spec)
    p_arg.change_parent_fn(ast)
    ast.add_argument(p_arg)
    ast.add_argument(ma_arg)

    return ast
python
def convert_activity(ast):
    """Convert BEL1 activities to BEL2 act()"""

    if len(ast.args) > 1:
        log.error(f"Activity should not have more than 1 argument {ast.to_string()}")

    p_arg = ast.args[0]  # protein argument
    print("p_arg", p_arg)
    ma_arg = Function("ma", bo.spec)
    ma_arg.add_argument(StrArg(ast.name, ma_arg))
    p_arg.change_parent_fn(ma_arg)
    ast = Function("activity", bo.spec)
    p_arg.change_parent_fn(ast)
    ast.add_argument(p_arg)
    ast.add_argument(ma_arg)

    return ast
[ "def", "convert_activity", "(", "ast", ")", ":", "if", "len", "(", "ast", ".", "args", ")", ">", "1", ":", "log", ".", "error", "(", "f\"Activity should not have more than 1 argument {ast.to_string()}\"", ")", "p_arg", "=", "ast", ".", "args", "[", "0", "]", "# protein argument", "print", "(", "\"p_arg\"", ",", "p_arg", ")", "ma_arg", "=", "Function", "(", "\"ma\"", ",", "bo", ".", "spec", ")", "ma_arg", ".", "add_argument", "(", "StrArg", "(", "ast", ".", "name", ",", "ma_arg", ")", ")", "p_arg", ".", "change_parent_fn", "(", "ma_arg", ")", "ast", "=", "Function", "(", "\"activity\"", ",", "bo", ".", "spec", ")", "p_arg", ".", "change_parent_fn", "(", "ast", ")", "ast", ".", "add_argument", "(", "p_arg", ")", "ast", ".", "add_argument", "(", "ma_arg", ")", "return", "ast" ]
Convert BEL1 activities to BEL2 act()
[ "Convert", "BEL1", "activities", "to", "BEL2", "act", "()" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L128-L144
belbio/bel
bel/lang/migrate_1_2.py
convert_pmod
def convert_pmod(pmod):
    """Update BEL1 pmod() protein modification term"""

    if pmod.args[0].value in spec["bel1_migration"]["protein_modifications"]:
        pmod.args[0].value = spec["bel1_migration"]["protein_modifications"][
            pmod.args[0].value
        ]

    return pmod
python
def convert_pmod(pmod):
    """Update BEL1 pmod() protein modification term"""

    if pmod.args[0].value in spec["bel1_migration"]["protein_modifications"]:
        pmod.args[0].value = spec["bel1_migration"]["protein_modifications"][
            pmod.args[0].value
        ]

    return pmod
[ "def", "convert_pmod", "(", "pmod", ")", ":", "if", "pmod", ".", "args", "[", "0", "]", ".", "value", "in", "spec", "[", "\"bel1_migration\"", "]", "[", "\"protein_modifications\"", "]", ":", "pmod", ".", "args", "[", "0", "]", ".", "value", "=", "spec", "[", "\"bel1_migration\"", "]", "[", "\"protein_modifications\"", "]", "[", "pmod", ".", "args", "[", "0", "]", ".", "value", "]", "return", "pmod" ]
Update BEL1 pmod() protein modification term
[ "Update", "BEL1", "pmod", "()", "protein", "modification", "term" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L147-L155
belbio/bel
bel/lang/migrate_1_2.py
convert_fus
def convert_fus(ast):
    """Convert BEL1 fus() to BEL2 fus()"""

    parent_fn_name = ast.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    fus1_ns = ast.args[0].namespace
    fus1_val = ast.args[0].value

    arg_fus = ast.args[1]
    fus_args = [None, "?", "?"]
    for idx, arg in enumerate(arg_fus.args):
        fus_args[idx] = arg

    fus2_ns = fus_args[0].namespace
    fus2_val = fus_args[0].value

    if fus_args[1] == "?":
        fus1_range = fus_args[1]
    else:
        fus1_range = f'"{prefix}1_{fus_args[1].value}"'

    if fus_args[2] == "?":
        fus2_range = fus_args[2]
    else:
        fus2_range = f'"{prefix}{fus_args[2].value}_?"'

    fus = Function("fus", spec, parent_function=ast)
    fus.args = [
        NSArg(fus1_ns, fus1_val, fus),
        StrArg(fus1_range, fus),
        NSArg(fus2_ns, fus2_val, fus),
        StrArg(fus2_range, fus),
    ]

    # Remove BEL
    ast_args = ast.args
    ast_args.pop(0)
    ast_args.pop(0)

    if ast_args == [None]:
        ast_args = []

    ast.args = []
    ast.add_argument(fus)

    if len(ast_args) > 0:
        ast.args.extend(ast_args)

    return ast
python
def convert_fus(ast):
    """Convert BEL1 fus() to BEL2 fus()"""

    parent_fn_name = ast.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    fus1_ns = ast.args[0].namespace
    fus1_val = ast.args[0].value

    arg_fus = ast.args[1]
    fus_args = [None, "?", "?"]
    for idx, arg in enumerate(arg_fus.args):
        fus_args[idx] = arg

    fus2_ns = fus_args[0].namespace
    fus2_val = fus_args[0].value

    if fus_args[1] == "?":
        fus1_range = fus_args[1]
    else:
        fus1_range = f'"{prefix}1_{fus_args[1].value}"'

    if fus_args[2] == "?":
        fus2_range = fus_args[2]
    else:
        fus2_range = f'"{prefix}{fus_args[2].value}_?"'

    fus = Function("fus", spec, parent_function=ast)
    fus.args = [
        NSArg(fus1_ns, fus1_val, fus),
        StrArg(fus1_range, fus),
        NSArg(fus2_ns, fus2_val, fus),
        StrArg(fus2_range, fus),
    ]

    # Remove BEL
    ast_args = ast.args
    ast_args.pop(0)
    ast_args.pop(0)

    if ast_args == [None]:
        ast_args = []

    ast.args = []
    ast.add_argument(fus)

    if len(ast_args) > 0:
        ast.args.extend(ast_args)

    return ast
[ "def", "convert_fus", "(", "ast", ")", ":", "parent_fn_name", "=", "ast", ".", "name_short", "prefix_list", "=", "{", "\"p\"", ":", "\"p.\"", ",", "\"r\"", ":", "\"r.\"", ",", "\"g\"", ":", "\"c.\"", "}", "prefix", "=", "prefix_list", "[", "parent_fn_name", "]", "fus1_ns", "=", "ast", ".", "args", "[", "0", "]", ".", "namespace", "fus1_val", "=", "ast", ".", "args", "[", "0", "]", ".", "value", "arg_fus", "=", "ast", ".", "args", "[", "1", "]", "fus_args", "=", "[", "None", ",", "\"?\"", ",", "\"?\"", "]", "for", "idx", ",", "arg", "in", "enumerate", "(", "arg_fus", ".", "args", ")", ":", "fus_args", "[", "idx", "]", "=", "arg", "fus2_ns", "=", "fus_args", "[", "0", "]", ".", "namespace", "fus2_val", "=", "fus_args", "[", "0", "]", ".", "value", "if", "fus_args", "[", "1", "]", "==", "\"?\"", ":", "fus1_range", "=", "fus_args", "[", "1", "]", "else", ":", "fus1_range", "=", "f'\"{prefix}1_{fus_args[1].value}\"'", "if", "fus_args", "[", "2", "]", "==", "\"?\"", ":", "fus2_range", "=", "fus_args", "[", "2", "]", "else", ":", "fus2_range", "=", "f'\"{prefix}{fus_args[2].value}_?\"'", "fus", "=", "Function", "(", "\"fus\"", ",", "spec", ",", "parent_function", "=", "ast", ")", "fus", ".", "args", "=", "[", "NSArg", "(", "fus1_ns", ",", "fus1_val", ",", "fus", ")", ",", "StrArg", "(", "fus1_range", ",", "fus", ")", ",", "NSArg", "(", "fus2_ns", ",", "fus2_val", ",", "fus", ")", ",", "StrArg", "(", "fus2_range", ",", "fus", ")", ",", "]", "# Remove BEL", "ast_args", "=", "ast", ".", "args", "ast_args", ".", "pop", "(", "0", ")", "ast_args", ".", "pop", "(", "0", ")", "if", "ast_args", "==", "[", "None", "]", ":", "ast_args", "=", "[", "]", "ast", ".", "args", "=", "[", "]", "ast", ".", "add_argument", "(", "fus", ")", "if", "len", "(", "ast_args", ")", ">", "0", ":", "ast", ".", "args", ".", "extend", "(", "ast_args", ")", "return", "ast" ]
Convert BEL1 fus() to BEL2 fus()
[ "Convert", "BEL1", "fus", "()", "to", "BEL2", "fus", "()" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L158-L208
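The fusion rewrite is easiest to see on a concrete statement. This before/after is a sketch under the BEL 2.0 fusion grammar, not output captured from the function; the genes and coordinates are made up.

# BEL 1:  p(HGNC:BCR, fus(HGNC:JAK2, 1875, 2626))
# BEL 2:  p(fus(HGNC:BCR, "p.1_1875", HGNC:JAK2, "p.2626_?"))
# The "p." prefix comes from the parent function (p/r/g -> p./r./c.),
# and "?" passes through unchanged for unknown breakpoints.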
belbio/bel
bel/lang/migrate_1_2.py
convert_sub
def convert_sub(sub):
    """Convert BEL1 sub() to BEL2 var()"""

    args = sub.args
    (ref_aa, pos, new_aa) = args

    parent_fn_name = sub.parent_function.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    new_var_arg = f'"{prefix}{spec["namespaces"]["AminoAcid"]["to_short"][ref_aa.value]}{pos.value}{spec["namespaces"]["AminoAcid"]["to_short"][new_aa.value]}"'

    new_var = bel.lang.ast.Function("var", bo.spec)
    new_var.add_argument(StrArg(new_var_arg, new_var))

    return new_var
python
def convert_sub(sub):
    """Convert BEL1 sub() to BEL2 var()"""

    args = sub.args
    (ref_aa, pos, new_aa) = args

    parent_fn_name = sub.parent_function.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    new_var_arg = f'"{prefix}{spec["namespaces"]["AminoAcid"]["to_short"][ref_aa.value]}{pos.value}{spec["namespaces"]["AminoAcid"]["to_short"][new_aa.value]}"'

    new_var = bel.lang.ast.Function("var", bo.spec)
    new_var.add_argument(StrArg(new_var_arg, new_var))

    return new_var
[ "def", "convert_sub", "(", "sub", ")", ":", "args", "=", "sub", ".", "args", "(", "ref_aa", ",", "pos", ",", "new_aa", ")", "=", "args", "parent_fn_name", "=", "sub", ".", "parent_function", ".", "name_short", "prefix_list", "=", "{", "\"p\"", ":", "\"p.\"", ",", "\"r\"", ":", "\"r.\"", ",", "\"g\"", ":", "\"c.\"", "}", "prefix", "=", "prefix_list", "[", "parent_fn_name", "]", "new_var_arg", "=", "f'\"{prefix}{spec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][ref_aa.value]}{pos.value}{spec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][new_aa.value]}\"'", "new_var", "=", "bel", ".", "lang", ".", "ast", ".", "Function", "(", "\"var\"", ",", "bo", ".", "spec", ")", "new_var", ".", "add_argument", "(", "StrArg", "(", "new_var_arg", ",", "new_var", ")", ")", "return", "new_var" ]
Convert BEL1 sub() to BEL2 var()
[ "Convert", "BEL1", "sub", "()", "to", "BEL2", "var", "()" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L211-L227
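How the var() argument gets assembled, shown standalone with a stub amino-acid map; the real mapping lives in spec["namespaces"]["AminoAcid"]["to_short"], and its exact entries are an assumption here.

# Standalone sketch of the substitution -> var() string construction above
to_short = {"Glycine": "Gly", "Glutamic acid": "Glu"}  # assumed entries

prefix = "p."  # parent function p() selects the protein-level prefix
ref_aa, pos, new_aa = "Glycine", "275", "Glutamic acid"
new_var_arg = f'"{prefix}{to_short[ref_aa]}{pos}{to_short[new_aa]}"'
print(new_var_arg)  # "p.Gly275Glu"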
belbio/bel
bel/lang/migrate_1_2.py
convert_trunc
def convert_trunc(trunc):
    """Convert BEL1 trunc() to BEL2 var()"""

    parent_fn_name = trunc.parent_function.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    new_var_arg = f'"truncated at {trunc.args[0].value}"'

    new_var = bel.lang.ast.Function("var", bo.spec)
    new_var.add_argument(StrArg(new_var_arg, new_var))

    return new_var
python
def convert_trunc(trunc):
    """Convert BEL1 trunc() to BEL2 var()"""

    parent_fn_name = trunc.parent_function.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    new_var_arg = f'"truncated at {trunc.args[0].value}"'

    new_var = bel.lang.ast.Function("var", bo.spec)
    new_var.add_argument(StrArg(new_var_arg, new_var))

    return new_var
[ "def", "convert_trunc", "(", "trunc", ")", ":", "parent_fn_name", "=", "trunc", ".", "parent_function", ".", "name_short", "prefix_list", "=", "{", "\"p\"", ":", "\"p.\"", ",", "\"r\"", ":", "\"r.\"", ",", "\"g\"", ":", "\"c.\"", "}", "prefix", "=", "prefix_list", "[", "parent_fn_name", "]", "new_var_arg", "=", "f'\"truncated at {trunc.args[0].value}\"'", "new_var", "=", "bel", ".", "lang", ".", "ast", ".", "Function", "(", "\"var\"", ",", "bo", ".", "spec", ")", "new_var", ".", "add_argument", "(", "StrArg", "(", "new_var_arg", ",", "new_var", ")", ")", "return", "new_var" ]
Convert BEL1 trunc() to BEL2 var()
[ "Convert", "BEL1", "trunc", "()", "to", "BEL2", "var", "()" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/migrate_1_2.py#L230-L243
belbio/bel
bel/db/arangodb.py
get_user_creds
def get_user_creds(username, password):
    """Get username/password

    Use the provided username and password, else the values from config, else blank, in that order
    """
    username = utils.first_true(
        [username, config["bel_api"]["servers"]["arangodb_username"]], default=""
    )
    password = utils.first_true(
        [password, config["secrets"]["bel_api"]["servers"].get("arangodb_password")],
        default="",
    )
    return username, password
python
def get_user_creds(username, password):
    """Get username/password

    Use the provided username and password, else the values from config, else blank, in that order
    """
    username = utils.first_true(
        [username, config["bel_api"]["servers"]["arangodb_username"]], default=""
    )
    password = utils.first_true(
        [password, config["secrets"]["bel_api"]["servers"].get("arangodb_password")],
        default="",
    )
    return username, password
[ "def", "get_user_creds", "(", "username", ",", "password", ")", ":", "username", "=", "utils", ".", "first_true", "(", "[", "username", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"arangodb_username\"", "]", "]", ",", "default", "=", "\"\"", ")", "password", "=", "utils", ".", "first_true", "(", "[", "password", ",", "config", "[", "\"secrets\"", "]", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", ".", "get", "(", "\"arangodb_password\"", ")", "]", ",", "default", "=", "\"\"", ",", ")", "return", "username", ",", "password" ]
Get username/password

Use the provided username and password, else the values from config, else blank, in that order
[ "Get", "username", "/", "password" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L46-L59
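The fallback chain relies on utils.first_true; a minimal sketch of that helper under the assumed semantics (first truthy value, else the default):

# Assumed behavior of utils.first_true, shown standalone
def first_true(values, default=""):
    """Return the first truthy value in values, else the default."""
    return next((v for v in values if v), default)

print(first_true([None, "", "secret-from-config"]))  # secret-from-config
print(first_true([None, None]))                      # "" (the default)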
belbio/bel
bel/db/arangodb.py
get_client
def get_client(host=None, port=None, username=None, password=None, enable_logging=True):
    """Get arango client and edgestore db handle"""

    host = utils.first_true(
        [host, config["bel_api"]["servers"]["arangodb_host"], "localhost"]
    )
    port = utils.first_true([port, config["bel_api"]["servers"]["arangodb_port"], 8529])
    username = utils.first_true(
        [username, config["bel_api"]["servers"]["arangodb_username"], ""]
    )
    password = utils.first_true(
        [
            password,
            config.get(
                "secrets",
                config["secrets"]["bel_api"]["servers"].get("arangodb_password"),
            ),
            "",
        ]
    )

    client = arango.client.ArangoClient(
        protocol=config["bel_api"]["servers"]["arangodb_protocol"], host=host, port=port
    )

    return client
python
def get_client(host=None, port=None, username=None, password=None, enable_logging=True):
    """Get arango client and edgestore db handle"""

    host = utils.first_true(
        [host, config["bel_api"]["servers"]["arangodb_host"], "localhost"]
    )
    port = utils.first_true([port, config["bel_api"]["servers"]["arangodb_port"], 8529])
    username = utils.first_true(
        [username, config["bel_api"]["servers"]["arangodb_username"], ""]
    )
    password = utils.first_true(
        [
            password,
            config.get(
                "secrets",
                config["secrets"]["bel_api"]["servers"].get("arangodb_password"),
            ),
            "",
        ]
    )

    client = arango.client.ArangoClient(
        protocol=config["bel_api"]["servers"]["arangodb_protocol"], host=host, port=port
    )

    return client
[ "def", "get_client", "(", "host", "=", "None", ",", "port", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "enable_logging", "=", "True", ")", ":", "host", "=", "utils", ".", "first_true", "(", "[", "host", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"arangodb_host\"", "]", ",", "\"localhost\"", "]", ")", "port", "=", "utils", ".", "first_true", "(", "[", "port", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"arangodb_port\"", "]", ",", "8529", "]", ")", "username", "=", "utils", ".", "first_true", "(", "[", "username", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"arangodb_username\"", "]", ",", "\"\"", "]", ")", "password", "=", "utils", ".", "first_true", "(", "[", "password", ",", "config", ".", "get", "(", "\"secrets\"", ",", "config", "[", "\"secrets\"", "]", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", ".", "get", "(", "\"arangodb_password\"", ")", ",", ")", ",", "\"\"", ",", "]", ")", "client", "=", "arango", ".", "client", ".", "ArangoClient", "(", "protocol", "=", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"arangodb_protocol\"", "]", ",", "host", "=", "host", ",", "port", "=", "port", ")", "return", "client" ]
Get arango client and edgestore db handle
[ "Get", "arango", "client", "and", "edgestore", "db", "handle" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L62-L87
belbio/bel
bel/db/arangodb.py
get_edgestore_handle
def get_edgestore_handle(
    client: arango.client.ArangoClient,
    username=None,
    password=None,
    edgestore_db_name: str = edgestore_db_name,
    edgestore_edges_name: str = edgestore_edges_name,
    edgestore_nodes_name: str = edgestore_nodes_name,
    edgestore_pipeline_name: str = edgestore_pipeline_name,
    edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
    edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
    """Get Edgestore arangodb database handle

    Args:
        client (arango.client.ArangoClient): Description
        username (None, optional): Description
        password (None, optional): Description
        edgestore_db_name (str, optional): Description
        edgestore_edges_name (str, optional): Description
        edgestore_nodes_name (str, optional): Description

    Returns:
        arango.database.StandardDatabase: Description
    """

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "edgestore"
    try:
        if username and password:
            edgestore_db = sys_db.create_database(
                name=edgestore_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            edgestore_db = sys_db.create_database(name=edgestore_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            edgestore_db = client.db(
                edgestore_db_name, username=username, password=password
            )
        else:
            edgestore_db = client.db(edgestore_db_name)

    # TODO - add a skiplist index for _from? or _key? to be able to do paging?

    # has_collection function doesn't seem to be working
    # if not edgestore_db.has_collection(edgestore_nodes_name):
    try:
        nodes = edgestore_db.create_collection(
            edgestore_nodes_name, index_bucket_count=64
        )
        nodes.add_hash_index(fields=["name"], unique=False)
        nodes.add_hash_index(
            fields=["components"], unique=False
        )  # add subject/object components as node properties
    except Exception:
        pass

    # if not edgestore_db.has_collection(edgestore_edges_name):
    try:
        edges = edgestore_db.create_collection(
            edgestore_edges_name, edge=True, index_bucket_count=64
        )
        edges.add_hash_index(fields=["relation"], unique=False)
        edges.add_hash_index(fields=["edge_types"], unique=False)
        edges.add_hash_index(fields=["nanopub_id"], unique=False)
        edges.add_hash_index(fields=["metadata.project"], unique=False)
        edges.add_hash_index(fields=["annotations[*].id"], unique=False)
    except Exception:
        pass

    # if not edgestore_db.has_collection(edgestore_pipeline_name):
    try:
        edgestore_db.create_collection(edgestore_pipeline_name)
    except Exception:
        pass

    try:
        edgestore_db.create_collection(edgestore_pipeline_errors_name)
    except Exception:
        pass

    try:
        edgestore_db.create_collection(edgestore_pipeline_stats_name)
    except arango.exceptions.CollectionCreateError as e:
        pass

    return edgestore_db
python
def get_edgestore_handle(
    client: arango.client.ArangoClient,
    username=None,
    password=None,
    edgestore_db_name: str = edgestore_db_name,
    edgestore_edges_name: str = edgestore_edges_name,
    edgestore_nodes_name: str = edgestore_nodes_name,
    edgestore_pipeline_name: str = edgestore_pipeline_name,
    edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
    edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
    """Get Edgestore arangodb database handle

    Args:
        client (arango.client.ArangoClient): Description
        username (None, optional): Description
        password (None, optional): Description
        edgestore_db_name (str, optional): Description
        edgestore_edges_name (str, optional): Description
        edgestore_nodes_name (str, optional): Description

    Returns:
        arango.database.StandardDatabase: Description
    """

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "edgestore"
    try:
        if username and password:
            edgestore_db = sys_db.create_database(
                name=edgestore_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            edgestore_db = sys_db.create_database(name=edgestore_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            edgestore_db = client.db(
                edgestore_db_name, username=username, password=password
            )
        else:
            edgestore_db = client.db(edgestore_db_name)

    # TODO - add a skiplist index for _from? or _key? to be able to do paging?

    # has_collection function doesn't seem to be working
    # if not edgestore_db.has_collection(edgestore_nodes_name):
    try:
        nodes = edgestore_db.create_collection(
            edgestore_nodes_name, index_bucket_count=64
        )
        nodes.add_hash_index(fields=["name"], unique=False)
        nodes.add_hash_index(
            fields=["components"], unique=False
        )  # add subject/object components as node properties
    except Exception:
        pass

    # if not edgestore_db.has_collection(edgestore_edges_name):
    try:
        edges = edgestore_db.create_collection(
            edgestore_edges_name, edge=True, index_bucket_count=64
        )
        edges.add_hash_index(fields=["relation"], unique=False)
        edges.add_hash_index(fields=["edge_types"], unique=False)
        edges.add_hash_index(fields=["nanopub_id"], unique=False)
        edges.add_hash_index(fields=["metadata.project"], unique=False)
        edges.add_hash_index(fields=["annotations[*].id"], unique=False)
    except Exception:
        pass

    # if not edgestore_db.has_collection(edgestore_pipeline_name):
    try:
        edgestore_db.create_collection(edgestore_pipeline_name)
    except Exception:
        pass

    try:
        edgestore_db.create_collection(edgestore_pipeline_errors_name)
    except Exception:
        pass

    try:
        edgestore_db.create_collection(edgestore_pipeline_stats_name)
    except arango.exceptions.CollectionCreateError as e:
        pass

    return edgestore_db
[ "def", "get_edgestore_handle", "(", "client", ":", "arango", ".", "client", ".", "ArangoClient", ",", "username", "=", "None", ",", "password", "=", "None", ",", "edgestore_db_name", ":", "str", "=", "edgestore_db_name", ",", "edgestore_edges_name", ":", "str", "=", "edgestore_edges_name", ",", "edgestore_nodes_name", ":", "str", "=", "edgestore_nodes_name", ",", "edgestore_pipeline_name", ":", "str", "=", "edgestore_pipeline_name", ",", "edgestore_pipeline_stats_name", ":", "str", "=", "edgestore_pipeline_stats_name", ",", "edgestore_pipeline_errors_name", ":", "str", "=", "edgestore_pipeline_errors_name", ",", ")", "->", "arango", ".", "database", ".", "StandardDatabase", ":", "(", "username", ",", "password", ")", "=", "get_user_creds", "(", "username", ",", "password", ")", "sys_db", "=", "client", ".", "db", "(", "\"_system\"", ",", "username", "=", "username", ",", "password", "=", "password", ")", "# Create a new database named \"edgestore\"", "try", ":", "if", "username", "and", "password", ":", "edgestore_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "edgestore_db_name", ",", "users", "=", "[", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "\"active\"", ":", "True", "}", "]", ",", ")", "else", ":", "edgestore_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "edgestore_db_name", ")", "except", "arango", ".", "exceptions", ".", "DatabaseCreateError", ":", "if", "username", "and", "password", ":", "edgestore_db", "=", "client", ".", "db", "(", "edgestore_db_name", ",", "username", "=", "username", ",", "password", "=", "password", ")", "else", ":", "edgestore_db", "=", "client", ".", "db", "(", "edgestore_db_name", ")", "# TODO - add a skiplist index for _from? or _key? to be able to do paging?", "# has_collection function doesn't seem to be working", "# if not edgestore_db.has_collection(edgestore_nodes_name):", "try", ":", "nodes", "=", "edgestore_db", ".", "create_collection", "(", "edgestore_nodes_name", ",", "index_bucket_count", "=", "64", ")", "nodes", ".", "add_hash_index", "(", "fields", "=", "[", "\"name\"", "]", ",", "unique", "=", "False", ")", "nodes", ".", "add_hash_index", "(", "fields", "=", "[", "\"components\"", "]", ",", "unique", "=", "False", ")", "# add subject/object components as node properties", "except", "Exception", ":", "pass", "# if not edgestore_db.has_collection(edgestore_edges_name):", "try", ":", "edges", "=", "edgestore_db", ".", "create_collection", "(", "edgestore_edges_name", ",", "edge", "=", "True", ",", "index_bucket_count", "=", "64", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"relation\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"edge_types\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"nanopub_id\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"metadata.project\"", "]", ",", "unique", "=", "False", ")", "edges", ".", "add_hash_index", "(", "fields", "=", "[", "\"annotations[*].id\"", "]", ",", "unique", "=", "False", ")", "except", "Exception", ":", "pass", "# if not edgestore_db.has_collection(edgestore_pipeline_name):", "try", ":", "edgestore_db", ".", "create_collection", "(", "edgestore_pipeline_name", ")", "except", "Exception", ":", "pass", "try", ":", "edgestore_db", ".", "create_collection", "(", "edgestore_pipeline_errors_name", ")", "except", "Exception", ":", "pass", "try", ":", 
"edgestore_db", ".", "create_collection", "(", "edgestore_pipeline_stats_name", ")", "except", "arango", ".", "exceptions", ".", "CollectionCreateError", "as", "e", ":", "pass", "return", "edgestore_db" ]
Get Edgestore arangodb database handle

Args:
    client (arango.client.ArangoClient): Description
    username (None, optional): Description
    password (None, optional): Description
    edgestore_db_name (str, optional): Description
    edgestore_edges_name (str, optional): Description
    edgestore_nodes_name (str, optional): Description

Returns:
    arango.database.StandardDatabase: Description
[ "Get", "Edgestore", "arangodb", "database", "handle" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L97-L186
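Wiring the two helpers together; the connection values are placeholders and the collection name is assumed to match the edgestore_edges_name default.

client = get_client(host="localhost", port=8529)
edgestore_db = get_edgestore_handle(client)

# The handle is a python-arango StandardDatabase, so AQL works directly
cursor = edgestore_db.aql.execute("FOR e IN edges LIMIT 5 RETURN e")
for edge in cursor:
    print(edge["_key"])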
belbio/bel
bel/db/arangodb.py
get_belns_handle
def get_belns_handle(client, username=None, password=None):
    """Get BEL namespace arango db handle"""

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "belns"
    try:
        if username and password:
            belns_db = sys_db.create_database(
                name=belns_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belns_db = sys_db.create_database(name=belns_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belns_db = client.db(belns_db_name, username=username, password=password)
        else:
            belns_db = client.db(belns_db_name)

    try:
        belns_db.create_collection(belns_metadata_name)
    except Exception:
        pass

    try:
        equiv_nodes = belns_db.create_collection(
            equiv_nodes_name, index_bucket_count=64
        )
        equiv_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass

    try:
        belns_db.create_collection(equiv_edges_name, edge=True, index_bucket_count=64)
    except Exception:
        pass

    try:
        ortholog_nodes = belns_db.create_collection(
            ortholog_nodes_name, index_bucket_count=64
        )
        ortholog_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass

    try:
        belns_db.create_collection(
            ortholog_edges_name, edge=True, index_bucket_count=64
        )
    except Exception:
        pass

    return belns_db
python
def get_belns_handle(client, username=None, password=None):
    """Get BEL namespace arango db handle"""

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "belns"
    try:
        if username and password:
            belns_db = sys_db.create_database(
                name=belns_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belns_db = sys_db.create_database(name=belns_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belns_db = client.db(belns_db_name, username=username, password=password)
        else:
            belns_db = client.db(belns_db_name)

    try:
        belns_db.create_collection(belns_metadata_name)
    except Exception:
        pass

    try:
        equiv_nodes = belns_db.create_collection(
            equiv_nodes_name, index_bucket_count=64
        )
        equiv_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass

    try:
        belns_db.create_collection(equiv_edges_name, edge=True, index_bucket_count=64)
    except Exception:
        pass

    try:
        ortholog_nodes = belns_db.create_collection(
            ortholog_nodes_name, index_bucket_count=64
        )
        ortholog_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass

    try:
        belns_db.create_collection(
            ortholog_edges_name, edge=True, index_bucket_count=64
        )
    except Exception:
        pass

    return belns_db
[ "def", "get_belns_handle", "(", "client", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "(", "username", ",", "password", ")", "=", "get_user_creds", "(", "username", ",", "password", ")", "sys_db", "=", "client", ".", "db", "(", "\"_system\"", ",", "username", "=", "username", ",", "password", "=", "password", ")", "# Create a new database named \"belns\"", "try", ":", "if", "username", "and", "password", ":", "belns_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "belns_db_name", ",", "users", "=", "[", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "\"active\"", ":", "True", "}", "]", ",", ")", "else", ":", "belns_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "belns_db_name", ")", "except", "arango", ".", "exceptions", ".", "DatabaseCreateError", ":", "if", "username", "and", "password", ":", "belns_db", "=", "client", ".", "db", "(", "belns_db_name", ",", "username", "=", "username", ",", "password", "=", "password", ")", "else", ":", "belns_db", "=", "client", ".", "db", "(", "belns_db_name", ")", "try", ":", "belns_db", ".", "create_collection", "(", "belns_metadata_name", ")", "except", "Exception", ":", "pass", "try", ":", "equiv_nodes", "=", "belns_db", ".", "create_collection", "(", "equiv_nodes_name", ",", "index_bucket_count", "=", "64", ")", "equiv_nodes", ".", "add_hash_index", "(", "fields", "=", "[", "\"name\"", "]", ",", "unique", "=", "True", ")", "except", "Exception", ":", "pass", "try", ":", "belns_db", ".", "create_collection", "(", "equiv_edges_name", ",", "edge", "=", "True", ",", "index_bucket_count", "=", "64", ")", "except", "Exception", ":", "pass", "try", ":", "ortholog_nodes", "=", "belns_db", ".", "create_collection", "(", "ortholog_nodes_name", ",", "index_bucket_count", "=", "64", ")", "ortholog_nodes", ".", "add_hash_index", "(", "fields", "=", "[", "\"name\"", "]", ",", "unique", "=", "True", ")", "except", "Exception", ":", "pass", "try", ":", "belns_db", ".", "create_collection", "(", "ortholog_edges_name", ",", "edge", "=", "True", ",", "index_bucket_count", "=", "64", ")", "except", "Exception", ":", "pass", "return", "belns_db" ]
Get BEL namespace arango db handle
[ "Get", "BEL", "namespace", "arango", "db", "handle" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L189-L244
belbio/bel
bel/db/arangodb.py
get_belapi_handle
def get_belapi_handle(client, username=None, password=None):
    """Get BEL API arango db handle"""

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "belapi"
    try:
        if username and password:
            belapi_db = sys_db.create_database(
                name=belapi_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belapi_db = sys_db.create_database(name=belapi_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belapi_db = client.db(belapi_db_name, username=username, password=password)
        else:
            belapi_db = client.db(belapi_db_name)

    try:
        belapi_db.create_collection(belapi_settings_name)
    except Exception:
        pass

    try:
        belapi_db.create_collection(belapi_statemgmt_name)
    except Exception:
        pass

    return belapi_db
python
def get_belapi_handle(client, username=None, password=None):
    """Get BEL API arango db handle"""

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "belapi"
    try:
        if username and password:
            belapi_db = sys_db.create_database(
                name=belapi_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belapi_db = sys_db.create_database(name=belapi_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belapi_db = client.db(belapi_db_name, username=username, password=password)
        else:
            belapi_db = client.db(belapi_db_name)

    try:
        belapi_db.create_collection(belapi_settings_name)
    except Exception:
        pass

    try:
        belapi_db.create_collection(belapi_statemgmt_name)
    except Exception:
        pass

    return belapi_db
[ "def", "get_belapi_handle", "(", "client", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "(", "username", ",", "password", ")", "=", "get_user_creds", "(", "username", ",", "password", ")", "sys_db", "=", "client", ".", "db", "(", "\"_system\"", ",", "username", "=", "username", ",", "password", "=", "password", ")", "# Create a new database named \"belapi\"", "try", ":", "if", "username", "and", "password", ":", "belapi_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "belapi_db_name", ",", "users", "=", "[", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "\"active\"", ":", "True", "}", "]", ",", ")", "else", ":", "belapi_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "belapi_db_name", ")", "except", "arango", ".", "exceptions", ".", "DatabaseCreateError", ":", "if", "username", "and", "password", ":", "belapi_db", "=", "client", ".", "db", "(", "belapi_db_name", ",", "username", "=", "username", ",", "password", "=", "password", ")", "else", ":", "belapi_db", "=", "client", ".", "db", "(", "belapi_db_name", ")", "try", ":", "belapi_db", ".", "create_collection", "(", "belapi_settings_name", ")", "except", "Exception", ":", "pass", "try", ":", "belapi_db", ".", "create_collection", "(", "belapi_statemgmt_name", ")", "except", "Exception", ":", "pass", "return", "belapi_db" ]
Get BEL API arango db handle
[ "Get", "BEL", "API", "arango", "db", "handle" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L247-L279
belbio/bel
bel/db/arangodb.py
delete_database
def delete_database(client, db_name, username=None, password=None):
    """Delete Arangodb database"""

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    try:
        return sys_db.delete_database(db_name)
    except Exception:
        log.warn("No arango database {db_name} to delete, does not exist")
python
def delete_database(client, db_name, username=None, password=None):
    """Delete Arangodb database"""

    (username, password) = get_user_creds(username, password)

    sys_db = client.db("_system", username=username, password=password)

    try:
        return sys_db.delete_database(db_name)
    except Exception:
        log.warn("No arango database {db_name} to delete, does not exist")
[ "def", "delete_database", "(", "client", ",", "db_name", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "(", "username", ",", "password", ")", "=", "get_user_creds", "(", "username", ",", "password", ")", "sys_db", "=", "client", ".", "db", "(", "\"_system\"", ",", "username", "=", "username", ",", "password", "=", "password", ")", "try", ":", "return", "sys_db", ".", "delete_database", "(", "db_name", ")", "except", "Exception", ":", "log", ".", "warn", "(", "\"No arango database {db_name} to delete, does not exist\"", ")" ]
Delete Arangodb database
[ "Delete", "Arangodb", "database" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L282-L294
belbio/bel
bel/db/arangodb.py
batch_load_docs
def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
    """Batch load documents

    Args:
        db: ArangoDB client database handle
        doc_iterator: function that yields (collection_name, doc) tuples
        on_duplicate: defaults to replace, but can be error, update, replace or ignore

    https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
    """

    batch_size = 100

    counter = 0
    collections = {}
    docs = {}

    if on_duplicate not in ["error", "update", "replace", "ignore"]:
        log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
        return

    for (collection_name, doc) in doc_iterator:
        if collection_name not in collections:
            collections[collection_name] = db.collection(collection_name)
            docs[collection_name] = []

        counter += 1

        docs[collection_name].append(doc)
        if counter % batch_size == 0:
            log.info(f"Bulk import arangodb: {counter}")
            for cname in docs:
                collections[cname].import_bulk(
                    docs[cname], on_duplicate=on_duplicate, halt_on_error=False
                )
                docs[cname] = []

    log.info(f"Bulk import arangodb: {counter}")
    for cname in docs:
        collections[cname].import_bulk(
            docs[cname], on_duplicate=on_duplicate, halt_on_error=False
        )
        docs[cname] = []
python
def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
    """Batch load documents

    Args:
        db: ArangoDB client database handle
        doc_iterator: function that yields (collection_name, doc) tuples
        on_duplicate: defaults to replace, but can be error, update, replace or ignore

    https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
    """

    batch_size = 100

    counter = 0
    collections = {}
    docs = {}

    if on_duplicate not in ["error", "update", "replace", "ignore"]:
        log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
        return

    for (collection_name, doc) in doc_iterator:
        if collection_name not in collections:
            collections[collection_name] = db.collection(collection_name)
            docs[collection_name] = []

        counter += 1

        docs[collection_name].append(doc)
        if counter % batch_size == 0:
            log.info(f"Bulk import arangodb: {counter}")
            for cname in docs:
                collections[cname].import_bulk(
                    docs[cname], on_duplicate=on_duplicate, halt_on_error=False
                )
                docs[cname] = []

    log.info(f"Bulk import arangodb: {counter}")
    for cname in docs:
        collections[cname].import_bulk(
            docs[cname], on_duplicate=on_duplicate, halt_on_error=False
        )
        docs[cname] = []
[ "def", "batch_load_docs", "(", "db", ",", "doc_iterator", ",", "on_duplicate", "=", "\"replace\"", ")", ":", "batch_size", "=", "100", "counter", "=", "0", "collections", "=", "{", "}", "docs", "=", "{", "}", "if", "on_duplicate", "not", "in", "[", "\"error\"", ",", "\"update\"", ",", "\"replace\"", ",", "\"ignore\"", "]", ":", "log", ".", "error", "(", "f\"Bad parameter for on_duplicate: {on_duplicate}\"", ")", "return", "for", "(", "collection_name", ",", "doc", ")", "in", "doc_iterator", ":", "if", "collection_name", "not", "in", "collections", ":", "collections", "[", "collection_name", "]", "=", "db", ".", "collection", "(", "collection_name", ")", "docs", "[", "collection_name", "]", "=", "[", "]", "counter", "+=", "1", "docs", "[", "collection_name", "]", ".", "append", "(", "doc", ")", "if", "counter", "%", "batch_size", "==", "0", ":", "log", ".", "info", "(", "f\"Bulk import arangodb: {counter}\"", ")", "for", "cname", "in", "docs", ":", "collections", "[", "cname", "]", ".", "import_bulk", "(", "docs", "[", "cname", "]", ",", "on_duplicate", "=", "on_duplicate", ",", "halt_on_error", "=", "False", ")", "docs", "[", "cname", "]", "=", "[", "]", "log", ".", "info", "(", "f\"Bulk import arangodb: {counter}\"", ")", "for", "cname", "in", "docs", ":", "collections", "[", "cname", "]", ".", "import_bulk", "(", "docs", "[", "cname", "]", ",", "on_duplicate", "=", "on_duplicate", ",", "halt_on_error", "=", "False", ")", "docs", "[", "cname", "]", "=", "[", "]" ]
Batch load documents

Args:
    db: ArangoDB client database handle
    doc_iterator: function that yields (collection_name, doc) tuples
    on_duplicate: defaults to replace, but can be error, update, replace or ignore

https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
[ "Batch", "load", "documents" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L297-L340
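The doc_iterator contract is just (collection_name, doc) pairs, which a small generator makes concrete; the db handle and the edges collection name are assumptions carried over from the helpers above.

db = get_edgestore_handle(get_client())  # handles from the helpers above

def edge_docs():
    # Hedged example: stream 250 docs into the (assumed) edges collection
    for i in range(250):
        yield ("edges", {"_key": f"edge-{i}", "relation": "increases"})

batch_load_docs(db, edge_docs(), on_duplicate="replace")
# Flushes every 100 docs, then once more for the 50-doc remainder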
belbio/bel
bel/db/arangodb.py
arango_id_to_key
def arango_id_to_key(_id): """Remove illegal chars from potential arangodb _key (id) Args: _id (str): id to be used as arangodb _key Returns: (str): _key value with illegal chars removed """ key = re.sub(r"[^a-zA-Z0-9\_\-\:\.\@\(\)\+\,\=\;\$\!\*\%]+", r"_", _id) if len(key) > 254: log.error( f"Arango _key cannot be longer than 254 chars: Len={len(key)} Key: {key}" ) elif len(key) < 1: log.error(f"Arango _key cannot be an empty string: Len={len(key)} Key: {key}") return key
python
def arango_id_to_key(_id): """Remove illegal chars from potential arangodb _key (id) Args: _id (str): id to be used as arangodb _key Returns: (str): _key value with illegal chars removed """ key = re.sub(r"[^a-zA-Z0-9\_\-\:\.\@\(\)\+\,\=\;\$\!\*\%]+", r"_", _id) if len(key) > 254: log.error( f"Arango _key cannot be longer than 254 chars: Len={len(key)} Key: {key}" ) elif len(key) < 1: log.error(f"Arango _key cannot be an empty string: Len={len(key)} Key: {key}") return key
[ "def", "arango_id_to_key", "(", "_id", ")", ":", "key", "=", "re", ".", "sub", "(", "r\"[^a-zA-Z0-9\\_\\-\\:\\.\\@\\(\\)\\+\\,\\=\\;\\$\\!\\*\\%]+\"", ",", "r\"_\"", ",", "_id", ")", "if", "len", "(", "key", ")", ">", "254", ":", "log", ".", "error", "(", "f\"Arango _key cannot be longer than 254 chars: Len={len(key)} Key: {key}\"", ")", "elif", "len", "(", "key", ")", "<", "1", ":", "log", ".", "error", "(", "f\"Arango _key cannot be an empty string: Len={len(key)} Key: {key}\"", ")", "return", "key" ]
Remove illegal chars from potential arangodb _key (id)

Args:
    _id (str): id to be used as arangodb _key

Returns:
    (str): _key value with illegal chars removed
[ "Remove", "illegal", "chars", "from", "potential", "arangodb", "_key", "(", "id", ")" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L343-L361
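A couple of quick inputs show what the character whitelist keeps and what it collapses:

print(arango_id_to_key("p(HGNC:AKT1)"))       # unchanged: ( ) : are whitelisted
print(arango_id_to_key('r(HGNC:7 "x y"#z)'))  # r(HGNC:7_x_y_z)
# Each run of disallowed chars ([^...]+) collapses to a single underscore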
belbio/bel
bel/resources/resource.py
load_resource
def load_resource(resource_url: str, forceupdate: bool = False):
    """Load BEL Resource file

    Forceupdate will create a new index in Elasticsearch regardless of whether
    an index with the resource version already exists.

    Args:
        resource_url: URL from which to download the resource to load into the BEL API
        forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
    """

    log.info(f"Loading resource {resource_url}")

    try:
        # Download resource
        fo = bel.utils.download_file(resource_url)

        if not fo:
            log.error(f"Could not download and open file {resource_url}")
            return "Failed to download resource_url"

        # Get metadata
        fo.seek(0)
        with gzip.open(fo, "rt") as f:
            metadata = json.loads(f.__next__())

        if "metadata" not in metadata:
            log.error(f"Missing metadata entry for {resource_url}")
            return "Cannot load resource file - missing metadata object in first line of file"

        # Load resource files
        if metadata["metadata"]["type"] == "namespace":
            bel.resources.namespace.load_terms(fo, metadata, forceupdate)

        elif metadata["metadata"]["type"] == "ortholog":
            bel.resources.ortholog.load_orthologs(fo, metadata)

    finally:
        fo.close()
python
def load_resource(resource_url: str, forceupdate: bool = False):
    """Load BEL Resource file

    Forceupdate will create a new index in Elasticsearch regardless of whether
    an index with the resource version already exists.

    Args:
        resource_url: URL from which to download the resource to load into the BEL API
        forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
    """

    log.info(f"Loading resource {resource_url}")

    try:
        # Download resource
        fo = bel.utils.download_file(resource_url)

        if not fo:
            log.error(f"Could not download and open file {resource_url}")
            return "Failed to download resource_url"

        # Get metadata
        fo.seek(0)
        with gzip.open(fo, "rt") as f:
            metadata = json.loads(f.__next__())

        if "metadata" not in metadata:
            log.error(f"Missing metadata entry for {resource_url}")
            return "Cannot load resource file - missing metadata object in first line of file"

        # Load resource files
        if metadata["metadata"]["type"] == "namespace":
            bel.resources.namespace.load_terms(fo, metadata, forceupdate)

        elif metadata["metadata"]["type"] == "ortholog":
            bel.resources.ortholog.load_orthologs(fo, metadata)

    finally:
        fo.close()
[ "def", "load_resource", "(", "resource_url", ":", "str", ",", "forceupdate", ":", "bool", "=", "False", ")", ":", "log", ".", "info", "(", "f\"Loading resource {resource_url}\"", ")", "try", ":", "# Download resource", "fo", "=", "bel", ".", "utils", ".", "download_file", "(", "resource_url", ")", "if", "not", "fo", ":", "log", ".", "error", "(", "f\"Could not download and open file {resource_url}\"", ")", "return", "\"Failed to download resource_url\"", "# Get metadata", "fo", ".", "seek", "(", "0", ")", "with", "gzip", ".", "open", "(", "fo", ",", "\"rt\"", ")", "as", "f", ":", "metadata", "=", "json", ".", "loads", "(", "f", ".", "__next__", "(", ")", ")", "if", "\"metadata\"", "not", "in", "metadata", ":", "log", ".", "error", "(", "f\"Missing metadata entry for {resource_url}\"", ")", "return", "\"Cannot load resource file - missing metadata object in first line of file\"", "# Load resource files", "if", "metadata", "[", "\"metadata\"", "]", "[", "\"type\"", "]", "==", "\"namespace\"", ":", "bel", ".", "resources", ".", "namespace", ".", "load_terms", "(", "fo", ",", "metadata", ",", "forceupdate", ")", "elif", "metadata", "[", "\"metadata\"", "]", "[", "\"type\"", "]", "==", "\"ortholog\"", ":", "bel", ".", "resources", ".", "ortholog", ".", "load_orthologs", "(", "fo", ",", "metadata", ")", "finally", ":", "fo", ".", "close", "(", ")" ]
Load BEL Resource file Forceupdate will create a new index in Elasticsearch regardless of whether an index with the resource version already exists. Args: resource_url: URL from which to download the resource to load into the BEL API forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
[ "Load", "BEL", "Resource", "file" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/resource.py#L23-L61
belbio/bel
bel/terms/terms.py
get_terms
def get_terms(term_id): """Get term(s) using term_id - given term_id may match multiple term records Term ID has to match either the id, alt_ids or obsolete_ids """ search_body = { "query": { "bool": { "should": [ {"term": {"id": term_id}}, {"term": {"alt_ids": term_id}}, {"term": {"obsolete_ids": term_id}}, ] } } } result = es.search(index="terms", body=search_body) results = [] for r in result["hits"]["hits"]: results.append(r["_source"]) return results
python
def get_terms(term_id): """Get term(s) using term_id - given term_id may match multiple term records Term ID has to match either the id, alt_ids or obsolete_ids """ search_body = { "query": { "bool": { "should": [ {"term": {"id": term_id}}, {"term": {"alt_ids": term_id}}, {"term": {"obsolete_ids": term_id}}, ] } } } result = es.search(index="terms", body=search_body) results = [] for r in result["hits"]["hits"]: results.append(r["_source"]) return results
[ "def", "get_terms", "(", "term_id", ")", ":", "search_body", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"should\"", ":", "[", "{", "\"term\"", ":", "{", "\"id\"", ":", "term_id", "}", "}", ",", "{", "\"term\"", ":", "{", "\"alt_ids\"", ":", "term_id", "}", "}", ",", "{", "\"term\"", ":", "{", "\"obsolete_ids\"", ":", "term_id", "}", "}", ",", "]", "}", "}", "}", "result", "=", "es", ".", "search", "(", "index", "=", "\"terms\"", ",", "body", "=", "search_body", ")", "results", "=", "[", "]", "for", "r", "in", "result", "[", "\"hits\"", "]", "[", "\"hits\"", "]", ":", "results", ".", "append", "(", "r", "[", "\"_source\"", "]", ")", "return", "results" ]
Get term(s) using term_id - given term_id may match multiple term records Term ID has to match either the id, alt_ids or obsolete_ids
[ "Get", "term", "(", "s", ")", "using", "term_id", "-", "given", "term_id", "may", "match", "multiple", "term", "records" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L22-L46
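For reference, the bool/should query sent by get_terms means a hit on any one of id, alt_ids, or obsolete_ids is enough to match, so more than one term record can come back. A hypothetical call, assuming a populated Elasticsearch "terms" index:

from bel.terms.terms import get_terms

# "HGNC:391" is a hypothetical identifier
for term in get_terms("HGNC:391"):
    print(term["id"], term.get("label", ""))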
belbio/bel
bel/terms/terms.py
get_equivalents
def get_equivalents(term_id: str) -> List[Mapping[str, Union[str, bool]]]:
    """Get equivalents given ns:id

    Args:
        term_id (str): term id

    Returns:
        List[Mapping[str, Union[str, bool]]]: e.g. [{'term_id': 'HGNC:5', 'namespace': 'HGNC', 'primary': False}]
    """

    try:
        errors = []
        terms = get_terms(term_id)
        if len(terms) == 0:
            return {"equivalents": [], "errors": errors}
        elif len(terms) > 1:
            errors.append(
                f'Too many primary IDs returned. Given term_id: {term_id} matches these term_ids: {[term["id"] for term in terms]}'
            )
            return {"equivalents": [], "errors": errors}
        else:
            term_id = terms[0]["id"]
            term_id_key = bel.db.arangodb.arango_id_to_key(term_id)

            equivalents = []
            query = f"""
            FOR vertex, edge IN 1..5
                ANY 'equivalence_nodes/{term_id_key}' equivalence_edges
                OPTIONS {{bfs: true, uniqueVertices : 'global'}}
                RETURN DISTINCT {{
                    term_id: vertex.name,
                    namespace: vertex.namespace,
                    primary: vertex.primary
                }}
            """
            cursor = belns_db.aql.execute(query, count=True, batch_size=20)
            for doc in cursor:
                if doc.get("term_id", False):
                    equivalents.append(doc)

            equivalents.append(
                {"term_id": term_id, "namespace": term_id.split(":")[0], "primary": True}
            )

            return {"equivalents": equivalents, "errors": errors}

    except Exception as e:
        log.error(f"Problem getting term equivalents for {term_id} msg: {e}")
        return {"equivalents": [], "errors": [f"Unexpected error {e}"]}
python
def get_equivalents(term_id: str) -> List[Mapping[str, Union[str, bool]]]:
    """Get equivalents given ns:id

    Args:
        term_id (str): term id

    Returns:
        List[Mapping[str, Union[str, bool]]]: e.g. [{'term_id': 'HGNC:5', 'namespace': 'HGNC', 'primary': False}]
    """

    try:
        errors = []
        terms = get_terms(term_id)
        if len(terms) == 0:
            return {"equivalents": [], "errors": errors}
        elif len(terms) > 1:
            errors.append(
                f'Too many primary IDs returned. Given term_id: {term_id} matches these term_ids: {[term["id"] for term in terms]}'
            )
            return {"equivalents": [], "errors": errors}
        else:
            term_id = terms[0]["id"]
            term_id_key = bel.db.arangodb.arango_id_to_key(term_id)

            equivalents = []
            query = f"""
            FOR vertex, edge IN 1..5
                ANY 'equivalence_nodes/{term_id_key}' equivalence_edges
                OPTIONS {{bfs: true, uniqueVertices : 'global'}}
                RETURN DISTINCT {{
                    term_id: vertex.name,
                    namespace: vertex.namespace,
                    primary: vertex.primary
                }}
            """
            cursor = belns_db.aql.execute(query, count=True, batch_size=20)
            for doc in cursor:
                if doc.get("term_id", False):
                    equivalents.append(doc)

            equivalents.append(
                {"term_id": term_id, "namespace": term_id.split(":")[0], "primary": True}
            )

            return {"equivalents": equivalents, "errors": errors}

    except Exception as e:
        log.error(f"Problem getting term equivalents for {term_id} msg: {e}")
        return {"equivalents": [], "errors": [f"Unexpected error {e}"]}
[ "def", "get_equivalents", "(", "term_id", ":", "str", ")", "->", "List", "[", "Mapping", "[", "str", ",", "Union", "[", "str", ",", "bool", "]", "]", "]", ":", "try", ":", "errors", "=", "[", "]", "terms", "=", "get_terms", "(", "term_id", ")", "if", "len", "(", "terms", ")", "==", "0", ":", "return", "{", "\"equivalents\"", ":", "[", "]", ",", "\"errors\"", ":", "errors", "}", "elif", "len", "(", "terms", ")", ">", "1", ":", "errors", ".", "append", "(", "f'Too many primary IDs returned. Given term_id: {term_id} matches these term_ids: {[term[\"id\"] for term in terms]}'", ")", "return", "{", "\"equivalents\"", ":", "[", "]", ",", "\"errors\"", ":", "errors", "}", "else", ":", "term_id", "=", "terms", "[", "0", "]", "[", "\"id\"", "]", "term_id_key", "=", "bel", ".", "db", ".", "arangodb", ".", "arango_id_to_key", "(", "term_id", ")", "equivalents", "=", "[", "]", "query", "=", "f\"\"\"\n FOR vertex, edge IN 1..5\n ANY 'equivalence_nodes/{term_id_key}' equivalence_edges\n OPTIONS {{bfs: true, uniqueVertices : 'global'}}\n RETURN DISTINCT {{\n term_id: vertex.name,\n namespace: vertex.namespace,\n primary: vertex.primary\n }}\n \"\"\"", "cursor", "=", "belns_db", ".", "aql", ".", "execute", "(", "query", ",", "count", "=", "True", ",", "batch_size", "=", "20", ")", "for", "doc", "in", "cursor", ":", "if", "doc", ".", "get", "(", "\"term_id\"", ",", "False", ")", ":", "equivalents", ".", "append", "(", "doc", ")", "equivalents", ".", "append", "(", "{", "\"term_id\"", ":", "term_id", ",", "\"namespace\"", ":", "term_id", ".", "split", "(", "\":\"", ")", "[", "0", "]", ",", "\"primary\"", ":", "True", "}", ")", "return", "{", "\"equivalents\"", ":", "equivalents", ",", "\"errors\"", ":", "errors", "}", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Problem getting term equivalents for {term_id} msg: {e}\"", ")", "return", "{", "\"equivalents\"", ":", "[", "]", ",", "\"errors\"", ":", "[", "f\"Unexpected error {e}\"", "]", "}" ]
Get equivalents given ns:id

Args:
    term_id (str): term id

Returns:
    List[Mapping[str, Union[str, bool]]]: e.g. [{'term_id': 'HGNC:5', 'namespace': 'HGNC', 'primary': False}]
[ "Get", "equivalents", "given", "ns", ":", "id" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L49-L98
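Note that despite the List[...] return annotation, get_equivalents actually returns a dict with "equivalents" and "errors" keys (see the return statements above). A sketch of consuming that shape, with hypothetical values:

result = {
    "equivalents": [
        {"term_id": "EG:207", "namespace": "EG", "primary": True},
        {"term_id": "SP:P31749", "namespace": "SP", "primary": False},
        {"term_id": "HGNC:391", "namespace": "HGNC", "primary": True},
    ],
    "errors": [],
}
# Keep only primary ids, as get_normalized_term does:
primary = [e["term_id"] for e in result["equivalents"] if e["primary"]]
print(primary)  # ['EG:207', 'HGNC:391']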
belbio/bel
bel/terms/terms.py
get_normalized_term
def get_normalized_term(term_id: str, equivalents: list, namespace_targets: dict) -> str: """Get normalized term""" if equivalents and len(equivalents) > 0: for start_ns in namespace_targets: if re.match(start_ns, term_id): for target_ns in namespace_targets[start_ns]: for e in equivalents: if e and target_ns in e["namespace"] and e["primary"]: normalized_term = e["term_id"] return normalized_term return term_id
python
def get_normalized_term(term_id: str, equivalents: list, namespace_targets: dict) -> str: """Get normalized term""" if equivalents and len(equivalents) > 0: for start_ns in namespace_targets: if re.match(start_ns, term_id): for target_ns in namespace_targets[start_ns]: for e in equivalents: if e and target_ns in e["namespace"] and e["primary"]: normalized_term = e["term_id"] return normalized_term return term_id
[ "def", "get_normalized_term", "(", "term_id", ":", "str", ",", "equivalents", ":", "list", ",", "namespace_targets", ":", "dict", ")", "->", "str", ":", "if", "equivalents", "and", "len", "(", "equivalents", ")", ">", "0", ":", "for", "start_ns", "in", "namespace_targets", ":", "if", "re", ".", "match", "(", "start_ns", ",", "term_id", ")", ":", "for", "target_ns", "in", "namespace_targets", "[", "start_ns", "]", ":", "for", "e", "in", "equivalents", ":", "if", "e", "and", "target_ns", "in", "e", "[", "\"namespace\"", "]", "and", "e", "[", "\"primary\"", "]", ":", "normalized_term", "=", "e", "[", "\"term_id\"", "]", "return", "normalized_term", "return", "term_id" ]
Get normalized term
[ "Get", "normalized", "term" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L101-L113
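get_normalized_term is pure (no database access), so it can be exercised directly. Two subtleties are visible in the code: start_ns is used as a regular expression via re.match, and the target check `target_ns in e["namespace"]` is a substring test, not equality. The sample values below are hypothetical:

from bel.terms.terms import get_normalized_term

equivalents = [
    {"term_id": "EG:207", "namespace": "EG", "primary": True},
    {"term_id": "SP:P31749", "namespace": "SP", "primary": False},
]
namespace_targets = {"HGNC": ["EG", "SP"]}  # source-ns pattern -> preferred targets, in order

print(get_normalized_term("HGNC:391", equivalents, namespace_targets))    # EG:207
print(get_normalized_term("GO:0016301", equivalents, namespace_targets))  # GO:0016301 (no rule matched)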
belbio/bel
bel/terms/terms.py
get_labels
def get_labels(term_ids: list) -> dict:
    """Get term labels given term ids

    This only takes the first term returned for a term_id so use the unique
    term_id for a term, not an alternate id that might not be unique.
    """

    term_labels = {}
    for term_id in term_ids:
        term = get_terms(term_id)
        term_labels[term_id] = term[0].get("label", "")

    return term_labels
python
def get_labels(term_ids: list) -> dict:
    """Get term labels given term ids

    This only takes the first term returned for a term_id so use the unique
    term_id for a term, not an alternate id that might not be unique.
    """

    term_labels = {}
    for term_id in term_ids:
        term = get_terms(term_id)
        term_labels[term_id] = term[0].get("label", "")

    return term_labels
[ "def", "get_labels", "(", "term_ids", ":", "list", ")", "->", "dict", ":", "term_labels", "=", "{", "}", "for", "term_id", "in", "term_ids", ":", "term", "=", "get_terms", "(", "term_id", ")", "term_labels", "[", "term_id", "]", "=", "term", "[", "0", "]", ".", "get", "(", "\"label\"", ",", "\"\"", ")", "return", "term_labels" ]
Get term labels given term ids

This only takes the first term returned for a term_id so use the unique
term_id for a term, not an alternate id that might not be unique.
[ "Get", "term", "labels", "given", "term", "ids" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L116-L127
belbio/bel
bel/terms/terms.py
get_normalized_terms
def get_normalized_terms(term_id: str) -> dict: """Get normalized terms - canonical/decanonical forms""" canonical = term_id decanonical = term_id canonical_namespace_targets = config["bel"]["lang"]["canonical"] decanonical_namespace_targets = config["bel"]["lang"]["decanonical"] results = get_equivalents(term_id) equivalents = results["equivalents"] # log.debug(f'Equivalents: {equivalents}') if equivalents: canonical = get_normalized_term(term_id, equivalents, canonical_namespace_targets) decanonical = get_normalized_term(canonical, equivalents, decanonical_namespace_targets) # log.debug(f'canonical: {canonical}, decanonical: {decanonical}, original: {term_id}') return {"canonical": canonical, "decanonical": decanonical, "original": term_id}
python
def get_normalized_terms(term_id: str) -> dict: """Get normalized terms - canonical/decanonical forms""" canonical = term_id decanonical = term_id canonical_namespace_targets = config["bel"]["lang"]["canonical"] decanonical_namespace_targets = config["bel"]["lang"]["decanonical"] results = get_equivalents(term_id) equivalents = results["equivalents"] # log.debug(f'Equivalents: {equivalents}') if equivalents: canonical = get_normalized_term(term_id, equivalents, canonical_namespace_targets) decanonical = get_normalized_term(canonical, equivalents, decanonical_namespace_targets) # log.debug(f'canonical: {canonical}, decanonical: {decanonical}, original: {term_id}') return {"canonical": canonical, "decanonical": decanonical, "original": term_id}
[ "def", "get_normalized_terms", "(", "term_id", ":", "str", ")", "->", "dict", ":", "canonical", "=", "term_id", "decanonical", "=", "term_id", "canonical_namespace_targets", "=", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"canonical\"", "]", "decanonical_namespace_targets", "=", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", "[", "\"decanonical\"", "]", "results", "=", "get_equivalents", "(", "term_id", ")", "equivalents", "=", "results", "[", "\"equivalents\"", "]", "# log.debug(f'Equivalents: {equivalents}')", "if", "equivalents", ":", "canonical", "=", "get_normalized_term", "(", "term_id", ",", "equivalents", ",", "canonical_namespace_targets", ")", "decanonical", "=", "get_normalized_term", "(", "canonical", ",", "equivalents", ",", "decanonical_namespace_targets", ")", "# log.debug(f'canonical: {canonical}, decanonical: {decanonical}, original: {term_id}')", "return", "{", "\"canonical\"", ":", "canonical", ",", "\"decanonical\"", ":", "decanonical", ",", "\"original\"", ":", "term_id", "}" ]
Get normalized terms - canonical/decanonical forms
[ "Get", "normalized", "terms", "-", "canonical", "/", "decanonical", "forms" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/terms.py#L130-L149
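The canonical/decanonical target maps come from config["bel"]["lang"]["canonical"] and config["bel"]["lang"]["decanonical"]. A hypothetical call, assuming a configured bel installation with a loaded equivalence graph; the id and result values are illustrative:

from bel.terms.terms import get_normalized_terms

norms = get_normalized_terms("HGNC:391")  # hypothetical id
# e.g. {'canonical': 'EG:207', 'decanonical': 'HGNC:391', 'original': 'HGNC:391'}
print(norms["canonical"], norms["decanonical"])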
PayEx/pypayex
payex/handlers.py
BaseHandler._get_params
def _get_params(self): """ Generate SOAP parameters. """ params = {'accountNumber': self._service.accountNumber} # Include object variables that are in field_order for key, val in self.__dict__.iteritems(): if key in self.field_order: # Turn into Unicode if isinstance(val, str,): val = val.decode('utf8') params[key] = val # Set missing parameters as empty strings for key in self.field_order: if key not in params: params[key] = u'' # Parameter sorting method def order_keys(k): if k[0] in self.field_order: return self.field_order.index(k[0]) return len(self.field_order) + 1 # Sort the ordered dictionary params = OrderedDict(sorted(params.items(), key=order_keys)) # Add hash to dictionary if present if hasattr(self, 'hash') and self.hash is not None: params['hash'] = self.hash return params
python
def _get_params(self): """ Generate SOAP parameters. """ params = {'accountNumber': self._service.accountNumber} # Include object variables that are in field_order for key, val in self.__dict__.iteritems(): if key in self.field_order: # Turn into Unicode if isinstance(val, str,): val = val.decode('utf8') params[key] = val # Set missing parameters as empty strings for key in self.field_order: if key not in params: params[key] = u'' # Parameter sorting method def order_keys(k): if k[0] in self.field_order: return self.field_order.index(k[0]) return len(self.field_order) + 1 # Sort the ordered dictionary params = OrderedDict(sorted(params.items(), key=order_keys)) # Add hash to dictionary if present if hasattr(self, 'hash') and self.hash is not None: params['hash'] = self.hash return params
[ "def", "_get_params", "(", "self", ")", ":", "params", "=", "{", "'accountNumber'", ":", "self", ".", "_service", ".", "accountNumber", "}", "# Include object variables that are in field_order", "for", "key", ",", "val", "in", "self", ".", "__dict__", ".", "iteritems", "(", ")", ":", "if", "key", "in", "self", ".", "field_order", ":", "# Turn into Unicode", "if", "isinstance", "(", "val", ",", "str", ",", ")", ":", "val", "=", "val", ".", "decode", "(", "'utf8'", ")", "params", "[", "key", "]", "=", "val", "# Set missing parameters as empty strings", "for", "key", "in", "self", ".", "field_order", ":", "if", "key", "not", "in", "params", ":", "params", "[", "key", "]", "=", "u''", "# Parameter sorting method", "def", "order_keys", "(", "k", ")", ":", "if", "k", "[", "0", "]", "in", "self", ".", "field_order", ":", "return", "self", ".", "field_order", ".", "index", "(", "k", "[", "0", "]", ")", "return", "len", "(", "self", ".", "field_order", ")", "+", "1", "# Sort the ordered dictionary", "params", "=", "OrderedDict", "(", "sorted", "(", "params", ".", "items", "(", ")", ",", "key", "=", "order_keys", ")", ")", "# Add hash to dictionary if present", "if", "hasattr", "(", "self", ",", "'hash'", ")", "and", "self", ".", "hash", "is", "not", "None", ":", "params", "[", "'hash'", "]", "=", "self", ".", "hash", "return", "params" ]
Generate SOAP parameters.
[ "Generate", "SOAP", "parameters", "." ]
train
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L26-L61
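The ordering step is the subtle part: keys named in field_order sort by their declared position, and everything else (including accountNumber) sorts after them. The sketch below isolates that logic in Python 3 (the original is Python 2, hence iteritems and the unicode handling); the PayEx field names are illustrative:

from collections import OrderedDict

field_order = ["purchaseOperation", "price", "currency"]  # illustrative fields
params = {"currency": "NOK", "accountNumber": "12345",
          "price": "5000", "purchaseOperation": "SALE"}

def order_keys(item):
    if item[0] in field_order:
        return field_order.index(item[0])
    return len(field_order) + 1  # unknown keys sort last

print(list(OrderedDict(sorted(params.items(), key=order_keys))))
# ['purchaseOperation', 'price', 'currency', 'accountNumber']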
PayEx/pypayex
payex/handlers.py
BaseHandler._generate_hash
def _generate_hash(self): """ Generates a hash based on the specific fields for the method. """ self.hash = None str_hash = '' for key, val in self._get_params().iteritems(): str_hash += smart_str(val) # Append the encryption string str_hash += self._service.encryption_key # Set md5 hash on the object self.hash = hashlib.md5(str_hash).hexdigest()
python
def _generate_hash(self): """ Generates a hash based on the specific fields for the method. """ self.hash = None str_hash = '' for key, val in self._get_params().iteritems(): str_hash += smart_str(val) # Append the encryption string str_hash += self._service.encryption_key # Set md5 hash on the object self.hash = hashlib.md5(str_hash).hexdigest()
[ "def", "_generate_hash", "(", "self", ")", ":", "self", ".", "hash", "=", "None", "str_hash", "=", "''", "for", "key", ",", "val", "in", "self", ".", "_get_params", "(", ")", ".", "iteritems", "(", ")", ":", "str_hash", "+=", "smart_str", "(", "val", ")", "# Append the encryption string", "str_hash", "+=", "self", ".", "_service", ".", "encryption_key", "# Set md5 hash on the object", "self", ".", "hash", "=", "hashlib", ".", "md5", "(", "str_hash", ")", ".", "hexdigest", "(", ")" ]
Generates a hash based on the specific fields for the method.
[ "Generates", "a", "hash", "based", "on", "the", "specific", "fields", "for", "the", "method", "." ]
train
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L63-L78
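The hash is simply MD5 over the concatenated, ordered parameter values with the encryption key appended. A Python 3 equivalent of the same scheme (the key and values below are made up):

import hashlib

ordered_values = ["12345", "SALE", "5000", "NOK"]  # parameter values, already ordered
encryption_key = "not-a-real-key"                  # hypothetical PayEx encryption key

payload = "".join(ordered_values) + encryption_key
print(hashlib.md5(payload.encode("utf8")).hexdigest())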
PayEx/pypayex
payex/handlers.py
BaseHandler._send_request
def _send_request(self): """ Make the SOAP request and convert the result to a dictionary. """ # Generate the hash variable and parameters self._generate_hash() params = self._get_params() # Make the SOAP request try: resp = self._endpoint(**params) logger.debug(resp) except WebFault, e: logger.exception('An error occurred while making the SOAP request.') return None # Convert XML response into a dictionary self.response = XmlDictConfig(ElementTree.XML(smart_str(resp))) # Normalize dictionary values self.response = normalize_dictionary_values(self.response) # Log all non OK status codes if self.response['status']['errorCode'] != 'OK': logger.error(resp) return self.response
python
def _send_request(self): """ Make the SOAP request and convert the result to a dictionary. """ # Generate the hash variable and parameters self._generate_hash() params = self._get_params() # Make the SOAP request try: resp = self._endpoint(**params) logger.debug(resp) except WebFault, e: logger.exception('An error occurred while making the SOAP request.') return None # Convert XML response into a dictionary self.response = XmlDictConfig(ElementTree.XML(smart_str(resp))) # Normalize dictionary values self.response = normalize_dictionary_values(self.response) # Log all non OK status codes if self.response['status']['errorCode'] != 'OK': logger.error(resp) return self.response
[ "def", "_send_request", "(", "self", ")", ":", "# Generate the hash variable and parameters", "self", ".", "_generate_hash", "(", ")", "params", "=", "self", ".", "_get_params", "(", ")", "# Make the SOAP request", "try", ":", "resp", "=", "self", ".", "_endpoint", "(", "*", "*", "params", ")", "logger", ".", "debug", "(", "resp", ")", "except", "WebFault", ",", "e", ":", "logger", ".", "exception", "(", "'An error occurred while making the SOAP request.'", ")", "return", "None", "# Convert XML response into a dictionary", "self", ".", "response", "=", "XmlDictConfig", "(", "ElementTree", ".", "XML", "(", "smart_str", "(", "resp", ")", ")", ")", "# Normalize dictionary values", "self", ".", "response", "=", "normalize_dictionary_values", "(", "self", ".", "response", ")", "# Log all non OK status codes", "if", "self", ".", "response", "[", "'status'", "]", "[", "'errorCode'", "]", "!=", "'OK'", ":", "logger", ".", "error", "(", "resp", ")", "return", "self", ".", "response" ]
Make the SOAP request and convert the result to a dictionary.
[ "Make", "the", "SOAP", "request", "and", "convert", "the", "result", "to", "a", "dictionary", "." ]
train
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L80-L107
PayEx/pypayex
payex/handlers.py
BaseHandler.client_factory
def client_factory(self): """ Custom client factory to set proxy options. """ if self._service.production: url = self.production_url else: url = self.testing_url proxy_options = dict() https_proxy_setting = os.environ.get('PAYEX_HTTPS_PROXY') or os.environ.get('https_proxy') http_proxy_setting = os.environ.get('PAYEX_HTTP_PROXY') or os.environ.get('http_proxy') if https_proxy_setting: proxy_options['https'] = https_proxy_setting if http_proxy_setting: proxy_options['http'] = http_proxy_setting return client.Client(url, proxy=proxy_options)
python
def client_factory(self): """ Custom client factory to set proxy options. """ if self._service.production: url = self.production_url else: url = self.testing_url proxy_options = dict() https_proxy_setting = os.environ.get('PAYEX_HTTPS_PROXY') or os.environ.get('https_proxy') http_proxy_setting = os.environ.get('PAYEX_HTTP_PROXY') or os.environ.get('http_proxy') if https_proxy_setting: proxy_options['https'] = https_proxy_setting if http_proxy_setting: proxy_options['http'] = http_proxy_setting return client.Client(url, proxy=proxy_options)
[ "def", "client_factory", "(", "self", ")", ":", "if", "self", ".", "_service", ".", "production", ":", "url", "=", "self", ".", "production_url", "else", ":", "url", "=", "self", ".", "testing_url", "proxy_options", "=", "dict", "(", ")", "https_proxy_setting", "=", "os", ".", "environ", ".", "get", "(", "'PAYEX_HTTPS_PROXY'", ")", "or", "os", ".", "environ", ".", "get", "(", "'https_proxy'", ")", "http_proxy_setting", "=", "os", ".", "environ", ".", "get", "(", "'PAYEX_HTTP_PROXY'", ")", "or", "os", ".", "environ", ".", "get", "(", "'http_proxy'", ")", "if", "https_proxy_setting", ":", "proxy_options", "[", "'https'", "]", "=", "https_proxy_setting", "if", "http_proxy_setting", ":", "proxy_options", "[", "'http'", "]", "=", "http_proxy_setting", "return", "client", ".", "Client", "(", "url", ",", "proxy", "=", "proxy_options", ")" ]
Custom client factory to set proxy options.
[ "Custom", "client", "factory", "to", "set", "proxy", "options", "." ]
train
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L109-L128
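The proxy settings are read from the environment and passed straight through to suds' client.Client(url, proxy=...). A quick sketch, using a hypothetical proxy host:

import os

os.environ["PAYEX_HTTPS_PROXY"] = "http://proxy.internal:8080"  # hypothetical
# handler.client_factory() will now construct the suds client with
# proxy={'https': 'http://proxy.internal:8080'}; PAYEX_HTTP_PROXY /
# http_proxy work the same way for plain HTTP.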
DNX/django-keyboard-shorcuts
keyboard_shortcuts/utils.py
get_combination_action
def get_combination_action(combination):
    """
    Prepares the action for a keyboard combination,
    and also filters out other "strange" actions declared by the user.
    """
    accepted_actions = ('link', 'js')
    for action in accepted_actions:
        if action in combination:
            return {action: combination[action]}
    return {}
python
def get_combination_action(combination):
    """
    Prepares the action for a keyboard combination,
    and also filters out other "strange" actions declared by the user.
    """
    accepted_actions = ('link', 'js')
    for action in accepted_actions:
        if action in combination:
            return {action: combination[action]}
    return {}
[ "def", "get_combination_action", "(", "combination", ")", ":", "accepted_actions", "=", "(", "'link'", ",", "'js'", ")", "for", "action", "in", "accepted_actions", ":", "if", "action", "in", "combination", ":", "return", "{", "action", ":", "combination", "[", "action", "]", "}", "return", "{", "}" ]
Prepares the action for a keyboard combination,
and also filters out other "strange" actions declared by the user.
[ "Prepares", "the", "action", "for", "a", "keyboard", "combination", "also", "filters", "another", "strange", "actions", "declared", "by", "the", "user", "." ]
train
https://github.com/DNX/django-keyboard-shorcuts/blob/dd853a410614c0dfb7cce803eafda9b5fa47be17/keyboard_shortcuts/utils.py#L14-L23
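A quick demonstration of the filtering contract: only 'link' and 'js' survive, and the first accepted action found wins. The logic is inlined here because importing keyboard_shortcuts.utils requires a configured Django settings module:

accepted_actions = ('link', 'js')

def demo(combination):
    # Same loop as get_combination_action above
    for action in accepted_actions:
        if action in combination:
            return {action: combination[action]}
    return {}

print(demo({"keys": "CTRL+A", "link": "/inbox/", "onclick": "alert(1)"}))  # {'link': '/inbox/'}
print(demo({"keys": "CTRL+B"}))                                            # {}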
DNX/django-keyboard-shorcuts
keyboard_shortcuts/utils.py
get_processed_hotkeys
def get_processed_hotkeys(hotkeys=None): """ Process passed dict with key combinations or the HOTKEYS dict from settings. """ hotkeys = hotkeys or ks_settings.HOTKEYS processed_hotkeys = AutoVivification() if not hotkeys: return processed_hotkeys for combination in hotkeys: key_codes = get_key_codes(combination['keys']) if len(key_codes) == 1: processed_hotkeys[key_codes[0]] = get_combination_action(combination) elif len(key_codes) == 2: processed_hotkeys[key_codes[0]][key_codes[1]] = get_combination_action(combination) elif len(key_codes) == 3: processed_hotkeys[key_codes[0]][key_codes[1]][key_codes[2]] = get_combination_action(combination) # TODO: make dynamic vivification return processed_hotkeys
python
def get_processed_hotkeys(hotkeys=None): """ Process passed dict with key combinations or the HOTKEYS dict from settings. """ hotkeys = hotkeys or ks_settings.HOTKEYS processed_hotkeys = AutoVivification() if not hotkeys: return processed_hotkeys for combination in hotkeys: key_codes = get_key_codes(combination['keys']) if len(key_codes) == 1: processed_hotkeys[key_codes[0]] = get_combination_action(combination) elif len(key_codes) == 2: processed_hotkeys[key_codes[0]][key_codes[1]] = get_combination_action(combination) elif len(key_codes) == 3: processed_hotkeys[key_codes[0]][key_codes[1]][key_codes[2]] = get_combination_action(combination) # TODO: make dynamic vivification return processed_hotkeys
[ "def", "get_processed_hotkeys", "(", "hotkeys", "=", "None", ")", ":", "hotkeys", "=", "hotkeys", "or", "ks_settings", ".", "HOTKEYS", "processed_hotkeys", "=", "AutoVivification", "(", ")", "if", "not", "hotkeys", ":", "return", "processed_hotkeys", "for", "combination", "in", "hotkeys", ":", "key_codes", "=", "get_key_codes", "(", "combination", "[", "'keys'", "]", ")", "if", "len", "(", "key_codes", ")", "==", "1", ":", "processed_hotkeys", "[", "key_codes", "[", "0", "]", "]", "=", "get_combination_action", "(", "combination", ")", "elif", "len", "(", "key_codes", ")", "==", "2", ":", "processed_hotkeys", "[", "key_codes", "[", "0", "]", "]", "[", "key_codes", "[", "1", "]", "]", "=", "get_combination_action", "(", "combination", ")", "elif", "len", "(", "key_codes", ")", "==", "3", ":", "processed_hotkeys", "[", "key_codes", "[", "0", "]", "]", "[", "key_codes", "[", "1", "]", "]", "[", "key_codes", "[", "2", "]", "]", "=", "get_combination_action", "(", "combination", ")", "# TODO: make dynamic vivification", "return", "processed_hotkeys" ]
Process passed dict with key combinations or the HOTKEYS dict from settings.
[ "Process", "passed", "dict", "with", "key", "combinations", "or", "the", "HOTKEYS", "dict", "from", "settings", "." ]
train
https://github.com/DNX/django-keyboard-shorcuts/blob/dd853a410614c0dfb7cce803eafda9b5fa47be17/keyboard_shortcuts/utils.py#L26-L46
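The result is a nested AutoVivification dict keyed by key codes, one nesting level per key in the combination; combinations longer than three keys fall through and are silently ignored (hence the TODO about dynamic vivification). Illustratively, assuming KEY_CODES maps CTRL to 17 and A to 65:

hotkeys = [{"keys": "CTRL+A", "link": "/inbox/"}]
# get_processed_hotkeys(hotkeys) would yield, conceptually:
# {17: {65: {"link": "/inbox/"}}}
# i.e. processed[17][65] == {"link": "/inbox/"}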
DNX/django-keyboard-shorcuts
keyboard_shortcuts/utils.py
get_key_codes
def get_key_codes(keys): """ Calculates the list of key codes from a string with key combinations. Ex: 'CTRL+A' will produce the output (17, 65) """ keys = keys.strip().upper().split('+') codes = list() for key in keys: code = ks_settings.KEY_CODES.get(key.strip()) if code: codes.append(code) return codes
python
def get_key_codes(keys): """ Calculates the list of key codes from a string with key combinations. Ex: 'CTRL+A' will produce the output (17, 65) """ keys = keys.strip().upper().split('+') codes = list() for key in keys: code = ks_settings.KEY_CODES.get(key.strip()) if code: codes.append(code) return codes
[ "def", "get_key_codes", "(", "keys", ")", ":", "keys", "=", "keys", ".", "strip", "(", ")", ".", "upper", "(", ")", ".", "split", "(", "'+'", ")", "codes", "=", "list", "(", ")", "for", "key", "in", "keys", ":", "code", "=", "ks_settings", ".", "KEY_CODES", ".", "get", "(", "key", ".", "strip", "(", ")", ")", "if", "code", ":", "codes", ".", "append", "(", "code", ")", "return", "codes" ]
Calculates the list of key codes from a string with key combinations. Ex: 'CTRL+A' will produce the output (17, 65)
[ "Calculates", "the", "list", "of", "key", "codes", "from", "a", "string", "with", "key", "combinations", ".", "Ex", ":", "CTRL", "+", "A", "will", "produce", "the", "output", "(", "17", "65", ")" ]
train
https://github.com/DNX/django-keyboard-shorcuts/blob/dd853a410614c0dfb7cce803eafda9b5fa47be17/keyboard_shortcuts/utils.py#L49-L60
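Two details worth noting: the function returns a list (not the tuple shown in the docstring example), and key names missing from KEY_CODES are silently dropped. Assuming the conventional JavaScript key codes CTRL=17 and A=65:

# get_key_codes("ctrl + a")   -> [17, 65]  (case- and whitespace-insensitive)
# get_key_codes("CTRL+BOGUS") -> [17]      (unknown names are skipped, not errors)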
belbio/bel
bel/lang/belobj.py
BEL.parse
def parse(
    self,
    assertion: Union[str, Mapping[str, str]],
    strict: bool = False,
    parseinfo: bool = False,
    rule_name: str = "start",
    error_level: str = "WARNING",
) -> "BEL":
    """Parse and semantically validate BEL statement

    Parses a BEL statement given as a string and, if the statement is valid, builds an AST
    (Abstract Syntax Tree, defined in ast.py) and sets self.parse_valid. Else, the AST attribute
    is None and there will be validation error messages in self.validation_messages.

    self.validation_messages will contain WARNINGS if warranted even if the statement parses correctly.

    Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR

    Args:
        assertion: BEL statement (if str -> 'S R O', if dict {'subject': S, 'relation': R, 'object': O})
        strict: specify to use strict or loose parsing; defaults to loose
        parseinfo: specify whether or not to include Tatsu parse information in AST
        rule_name: starting point in parser - defaults to 'start'
        error_level: return ERRORs only or also WARNINGs

    Returns:
        BEL: returns self, containing either an AST or validation error messages
    """

    self.ast = None
    self.parse_valid = False
    self.parse_visualize_error = ""
    self.validation_messages = []  # Reset messages when parsing a new BEL Statement

    if isinstance(assertion, dict):
        if assertion.get("relation", False) and assertion.get("object", False):
            statement = f"{assertion['subject']} {assertion['relation']} {assertion['object']}"
        elif assertion.get("subject"):
            statement = f"{assertion['subject']}"
        else:
            statement = ""
    else:
        statement = assertion

    self.original_bel_stmt = statement

    # pre-process to remove extra white space, add space after commas, etc.
    self.bel_stmt = bel_utils.preprocess_bel_stmt(statement)

    # TODO - double check these tests before enabling
    # is_valid, messages = bel_utils.simple_checks(self.bel_stmt)
    # if not is_valid:
    #     self.validation_messages.extend(messages)
    #     return self

    # Check to see if empty string for bel statement
    if len(self.bel_stmt) == 0:
        self.validation_messages.append(
            ("ERROR", "Please include a valid BEL statement - found empty string.")
        )
        return self

    try:
        # see if an AST is returned without any parsing errors
        ast_dict = self.parser.parse(
            self.bel_stmt, rule_name=rule_name, trace=False, parseinfo=parseinfo
        )
        self.ast = lang_ast.ast_dict_to_objects(ast_dict, self)
        self.parse_valid = True

    except FailedParse as e:
        # if an error is returned, send to handle_syntax, error
        error, visualize_error = bel_utils.handle_parser_syntax_error(e)
        self.parse_visualize_error = visualize_error

        if visualize_error:
            self.validation_messages.append(
                ("ERROR", f"{error}\n{visualize_error}")
            )
        else:
            self.validation_messages.append(
                ("ERROR", f"{error}\nBEL: {self.bel_stmt}")
            )

        self.ast = None

    except Exception as e:
        log.error("Error {}, error type: {}".format(e, type(e)))
        self.validation_messages.append(
            ("ERROR", "Error {}, error type: {}".format(e, type(e)))
        )

    return self
python
def parse(
    self,
    assertion: Union[str, Mapping[str, str]],
    strict: bool = False,
    parseinfo: bool = False,
    rule_name: str = "start",
    error_level: str = "WARNING",
) -> "BEL":
    """Parse and semantically validate BEL statement

    Parses a BEL statement given as a string and, if the statement is valid, builds an AST
    (Abstract Syntax Tree, defined in ast.py) and sets self.parse_valid. Else, the AST attribute
    is None and there will be validation error messages in self.validation_messages.

    self.validation_messages will contain WARNINGS if warranted even if the statement parses correctly.

    Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR

    Args:
        assertion: BEL statement (if str -> 'S R O', if dict {'subject': S, 'relation': R, 'object': O})
        strict: specify to use strict or loose parsing; defaults to loose
        parseinfo: specify whether or not to include Tatsu parse information in AST
        rule_name: starting point in parser - defaults to 'start'
        error_level: return ERRORs only or also WARNINGs

    Returns:
        BEL: returns self, containing either an AST or validation error messages
    """

    self.ast = None
    self.parse_valid = False
    self.parse_visualize_error = ""
    self.validation_messages = []  # Reset messages when parsing a new BEL Statement

    if isinstance(assertion, dict):
        if assertion.get("relation", False) and assertion.get("object", False):
            statement = f"{assertion['subject']} {assertion['relation']} {assertion['object']}"
        elif assertion.get("subject"):
            statement = f"{assertion['subject']}"
        else:
            statement = ""
    else:
        statement = assertion

    self.original_bel_stmt = statement

    # pre-process to remove extra white space, add space after commas, etc.
    self.bel_stmt = bel_utils.preprocess_bel_stmt(statement)

    # TODO - double check these tests before enabling
    # is_valid, messages = bel_utils.simple_checks(self.bel_stmt)
    # if not is_valid:
    #     self.validation_messages.extend(messages)
    #     return self

    # Check to see if empty string for bel statement
    if len(self.bel_stmt) == 0:
        self.validation_messages.append(
            ("ERROR", "Please include a valid BEL statement - found empty string.")
        )
        return self

    try:
        # see if an AST is returned without any parsing errors
        ast_dict = self.parser.parse(
            self.bel_stmt, rule_name=rule_name, trace=False, parseinfo=parseinfo
        )
        self.ast = lang_ast.ast_dict_to_objects(ast_dict, self)
        self.parse_valid = True

    except FailedParse as e:
        # if an error is returned, send to handle_syntax, error
        error, visualize_error = bel_utils.handle_parser_syntax_error(e)
        self.parse_visualize_error = visualize_error

        if visualize_error:
            self.validation_messages.append(
                ("ERROR", f"{error}\n{visualize_error}")
            )
        else:
            self.validation_messages.append(
                ("ERROR", f"{error}\nBEL: {self.bel_stmt}")
            )

        self.ast = None

    except Exception as e:
        log.error("Error {}, error type: {}".format(e, type(e)))
        self.validation_messages.append(
            ("ERROR", "Error {}, error type: {}".format(e, type(e)))
        )

    return self
[ "def", "parse", "(", "self", ",", "assertion", ":", "Union", "[", "str", ",", "Mapping", "[", "str", ",", "str", "]", "]", ",", "strict", ":", "bool", "=", "False", ",", "parseinfo", ":", "bool", "=", "False", ",", "rule_name", ":", "str", "=", "\"start\"", ",", "error_level", ":", "str", "=", "\"WARNING\"", ",", ")", "->", "\"BEL\"", ":", "self", ".", "ast", "=", "None", "self", ".", "parse_valid", "=", "False", "self", ".", "parse_visualize_error", "=", "\"\"", "self", ".", "validation_messages", "=", "[", "]", "# Reset messages when parsing a new BEL Statement", "if", "isinstance", "(", "assertion", ",", "dict", ")", ":", "if", "assertion", ".", "get", "(", "\"relation\"", ",", "False", ")", "and", "assertion", ".", "get", "(", "\"object\"", ",", "False", ")", ":", "statement", "=", "f\"{assertion['subject']} {assertion['relation']} {assertion['object']}\"", "elif", "assertion", ".", "get", "(", "\"subject\"", ")", ":", "statement", "=", "f\"{assertion['subject']}\"", "else", ":", "statement", "=", "\"\"", "else", ":", "statement", "=", "assertion", "self", ".", "original_bel_stmt", "=", "statement", "# pre-process to remove extra white space, add space after commas, etc.", "self", ".", "bel_stmt", "=", "bel_utils", ".", "preprocess_bel_stmt", "(", "statement", ")", "# TODO - double check these tests before enabling", "# is_valid, messages = bel_utils.simple_checks(self.bel_stmt)", "# if not is_valid:", "# self.validation_messages.extend(messages)", "# return self", "# Check to see if empty string for bel statement", "if", "len", "(", "self", ".", "bel_stmt", ")", "==", "0", ":", "self", ".", "validation_messages", ".", "append", "(", "(", "\"ERROR\"", ",", "\"Please include a valid BEL statement - found empty string.\"", ")", ")", "return", "self", "try", ":", "# see if an AST is returned without any parsing errors", "ast_dict", "=", "self", ".", "parser", ".", "parse", "(", "self", ".", "bel_stmt", ",", "rule_name", "=", "rule_name", ",", "trace", "=", "False", ",", "parseinfo", "=", "parseinfo", ")", "self", ".", "ast", "=", "lang_ast", ".", "ast_dict_to_objects", "(", "ast_dict", ",", "self", ")", "self", ".", "parse_valid", "=", "True", "except", "FailedParse", "as", "e", ":", "# if an error is returned, send to handle_syntax, error", "error", ",", "visualize_error", "=", "bel_utils", ".", "handle_parser_syntax_error", "(", "e", ")", "self", ".", "parse_visualize_error", "=", "visualize_error", "if", "visualize_error", ":", "self", ".", "validation_messages", ".", "append", "(", "(", "\"ERROR\"", ",", "f\"{error}\\n{visualize_error}\"", ")", ")", "else", ":", "self", ".", "validation_messages", ".", "append", "(", "(", "\"ERROR\"", ",", "f\"{error}\\nBEL: {self.bel_stmt}\"", ")", ")", "self", ".", "ast", "=", "None", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"Error {}, error type: {}\"", ".", "format", "(", "e", ",", "type", "(", "e", ")", ")", ")", "self", ".", "validation_messages", ".", "append", "(", "(", "\"ERROR\"", ",", "\"Error {}, error type: {}\"", ".", "format", "(", "e", ",", "type", "(", "e", ")", ")", ")", ")", "return", "self" ]
Parse and semantically validate BEL statement

Parses a BEL statement given as a string and, if the statement is valid, builds an AST
(Abstract Syntax Tree, defined in ast.py) and sets self.parse_valid. Else, the AST attribute
is None and there will be validation error messages in self.validation_messages.

self.validation_messages will contain WARNINGS if warranted even if the statement parses correctly.

Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR

Args:
    assertion: BEL statement (if str -> 'S R O', if dict {'subject': S, 'relation': R, 'object': O})
    strict: specify to use strict or loose parsing; defaults to loose
    parseinfo: specify whether or not to include Tatsu parse information in AST
    rule_name: starting point in parser - defaults to 'start'
    error_level: return ERRORs only or also WARNINGs

Returns:
    BEL: returns self, containing either an AST or validation error messages
[ "Parse", "and", "semantically", "validate", "BEL", "statement" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L92-L184
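A hypothetical round trip; the BEL() constructor arguments shown (spec version and API endpoint) are assumptions, as the constructor is not part of this record. parse() accepts either a plain 'S R O' string or a dict with subject/relation/object keys, and always returns self, so the result flags must be checked:

from bel.lang.belobj import BEL

bo = BEL("2.1.0", "http://localhost:8000")  # assumed constructor signature
bo.parse("p(HGNC:AKT1) increases p(HGNC:EGF)")

if bo.parse_valid:
    print(bo.ast.to_string())
else:
    for level, msg in bo.validation_messages:
        print(level, msg)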
belbio/bel
bel/lang/belobj.py
BEL.canonicalize
def canonicalize(self, namespace_targets: Mapping[str, List[str]] = None) -> "BEL": """ Takes an AST and returns a canonicalized BEL statement string. Args: namespace_targets (Mapping[str, List[str]]): override default canonicalization settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings Returns: BEL: returns self """ # TODO Need to order position independent args if not self.ast: return self # Collect canonical/decanonical NSArg values if not self.ast.collected_nsarg_norms: self = self.collect_nsarg_norms() # TODO Need to pass namespace target overrides for canonicalization self.ast.canonicalize() # self.ast = bel_utils.convert_namespaces_ast(self.ast, canonicalize=True, api_url=self.api_url, namespace_targets=namespace_targets) return self
python
def canonicalize(self, namespace_targets: Mapping[str, List[str]] = None) -> "BEL": """ Takes an AST and returns a canonicalized BEL statement string. Args: namespace_targets (Mapping[str, List[str]]): override default canonicalization settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings Returns: BEL: returns self """ # TODO Need to order position independent args if not self.ast: return self # Collect canonical/decanonical NSArg values if not self.ast.collected_nsarg_norms: self = self.collect_nsarg_norms() # TODO Need to pass namespace target overrides for canonicalization self.ast.canonicalize() # self.ast = bel_utils.convert_namespaces_ast(self.ast, canonicalize=True, api_url=self.api_url, namespace_targets=namespace_targets) return self
[ "def", "canonicalize", "(", "self", ",", "namespace_targets", ":", "Mapping", "[", "str", ",", "List", "[", "str", "]", "]", "=", "None", ")", "->", "\"BEL\"", ":", "# TODO Need to order position independent args", "if", "not", "self", ".", "ast", ":", "return", "self", "# Collect canonical/decanonical NSArg values", "if", "not", "self", ".", "ast", ".", "collected_nsarg_norms", ":", "self", "=", "self", ".", "collect_nsarg_norms", "(", ")", "# TODO Need to pass namespace target overrides for canonicalization", "self", ".", "ast", ".", "canonicalize", "(", ")", "# self.ast = bel_utils.convert_namespaces_ast(self.ast, canonicalize=True, api_url=self.api_url, namespace_targets=namespace_targets)", "return", "self" ]
Takes an AST and returns a canonicalized BEL statement string. Args: namespace_targets (Mapping[str, List[str]]): override default canonicalization settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings Returns: BEL: returns self
[ "Takes", "an", "AST", "and", "returns", "a", "canonicalized", "BEL", "statement", "string", "." ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L202-L228
belbio/bel
bel/lang/belobj.py
BEL.collect_nsarg_norms
def collect_nsarg_norms(self): """Adds canonical and decanonical values to NSArgs in AST This prepares the AST object for (de)canonicalization """ start_time = datetime.datetime.now() self.ast = bel_utils.populate_ast_nsarg_defaults(self.ast, self.ast) self.ast.collected_nsarg_norms = True if ( hasattr(self.ast, "bel_object") and self.ast.bel_object and self.ast.bel_object.type == "BELAst" ): self.ast.bel_object.collected_nsarg_norms = True end_time = datetime.datetime.now() delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}" log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms) return self
python
def collect_nsarg_norms(self): """Adds canonical and decanonical values to NSArgs in AST This prepares the AST object for (de)canonicalization """ start_time = datetime.datetime.now() self.ast = bel_utils.populate_ast_nsarg_defaults(self.ast, self.ast) self.ast.collected_nsarg_norms = True if ( hasattr(self.ast, "bel_object") and self.ast.bel_object and self.ast.bel_object.type == "BELAst" ): self.ast.bel_object.collected_nsarg_norms = True end_time = datetime.datetime.now() delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}" log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms) return self
[ "def", "collect_nsarg_norms", "(", "self", ")", ":", "start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "self", ".", "ast", "=", "bel_utils", ".", "populate_ast_nsarg_defaults", "(", "self", ".", "ast", ",", "self", ".", "ast", ")", "self", ".", "ast", ".", "collected_nsarg_norms", "=", "True", "if", "(", "hasattr", "(", "self", ".", "ast", ",", "\"bel_object\"", ")", "and", "self", ".", "ast", ".", "bel_object", "and", "self", ".", "ast", ".", "bel_object", ".", "type", "==", "\"BELAst\"", ")", ":", "self", ".", "ast", ".", "bel_object", ".", "collected_nsarg_norms", "=", "True", "end_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time - start_time).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - prepare nsarg normalization\"", ",", "delta_ms", "=", "delta_ms", ")", "return", "self" ]
Adds canonical and decanonical values to NSArgs in AST This prepares the AST object for (de)canonicalization
[ "Adds", "canonical", "and", "decanonical", "values", "to", "NSArgs", "in", "AST" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L256-L277
belbio/bel
bel/lang/belobj.py
BEL.orthologize
def orthologize(self, species_id: str) -> "BEL": """Orthologize BEL AST to given species_id Will return original entity (ns:value) if no ortholog found. Args: species_id (str): species id to convert genes/rna/proteins into Returns: BEL: returns self """ if not self.ast: return self # Collect canonical/decanonical NSArg values if not self.ast.collected_orthologs: self = self.collect_orthologs([species_id]) self.ast.species = set() self.ast = bel_utils.orthologize(self.ast, self, species_id) return self
python
def orthologize(self, species_id: str) -> "BEL": """Orthologize BEL AST to given species_id Will return original entity (ns:value) if no ortholog found. Args: species_id (str): species id to convert genes/rna/proteins into Returns: BEL: returns self """ if not self.ast: return self # Collect canonical/decanonical NSArg values if not self.ast.collected_orthologs: self = self.collect_orthologs([species_id]) self.ast.species = set() self.ast = bel_utils.orthologize(self.ast, self, species_id) return self
[ "def", "orthologize", "(", "self", ",", "species_id", ":", "str", ")", "->", "\"BEL\"", ":", "if", "not", "self", ".", "ast", ":", "return", "self", "# Collect canonical/decanonical NSArg values", "if", "not", "self", ".", "ast", ".", "collected_orthologs", ":", "self", "=", "self", ".", "collect_orthologs", "(", "[", "species_id", "]", ")", "self", ".", "ast", ".", "species", "=", "set", "(", ")", "self", ".", "ast", "=", "bel_utils", ".", "orthologize", "(", "self", ".", "ast", ",", "self", ",", "species_id", ")", "return", "self" ]
Orthologize BEL AST to given species_id Will return original entity (ns:value) if no ortholog found. Args: species_id (str): species id to convert genes/rna/proteins into Returns: BEL: returns self
[ "Orthologize", "BEL", "AST", "to", "given", "species_id" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L279-L301
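Since parse() and orthologize() both return self, calls chain naturally. Species ids use the TAX:<number> form (TAX:10090 is mouse); a live ortholog database is required, and the BEL statement below is illustrative:

from bel.lang.belobj import BEL

bo = BEL("2.1.0", "http://localhost:8000")  # assumed constructor signature, as above
bo.parse("p(HGNC:AKT1) increases p(HGNC:EGF)").orthologize("TAX:10090")
print(bo.ast.to_string())  # entities without a mouse ortholog stay as-is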
belbio/bel
bel/lang/belobj.py
BEL.collect_orthologs
def collect_orthologs(self, species: list) -> "BEL":
    """Add NSArg orthologs for given species (TAX:<number> format)

    This will add orthologs to the AST for all of the given species (as available).

    NOTE: This will run self.collect_nsarg_norms() first if not already available
    as we need the canonical forms of the NSArgs
    """

    if not species:
        return self

    species_labels = bel.terms.terms.get_labels(species)

    # Collect canonical/decanonical NSArg values
    if not self.ast.collected_nsarg_norms:
        self = self.collect_nsarg_norms()

    start_time = datetime.datetime.now()

    self.ast = bel_utils.populate_ast_nsarg_orthologs(self.ast, species_labels)

    self.ast.collected_orthologs = True

    if (
        hasattr(self.ast, "bel_object")
        and self.ast.bel_object
        and self.ast.bel_object.type == "BELAst"
    ):
        self.ast.bel_object.collected_orthologs = True

    end_time = datetime.datetime.now()
    delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}"

    log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms)

    return self
python
def collect_orthologs(self, species: list) -> "BEL":
    """Add NSArg orthologs for given species (TAX:<number> format)

    This will add orthologs to the AST for all of the given species (as available).

    NOTE: This will run self.collect_nsarg_norms() first if not already available
    as we need the canonical forms of the NSArgs
    """

    if not species:
        return self

    species_labels = bel.terms.terms.get_labels(species)

    # Collect canonical/decanonical NSArg values
    if not self.ast.collected_nsarg_norms:
        self = self.collect_nsarg_norms()

    start_time = datetime.datetime.now()

    self.ast = bel_utils.populate_ast_nsarg_orthologs(self.ast, species_labels)

    self.ast.collected_orthologs = True

    if (
        hasattr(self.ast, "bel_object")
        and self.ast.bel_object
        and self.ast.bel_object.type == "BELAst"
    ):
        self.ast.bel_object.collected_orthologs = True

    end_time = datetime.datetime.now()
    delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}"

    log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms)

    return self
[ "def", "collect_orthologs", "(", "self", ",", "species", ":", "list", ")", "->", "\"BEL\"", ":", "if", "not", "species", ":", "return", "self", "species_labels", "=", "bel", ".", "terms", ".", "terms", ".", "get_labels", "(", "species", ")", "# Collect canonical/decanonical NSArg values", "if", "not", "self", ".", "ast", ".", "collected_nsarg_norms", ":", "self", "=", "self", ".", "collect_nsarg_norms", "(", ")", "start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "self", ".", "ast", "=", "bel_utils", ".", "populate_ast_nsarg_orthologs", "(", "self", ".", "ast", ",", "species_labels", ")", "self", ".", "ast", ".", "collected_orthologs", "=", "True", "if", "(", "hasattr", "(", "self", ".", "ast", ",", "\"bel_object\"", ")", "and", "self", ".", "ast", ".", "bel_object", "and", "self", ".", "ast", ".", "bel_object", ".", "type", "==", "\"BELAst\"", ")", ":", "self", ".", "ast", ".", "bel_object", ".", "collected_orthologs", "=", "True", "end_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time - start_time).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - prepare nsarg normalization\"", ",", "delta_ms", "=", "delta_ms", ")", "return", "self" ]
Add NSArg orthologs for given species (TAX:<number> format)

This will add orthologs to the AST for all of the given species (as available).

NOTE: This will run self.collect_nsarg_norms() first if not already available
as we need the canonical forms of the NSArgs
[ "Add", "NSArg", "orthologs", "for", "given", "species", "(", "TAX", ":", "<number", "format", ")" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L303-L337
belbio/bel
bel/lang/belobj.py
BEL.compute_edges
def compute_edges(
    self, rules: List[str] = None, ast_result=False, fmt="medium"
) -> List[Mapping[str, Any]]:
    """Computed edges from primary BEL statement

    Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
    Will run only the list of computed edge rules if given.

    Args:
        rules (list): a list of rules to filter; only the rules in this list will be applied to compute edges
        ast_result (bool): return the computed edges as BELAst objects instead of subject/relation/object dicts
        fmt (str): short, medium or long version of BEL Edge (function and relation names)

    Returns:
        List[Mapping[str, Any]]: BEL Edges in medium format
    """

    if not self.ast:
        return self

    edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)

    if ast_result:
        return edges_asts

    edges = []
    for ast in edges_asts:
        edges.append(
            {
                "subject": ast.bel_subject.to_string(),
                "relation": ast.bel_relation,
                "object": ast.bel_object.to_string(),
            }
        )

    return edges
python
def compute_edges(
    self, rules: List[str] = None, ast_result=False, fmt="medium"
) -> List[Mapping[str, Any]]:
    """Computed edges from primary BEL statement

    Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
    Will run only the list of computed edge rules if given.

    Args:
        rules (list): a list of rules to filter; only the rules in this list will be applied to compute edges
        ast_result (bool): return the computed edges as BELAst objects instead of subject/relation/object dicts
        fmt (str): short, medium or long version of BEL Edge (function and relation names)

    Returns:
        List[Mapping[str, Any]]: BEL Edges in medium format
    """

    if not self.ast:
        return self

    edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)

    if ast_result:
        return edges_asts

    edges = []
    for ast in edges_asts:
        edges.append(
            {
                "subject": ast.bel_subject.to_string(),
                "relation": ast.bel_relation,
                "object": ast.bel_object.to_string(),
            }
        )

    return edges
[ "def", "compute_edges", "(", "self", ",", "rules", ":", "List", "[", "str", "]", "=", "None", ",", "ast_result", "=", "False", ",", "fmt", "=", "\"medium\"", ")", "->", "List", "[", "Mapping", "[", "str", ",", "Any", "]", "]", ":", "if", "not", "self", ".", "ast", ":", "return", "self", "edges_asts", "=", "bel", ".", "edge", ".", "computed", ".", "compute_edges", "(", "self", ".", "ast", ",", "self", ".", "spec", ")", "if", "ast_result", ":", "return", "edges_asts", "edges", "=", "[", "]", "for", "ast", "in", "edges_asts", ":", "edges", ".", "append", "(", "{", "\"subject\"", ":", "ast", ".", "bel_subject", ".", "to_string", "(", ")", ",", "\"relation\"", ":", "ast", ".", "bel_relation", ",", "\"object\"", ":", "ast", ".", "bel_object", ".", "to_string", "(", ")", ",", "}", ")", "return", "edges" ]
Computed edges from primary BEL statement

Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.

Args:
    rules (list): a list of rules to filter; only the rules in this list will be applied to compute edges
    ast_result (bool): return the computed edges as BELAst objects instead of subject/relation/object dicts
    fmt (str): short, medium or long version of BEL Edge (function and relation names)

Returns:
    List[Mapping[str, Any]]: BEL Edges in medium format
[ "Computed", "edges", "from", "primary", "BEL", "statement" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L339-L372
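Each computed edge comes back as a subject/relation/object dict (or as BELAst objects when ast_result=True). The exact edges depend on the computed signatures in the loaded BEL specification; the output below is illustrative only:

edges = bo.compute_edges()  # bo: a BEL object after a successful parse()
# e.g. [{'subject': 'complex(p(HGNC:AKT1), p(HGNC:EGF))',
#        'relation': 'hasComponent',
#        'object': 'p(HGNC:AKT1)'}, ...]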
belbio/bel
bel/lang/belobj.py
BEL.to_triple
def to_triple(self, fmt: str = "medium") -> dict:
    """Convert AST object to BEL triple

    Args:
        fmt (str): short, medium, long formatted BEL statements
            short = short function and short relation format
            medium = short function and long relation format
            long = long function and long relation format

    Returns:
        dict: {'subject': <subject>, 'relation': <relation>, 'object': <object>}
    """

    if self.ast:
        return self.ast.to_triple(ast_obj=self.ast, fmt=fmt)
    else:
        return {}
python
def to_triple(self, fmt: str = "medium") -> dict:
    """Convert AST object to BEL triple

    Args:
        fmt (str): short, medium, long formatted BEL statements
            short = short function and short relation format
            medium = short function and long relation format
            long = long function and long relation format

    Returns:
        dict: {'subject': <subject>, 'relation': <relation>, 'object': <object>}
    """

    if self.ast:
        return self.ast.to_triple(ast_obj=self.ast, fmt=fmt)
    else:
        return {}
[ "def", "to_triple", "(", "self", ",", "fmt", ":", "str", "=", "\"medium\"", ")", "->", "dict", ":", "if", "self", ".", "ast", ":", "return", "self", ".", "ast", ".", "to_triple", "(", "ast_obj", "=", "self", ".", "ast", ",", "fmt", "=", "fmt", ")", "else", ":", "return", "{", "}" ]
Convert AST object to BEL triple

Args:
    fmt (str): short, medium, long formatted BEL statements
        short = short function and short relation format
        medium = short function and long relation format
        long = long function and long relation format

Returns:
    dict: {'subject': <subject>, 'relation': <relation>, 'object': <object>}
[ "Convert", "AST", "object", "to", "BEL", "triple" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L390-L406
belbio/bel
bel/lang/belobj.py
BEL.print_tree
def print_tree(self) -> str: """Convert AST object to tree view of BEL AST Returns: printed tree of BEL AST """ if self.ast: return self.ast.print_tree(ast_obj=self.ast) else: return ""
python
def print_tree(self) -> str: """Convert AST object to tree view of BEL AST Returns: printed tree of BEL AST """ if self.ast: return self.ast.print_tree(ast_obj=self.ast) else: return ""
[ "def", "print_tree", "(", "self", ")", "->", "str", ":", "if", "self", ".", "ast", ":", "return", "self", ".", "ast", ".", "print_tree", "(", "ast_obj", "=", "self", ".", "ast", ")", "else", ":", "return", "\"\"" ]
Convert AST object to tree view of BEL AST Returns: printed tree of BEL AST
[ "Convert", "AST", "object", "to", "tree", "view", "of", "BEL", "AST" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L408-L418
belbio/bel
bel/lang/ast.py
ast_dict_to_objects
def ast_dict_to_objects(ast_dict: Mapping[str, Any], bel_obj) -> BELAst: """Convert Tatsu AST dictionary to BEL AST object Args: ast_dict (Mapping[str, Any]) Returns: BELAst: object representing the BEL Statement AST """ ast_subject = ast_dict.get("subject", None) ast_object = ast_dict.get("object", None) bel_subject = None bel_object = None bel_relation = ast_dict.get("relation") if ast_subject: bel_subject = function_ast_to_objects(ast_subject, bel_obj) if ast_object: bel_object = function_ast_to_objects(ast_object, bel_obj) ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec) return ast_obj
python
def ast_dict_to_objects(ast_dict: Mapping[str, Any], bel_obj) -> BELAst: """Convert Tatsu AST dictionary to BEL AST object Args: ast_dict (Mapping[str, Any]) Returns: BELAst: object representing the BEL Statement AST """ ast_subject = ast_dict.get("subject", None) ast_object = ast_dict.get("object", None) bel_subject = None bel_object = None bel_relation = ast_dict.get("relation") if ast_subject: bel_subject = function_ast_to_objects(ast_subject, bel_obj) if ast_object: bel_object = function_ast_to_objects(ast_object, bel_obj) ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec) return ast_obj
[ "def", "ast_dict_to_objects", "(", "ast_dict", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "bel_obj", ")", "->", "BELAst", ":", "ast_subject", "=", "ast_dict", ".", "get", "(", "\"subject\"", ",", "None", ")", "ast_object", "=", "ast_dict", ".", "get", "(", "\"object\"", ",", "None", ")", "bel_subject", "=", "None", "bel_object", "=", "None", "bel_relation", "=", "ast_dict", ".", "get", "(", "\"relation\"", ")", "if", "ast_subject", ":", "bel_subject", "=", "function_ast_to_objects", "(", "ast_subject", ",", "bel_obj", ")", "if", "ast_object", ":", "bel_object", "=", "function_ast_to_objects", "(", "ast_object", ",", "bel_obj", ")", "ast_obj", "=", "BELAst", "(", "bel_subject", ",", "bel_relation", ",", "bel_object", ",", "bel_obj", ".", "spec", ")", "return", "ast_obj" ]
Convert Tatsu AST dictionary to BEL AST object Args: ast_dict (Mapping[str, Any]) Returns: BELAst: object representing the BEL Statement AST
[ "Convert", "Tatsu", "AST", "dictionary", "to", "BEL", "AST", "object" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L559-L583
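A hedged sketch of how this helper is typically driven (parse_dict is a placeholder for a Tatsu parse result; bel_obj is assumed to supply the .spec that BELAst needs):

from bel.lang.ast import ast_dict_to_objects

# parse_dict is assumed to look like {"subject": <fn ast>, "relation": "increases", "object": <fn ast>}
bel_ast = ast_dict_to_objects(parse_dict, bel_obj)
print(bel_ast.to_string(fmt="medium"))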
belbio/bel
bel/lang/ast.py
BELAst.to_string
def to_string(self, ast_obj=None, fmt: str = "medium") -> str: """Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST """ if not ast_obj: ast_obj = self bel_relation = None if self.bel_relation and fmt == "short": bel_relation = self.spec["relations"]["to_short"].get( self.bel_relation, self.bel_relation ) elif self.bel_relation: bel_relation = self.spec["relations"]["to_long"].get( self.bel_relation, self.bel_relation ) if self.bel_subject and bel_relation and self.bel_object: if isinstance(self.bel_object, BELAst): return "{} {} ({})".format( self.bel_subject.to_string(fmt=fmt), bel_relation, self.bel_object.to_string(fmt=fmt), ) else: return "{} {} {}".format( self.bel_subject.to_string(fmt=fmt), bel_relation, self.bel_object.to_string(fmt=fmt), ) elif self.bel_subject: return "{}".format(self.bel_subject.to_string(fmt=fmt)) else: return ""
python
def to_string(self, ast_obj=None, fmt: str = "medium") -> str: """Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST """ if not ast_obj: ast_obj = self bel_relation = None if self.bel_relation and fmt == "short": bel_relation = self.spec["relations"]["to_short"].get( self.bel_relation, self.bel_relation ) elif self.bel_relation: bel_relation = self.spec["relations"]["to_long"].get( self.bel_relation, self.bel_relation ) if self.bel_subject and bel_relation and self.bel_object: if isinstance(self.bel_object, BELAst): return "{} {} ({})".format( self.bel_subject.to_string(fmt=fmt), bel_relation, self.bel_object.to_string(fmt=fmt), ) else: return "{} {} {}".format( self.bel_subject.to_string(fmt=fmt), bel_relation, self.bel_object.to_string(fmt=fmt), ) elif self.bel_subject: return "{}".format(self.bel_subject.to_string(fmt=fmt)) else: return ""
[ "def", "to_string", "(", "self", ",", "ast_obj", "=", "None", ",", "fmt", ":", "str", "=", "\"medium\"", ")", "->", "str", ":", "if", "not", "ast_obj", ":", "ast_obj", "=", "self", "bel_relation", "=", "None", "if", "self", ".", "bel_relation", "and", "fmt", "==", "\"short\"", ":", "bel_relation", "=", "self", ".", "spec", "[", "\"relations\"", "]", "[", "\"to_short\"", "]", ".", "get", "(", "self", ".", "bel_relation", ",", "self", ".", "bel_relation", ")", "elif", "self", ".", "bel_relation", ":", "bel_relation", "=", "self", ".", "spec", "[", "\"relations\"", "]", "[", "\"to_long\"", "]", ".", "get", "(", "self", ".", "bel_relation", ",", "self", ".", "bel_relation", ")", "if", "self", ".", "bel_subject", "and", "bel_relation", "and", "self", ".", "bel_object", ":", "if", "isinstance", "(", "self", ".", "bel_object", ",", "BELAst", ")", ":", "return", "\"{} {} ({})\"", ".", "format", "(", "self", ".", "bel_subject", ".", "to_string", "(", "fmt", "=", "fmt", ")", ",", "bel_relation", ",", "self", ".", "bel_object", ".", "to_string", "(", "fmt", "=", "fmt", ")", ",", ")", "else", ":", "return", "\"{} {} {}\"", ".", "format", "(", "self", ".", "bel_subject", ".", "to_string", "(", "fmt", "=", "fmt", ")", ",", "bel_relation", ",", "self", ".", "bel_object", ".", "to_string", "(", "fmt", "=", "fmt", ")", ",", ")", "elif", "self", ".", "bel_subject", ":", "return", "\"{}\"", ".", "format", "(", "self", ".", "bel_subject", ".", "to_string", "(", "fmt", "=", "fmt", ")", ")", "else", ":", "return", "\"\"" ]
Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST
[ "Convert", "AST", "object", "to", "string" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L105-L150
belbio/bel
bel/lang/ast.py
BELAst.to_triple
def to_triple(self, ast_obj=None, fmt="medium"): """Convert AST object to BEL triple Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: dict: {'subject': <subject>, 'relation': <relations>, 'object': <object>} """ if not ast_obj: ast_obj = self if self.bel_subject and self.bel_relation and self.bel_object: if self.bel_relation.startswith("has"): bel_relation = self.bel_relation elif fmt == "short": bel_relation = self.spec["relations"]["to_short"].get( self.bel_relation, None ) else: bel_relation = self.spec["relations"]["to_long"].get( self.bel_relation, None ) bel_subject = self.bel_subject.to_string(fmt=fmt) if isinstance(self.bel_object, (BELAst)): bel_object = f"({self.bel_object.to_string(fmt=fmt)})" else: bel_object = self.bel_object.to_string(fmt=fmt) return { "subject": bel_subject, "relation": bel_relation, "object": bel_object, } elif self.bel_subject: return {"subject": self.bel_subject.to_string(fmt=fmt)} else: return None
python
def to_triple(self, ast_obj=None, fmt="medium"): """Convert AST object to BEL triple Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: dict: {'subject': <subject>, 'relation': <relations>, 'object': <object>} """ if not ast_obj: ast_obj = self if self.bel_subject and self.bel_relation and self.bel_object: if self.bel_relation.startswith("has"): bel_relation = self.bel_relation elif fmt == "short": bel_relation = self.spec["relations"]["to_short"].get( self.bel_relation, None ) else: bel_relation = self.spec["relations"]["to_long"].get( self.bel_relation, None ) bel_subject = self.bel_subject.to_string(fmt=fmt) if isinstance(self.bel_object, (BELAst)): bel_object = f"({self.bel_object.to_string(fmt=fmt)})" else: bel_object = self.bel_object.to_string(fmt=fmt) return { "subject": bel_subject, "relation": bel_relation, "object": bel_object, } elif self.bel_subject: return {"subject": self.bel_subject.to_string(fmt=fmt)} else: return None
[ "def", "to_triple", "(", "self", ",", "ast_obj", "=", "None", ",", "fmt", "=", "\"medium\"", ")", ":", "if", "not", "ast_obj", ":", "ast_obj", "=", "self", "if", "self", ".", "bel_subject", "and", "self", ".", "bel_relation", "and", "self", ".", "bel_object", ":", "if", "self", ".", "bel_relation", ".", "startswith", "(", "\"has\"", ")", ":", "bel_relation", "=", "self", ".", "bel_relation", "elif", "fmt", "==", "\"short\"", ":", "bel_relation", "=", "self", ".", "spec", "[", "\"relations\"", "]", "[", "\"to_short\"", "]", ".", "get", "(", "self", ".", "bel_relation", ",", "None", ")", "else", ":", "bel_relation", "=", "self", ".", "spec", "[", "\"relations\"", "]", "[", "\"to_long\"", "]", ".", "get", "(", "self", ".", "bel_relation", ",", "None", ")", "bel_subject", "=", "self", ".", "bel_subject", ".", "to_string", "(", "fmt", "=", "fmt", ")", "if", "isinstance", "(", "self", ".", "bel_object", ",", "(", "BELAst", ")", ")", ":", "bel_object", "=", "f\"({self.bel_object.to_string(fmt=fmt)})\"", "else", ":", "bel_object", "=", "self", ".", "bel_object", ".", "to_string", "(", "fmt", "=", "fmt", ")", "return", "{", "\"subject\"", ":", "bel_subject", ",", "\"relation\"", ":", "bel_relation", ",", "\"object\"", ":", "bel_object", ",", "}", "elif", "self", ".", "bel_subject", ":", "return", "{", "\"subject\"", ":", "self", ".", "bel_subject", ".", "to_string", "(", "fmt", "=", "fmt", ")", "}", "else", ":", "return", "None" ]
Convert AST object to BEL triple Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: dict: {'subject': <subject>, 'relation': <relations>, 'object': <object>}
[ "Convert", "AST", "object", "to", "BEL", "triple" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L152-L197
belbio/bel
bel/lang/ast.py
BELAst.print_tree
def print_tree(self, ast_obj=None): """Convert AST object to tree view of BEL AST Returns: prints tree of BEL AST to STDOUT """ if not ast_obj: ast_obj = self if hasattr(self, "bel_subject"): print("Subject:") self.bel_subject.print_tree(self.bel_subject, indent=0) if hasattr(self, "bel_relation"): print("Relation:", self.bel_relation) if hasattr(self, "bel_object"): if self.bel_object.type == "BELAst": if hasattr(self, "bel_subject"): print("Nested Subject:") self.bel_object.bel_subject.print_tree(indent=0) if hasattr(self, "bel_relation"): print("Nested Relation:", self.bel_object.bel_relation) if hasattr(self, "bel_object"): print("Nested Object:") self.bel_object.bel_object.print_tree(indent=0) else: print("Object:") self.bel_object.print_tree(self.bel_object, indent=0) return self
python
def print_tree(self, ast_obj=None): """Convert AST object to tree view of BEL AST Returns: prints tree of BEL AST to STDOUT """ if not ast_obj: ast_obj = self if hasattr(self, "bel_subject"): print("Subject:") self.bel_subject.print_tree(self.bel_subject, indent=0) if hasattr(self, "bel_relation"): print("Relation:", self.bel_relation) if hasattr(self, "bel_object"): if self.bel_object.type == "BELAst": if hasattr(self, "bel_subject"): print("Nested Subject:") self.bel_object.bel_subject.print_tree(indent=0) if hasattr(self, "bel_relation"): print("Nested Relation:", self.bel_object.bel_relation) if hasattr(self, "bel_object"): print("Nested Object:") self.bel_object.bel_object.print_tree(indent=0) else: print("Object:") self.bel_object.print_tree(self.bel_object, indent=0) return self
[ "def", "print_tree", "(", "self", ",", "ast_obj", "=", "None", ")", ":", "if", "not", "ast_obj", ":", "ast_obj", "=", "self", "if", "hasattr", "(", "self", ",", "\"bel_subject\"", ")", ":", "print", "(", "\"Subject:\"", ")", "self", ".", "bel_subject", ".", "print_tree", "(", "self", ".", "bel_subject", ",", "indent", "=", "0", ")", "if", "hasattr", "(", "self", ",", "\"bel_relation\"", ")", ":", "print", "(", "\"Relation:\"", ",", "self", ".", "bel_relation", ")", "if", "hasattr", "(", "self", ",", "\"bel_object\"", ")", ":", "if", "self", ".", "bel_object", ".", "type", "==", "\"BELAst\"", ":", "if", "hasattr", "(", "self", ",", "\"bel_subject\"", ")", ":", "print", "(", "\"Nested Subject:\"", ")", "self", ".", "bel_object", ".", "bel_subject", ".", "print_tree", "(", "indent", "=", "0", ")", "if", "hasattr", "(", "self", ",", "\"bel_relation\"", ")", ":", "print", "(", "\"Nested Relation:\"", ",", "self", ".", "bel_object", ".", "bel_relation", ")", "if", "hasattr", "(", "self", ",", "\"bel_object\"", ")", ":", "print", "(", "\"Nested Object:\"", ")", "self", ".", "bel_object", ".", "bel_object", ".", "print_tree", "(", "indent", "=", "0", ")", "else", ":", "print", "(", "\"Object:\"", ")", "self", ".", "bel_object", ".", "print_tree", "(", "self", ".", "bel_object", ",", "indent", "=", "0", ")", "return", "self" ]
Convert AST object to tree view of BEL AST Returns: prints tree of BEL AST to STDOUT
[ "Convert", "AST", "object", "to", "tree", "view", "of", "BEL", "AST" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L204-L237
belbio/bel
bel/lang/ast.py
Function.to_string
def to_string( self, fmt: str = "medium", canonicalize: bool = False, decanonicalize: bool = False, orthologize: str = None, ) -> str: """Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST """ arg_string = ", ".join([a.to_string(fmt=fmt) for a in self.args]) if fmt in ["short", "medium"]: function_name = self.name_short else: function_name = self.name return "{}({})".format(function_name, arg_string)
python
def to_string( self, fmt: str = "medium", canonicalize: bool = False, decanonicalize: bool = False, orthologize: str = None, ) -> str: """Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST """ arg_string = ", ".join([a.to_string(fmt=fmt) for a in self.args]) if fmt in ["short", "medium"]: function_name = self.name_short else: function_name = self.name return "{}({})".format(function_name, arg_string)
[ "def", "to_string", "(", "self", ",", "fmt", ":", "str", "=", "\"medium\"", ",", "canonicalize", ":", "bool", "=", "False", ",", "decanonicalize", ":", "bool", "=", "False", ",", "orthologize", ":", "str", "=", "None", ",", ")", "->", "str", ":", "arg_string", "=", "\", \"", ".", "join", "(", "[", "a", ".", "to_string", "(", "fmt", "=", "fmt", ")", "for", "a", "in", "self", ".", "args", "]", ")", "if", "fmt", "in", "[", "\"short\"", ",", "\"medium\"", "]", ":", "function_name", "=", "self", ".", "name_short", "else", ":", "function_name", "=", "self", ".", "name", "return", "\"{}({})\"", ".", "format", "(", "function_name", ",", "arg_string", ")" ]
Convert AST object to string Args: fmt (str): short, medium, long formatted BEL statements short = short function and short relation format medium = short function and long relation format long = long function and long relation format Returns: str: string version of BEL AST
[ "Convert", "AST", "object", "to", "string" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L315-L341
belbio/bel
bel/lang/ast.py
Function.subcomponents
def subcomponents(self, subcomponents): """Generate subcomponents of the BEL subject or object These subcomponents are used for matching parts of a BEL subject or Object in the Edgestore. Args: AST subcomponents: Pass an empty list to start a new subcomponents request Returns: List[str]: subcomponents of BEL subject or object """ for arg in self.args: if arg.__class__.__name__ == "Function": subcomponents.append(arg.to_string()) if arg.function_type == "primary": arg.subcomponents(subcomponents) else: subcomponents.append(arg.to_string()) return subcomponents
python
def subcomponents(self, subcomponents): """Generate subcomponents of the BEL subject or object These subcomponents are used for matching parts of a BEL subject or Object in the Edgestore. Args: AST subcomponents: Pass an empty list to start a new subcomponents request Returns: List[str]: subcomponents of BEL subject or object """ for arg in self.args: if arg.__class__.__name__ == "Function": subcomponents.append(arg.to_string()) if arg.function_type == "primary": arg.subcomponents(subcomponents) else: subcomponents.append(arg.to_string()) return subcomponents
[ "def", "subcomponents", "(", "self", ",", "subcomponents", ")", ":", "for", "arg", "in", "self", ".", "args", ":", "if", "arg", ".", "__class__", ".", "__name__", "==", "\"Function\"", ":", "subcomponents", ".", "append", "(", "arg", ".", "to_string", "(", ")", ")", "if", "arg", ".", "function_type", "==", "\"primary\"", ":", "arg", ".", "subcomponents", "(", "subcomponents", ")", "else", ":", "subcomponents", ".", "append", "(", "arg", ".", "to_string", "(", ")", ")", "return", "subcomponents" ]
Generate subcomponents of the BEL subject or object These subcomponents are used for matching parts of a BEL subject or Object in the Edgestore. Args: AST subcomponents: Pass an empty list to start a new subcomponents request Returns: List[str]: subcomponents of BEL subject or object
[ "Generate", "subcomponents", "of", "the", "BEL", "subject", "or", "object" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L358-L380
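A sketch of collecting subcomponents (hypothetical func node; the exact strings depend on the to_string output of the nested NSArg and Function nodes):

# hypothetical: func is the Function node for p(HGNC:AKT1, pmod(Ph, S, 473))
parts = func.subcomponents([])   # always seed with a fresh empty list
# parts -> roughly ["HGNC:AKT1", "pmod(Ph, S, 473)"]; only primary functions are recursed into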
belbio/bel
bel/lang/ast.py
NSArg.change_nsvalue
def change_nsvalue(self, namespace, value): """Deprecated""" self.namespace = namespace self.value = value
python
def change_nsvalue(self, namespace, value): """Deprecated""" self.namespace = namespace self.value = value
[ "def", "change_nsvalue", "(", "self", ",", "namespace", ",", "value", ")", ":", "self", ".", "namespace", "=", "namespace", "self", ".", "value", "=", "value" ]
Deprecated
[ "Deprecated" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L430-L434
belbio/bel
bel/lang/ast.py
NSArg.update_nsval
def update_nsval( self, *, nsval: str = None, ns: str = None, val: str = None ) -> None: """Update Namespace and value. Args: nsval: e.g. HGNC:AKT1 ns: namespace val: value of entity """ if not (ns and val) and nsval: (ns, val) = nsval.split(":", 1) elif not (ns and val) and not nsval: log.error("Did not update NSArg - no ns:val or nsval provided") return self.namespace = ns self.value = val
python
def update_nsval( self, *, nsval: str = None, ns: str = None, val: str = None ) -> None: """Update Namespace and value. Args: nsval: e.g. HGNC:AKT1 ns: namespace val: value of entity """ if not (ns and val) and nsval: (ns, val) = nsval.split(":", 1) elif not (ns and val) and not nsval: log.error("Did not update NSArg - no ns:val or nsval provided") return self.namespace = ns self.value = val
[ "def", "update_nsval", "(", "self", ",", "*", ",", "nsval", ":", "str", "=", "None", ",", "ns", ":", "str", "=", "None", ",", "val", ":", "str", "=", "None", ")", "->", "None", ":", "if", "not", "(", "ns", "and", "val", ")", "and", "nsval", ":", "(", "ns", ",", "val", ")", "=", "nsval", ".", "split", "(", "\":\"", ",", "1", ")", "elif", "not", "(", "ns", "and", "val", ")", "and", "not", "nsval", ":", "log", ".", "error", "(", "\"Did not update NSArg - no ns:val or nsval provided\"", ")", "self", ".", "namespace", "=", "ns", "self", ".", "value", "=", "val" ]
Update Namespace and value. Args: nsval: e.g. HGNC:AKT1 ns: namespace val: value of entity
[ "Update", "Namespace", "and", "valueast", "." ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L436-L453
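A usage sketch for the two keyword-only call forms (hypothetical ns_arg instance; note the bare * in the signature forces keyword arguments):

ns_arg.update_nsval(nsval="HGNC:AKT1")       # split on the first ":" -> namespace "HGNC", value "AKT1"
ns_arg.update_nsval(ns="HGNC", val="AKT2")   # or pass the two parts explicitly
ns_arg.update_nsval()                        # logs an error and leaves the values unchanged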
belbio/bel
bel/lang/ast.py
NSArg.orthologize
def orthologize(self, ortho_species_id, belast): """Decanonical ortholog name used""" if ( self.orthologs and ortho_species_id in self.orthologs and ortho_species_id != self.species_id ): self.orthology_species = ortho_species_id self.canonical = self.orthologs[ortho_species_id]["canonical"] self.decanonical = self.orthologs[ortho_species_id]["decanonical"] self.update_nsval(nsval=self.decanonical) self.orthologized = True elif self.species_id and ortho_species_id not in self.orthologs: self.orthologized = False belast.partially_orthologized = True return self
python
def orthologize(self, ortho_species_id, belast): """Decanonical ortholog name used""" if ( self.orthologs and ortho_species_id in self.orthologs and ortho_species_id != self.species_id ): self.orthology_species = ortho_species_id self.canonical = self.orthologs[ortho_species_id]["canonical"] self.decanonical = self.orthologs[ortho_species_id]["decanonical"] self.update_nsval(nsval=self.decanonical) self.orthologized = True elif self.species_id and ortho_species_id not in self.orthologs: self.orthologized = False belast.partially_orthologized = True return self
[ "def", "orthologize", "(", "self", ",", "ortho_species_id", ",", "belast", ")", ":", "if", "(", "self", ".", "orthologs", "and", "ortho_species_id", "in", "self", ".", "orthologs", "and", "ortho_species_id", "!=", "self", ".", "species_id", ")", ":", "self", ".", "orthology_species", "=", "ortho_species_id", "self", ".", "canonical", "=", "self", ".", "orthologs", "[", "ortho_species_id", "]", "[", "\"canonical\"", "]", "self", ".", "decanonical", "=", "self", ".", "orthologs", "[", "ortho_species_id", "]", "[", "\"decanonical\"", "]", "self", ".", "update_nsval", "(", "nsval", "=", "self", ".", "decanonical", ")", "self", ".", "orthologized", "=", "True", "elif", "self", ".", "species_id", "and", "ortho_species_id", "not", "in", "self", ".", "orthologs", ":", "self", ".", "orthologized", "=", "False", "belast", ".", "partially_orthologized", "=", "True", "return", "self" ]
Decanonical ortholog name used
[ "Decanonical", "ortholog", "name", "used" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L483-L501
belbio/bel
bel/nanopub/belscripts.py
convert_csv_str_to_list
def convert_csv_str_to_list(csv_str: str) -> list: """Convert CSV str to list""" csv_str = re.sub("^\s*{", "", csv_str) csv_str = re.sub("}\s*$", "", csv_str) r = csv.reader([csv_str]) row = list(r)[0] new = [] for col in row: col = re.sub('^\s*"?\s*', "", col) col = re.sub('\s*"?\s*$', "", col) new.append(col) return new
python
def convert_csv_str_to_list(csv_str: str) -> list: """Convert CSV str to list""" csv_str = re.sub("^\s*{", "", csv_str) csv_str = re.sub("}\s*$", "", csv_str) r = csv.reader([csv_str]) row = list(r)[0] new = [] for col in row: col = re.sub('^\s*"?\s*', "", col) col = re.sub('\s*"?\s*$', "", col) new.append(col) return new
[ "def", "convert_csv_str_to_list", "(", "csv_str", ":", "str", ")", "->", "list", ":", "csv_str", "=", "re", ".", "sub", "(", "\"^\\s*{\"", ",", "\"\"", ",", "csv_str", ")", "csv_str", "=", "re", ".", "sub", "(", "\"}\\s*$\"", ",", "\"\"", ",", "csv_str", ")", "r", "=", "csv", ".", "reader", "(", "[", "csv_str", "]", ")", "row", "=", "list", "(", "r", ")", "[", "0", "]", "new", "=", "[", "]", "for", "col", "in", "row", ":", "col", "=", "re", ".", "sub", "(", "'^\\s*\"?\\s*'", ",", "\"\"", ",", "col", ")", "col", "=", "re", ".", "sub", "(", "'\\s*\"?\\s*$'", ",", "\"\"", ",", "col", ")", "new", ".", "append", "(", "col", ")", "return", "new" ]
Convert CSV str to list
[ "Convert", "CSV", "str", "to", "list" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L34-L47
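A standalone check of the brace and quote handling (assumes the bel package from this repository is importable):

from bel.nanopub.belscripts import convert_csv_str_to_list

convert_csv_str_to_list('{"PubMed", "Molecular Cell", "21051352"}')
# -> ["PubMed", "Molecular Cell", "21051352"]  (braces, quotes and padding stripped)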
belbio/bel
bel/nanopub/belscripts.py
process_citation
def process_citation(citation_str: str) -> dict: """Parse BEL Script Citation string into nanopub_bel citation object""" citation_obj = {} citation_list = convert_csv_str_to_list(citation_str) (citation_type, name, doc_id, pub_date, authors, comment, *extra) = ( citation_list + [None] * 7 ) # print(f'citation_type: {citation_type}, name: {name}, doc_id: {doc_id}, pub_date: {pub_date}, authors: {authors}, comment: {comment}') authors_list = [] if authors: authors_list = authors.split("|") citation_obj["authors"] = authors_list if name and re.match("http?://", name): citation_obj["uri"] = name elif citation_type and citation_type.upper() == "PUBMED": citation_obj["database"] = {"name": "PubMed", "id": doc_id} if name: citation_obj["reference"] = name elif name: citation_obj["reference"] = name else: citation_obj["reference"] = "No reference found" if pub_date: citation_obj["date_published"] = pub_date if comment: citation_obj["comment"] = comment return citation_obj
python
def process_citation(citation_str: str) -> dict: """Parse BEL Script Citation string into nanopub_bel citation object""" citation_obj = {} citation_list = convert_csv_str_to_list(citation_str) (citation_type, name, doc_id, pub_date, authors, comment, *extra) = ( citation_list + [None] * 7 ) # print(f'citation_type: {citation_type}, name: {name}, doc_id: {doc_id}, pub_date: {pub_date}, authors: {authors}, comment: {comment}') authors_list = [] if authors: authors_list = authors.split("|") citation_obj["authors"] = authors_list if name and re.match("http?://", name): citation_obj["uri"] = name elif citation_type and citation_type.upper() == "PUBMED": citation_obj["database"] = {"name": "PubMed", "id": doc_id} if name: citation_obj["reference"] = name elif name: citation_obj["reference"] = name else: citation_obj["reference"] = "No reference found" if pub_date: citation_obj["date_published"] = pub_date if comment: citation_obj["comment"] = comment return citation_obj
[ "def", "process_citation", "(", "citation_str", ":", "str", ")", "->", "dict", ":", "citation_obj", "=", "{", "}", "citation_list", "=", "convert_csv_str_to_list", "(", "citation_str", ")", "(", "citation_type", ",", "name", ",", "doc_id", ",", "pub_date", ",", "authors", ",", "comment", ",", "*", "extra", ")", "=", "(", "citation_list", "+", "[", "None", "]", "*", "7", ")", "# print(f'citation_type: {citation_type}, name: {name}, doc_id: {doc_id}, pub_date: {pub_date}, authors: {authors}, comment: {comment}')", "authors_list", "=", "[", "]", "if", "authors", ":", "authors_list", "=", "authors", ".", "split", "(", "\"|\"", ")", "citation_obj", "[", "\"authors\"", "]", "=", "authors_list", "if", "name", "and", "re", ".", "match", "(", "\"http?://\"", ",", "name", ")", ":", "citation_obj", "[", "\"uri\"", "]", "=", "name", "elif", "citation_type", "and", "citation_type", ".", "upper", "(", ")", "==", "\"PUBMED\"", ":", "citation_obj", "[", "\"database\"", "]", "=", "{", "\"name\"", ":", "\"PubMed\"", ",", "\"id\"", ":", "doc_id", "}", "if", "name", ":", "citation_obj", "[", "\"reference\"", "]", "=", "name", "elif", "name", ":", "citation_obj", "[", "\"reference\"", "]", "=", "name", "else", ":", "citation_obj", "[", "\"reference\"", "]", "=", "\"No reference found\"", "if", "pub_date", ":", "citation_obj", "[", "\"date_published\"", "]", "=", "pub_date", "if", "comment", ":", "citation_obj", "[", "\"comment\"", "]", "=", "comment", "return", "citation_obj" ]
Parse BEL Script Citation string into nanopub_bel citation object
[ "Parse", "BEL", "Script", "Citation", "string", "into", "nanopub_bel", "citation", "object" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L50-L85
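A sketch of a PubMed-style citation string being parsed (field order is type, name, id, date, authors, comment; the values here are illustrative):

from bel.nanopub.belscripts import process_citation

cit = process_citation('{"PubMed", "Molecular Cell", "21051352", "", "de Nadal E|Posas F"}')
# cit -> {"authors": ["de Nadal E", "Posas F"],
#         "database": {"name": "PubMed", "id": "21051352"},
#         "reference": "Molecular Cell"}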
belbio/bel
bel/nanopub/belscripts.py
split_bel_stmt
def split_bel_stmt(stmt: str, line_num) -> tuple: """Split bel statement into subject, relation, object tuple""" m = re.match(f"^(.*?\))\s+([a-zA-Z=\->\|:]+)\s+([\w(]+.*?)$", stmt, flags=0) if m: return (m.group(1), m.group(2), m.group(3)) else: log.info( f"Could not parse bel statement into components at line number: {line_num} assertion: {stmt}" ) return (stmt, None, None)
python
def split_bel_stmt(stmt: str, line_num) -> tuple: """Split bel statement into subject, relation, object tuple""" m = re.match(f"^(.*?\))\s+([a-zA-Z=\->\|:]+)\s+([\w(]+.*?)$", stmt, flags=0) if m: return (m.group(1), m.group(2), m.group(3)) else: log.info( f"Could not parse bel statement into components at line number: {line_num} assertion: {stmt}" ) return (stmt, None, None)
[ "def", "split_bel_stmt", "(", "stmt", ":", "str", ",", "line_num", ")", "->", "tuple", ":", "m", "=", "re", ".", "match", "(", "f\"^(.*?\\))\\s+([a-zA-Z=\\->\\|:]+)\\s+([\\w(]+.*?)$\"", ",", "stmt", ",", "flags", "=", "0", ")", "if", "m", ":", "return", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ",", "m", ".", "group", "(", "3", ")", ")", "else", ":", "log", ".", "info", "(", "f\"Could not parse bel statement into components at line number: {line_num} assertion: {stmt}\"", ")", "return", "(", "stmt", ",", "None", ",", "None", ")" ]
Split bel statement into subject, relation, object tuple
[ "Split", "bel", "statement", "into", "subject", "relation", "object", "tuple" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L94-L104
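An example split (the subject must end in a closing parenthesis for the regex to anchor on it):

from bel.nanopub.belscripts import split_bel_stmt

split_bel_stmt("p(HGNC:AKT1) increases act(p(HGNC:EGFR))", 1)
# -> ("p(HGNC:AKT1)", "increases", "act(p(HGNC:EGFR))")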
belbio/bel
bel/nanopub/belscripts.py
yield_nanopub
def yield_nanopub(assertions, annotations, line_num): """Yield nanopub object""" if not assertions: return {} anno = copy.deepcopy(annotations) evidence = anno.pop("evidence", None) stmt_group = anno.pop("statement_group", None) citation = anno.pop("citation", None) anno_list = [] for anno_type in anno: if isinstance(anno[anno_type], (list, tuple)): for val in anno[anno_type]: anno_list.append({"type": anno_type, "label": val}) else: anno_list.append({"type": anno_type, "label": anno[anno_type]}) assertions_list = [] for assertion in assertions: (subj, rel, obj) = split_bel_stmt(assertion, line_num) assertions_list.append({"subject": subj, "relation": rel, "object": obj}) nanopub = { "schema_uri": "https://raw.githubusercontent.com/belbio/schemas/master/schemas/nanopub_bel-1.0.0.yaml", "type": copy.deepcopy(nanopub_type), "annotations": copy.deepcopy(anno_list), "citation": copy.deepcopy(citation), "assertions": copy.deepcopy(assertions_list), "evidence": evidence, "metadata": {"statement_group": stmt_group}, } return {"nanopub": copy.deepcopy(nanopub)}
python
def yield_nanopub(assertions, annotations, line_num): """Yield nanopub object""" if not assertions: return {} anno = copy.deepcopy(annotations) evidence = anno.pop("evidence", None) stmt_group = anno.pop("statement_group", None) citation = anno.pop("citation", None) anno_list = [] for anno_type in anno: if isinstance(anno[anno_type], (list, tuple)): for val in anno[anno_type]: anno_list.append({"type": anno_type, "label": val}) else: anno_list.append({"type": anno_type, "label": anno[anno_type]}) assertions_list = [] for assertion in assertions: (subj, rel, obj) = split_bel_stmt(assertion, line_num) assertions_list.append({"subject": subj, "relation": rel, "object": obj}) nanopub = { "schema_uri": "https://raw.githubusercontent.com/belbio/schemas/master/schemas/nanopub_bel-1.0.0.yaml", "type": copy.deepcopy(nanopub_type), "annotations": copy.deepcopy(anno_list), "citation": copy.deepcopy(citation), "assertions": copy.deepcopy(assertions_list), "evidence": evidence, "metadata": {"statement_group": stmt_group}, } return {"nanopub": copy.deepcopy(nanopub)}
[ "def", "yield_nanopub", "(", "assertions", ",", "annotations", ",", "line_num", ")", ":", "if", "not", "assertions", ":", "return", "{", "}", "anno", "=", "copy", ".", "deepcopy", "(", "annotations", ")", "evidence", "=", "anno", ".", "pop", "(", "\"evidence\"", ",", "None", ")", "stmt_group", "=", "anno", ".", "pop", "(", "\"statement_group\"", ",", "None", ")", "citation", "=", "anno", ".", "pop", "(", "\"citation\"", ",", "None", ")", "anno_list", "=", "[", "]", "for", "anno_type", "in", "anno", ":", "if", "isinstance", "(", "anno", "[", "anno_type", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "val", "in", "anno", "[", "anno_type", "]", ":", "anno_list", ".", "append", "(", "{", "\"type\"", ":", "anno_type", ",", "\"label\"", ":", "val", "}", ")", "else", ":", "anno_list", ".", "append", "(", "{", "\"type\"", ":", "anno_type", ",", "\"label\"", ":", "anno", "[", "anno_type", "]", "}", ")", "assertions_list", "=", "[", "]", "for", "assertion", "in", "assertions", ":", "(", "subj", ",", "rel", ",", "obj", ")", "=", "split_bel_stmt", "(", "assertion", ",", "line_num", ")", "assertions_list", ".", "append", "(", "{", "\"subject\"", ":", "subj", ",", "\"relation\"", ":", "rel", ",", "\"object\"", ":", "obj", "}", ")", "nanopub", "=", "{", "\"schema_uri\"", ":", "\"https://raw.githubusercontent.com/belbio/schemas/master/schemas/nanopub_bel-1.0.0.yaml\"", ",", "\"type\"", ":", "copy", ".", "deepcopy", "(", "nanopub_type", ")", ",", "\"annotations\"", ":", "copy", ".", "deepcopy", "(", "anno_list", ")", ",", "\"citation\"", ":", "copy", ".", "deepcopy", "(", "citation", ")", ",", "\"assertions\"", ":", "copy", ".", "deepcopy", "(", "assertions_list", ")", ",", "\"evidence\"", ":", "evidence", ",", "\"metadata\"", ":", "{", "\"statement_group\"", ":", "stmt_group", "}", ",", "}", "return", "{", "\"nanopub\"", ":", "copy", ".", "deepcopy", "(", "nanopub", ")", "}" ]
Yield nanopub object
[ "Yield", "nanopub", "object" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L107-L142
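A hedged sketch of assembling one nanopub record directly (annotation values are illustrative; evidence, statement_group and citation are popped into dedicated fields, everything else becomes type/label annotations):

from bel.nanopub.belscripts import yield_nanopub

annotations = {
    "statement_group": "group1",
    "evidence": "AKT1 was observed to increase EGFR activity.",
    "citation": {"database": {"name": "PubMed", "id": "21051352"}},
    "Species": "9606",
}
result = yield_nanopub(["p(HGNC:AKT1) increases p(HGNC:EGFR)"], annotations, 10)
# result["nanopub"]["assertions"][0] ->
#   {"subject": "p(HGNC:AKT1)", "relation": "increases", "object": "p(HGNC:EGFR)"}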
belbio/bel
bel/nanopub/belscripts.py
process_documentline
def process_documentline(line, nanopubs_metadata): """Process SET DOCUMENT line in BEL script""" matches = re.match('SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line) key = matches.group(1) val = matches.group(2) nanopubs_metadata[key] = val return nanopubs_metadata
python
def process_documentline(line, nanopubs_metadata): """Process SET DOCUMENT line in BEL script""" matches = re.match('SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line) key = matches.group(1) val = matches.group(2) nanopubs_metadata[key] = val return nanopubs_metadata
[ "def", "process_documentline", "(", "line", ",", "nanopubs_metadata", ")", ":", "matches", "=", "re", ".", "match", "(", "'SET DOCUMENT\\s+(\\w+)\\s+=\\s+\"?(.*?)\"?$'", ",", "line", ")", "key", "=", "matches", ".", "group", "(", "1", ")", "val", "=", "matches", ".", "group", "(", "2", ")", "nanopubs_metadata", "[", "key", "]", "=", "val", "return", "nanopubs_metadata" ]
Process SET DOCUMENT line in BEL script
[ "Process", "SET", "DOCUMENT", "line", "in", "BEL", "script" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L145-L153
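An example document header line:

from bel.nanopub.belscripts import process_documentline

process_documentline('SET DOCUMENT Name = "Example BEL Document"', {})
# -> {"Name": "Example BEL Document"}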
belbio/bel
bel/nanopub/belscripts.py
process_definition
def process_definition(line, nanopubs_metadata): """Process DEFINE line in BEL script""" matches = re.match('DEFINE\s+(\w+)\s+(\w+)\s+AS\s+URL\s+"(.*?)"\s*$', line) if matches: def_type = matches.group(1).lower() if def_type == "namespace": def_type = "namespaces" elif def_type == "annotation": def_type = "annotations" key = matches.group(2) val = matches.group(3) if def_type in nanopubs_metadata: nanopubs_metadata[def_type][key] = val else: nanopubs_metadata[def_type] = {key: val} matches = re.match("DEFINE\s+(\w+)\s+(\w+)\s+AS\s+LIST\s+{(.*?)}\s*$", line) if matches: def_type = matches.group(1).lower() if def_type == "namespace": def_type = "namespaces" elif def_type == "annotation": def_type = "annotations" key = matches.group(2) val = matches.group(3) vals = convert_csv_str_to_list(val) if def_type in nanopubs_metadata: nanopubs_metadata[def_type][key] = vals else: nanopubs_metadata[def_type] = {key: vals} return nanopubs_metadata
python
def process_definition(line, nanopubs_metadata): """Process DEFINE line in BEL script""" matches = re.match('DEFINE\s+(\w+)\s+(\w+)\s+AS\s+URL\s+"(.*?)"\s*$', line) if matches: def_type = matches.group(1).lower() if def_type == "namespace": def_type = "namespaces" elif def_type == "annotation": def_type = "annotations" key = matches.group(2) val = matches.group(3) if def_type in nanopubs_metadata: nanopubs_metadata[def_type][key] = val else: nanopubs_metadata[def_type] = {key: val} matches = re.match("DEFINE\s+(\w+)\s+(\w+)\s+AS\s+LIST\s+{(.*?)}\s*$", line) if matches: def_type = matches.group(1).lower() if def_type == "namespace": def_type = "namespaces" elif def_type == "annotation": def_type = "annotations" key = matches.group(2) val = matches.group(3) vals = convert_csv_str_to_list(val) if def_type in nanopubs_metadata: nanopubs_metadata[def_type][key] = vals else: nanopubs_metadata[def_type] = {key: vals} return nanopubs_metadata
[ "def", "process_definition", "(", "line", ",", "nanopubs_metadata", ")", ":", "matches", "=", "re", ".", "match", "(", "'DEFINE\\s+(\\w+)\\s+(\\w+)\\s+AS\\s+URL\\s+\"(.*?)\"\\s*$'", ",", "line", ")", "if", "matches", ":", "def_type", "=", "matches", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "if", "def_type", "==", "\"namespace\"", ":", "def_type", "=", "\"namespaces\"", "elif", "def_type", "==", "\"annotation\"", ":", "def_type", "==", "\"annotations\"", "key", "=", "matches", ".", "group", "(", "2", ")", "val", "=", "matches", ".", "group", "(", "3", ")", "if", "def_type", "in", "nanopubs_metadata", ":", "nanopubs_metadata", "[", "def_type", "]", "[", "key", "]", "=", "val", "else", ":", "nanopubs_metadata", "[", "def_type", "]", "=", "{", "key", ":", "val", "}", "matches", "=", "re", ".", "match", "(", "\"DEFINE\\s+(\\w+)\\s+(\\w+)\\s+AS\\s+LIST\\s+{(.*?)}\\s*$\"", ",", "line", ")", "if", "matches", ":", "def_type", "=", "matches", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "if", "def_type", "==", "\"namespace\"", ":", "def_type", "=", "\"namespaces\"", "elif", "def_type", "==", "\"annotation\"", ":", "def_type", "==", "\"annotations\"", "key", "=", "matches", ".", "group", "(", "2", ")", "val", "=", "matches", ".", "group", "(", "3", ")", "vals", "=", "convert_csv_str_to_list", "(", "val", ")", "if", "def_type", "in", "nanopubs_metadata", ":", "nanopubs_metadata", "[", "def_type", "]", "[", "key", "]", "=", "vals", "else", ":", "nanopubs_metadata", "[", "def_type", "]", "=", "{", "key", ":", "vals", "}", "return", "nanopubs_metadata" ]
Process DEFINE line in BEL script
[ "Process", "DEFINE", "line", "in", "BEL", "script" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L156-L192
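Example DEFINE lines for both the URL and LIST forms (the URL is a placeholder for illustration):

from bel.nanopub.belscripts import process_definition

meta = process_definition('DEFINE NAMESPACE HGNC AS URL "http://example.org/hgnc.belns"', {})
meta = process_definition('DEFINE ANNOTATION TextLocation AS LIST {"Abstract", "Results"}', meta)
# meta -> {"namespaces": {"HGNC": "http://example.org/hgnc.belns"},
#          "annotations": {"TextLocation": ["Abstract", "Results"]}}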
belbio/bel
bel/nanopub/belscripts.py
process_unset
def process_unset(line, annotations): """Process UNSET lines in BEL Script""" matches = re.match('UNSET\s+"?(.*?)"?\s*$', line) if matches: val = matches.group(1) if val == "ALL" or val == "STATEMENT_GROUP": annotations = {} elif re.match("{", val): vals = convert_csv_str_to_list(val) for val in vals: annotations.pop(val, None) else: annotations.pop(val, None) else: log.warn(f"Problem with UNSET line: {line}") return annotations
python
def process_unset(line, annotations): """Process UNSET lines in BEL Script""" matches = re.match('UNSET\s+"?(.*?)"?\s*$', line) if matches: val = matches.group(1) if val == "ALL" or val == "STATEMENT_GROUP": annotations = {} elif re.match("{", val): vals = convert_csv_str_to_list(val) for val in vals: annotations.pop(val, None) else: annotations.pop(val, None) else: log.warn(f"Problem with UNSET line: {line}") return annotations
[ "def", "process_unset", "(", "line", ",", "annotations", ")", ":", "matches", "=", "re", ".", "match", "(", "'UNSET\\s+\"?(.*?)\"?\\s*$'", ",", "line", ")", "if", "matches", ":", "val", "=", "matches", ".", "group", "(", "1", ")", "if", "val", "==", "\"ALL\"", "or", "val", "==", "\"STATEMENT_GROUP\"", ":", "annotations", "=", "{", "}", "elif", "re", ".", "match", "(", "\"{\"", ",", "val", ")", ":", "vals", "=", "convert_csv_str_to_list", "(", "val", ")", "for", "val", "in", "vals", ":", "annotations", ".", "pop", "(", "val", ",", "None", ")", "else", ":", "annotations", ".", "pop", "(", "val", ",", "None", ")", "else", ":", "log", ".", "warn", "(", "f\"Problem with UNSET line: {line}\"", ")", "return", "annotations" ]
Process UNSET lines in BEL Script
[ "Process", "UNSET", "lines", "in", "BEL", "Script" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L195-L213
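Example UNSET handling, for a single annotation and for the clear-all form:

from bel.nanopub.belscripts import process_unset

annos = {"Species": "9606", "CellLine": "HEK293"}
annos = process_unset("UNSET Species", annos)   # -> {"CellLine": "HEK293"}
annos = process_unset("UNSET ALL", annos)       # -> {}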
belbio/bel
bel/nanopub/belscripts.py
process_set
def process_set(line, annotations): """Convert annotations into nanopub_bel annotations format""" matches = re.match('SET\s+(\w+)\s*=\s*"?(.*?)"?\s*$', line) key = None if matches: key = matches.group(1) val = matches.group(2) if key == "STATEMENT_GROUP": annotations["statement_group"] = val elif key == "Citation": annotations["citation"] = process_citation(val) elif key.lower() == "support" or key.lower() == "evidence": annotations["evidence"] = val elif re.match("\s*{.*?}", val): vals = convert_csv_str_to_list(val) annotations[key] = vals else: annotations[key] = val return annotations
python
def process_set(line, annotations): """Convert annotations into nanopub_bel annotations format""" matches = re.match('SET\s+(\w+)\s*=\s*"?(.*?)"?\s*$', line) key = None if matches: key = matches.group(1) val = matches.group(2) if key == "STATEMENT_GROUP": annotations["statement_group"] = val elif key == "Citation": annotations["citation"] = process_citation(val) elif key.lower() == "support" or key.lower() == "evidence": annotations["evidence"] = val elif re.match("\s*{.*?}", val): vals = convert_csv_str_to_list(val) annotations[key] = vals else: annotations[key] = val return annotations
[ "def", "process_set", "(", "line", ",", "annotations", ")", ":", "matches", "=", "re", ".", "match", "(", "'SET\\s+(\\w+)\\s*=\\s*\"?(.*?)\"?\\s*$'", ",", "line", ")", "key", "=", "None", "if", "matches", ":", "key", "=", "matches", ".", "group", "(", "1", ")", "val", "=", "matches", ".", "group", "(", "2", ")", "if", "key", "==", "\"STATEMENT_GROUP\"", ":", "annotations", "[", "\"statement_group\"", "]", "=", "val", "elif", "key", "==", "\"Citation\"", ":", "annotations", "[", "\"citation\"", "]", "=", "process_citation", "(", "val", ")", "elif", "key", ".", "lower", "(", ")", "==", "\"support\"", "or", "key", ".", "lower", "(", ")", "==", "\"evidence\"", ":", "annotations", "[", "\"evidence\"", "]", "=", "val", "elif", "re", ".", "match", "(", "\"\\s*{.*?}\"", ",", "val", ")", ":", "vals", "=", "convert_csv_str_to_list", "(", "val", ")", "annotations", "[", "key", "]", "=", "vals", "else", ":", "annotations", "[", "key", "]", "=", "val", "return", "annotations" ]
Convert annotations into nanopub_bel annotations format
[ "Convert", "annotations", "into", "nanopub_bel", "annotations", "format" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L216-L238
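Example SET lines (note that Evidence/Support keys are normalized to the lowercase "evidence" key):

from bel.nanopub.belscripts import process_set

annos = process_set('SET Species = "9606"', {})
annos = process_set('SET Evidence = "AKT1 increases EGFR activity."', annos)
# annos -> {"Species": "9606", "evidence": "AKT1 increases EGFR activity."}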
belbio/bel
bel/nanopub/belscripts.py
preprocess_belscript
def preprocess_belscript(lines): """ Convert any multi-line SET statements into single line SET statements""" set_flag = False for line in lines: if set_flag is False and re.match("SET", line): set_flag = True set_line = [line.rstrip()] # SET following SET elif set_flag and re.match("SET", line): yield f"{' '.join(set_line)}\n" set_line = [line.rstrip()] # Blank line following SET yields single line SET elif set_flag and re.match("\s+$", line): yield f"{' '.join(set_line)}\n" yield line set_flag = False # Append second, third, ... lines to SET elif set_flag: set_line.append(line.rstrip()) else: yield line
python
def preprocess_belscript(lines): """ Convert any multi-line SET statements into single line SET statements""" set_flag = False for line in lines: if set_flag is False and re.match("SET", line): set_flag = True set_line = [line.rstrip()] # SET following SET elif set_flag and re.match("SET", line): yield f"{' '.join(set_line)}\n" set_line = [line.rstrip()] # Blank line following SET yields single line SET elif set_flag and re.match("\s+$", line): yield f"{' '.join(set_line)}\n" yield line set_flag = False # Append second, third, ... lines to SET elif set_flag: set_line.append(line.rstrip()) else: yield line
[ "def", "preprocess_belscript", "(", "lines", ")", ":", "set_flag", "=", "False", "for", "line", "in", "lines", ":", "if", "set_flag", "is", "False", "and", "re", ".", "match", "(", "\"SET\"", ",", "line", ")", ":", "set_flag", "=", "True", "set_line", "=", "[", "line", ".", "rstrip", "(", ")", "]", "# SET following SET", "elif", "set_flag", "and", "re", ".", "match", "(", "\"SET\"", ",", "line", ")", ":", "yield", "f\"{' '.join(set_line)}\\n\"", "set_line", "=", "[", "line", ".", "rstrip", "(", ")", "]", "# Blank line following SET yields single line SET", "elif", "set_flag", "and", "re", ".", "match", "(", "\"\\s+$\"", ",", "line", ")", ":", "yield", "f\"{' '.join(set_line)}\\n\"", "yield", "line", "set_flag", "=", "False", "# Append second, third, ... lines to SET", "elif", "set_flag", ":", "set_line", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "else", ":", "yield", "line" ]
Convert any multi-line SET statements into single line SET statements
[ "Convert", "any", "multi", "-", "line", "SET", "statements", "into", "single", "line", "SET", "statements" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L266-L288
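A sketch of the collapsing behavior on an in-memory list of lines (the generator joins a multi-line SET into one line when it reaches a blank line, another SET, or a non-SET statement):

from bel.nanopub.belscripts import preprocess_belscript

lines = ['SET Evidence = "first part\n', 'second part"\n', '\n', 'p(HGNC:AKT1) increases p(HGNC:EGFR)\n']
print(list(preprocess_belscript(lines)))
# -> ['SET Evidence = "first part second part"\n', '\n', 'p(HGNC:AKT1) increases p(HGNC:EGFR)\n']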
belbio/bel
bel/nanopub/belscripts.py
parse_belscript
def parse_belscript(lines): """Lines from the BELScript - can be an iterator or list yields Nanopubs in nanopubs_bel-1.0.0 format """ nanopubs_metadata = {} annotations = {} assertions = [] # # Turn a list into an iterator # if not isinstance(lines, collections.Iterator): # lines = iter(lines) line_num = 0 # for line in preprocess_belscript(lines): for line in set_single_line(lines): line_num += 1 # Get rid of trailing comments line = re.sub("\/\/.*?$", "", line) line = line.rstrip() # Collapse continuation lines while re.search("\\\s*$", line): line = line.replace("\\", "") + next(lines) # Process lines ################################# if re.match("\s*#", line) or re.match("\s*$", line): # Skip comments and empty lines continue elif re.match("SET DOCUMENT", line): nanopubs_metadata = process_documentline(line, nanopubs_metadata) elif re.match("DEFINE", line): nanopubs_metadata = process_definition(line, nanopubs_metadata) elif re.match("UNSET", line): # Process any assertions prior to changing annotations if assertions: yield yield_nanopub(assertions, annotations, line_num) assertions = [] annotations = process_unset(line, annotations) elif re.match("SET", line): # Create nanopubs metadata prior to starting BEL Script statements section if nanopubs_metadata: yield yield_metadata(nanopubs_metadata) nanopubs_metadata = {} # Process any assertions prior to changing annotations if assertions: yield yield_nanopub(assertions, annotations, line_num) assertions = [] annotations = process_set(line, annotations) else: assertions.append(line) # Catch any leftover bel statements if assertions: yield yield_nanopub(assertions, annotations, line_num)
python
def parse_belscript(lines): """Lines from the BELScript - can be an iterator or list yields Nanopubs in nanopubs_bel-1.0.0 format """ nanopubs_metadata = {} annotations = {} assertions = [] # # Turn a list into an iterator # if not isinstance(lines, collections.Iterator): # lines = iter(lines) line_num = 0 # for line in preprocess_belscript(lines): for line in set_single_line(lines): line_num += 1 # Get rid of trailing comments line = re.sub("\/\/.*?$", "", line) line = line.rstrip() # Collapse continuation lines while re.search("\\\s*$", line): line = line.replace("\\", "") + next(lines) # Process lines ################################# if re.match("\s*#", line) or re.match("\s*$", line): # Skip comments and empty lines continue elif re.match("SET DOCUMENT", line): nanopubs_metadata = process_documentline(line, nanopubs_metadata) elif re.match("DEFINE", line): nanopubs_metadata = process_definition(line, nanopubs_metadata) elif re.match("UNSET", line): # Process any assertions prior to changing annotations if assertions: yield yield_nanopub(assertions, annotations, line_num) assertions = [] annotations = process_unset(line, annotations) elif re.match("SET", line): # Create nanopubs metadata prior to starting BEL Script statements section if nanopubs_metadata: yield yield_metadata(nanopubs_metadata) nanopubs_metadata = {} # Process any assertions prior to changing annotations if assertions: yield yield_nanopub(assertions, annotations, line_num) assertions = [] annotations = process_set(line, annotations) else: assertions.append(line) # Catch any leftover bel statements if assertions: yield yield_nanopub(assertions, annotations, line_num)
[ "def", "parse_belscript", "(", "lines", ")", ":", "nanopubs_metadata", "=", "{", "}", "annotations", "=", "{", "}", "assertions", "=", "[", "]", "# # Turn a list into an iterator", "# if not isinstance(lines, collections.Iterator):", "# lines = iter(lines)", "line_num", "=", "0", "# for line in preprocess_belscript(lines):", "for", "line", "in", "set_single_line", "(", "lines", ")", ":", "line_num", "+=", "1", "# Get rid of trailing comments", "line", "=", "re", ".", "sub", "(", "\"\\/\\/.*?$\"", ",", "\"\"", ",", "line", ")", "line", "=", "line", ".", "rstrip", "(", ")", "# Collapse continuation lines", "while", "re", ".", "search", "(", "\"\\\\\\s*$\"", ",", "line", ")", ":", "line", "=", "line", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ")", "+", "next", "(", "lines", ")", "# Process lines #################################", "if", "re", ".", "match", "(", "\"\\s*#\"", ",", "line", ")", "or", "re", ".", "match", "(", "\"\\s*$\"", ",", "line", ")", ":", "# Skip comments and empty lines", "continue", "elif", "re", ".", "match", "(", "\"SET DOCUMENT\"", ",", "line", ")", ":", "nanopubs_metadata", "=", "process_documentline", "(", "line", ",", "nanopubs_metadata", ")", "elif", "re", ".", "match", "(", "\"DEFINE\"", ",", "line", ")", ":", "nanopubs_metadata", "=", "process_definition", "(", "line", ",", "nanopubs_metadata", ")", "elif", "re", ".", "match", "(", "\"UNSET\"", ",", "line", ")", ":", "# Process any assertions prior to changing annotations", "if", "assertions", ":", "yield", "yield_nanopub", "(", "assertions", ",", "annotations", ",", "line_num", ")", "assertions", "=", "[", "]", "annotations", "=", "process_unset", "(", "line", ",", "annotations", ")", "elif", "re", ".", "match", "(", "\"SET\"", ",", "line", ")", ":", "# Create nanopubs metadata prior to starting BEL Script statements section", "if", "nanopubs_metadata", ":", "yield", "yield_metadata", "(", "nanopubs_metadata", ")", "nanopubs_metadata", "=", "{", "}", "# Process any assertions prior to changing annotations", "if", "assertions", ":", "yield", "yield_nanopub", "(", "assertions", ",", "annotations", ",", "line_num", ")", "assertions", "=", "[", "]", "annotations", "=", "process_set", "(", "line", ",", "annotations", ")", "else", ":", "assertions", ".", "append", "(", "line", ")", "# Catch any leftover bel statements", "yield_nanopub", "(", "assertions", ",", "annotations", ",", "line_num", ")" ]
Lines from the BELScript - can be an iterator or list yields Nanopubs in nanopubs_bel-1.0.0 format
[ "Lines", "from", "the", "BELScript", "-", "can", "be", "an", "iterator", "or", "list" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/belscripts.py#L291-L353
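An end-to-end sketch (example.bel is a hypothetical path; yielded records are either the document metadata or {"nanopub": ...} objects, so the guard below keeps only the nanopubs):

from bel.nanopub.belscripts import parse_belscript

with open("example.bel") as fh:   # hypothetical BEL Script file
    for record in parse_belscript(fh):
        if "nanopub" in record:
            print(record["nanopub"]["assertions"])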
RockFeng0/rtsf-http
httpdriver/actions.py
RequestTrackInfo.__stringify_body
def __stringify_body(self, request_or_response): ''' this method is adapted from httprunner ''' headers = self.__track_info['{}_headers'.format(request_or_response)] body = self.__track_info.get('{}_body'.format(request_or_response)) if isinstance(body, CaseInsensitiveDict): body = json.dumps(dict(body), ensure_ascii=False) elif isinstance(body, (dict, list)): body = json.dumps(body, indent=2, ensure_ascii=False) elif isinstance(body, bytes): resp_content_type = headers.get("Content-Type", "") try: if "image" in resp_content_type: self.__track_info["response_data_type"] = "image" body = "data:{};base64,{}".format( resp_content_type, b64encode(body).decode('utf-8') ) else: body = escape(body.decode("utf-8")) except UnicodeDecodeError: pass elif not isinstance(body, (basestring, numeric_types, Iterable)): # class instance, e.g. MultipartEncoder() body = repr(body) self.__track_info['{}_body'.format(request_or_response)] = body
python
def __stringify_body(self, request_or_response): ''' this method is adapted from httprunner ''' headers = self.__track_info['{}_headers'.format(request_or_response)] body = self.__track_info.get('{}_body'.format(request_or_response)) if isinstance(body, CaseInsensitiveDict): body = json.dumps(dict(body), ensure_ascii=False) elif isinstance(body, (dict, list)): body = json.dumps(body, indent=2, ensure_ascii=False) elif isinstance(body, bytes): resp_content_type = headers.get("Content-Type", "") try: if "image" in resp_content_type: self.__track_info["response_data_type"] = "image" body = "data:{};base64,{}".format( resp_content_type, b64encode(body).decode('utf-8') ) else: body = escape(body.decode("utf-8")) except UnicodeDecodeError: pass elif not isinstance(body, (basestring, numeric_types, Iterable)): # class instance, e.g. MultipartEncoder() body = repr(body) self.__track_info['{}_body'.format(request_or_response)] = body
[ "def", "__stringify_body", "(", "self", ",", "request_or_response", ")", ":", "headers", "=", "self", ".", "__track_info", "[", "'{}_headers'", ".", "format", "(", "request_or_response", ")", "]", "body", "=", "self", ".", "__track_info", ".", "get", "(", "'{}_body'", ".", "format", "(", "request_or_response", ")", ")", "if", "isinstance", "(", "body", ",", "CaseInsensitiveDict", ")", ":", "body", "=", "json", ".", "dumps", "(", "dict", "(", "body", ")", ",", "ensure_ascii", "=", "False", ")", "elif", "isinstance", "(", "body", ",", "(", "dict", ",", "list", ")", ")", ":", "body", "=", "json", ".", "dumps", "(", "body", ",", "indent", "=", "2", ",", "ensure_ascii", "=", "False", ")", "elif", "isinstance", "(", "body", ",", "bytes", ")", ":", "resp_content_type", "=", "headers", ".", "get", "(", "\"Content-Type\"", ",", "\"\"", ")", "try", ":", "if", "\"image\"", "in", "resp_content_type", ":", "self", ".", "__track_info", "[", "\"response_data_type\"", "]", "=", "\"image\"", "body", "=", "\"data:{};base64,{}\"", ".", "format", "(", "resp_content_type", ",", "b64encode", "(", "body", ")", ".", "decode", "(", "'utf-8'", ")", ")", "else", ":", "body", "=", "escape", "(", "body", ".", "decode", "(", "\"utf-8\"", ")", ")", "except", "UnicodeDecodeError", ":", "pass", "elif", "not", "isinstance", "(", "body", ",", "(", "basestring", ",", "numeric_types", ",", "Iterable", ")", ")", ":", "# class instance, e.g. MultipartEncoder()\r", "body", "=", "repr", "(", "body", ")", "self", ".", "__track_info", "[", "'{}_body'", ".", "format", "(", "request_or_response", ")", "]", "=", "body" ]
this method is adapted from httprunner
[ "this", "method", "reference", "from", "httprunner" ]
train
https://github.com/RockFeng0/rtsf-http/blob/3280cc9a01b0c92c52d699b0ebc29e55e62611a0/httpdriver/actions.py#L73-L102
RockFeng0/rtsf-http
httpdriver/actions.py
Request.DyStrData
def DyStrData(cls,name, regx, index = 0): ''' set dynamic value from the string data of response @param name: glob parameter name @param regx: re._pattern_type e.g. DyStrData("a",re.compile('123')) ''' text = Markup(cls.__trackinfo["response_body"]).unescape() if not text: return if not isinstance(regx, re._pattern_type): raise Exception("DyStrData needs a compiled regular expression pattern.") values = regx.findall(text) result = "" if len(values)>index: result = values[index] cls.glob.update({name:result})
python
def DyStrData(cls,name, regx, index = 0): ''' set dynamic value from the string data of response @param name: glob parameter name @param regx: re._pattern_type e.g. DyStrData("a",re.compile('123')) ''' text = Markup(cls.__trackinfo["response_body"]).unescape() if not text: return if not isinstance(regx, re._pattern_type): raise Exception("DyStrData need the arg which have compiled the regular expression.") values = regx.findall(text) result = "" if len(values)>index: result = values[index] cls.glob.update({name:result})
[ "def", "DyStrData", "(", "cls", ",", "name", ",", "regx", ",", "index", "=", "0", ")", ":", "text", "=", "Markup", "(", "cls", ".", "__trackinfo", "[", "\"response_body\"", "]", ")", ".", "unescape", "(", ")", "if", "not", "text", ":", "return", "if", "not", "isinstance", "(", "regx", ",", "re", ".", "_pattern_type", ")", ":", "raise", "Exception", "(", "\"DyStrData need the arg which have compiled the regular expression.\"", ")", "values", "=", "regx", ".", "findall", "(", "text", ")", "result", "=", "\"\"", "if", "len", "(", "values", ")", ">", "index", ":", "result", "=", "values", "[", "index", "]", "cls", ".", "glob", ".", "update", "(", "{", "name", ":", "result", "}", ")" ]
set dynamic value from the string data of response @param name: glob parameter name @param regx: re._pattern_type e.g. DyStrData("a",re.compile('123'))
[ "set", "dynamic", "value", "from", "the", "string", "data", "of", "response" ]
train
https://github.com/RockFeng0/rtsf-http/blob/3280cc9a01b0c92c52d699b0ebc29e55e62611a0/httpdriver/actions.py#L227-L245
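A hedged usage sketch mirroring what DyStrData stores into glob; the pattern, response text, and parameter name are assumptions for illustration.

import re

glob = {}
response_body = "order id: 12345, status: ok"  # illustrative response text
regx = re.compile(r"\d+")
values = regx.findall(response_body)
glob.update({"order_id": values[0] if len(values) > 0 else ""})
print(glob)  # {'order_id': '12345'}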
RockFeng0/rtsf-http
httpdriver/actions.py
Request.DyJsonData
def DyJsonData(cls,name, sequence): ''' set dynamic value from the json data of response @param name: glob parameter name @param sequence: sequence for the json e.g. result={"a":1, "b":[1,2,3,4], "c":{"d":5,"e":6}, "f":{"g":[7,8,9]}, "h":[{"i":10,"j":11},{"k":12}] } sequence1 ="a" # -> 1 sequence2 ="b.3" # -> 4 sequence3 = "f.g.2" # -> 9 sequence4 = "h.0.j" # -> 11 ''' text = Markup(cls.__trackinfo["response_body"]).unescape() if not text: return resp = json.loads(text) # resp = cls.__test.copy() sequence = [_parse_string_value(i) for i in sequence.split('.')] for i in sequence: try: if isinstance(i, int): resp = resp[i] else: resp = resp.get(i) except: cls.glob.update({name:None}) return cls.glob.update({name:resp})
python
def DyJsonData(cls,name, sequence): ''' set dynamic value from the json data of response @param name: glob parameter name @param sequence: sequence for the json e.g. result={"a":1, "b":[1,2,3,4], "c":{"d":5,"e":6}, "f":{"g":[7,8,9]}, "h":[{"i":10,"j":11},{"k":12}] } sequence1 ="a" # -> 1 sequence2 ="b.3" # -> 4 sequence3 = "f.g.2" # -> 9 sequence4 = "h.0.j" # -> 11 ''' text = Markup(cls.__trackinfo["response_body"]).unescape() if not text: return resp = json.loads(text) # resp = cls.__test.copy() sequence = [_parse_string_value(i) for i in sequence.split('.')] for i in sequence: try: if isinstance(i, int): resp = resp[i] else: resp = resp.get(i) except: cls.glob.update({name:None}) return cls.glob.update({name:resp})
[ "def", "DyJsonData", "(", "cls", ",", "name", ",", "sequence", ")", ":", "text", "=", "Markup", "(", "cls", ".", "__trackinfo", "[", "\"response_body\"", "]", ")", ".", "unescape", "(", ")", "if", "not", "text", ":", "return", "resp", "=", "json", ".", "loads", "(", "text", ")", "# resp = cls.__test.copy() \r", "sequence", "=", "[", "_parse_string_value", "(", "i", ")", "for", "i", "in", "sequence", ".", "split", "(", "'.'", ")", "]", "for", "i", "in", "sequence", ":", "try", ":", "if", "isinstance", "(", "i", ",", "int", ")", ":", "resp", "=", "resp", "[", "i", "]", "else", ":", "resp", "=", "resp", ".", "get", "(", "i", ")", "except", ":", "cls", ".", "glob", ".", "update", "(", "{", "name", ":", "None", "}", ")", "return", "cls", ".", "glob", ".", "update", "(", "{", "name", ":", "resp", "}", ")" ]
set dynamic value from the json data of response @param name: glob parameter name @param sequence: sequence for the json e.g. result={"a":1, "b":[1,2,3,4], "c":{"d":5,"e":6}, "f":{"g":[7,8,9]}, "h":[{"i":10,"j":11},{"k":12}] } sequence1 ="a" # -> 1 sequence2 ="b.3" # -> 4 sequence3 = "f.g.2" # -> 9 sequence4 = "h.0.j" # -> 11
[ "set", "dynamic", "value", "from", "the", "json", "data", "of", "response" ]
train
https://github.com/RockFeng0/rtsf-http/blob/3280cc9a01b0c92c52d699b0ebc29e55e62611a0/httpdriver/actions.py#L248-L281
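A self-contained sketch of the dotted-sequence lookup that the DyJsonData docstring describes, using the docstring's own example data; the lookup helper is a simplified stand-in that converts digit-only parts to list indexes, roughly what _parse_string_value enables.

def lookup(data, sequence):
    # Walk nested dicts/lists along a dotted path; digit parts become list indexes.
    for part in sequence.split("."):
        if part.isdigit():
            data = data[int(part)]
        else:
            data = data.get(part)
    return data

result = {"a": 1, "b": [1, 2, 3, 4], "c": {"d": 5, "e": 6},
          "f": {"g": [7, 8, 9]}, "h": [{"i": 10, "j": 11}, {"k": 12}]}
assert lookup(result, "a") == 1
assert lookup(result, "b.3") == 4
assert lookup(result, "f.g.2") == 9
assert lookup(result, "h.0.j") == 11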
belbio/bel
bel/nanopub/files.py
read_nanopubs
def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]: """Read file and generate nanopubs If filename has *.gz, will read as a gzip file If filename has *.jsonl*, will parsed as a JSONLines file IF filename has *.json*, will be parsed as a JSON file If filename has *.yaml* or *.yml*, will be parsed as a YAML file Args: filename (str): filename to read nanopubs from Returns: Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format """ jsonl_flag, json_flag, yaml_flag = False, False, False if fn == "-" or "jsonl" in fn: jsonl_flag = True elif "json" in fn: json_flag = True elif re.search("ya?ml", fn): yaml_flag = True else: log.error("Do not recognize nanopub file format - neither json nor jsonl format.") return {} try: if re.search("gz$", fn): f = gzip.open(fn, "rt") else: try: f = click.open_file(fn, mode="rt") except Exception as e: log.info(f"Can not open file {fn} Error: {e}") quit() if jsonl_flag: for line in f: yield json.loads(line) elif json_flag: nanopubs = json.load(f) for nanopub in nanopubs: yield nanopub elif yaml_flag: nanopubs = yaml.load(f, Loader=yaml.SafeLoader) for nanopub in nanopubs: yield nanopub except Exception as e: log.error(f"Could not open file: {fn}")
python
def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]: """Read file and generate nanopubs If filename has *.gz, will read as a gzip file If filename has *.jsonl*, will parsed as a JSONLines file IF filename has *.json*, will be parsed as a JSON file If filename has *.yaml* or *.yml*, will be parsed as a YAML file Args: filename (str): filename to read nanopubs from Returns: Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format """ jsonl_flag, json_flag, yaml_flag = False, False, False if fn == "-" or "jsonl" in fn: jsonl_flag = True elif "json" in fn: json_flag = True elif re.search("ya?ml", fn): yaml_flag = True else: log.error("Do not recognize nanopub file format - neither json nor jsonl format.") return {} try: if re.search("gz$", fn): f = gzip.open(fn, "rt") else: try: f = click.open_file(fn, mode="rt") except Exception as e: log.info(f"Can not open file {fn} Error: {e}") quit() if jsonl_flag: for line in f: yield json.loads(line) elif json_flag: nanopubs = json.load(f) for nanopub in nanopubs: yield nanopub elif yaml_flag: nanopubs = yaml.load(f, Loader=yaml.SafeLoader) for nanopub in nanopubs: yield nanopub except Exception as e: log.error(f"Could not open file: {fn}")
[ "def", "read_nanopubs", "(", "fn", ":", "str", ")", "->", "Iterable", "[", "Mapping", "[", "str", ",", "Any", "]", "]", ":", "jsonl_flag", ",", "json_flag", ",", "yaml_flag", "=", "False", ",", "False", ",", "False", "if", "fn", "==", "\"-\"", "or", "\"jsonl\"", "in", "fn", ":", "jsonl_flag", "=", "True", "elif", "\"json\"", "in", "fn", ":", "json_flag", "=", "True", "elif", "re", ".", "search", "(", "\"ya?ml\"", ",", "fn", ")", ":", "yaml_flag", "=", "True", "else", ":", "log", ".", "error", "(", "\"Do not recognize nanopub file format - neither json nor jsonl format.\"", ")", "return", "{", "}", "try", ":", "if", "re", ".", "search", "(", "\"gz$\"", ",", "fn", ")", ":", "f", "=", "gzip", ".", "open", "(", "fn", ",", "\"rt\"", ")", "else", ":", "try", ":", "f", "=", "click", ".", "open_file", "(", "fn", ",", "mode", "=", "\"rt\"", ")", "except", "Exception", "as", "e", ":", "log", ".", "info", "(", "f\"Can not open file {fn} Error: {e}\"", ")", "quit", "(", ")", "if", "jsonl_flag", ":", "for", "line", "in", "f", ":", "yield", "json", ".", "loads", "(", "line", ")", "elif", "json_flag", ":", "nanopubs", "=", "json", ".", "load", "(", "f", ")", "for", "nanopub", "in", "nanopubs", ":", "yield", "nanopub", "elif", "yaml_flag", ":", "nanopubs", "=", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")", "for", "nanopub", "in", "nanopubs", ":", "yield", "nanopub", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not open file: {fn}\"", ")" ]
Read file and generate nanopubs If filename has *.gz, will be read as a gzip file If filename has *.jsonl*, will be parsed as a JSONLines file If filename has *.json*, will be parsed as a JSON file If filename has *.yaml* or *.yml*, will be parsed as a YAML file Args: filename (str): filename to read nanopubs from Returns: Generator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format
[ "Read", "file", "and", "generate", "nanopubs" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/files.py#L23-L72
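A usage sketch for read_nanopubs; the filename is hypothetical and the key path into each nanopub assumes the nanopub_bel schema, which the record does not spell out.

from bel.nanopub.files import read_nanopubs

# Stream nanopubs from a gzipped JSONLines file (filename is illustrative).
for nanopub in read_nanopubs("nanopubs.jsonl.gz"):
    print(nanopub.get("nanopub", {}).get("id"))  # key path is an assumption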
belbio/bel
bel/nanopub/files.py
create_nanopubs_fh
def create_nanopubs_fh(output_fn: str): """Create Nanopubs output filehandle \b If output fn is '-' will write JSONlines to STDOUT If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file Args: output_fn: Name of output file Returns: (filehandle, yaml_flag, jsonl_flag, json_flag) """ # output file # set output flags json_flag, jsonl_flag, yaml_flag = False, False, False if output_fn: if re.search("gz$", output_fn): out_fh = gzip.open(output_fn, "wt") else: out_fh = click.open_file(output_fn, mode="wt") if re.search("ya?ml", output_fn): yaml_flag = True elif "jsonl" in output_fn or "-" == output_fn: jsonl_flag = True elif "json" in output_fn: json_flag = True else: out_fh = sys.stdout return (out_fh, yaml_flag, jsonl_flag, json_flag)
python
def create_nanopubs_fh(output_fn: str): """Create Nanopubs output filehandle \b If output fn is '-' will write JSONlines to STDOUT If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file Args: output_fn: Name of output file Returns: (filehandle, yaml_flag, jsonl_flag, json_flag) """ # output file # set output flags json_flag, jsonl_flag, yaml_flag = False, False, False if output_fn: if re.search("gz$", output_fn): out_fh = gzip.open(output_fn, "wt") else: out_fh = click.open_file(output_fn, mode="wt") if re.search("ya?ml", output_fn): yaml_flag = True elif "jsonl" in output_fn or "-" == output_fn: jsonl_flag = True elif "json" in output_fn: json_flag = True else: out_fh = sys.stdout return (out_fh, yaml_flag, jsonl_flag, json_flag)
[ "def", "create_nanopubs_fh", "(", "output_fn", ":", "str", ")", ":", "# output file", "# set output flags", "json_flag", ",", "jsonl_flag", ",", "yaml_flag", "=", "False", ",", "False", ",", "False", "if", "output_fn", ":", "if", "re", ".", "search", "(", "\"gz$\"", ",", "output_fn", ")", ":", "out_fh", "=", "gzip", ".", "open", "(", "output_fn", ",", "\"wt\"", ")", "else", ":", "out_fh", "=", "click", ".", "open_file", "(", "output_fn", ",", "mode", "=", "\"wt\"", ")", "if", "re", ".", "search", "(", "\"ya?ml\"", ",", "output_fn", ")", ":", "yaml_flag", "=", "True", "elif", "\"jsonl\"", "in", "output_fn", "or", "\"-\"", "==", "output_fn", ":", "jsonl_flag", "=", "True", "elif", "\"json\"", "in", "output_fn", ":", "json_flag", "=", "True", "else", ":", "out_fh", "=", "sys", ".", "stdout", "return", "(", "out_fh", ",", "yaml_flag", ",", "jsonl_flag", ",", "json_flag", ")" ]
Create Nanopubs output filehandle \b If output fn is '-', will write JSONLines to STDOUT If output fn has *.gz, will be written as a gzip file If output fn has *.jsonl*, will be written as a JSONLines file If output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file Args: output_fn: Name of output file Returns: (filehandle, yaml_flag, jsonl_flag, json_flag)
[ "Create", "Nanopubs", "output", "filehandle" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/files.py#L75-L111
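A usage sketch for create_nanopubs_fh; the output filename and the record written are assumptions.

import json

from bel.nanopub.files import create_nanopubs_fh

out_fh, yaml_flag, jsonl_flag, json_flag = create_nanopubs_fh("out.jsonl.gz")
if jsonl_flag:
    out_fh.write(json.dumps({"nanopub": {}}) + "\n")  # illustrative record
out_fh.close()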
belbio/bel
bel/nanopub/files.py
write_edges
def write_edges( edges: Mapping[str, Any], filename: str, jsonlines: bool = False, gzipflag: bool = False, yaml: bool = False, ): """Write edges to file Args: edges (Mapping[str, Any]): in edges JSON Schema format filename (str): filename to write jsonlines (bool): output in JSONLines format? gzipflag (bool): create gzipped file? yaml (bool): create yaml file? """ pass
python
def write_edges( edges: Mapping[str, Any], filename: str, jsonlines: bool = False, gzipflag: bool = False, yaml: bool = False, ): """Write edges to file Args: edges (Mapping[str, Any]): in edges JSON Schema format filename (str): filename to write jsonlines (bool): output in JSONLines format? gzipflag (bool): create gzipped file? yaml (bool): create yaml file? """ pass
[ "def", "write_edges", "(", "edges", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "filename", ":", "str", ",", "jsonlines", ":", "bool", "=", "False", ",", "gzipflag", ":", "bool", "=", "False", ",", "yaml", ":", "bool", "=", "False", ",", ")", ":", "pass" ]
Write edges to file Args: edges (Mapping[str, Any]): in edges JSON Schema format filename (str): filename to write jsonlines (bool): output in JSONLines format? gzipflag (bool): create gzipped file? yaml (bool): create yaml file?
[ "Write", "edges", "to", "file" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/files.py#L153-L169
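write_edges above is an unimplemented stub (its body is pass). A minimal sketch of one plausible implementation, under the assumption that edges are JSON-serializable dicts; this is not the library's code.

import gzip
import json

def write_edges_sketch(edges, filename, jsonlines=False, gzipflag=False):
    # Hypothetical implementation honoring the gzip and JSONLines flags.
    opener = gzip.open if gzipflag else open
    with opener(filename, "wt") as fh:
        if jsonlines:
            for edge in edges:
                fh.write(json.dumps(edge) + "\n")
        else:
            json.dump(edges, fh, indent=2)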
belbio/bel
bel/db/elasticsearch.py
add_index_alias
def add_index_alias(es, index_name, alias_name): """Add index alias to index_name""" es.indices.put_alias(index=index_name, name=alias_name)
python
def add_index_alias(es, index_name, alias_name): """Add index alias to index_name""" es.indices.put_alias(index=index_name, name=alias_name)
[ "def", "add_index_alias", "(", "es", ",", "index_name", ",", "alias_name", ")", ":", "es", ".", "indices", ".", "put_alias", "(", "index", "=", "index_name", ",", "name", "=", "terms_alias", ")" ]
Add index alias to index_name
[ "Add", "index", "alias", "to", "index_name" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L25-L28
belbio/bel
bel/db/elasticsearch.py
delete_index
def delete_index(es, index_name: str): """Delete the terms index""" if not index_name: log.warn("No index name given to delete") return None result = es.indices.delete(index=index_name) return result
python
def delete_index(es, index_name: str): """Delete the terms index""" if not index_name: log.warn("No index name given to delete") return None result = es.indices.delete(index=index_name) return result
[ "def", "delete_index", "(", "es", ",", "index_name", ":", "str", ")", ":", "if", "not", "index_name", ":", "log", ".", "warn", "(", "\"No index name given to delete\"", ")", "return", "None", "result", "=", "es", ".", "indices", ".", "delete", "(", "index", "=", "index_name", ")", "return", "result" ]
Delete the terms index
[ "Delete", "the", "terms", "index" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L40-L48
belbio/bel
bel/db/elasticsearch.py
create_terms_index
def create_terms_index(es, index_name: str): """Create terms index""" with open(mappings_terms_fn, "r") as f: mappings_terms = yaml.load(f, Loader=yaml.SafeLoader) try: es.indices.create(index=index_name, body=mappings_terms) except Exception as e: log.error(f"Could not create elasticsearch terms index: {e}")
python
def create_terms_index(es, index_name: str): """Create terms index""" with open(mappings_terms_fn, "r") as f: mappings_terms = yaml.load(f, Loader=yaml.SafeLoader) try: es.indices.create(index=index_name, body=mappings_terms) except Exception as e: log.error(f"Could not create elasticsearch terms index: {e}")
[ "def", "create_terms_index", "(", "es", ",", "index_name", ":", "str", ")", ":", "with", "open", "(", "mappings_terms_fn", ",", "\"r\"", ")", "as", "f", ":", "mappings_terms", "=", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")", "try", ":", "es", ".", "indices", ".", "create", "(", "index", "=", "index_name", ",", "body", "=", "mappings_terms", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not create elasticsearch terms index: {e}\"", ")" ]
Create terms index
[ "Create", "terms", "index" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L51-L61
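A hedged driver for create_terms_index and add_index_alias above; the Elasticsearch connection settings and index name are assumptions.

from elasticsearch import Elasticsearch

from bel.db.elasticsearch import add_index_alias, create_terms_index

es = Elasticsearch()  # connection settings are an assumption
create_terms_index(es, "terms_20190101")  # index name is illustrative
add_index_alias(es, "terms_20190101", "terms")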
belbio/bel
bel/db/elasticsearch.py
delete_terms_indexes
def delete_terms_indexes(es, index_name: str = "terms_*"): """Delete all terms indexes""" try: es.indices.delete(index=index_name) except Exception as e: log.error(f"Could not delete all terms indices: {e}")
python
def delete_terms_indexes(es, index_name: str = "terms_*"): """Delete all terms indexes""" try: es.indices.delete(index=index_name) except Exception as e: log.error(f"Could not delete all terms indices: {e}")
[ "def", "delete_terms_indexes", "(", "es", ",", "index_name", ":", "str", "=", "\"terms_*\"", ")", ":", "try", ":", "es", ".", "indices", ".", "delete", "(", "index", "=", "index_name", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not delete all terms indices: {e}\"", ")" ]
Delete all terms indexes
[ "Delete", "all", "terms", "indexes" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L64-L70
belbio/bel
bel/db/elasticsearch.py
bulk_load_docs
def bulk_load_docs(es, docs): """Bulk load docs Args: es: elasticsearch handle docs: Iterator of doc objects - includes index_name """ chunk_size = 200 try: results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size) log.debug(f"Elasticsearch documents loaded: {results[0]}") # elasticsearch.helpers.parallel_bulk(es, terms, chunk_size=chunk_size, thread_count=4) if len(results[1]) > 0: log.error("Bulk load errors {}".format(results)) except elasticsearch.ElasticsearchException as e: log.error("Indexing error: {}\n".format(e))
python
def bulk_load_docs(es, docs): """Bulk load docs Args: es: elasticsearch handle docs: Iterator of doc objects - includes index_name """ chunk_size = 200 try: results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size) log.debug(f"Elasticsearch documents loaded: {results[0]}") # elasticsearch.helpers.parallel_bulk(es, terms, chunk_size=chunk_size, thread_count=4) if len(results[1]) > 0: log.error("Bulk load errors {}".format(results)) except elasticsearch.ElasticsearchException as e: log.error("Indexing error: {}\n".format(e))
[ "def", "bulk_load_docs", "(", "es", ",", "docs", ")", ":", "chunk_size", "=", "200", "try", ":", "results", "=", "elasticsearch", ".", "helpers", ".", "bulk", "(", "es", ",", "docs", ",", "chunk_size", "=", "chunk_size", ")", "log", ".", "debug", "(", "f\"Elasticsearch documents loaded: {results[0]}\"", ")", "# elasticsearch.helpers.parallel_bulk(es, terms, chunk_size=chunk_size, thread_count=4)", "if", "len", "(", "results", "[", "1", "]", ")", ">", "0", ":", "log", ".", "error", "(", "\"Bulk load errors {}\"", ".", "format", "(", "results", ")", ")", "except", "elasticsearch", ".", "ElasticsearchException", "as", "e", ":", "log", ".", "error", "(", "\"Indexing error: {}\\n\"", ".", "format", "(", "e", ")", ")" ]
Bulk load docs Args: es: elasticsearch handle docs: Iterator of doc objects - includes index_name
[ "Bulk", "load", "docs" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/elasticsearch.py#L85-L103
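A sketch of the generator that bulk_load_docs consumes; the action shape (_index/_id/_source) follows elasticsearch.helpers.bulk conventions, and the field names are assumptions.

def term_docs(terms, index_name):
    # Yield one bulk action per term in the shape helpers.bulk expects.
    for term in terms:
        yield {"_index": index_name, "_id": term["id"], "_source": term}

# bulk_load_docs(es, term_docs(terms, "terms_20190101"))  # illustrative call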
belbio/bel
bel/lang/semantics.py
validate
def validate(bo, error_level: str = "WARNING") -> Tuple[bool, List[Tuple[str, str]]]: """Semantically validate BEL AST Add errors and warnings to bel_obj.validation_messages Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR Args: bo: main BEL language object error_level: return ERRORs only or also WARNINGs Returns: Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages) """ if bo.ast: bo = validate_functions(bo.ast, bo) # No WARNINGs generated in this function if error_level == "WARNING": bo = validate_arg_values(bo.ast, bo) # validates NSArg and StrArg values else: bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse")) for msg in bo.validation_messages: if msg[0] == "ERROR": bo.parse_valid = False break return bo
python
def validate(bo, error_level: str = "WARNING") -> Tuple[bool, List[Tuple[str, str]]]: """Semantically validate BEL AST Add errors and warnings to bel_obj.validation_messages Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR Args: bo: main BEL language object error_level: return ERRORs only or also WARNINGs Returns: Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages) """ if bo.ast: bo = validate_functions(bo.ast, bo) # No WARNINGs generated in this function if error_level == "WARNING": bo = validate_arg_values(bo.ast, bo) # validates NSArg and StrArg values else: bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse")) for msg in bo.validation_messages: if msg[0] == "ERROR": bo.parse_valid = False break return bo
[ "def", "validate", "(", "bo", ",", "error_level", ":", "str", "=", "\"WARNING\"", ")", "->", "Tuple", "[", "bool", ",", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", "]", ":", "if", "bo", ".", "ast", ":", "bo", "=", "validate_functions", "(", "bo", ".", "ast", ",", "bo", ")", "# No WARNINGs generated in this function", "if", "error_level", "==", "\"WARNING\"", ":", "bo", "=", "validate_arg_values", "(", "bo", ".", "ast", ",", "bo", ")", "# validates NSArg and StrArg values", "else", ":", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"ERROR\"", ",", "\"Invalid BEL Statement - cannot parse\"", ")", ")", "for", "msg", "in", "bo", ".", "validation_messages", ":", "if", "msg", "[", "0", "]", "==", "\"ERROR\"", ":", "bo", ".", "parse_valid", "=", "False", "break", "return", "bo" ]
Semantically validate BEL AST Add errors and warnings to bel_obj.validation_messages Error Levels are similar to log levels - selecting WARNING includes both WARNING and ERROR, selecting ERROR just includes ERROR Args: bo: main BEL language object error_level: return ERRORs only or also WARNINGs Returns: Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages)
[ "Semantically", "validate", "BEL", "AST" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L14-L43
belbio/bel
bel/lang/semantics.py
validate_functions
def validate_functions(ast: BELAst, bo): """Recursively validate function signatures Determine if function matches one of the available signatures. Also, 1. Add entity types to AST NSArg, e.g. Abundance, ... 2. Add optional to AST Arg (optional means it is not a fixed, required argument and needs to be sorted for canonicalization, e.g. reactants(A, B, C) ) Args: bo: bel object Returns: bel object """ if isinstance(ast, Function): log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}") function_signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"] function_name = ast.name (valid_function, messages) = check_function_args( ast.args, function_signatures, function_name ) if not valid_function: message = ", ".join(messages) bo.validation_messages.append( ( "ERROR", "Invalid BEL Statement function {} - problem with function signatures: {}".format( ast.to_string(), message ), ) ) bo.parse_valid = False # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: validate_functions(arg, bo) return bo
python
def validate_functions(ast: BELAst, bo): """Recursively validate function signatures Determine if function matches one of the available signatures. Also, 1. Add entity types to AST NSArg, e.g. Abundance, ... 2. Add optional to AST Arg (optional means it is not a fixed, required argument and needs to be sorted for canonicalization, e.g. reactants(A, B, C) ) Args: bo: bel object Returns: bel object """ if isinstance(ast, Function): log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}") function_signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"] function_name = ast.name (valid_function, messages) = check_function_args( ast.args, function_signatures, function_name ) if not valid_function: message = ", ".join(messages) bo.validation_messages.append( ( "ERROR", "Invalid BEL Statement function {} - problem with function signatures: {}".format( ast.to_string(), message ), ) ) bo.parse_valid = False # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: validate_functions(arg, bo) return bo
[ "def", "validate_functions", "(", "ast", ":", "BELAst", ",", "bo", ")", ":", "if", "isinstance", "(", "ast", ",", "Function", ")", ":", "log", ".", "debug", "(", "f\"Validating: {ast.name}, {ast.function_type}, {ast.args}\"", ")", "function_signatures", "=", "bo", ".", "spec", "[", "\"functions\"", "]", "[", "\"signatures\"", "]", "[", "ast", ".", "name", "]", "[", "\"signatures\"", "]", "function_name", "=", "ast", ".", "name", "(", "valid_function", ",", "messages", ")", "=", "check_function_args", "(", "ast", ".", "args", ",", "function_signatures", ",", "function_name", ")", "if", "not", "valid_function", ":", "message", "=", "\", \"", ".", "join", "(", "messages", ")", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"ERROR\"", ",", "\"Invalid BEL Statement function {} - problem with function signatures: {}\"", ".", "format", "(", "ast", ".", "to_string", "(", ")", ",", "message", ")", ",", ")", ")", "bo", ".", "parse_valid", "=", "False", "# Recursively process every NSArg by processing BELAst and Functions", "if", "hasattr", "(", "ast", ",", "\"args\"", ")", ":", "for", "arg", "in", "ast", ".", "args", ":", "validate_functions", "(", "arg", ",", "bo", ")", "return", "bo" ]
Recursively validate function signatures Determine if function matches one of the available signatures. Also, 1. Add entity types to AST NSArg, e.g. Abundance, ... 2. Add optional to AST Arg (optional means it is not a fixed, required argument and needs to be sorted for canonicalization, e.g. reactants(A, B, C) ) Args: bo: bel object Returns: bel object
[ "Recursively", "validate", "function", "signatures" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L46-L88
belbio/bel
bel/lang/semantics.py
check_function_args
def check_function_args(args, signatures, function_name): """Check function args - return message if function args don't match function signature Called from validate_functions We have following types of arguments to validate: 1. Required, position_dependent arguments, e.g. p(HGNC:AKT1), NSArg HGNC:AKT1 is required and must be first argument 2. Optional, position_dependent arguments, e.g. pmod(P, T, 308) - T and 308 are optional and position_dependent 3. Optional, e.g. loc() modifier can only be found once, but anywhere after the position_dependent arguments 4. Multiple, e.g. var(), can have more than one var() modifier in p() function Args: args (Union['Function', 'NSArg', 'StrArg']): AST Function arguments signatures (Mapping[str, Any]): function signatures from spec_dict, may be more than one per function function_name (str): passed in to improve error messaging Returns: Tuple[bool, List[str]]: (function_valid?, list of error messages per signature) """ messages = [] arg_types = [] for arg in args: arg_type = arg.__class__.__name__ if arg_type == "Function": arg_types.append((arg.name, "")) elif arg_type == "NSArg": arg_types.append((arg_type, f"{arg.namespace}:{arg.value}")) elif arg_type == "StrArg": arg_types.append((arg_type, arg.value)) log.debug(f"Arg_types {arg_types}") matched_signature_idx = -1 valid_function = False for sig_argset_idx, sig_argset in enumerate(signatures): sig_req_args = sig_argset["req_args"] # required position_dependent arguments sig_pos_args = sig_argset["pos_args"] # optional position_dependent arguments sig_opt_args = sig_argset["opt_args"] # optional arguments sig_mult_args = sig_argset["mult_args"] # multiple arguments log.debug(f"{sig_argset_idx} Req: {sig_req_args}") log.debug(f"{sig_argset_idx} Pos: {sig_pos_args}") log.debug(f"{sig_argset_idx} Opt: {sig_opt_args}") log.debug(f"{sig_argset_idx} Mult: {sig_mult_args}") # Check required arguments reqs_mismatch_flag = False for sig_idx, sig_req in enumerate(sig_req_args): if len(arg_types) > sig_idx: log.debug( "Req args: arg_type {} vs sig_req {}".format( arg_types[sig_idx][0], sig_req ) ) if arg_types[sig_idx][0] not in sig_req: reqs_mismatch_flag = True msg = f"Missing required arguments for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) break if reqs_mismatch_flag: continue # test next argset # Check position_dependent optional arguments pos_dep_arg_types = arg_types[len(sig_req_args) :] log.debug(f"Optional arg types {pos_dep_arg_types}") log.debug(f"{sig_argset_idx} Pos: {sig_pos_args}") pos_mismatch_flag = False for sig_pos_idx, sig_pos in enumerate(sig_pos_args): if sig_pos_idx == len(pos_dep_arg_types): break # stop checking position dependent arguments when we run out of them vs signature optional position dependent arguments if pos_dep_arg_types[sig_pos_idx][0] not in sig_pos: pos_mismatch_flag = True msg = f"Missing position_dependent arguments for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) break if pos_mismatch_flag: continue # test next argset reqpos_arglen = len(sig_req_args) + len(sig_pos_args) optional_arg_types = arg_types[reqpos_arglen:] # Remove function args that are found in the mult_args signature optional_types = [ (opt_type, opt_val) for opt_type, opt_val in optional_arg_types if opt_type not in sig_mult_args ] log.debug(f"Optional types after sig mult args removed {optional_types}") # Check if any remaining function args are duplicated and therefore not unique opt_args if len(optional_types) != len(set(optional_types)): msg = f"Duplicate optional arguments {optional_types} for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) continue optional_types = [ (opt_type, opt_val) for opt_type, opt_val in optional_types if opt_type not in sig_opt_args ] if len(optional_types) > 0: msg = f"Invalid arguments {optional_types} for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) continue matched_signature_idx = sig_argset_idx messages = [] # reset messages if signature is matched valid_function = True break # Add NSArg and StrArg value types (e.g. Protein, Complex, ec) if matched_signature_idx > -1: # Shouldn't have single optional NSArg arguments - not currently checking for that log.debug( f'AST1, Sigs {signatures[matched_signature_idx]["arguments"]} Args: {args}' ) for arg_idx, arg in enumerate(args): log.debug(f"Arg type {arg.type}") for sig_idx, sig_arg in enumerate( signatures[matched_signature_idx]["arguments"] ): if arg.type == "Function" or sig_arg["type"] in [ "Function", "Modifier", ]: pass # Skip Function arguments elif sig_arg.get("position", None): if sig_arg["position"] == arg_idx + 1: arg.add_value_types(sig_arg["values"]) log.debug(f'AST2 {arg} {sig_arg["values"]}') elif arg.type in ["NSArg", "StrArg", "StrArgNSArg"]: log.debug(f"Arg {arg}") arg.add_value_types(sig_arg["values"]) log.debug(f'AST2 {arg} {sig_arg["values"]}') for arg in args: if arg.__class__.__name__ in ["NSArg", "StrArg"]: log.debug(f"Arg: {arg.to_string()} Value_types: {arg.value_types}") return (valid_function, messages)
python
def check_function_args(args, signatures, function_name): """Check function args - return message if function args don't match function signature Called from validate_functions We have following types of arguments to validate: 1. Required, position_dependent arguments, e.g. p(HGNC:AKT1), NSArg HGNC:AKT1 is required and must be first argument 2. Optional, position_dependent arguments, e.g. pmod(P, T, 308) - T and 308 are optional and position_dependent 3. Optional, e.g. loc() modifier can only be found once, but anywhere after the position_dependent arguments 4. Multiple, e.g. var(), can have more than one var() modifier in p() function Args: args (Union['Function', 'NSArg', 'StrArg']): AST Function arguments signatures (Mapping[str, Any]): function signatures from spec_dict, may be more than one per function function_name (str): passed in to improve error messaging Returns: Tuple[bool, List[str]]: (function_valid?, list of error messages per signature) """ messages = [] arg_types = [] for arg in args: arg_type = arg.__class__.__name__ if arg_type == "Function": arg_types.append((arg.name, "")) elif arg_type == "NSArg": arg_types.append((arg_type, f"{arg.namespace}:{arg.value}")) elif arg_type == "StrArg": arg_types.append((arg_type, arg.value)) log.debug(f"Arg_types {arg_types}") matched_signature_idx = -1 valid_function = False for sig_argset_idx, sig_argset in enumerate(signatures): sig_req_args = sig_argset["req_args"] # required position_dependent arguments sig_pos_args = sig_argset["pos_args"] # optional position_dependent arguments sig_opt_args = sig_argset["opt_args"] # optional arguments sig_mult_args = sig_argset["mult_args"] # multiple arguments log.debug(f"{sig_argset_idx} Req: {sig_req_args}") log.debug(f"{sig_argset_idx} Pos: {sig_pos_args}") log.debug(f"{sig_argset_idx} Opt: {sig_opt_args}") log.debug(f"{sig_argset_idx} Mult: {sig_mult_args}") # Check required arguments reqs_mismatch_flag = False for sig_idx, sig_req in enumerate(sig_req_args): if len(arg_types) > sig_idx: log.debug( "Req args: arg_type {} vs sig_req {}".format( arg_types[sig_idx][0], sig_req ) ) if arg_types[sig_idx][0] not in sig_req: reqs_mismatch_flag = True msg = f"Missing required arguments for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) break if reqs_mismatch_flag: continue # test next argset # Check position_dependent optional arguments pos_dep_arg_types = arg_types[len(sig_req_args) :] log.debug(f"Optional arg types {pos_dep_arg_types}") log.debug(f"{sig_argset_idx} Pos: {sig_pos_args}") pos_mismatch_flag = False for sig_pos_idx, sig_pos in enumerate(sig_pos_args): if sig_pos_idx == len(pos_dep_arg_types): break # stop checking position dependent arguments when we run out of them vs signature optional position dependent arguments if pos_dep_arg_types[sig_pos_idx][0] not in sig_pos: pos_mismatch_flag = True msg = f"Missing position_dependent arguments for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) break if pos_mismatch_flag: continue # test next argset reqpos_arglen = len(sig_req_args) + len(sig_pos_args) optional_arg_types = arg_types[reqpos_arglen:] # Remove function args that are found in the mult_args signature optional_types = [ (opt_type, opt_val) for opt_type, opt_val in optional_arg_types if opt_type not in sig_mult_args ] log.debug(f"Optional types after sig mult args removed {optional_types}") # Check if any remaining function args are duplicated and therefore not unique opt_args if len(optional_types) != len(set(optional_types)): msg = f"Duplicate optional arguments {optional_types} for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) continue optional_types = [ (opt_type, opt_val) for opt_type, opt_val in optional_types if opt_type not in sig_opt_args ] if len(optional_types) > 0: msg = f"Invalid arguments {optional_types} for {function_name} signature: {sig_argset_idx}" messages.append(msg) log.debug(msg) continue matched_signature_idx = sig_argset_idx messages = [] # reset messages if signature is matched valid_function = True break # Add NSArg and StrArg value types (e.g. Protein, Complex, ec) if matched_signature_idx > -1: # Shouldn't have single optional NSArg arguments - not currently checking for that log.debug( f'AST1, Sigs {signatures[matched_signature_idx]["arguments"]} Args: {args}' ) for arg_idx, arg in enumerate(args): log.debug(f"Arg type {arg.type}") for sig_idx, sig_arg in enumerate( signatures[matched_signature_idx]["arguments"] ): if arg.type == "Function" or sig_arg["type"] in [ "Function", "Modifier", ]: pass # Skip Function arguments elif sig_arg.get("position", None): if sig_arg["position"] == arg_idx + 1: arg.add_value_types(sig_arg["values"]) log.debug(f'AST2 {arg} {sig_arg["values"]}') elif arg.type in ["NSArg", "StrArg", "StrArgNSArg"]: log.debug(f"Arg {arg}") arg.add_value_types(sig_arg["values"]) log.debug(f'AST2 {arg} {sig_arg["values"]}') for arg in args: if arg.__class__.__name__ in ["NSArg", "StrArg"]: log.debug(f"Arg: {arg.to_string()} Value_types: {arg.value_types}") return (valid_function, messages)
[ "def", "check_function_args", "(", "args", ",", "signatures", ",", "function_name", ")", ":", "messages", "=", "[", "]", "arg_types", "=", "[", "]", "for", "arg", "in", "args", ":", "arg_type", "=", "arg", ".", "__class__", ".", "__name__", "if", "arg_type", "==", "\"Function\"", ":", "arg_types", ".", "append", "(", "(", "arg", ".", "name", ",", "\"\"", ")", ")", "elif", "arg_type", "==", "\"NSArg\"", ":", "arg_types", ".", "append", "(", "(", "arg_type", ",", "f\"{arg.namespace}:{arg.value}\"", ")", ")", "elif", "arg_type", "==", "\"StrArg\"", ":", "arg_types", ".", "append", "(", "(", "arg_type", ",", "arg", ".", "value", ")", ")", "log", ".", "debug", "(", "f\"Arg_types {arg_types}\"", ")", "matched_signature_idx", "=", "-", "1", "valid_function", "=", "False", "for", "sig_argset_idx", ",", "sig_argset", "in", "enumerate", "(", "signatures", ")", ":", "sig_req_args", "=", "sig_argset", "[", "\"req_args\"", "]", "# required position_dependent arguments", "sig_pos_args", "=", "sig_argset", "[", "\"pos_args\"", "]", "# optional position_dependent arguments", "sig_opt_args", "=", "sig_argset", "[", "\"opt_args\"", "]", "# optional arguments", "sig_mult_args", "=", "sig_argset", "[", "\"mult_args\"", "]", "# multiple arguments", "log", ".", "debug", "(", "f\"{sig_argset_idx} Req: {sig_req_args}\"", ")", "log", ".", "debug", "(", "f\"{sig_argset_idx} Pos: {sig_pos_args}\"", ")", "log", ".", "debug", "(", "f\"{sig_argset_idx} Opt: {sig_opt_args}\"", ")", "log", ".", "debug", "(", "f\"{sig_argset_idx} Mult: {sig_mult_args}\"", ")", "# Check required arguments", "reqs_mismatch_flag", "=", "False", "for", "sig_idx", ",", "sig_req", "in", "enumerate", "(", "sig_req_args", ")", ":", "if", "len", "(", "arg_types", ")", ">", "sig_idx", ":", "log", ".", "debug", "(", "\"Req args: arg_type {} vs sig_req {}\"", ".", "format", "(", "arg_types", "[", "sig_idx", "]", "[", "0", "]", ",", "sig_req", ")", ")", "if", "arg_types", "[", "sig_idx", "]", "[", "0", "]", "not", "in", "sig_req", ":", "reqs_mismatch_flag", "=", "True", "msg", "=", "f\"Missing required arguments for {function_name} signature: {sig_argset_idx}\"", "messages", ".", "append", "(", "msg", ")", "log", ".", "debug", "(", "msg", ")", "break", "if", "reqs_mismatch_flag", ":", "continue", "# test next argset", "# Check position_dependent optional arguments", "pos_dep_arg_types", "=", "arg_types", "[", "len", "(", "sig_req_args", ")", ":", "]", "log", ".", "debug", "(", "f\"Optional arg types {pos_dep_arg_types}\"", ")", "log", ".", "debug", "(", "f\"{sig_argset_idx} Pos: {sig_pos_args}\"", ")", "pos_mismatch_flag", "=", "False", "for", "sig_pos_idx", ",", "sig_pos", "in", "enumerate", "(", "sig_pos_args", ")", ":", "if", "sig_pos_idx", "==", "len", "(", "pos_dep_arg_types", ")", ":", "break", "# stop checking position dependent arguments when we run out of them vs signature optional position dependent arguments", "if", "pos_dep_arg_types", "[", "sig_pos_idx", "]", "[", "0", "]", "not", "in", "sig_pos", ":", "pos_mismatch_flag", "=", "True", "msg", "=", "f\"Missing position_dependent arguments for {function_name} signature: {sig_argset_idx}\"", "messages", ".", "append", "(", "msg", ")", "log", ".", "debug", "(", "msg", ")", "break", "if", "pos_mismatch_flag", ":", "continue", "# test next argset", "reqpos_arglen", "=", "len", "(", "sig_req_args", ")", "+", "len", "(", "sig_pos_args", ")", "optional_arg_types", "=", "arg_types", "[", "reqpos_arglen", ":", "]", "# Remove function args that are found in the mult_args signature", 
"optional_types", "=", "[", "(", "opt_type", ",", "opt_val", ")", "for", "opt_type", ",", "opt_val", "in", "optional_arg_types", "if", "opt_type", "not", "in", "sig_mult_args", "]", "log", ".", "debug", "(", "f\"Optional types after sig mult args removed {optional_types}\"", ")", "# Check if any remaining function args are duplicated and therefore not unique opt_args", "if", "len", "(", "optional_types", ")", "!=", "len", "(", "set", "(", "optional_types", ")", ")", ":", "msg", "=", "f\"Duplicate optional arguments {optional_types} for {function_name} signature: {sig_argset_idx}\"", "messages", ".", "append", "(", "msg", ")", "log", ".", "debug", "(", "msg", ")", "continue", "optional_types", "=", "[", "(", "opt_type", ",", "opt_val", ")", "for", "opt_type", ",", "opt_val", "in", "optional_types", "if", "opt_type", "not", "in", "sig_opt_args", "]", "if", "len", "(", "optional_types", ")", ">", "0", ":", "msg", "=", "f\"Invalid arguments {optional_types} for {function_name} signature: {sig_argset_idx}\"", "messages", ".", "append", "(", "msg", ")", "log", ".", "debug", "(", "msg", ")", "continue", "matched_signature_idx", "=", "sig_argset_idx", "messages", "=", "[", "]", "# reset messages if signature is matched", "valid_function", "=", "True", "break", "# Add NSArg and StrArg value types (e.g. Protein, Complex, ec)", "if", "matched_signature_idx", ">", "-", "1", ":", "# Shouldn't have single optional NSArg arguments - not currently checking for that", "log", ".", "debug", "(", "f'AST1, Sigs {signatures[matched_signature_idx][\"arguments\"]} Args: {args}'", ")", "for", "arg_idx", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "log", ".", "debug", "(", "f\"Arg type {arg.type}\"", ")", "for", "sig_idx", ",", "sig_arg", "in", "enumerate", "(", "signatures", "[", "matched_signature_idx", "]", "[", "\"arguments\"", "]", ")", ":", "if", "arg", ".", "type", "==", "\"Function\"", "or", "sig_arg", "[", "\"type\"", "]", "in", "[", "\"Function\"", ",", "\"Modifier\"", ",", "]", ":", "pass", "# Skip Function arguments", "elif", "sig_arg", ".", "get", "(", "\"position\"", ",", "None", ")", ":", "if", "sig_arg", "[", "\"position\"", "]", "==", "arg_idx", "+", "1", ":", "arg", ".", "add_value_types", "(", "sig_arg", "[", "\"values\"", "]", ")", "log", ".", "debug", "(", "f'AST2 {arg} {sig_arg[\"values\"]}'", ")", "elif", "arg", ".", "type", "in", "[", "\"NSArg\"", ",", "\"StrArg\"", ",", "\"StrArgNSArg\"", "]", ":", "log", ".", "debug", "(", "f\"Arg {arg}\"", ")", "arg", ".", "add_value_types", "(", "sig_arg", "[", "\"values\"", "]", ")", "log", ".", "debug", "(", "f'AST2 {arg} {sig_arg[\"values\"]}'", ")", "for", "arg", "in", "args", ":", "if", "arg", ".", "__class__", ".", "__name__", "in", "[", "\"NSArg\"", ",", "\"StrArg\"", "]", ":", "log", ".", "debug", "(", "f\"Arg: {arg.to_string()} Value_types: {arg.value_types}\"", ")", "return", "(", "valid_function", ",", "messages", ")" ]
Check function args - return message if function args don't match function signature Called from validate_functions We have the following types of arguments to validate: 1. Required, position_dependent arguments, e.g. p(HGNC:AKT1), NSArg HGNC:AKT1 is required and must be the first argument 2. Optional, position_dependent arguments, e.g. pmod(P, T, 308) - T and 308 are optional and position_dependent 3. Optional, e.g. loc() modifier can only be found once, but anywhere after the position_dependent arguments 4. Multiple, e.g. var(), can have more than one var() modifier in p() function Args: args (Union['Function', 'NSArg', 'StrArg']): AST Function arguments signatures (Mapping[str, Any]): function signatures from spec_dict, may be more than one per function function_name (str): passed in to improve error messaging Returns: Tuple[bool, List[str]]: (function_valid?, list of error messages per signature)
[ "Check", "function", "args", "-", "return", "message", "if", "function", "args", "don", "t", "match", "function", "signature" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L91-L236
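A tiny illustration of the duplicate-optional-argument check performed in check_function_args above; the tuples are hypothetical (modifier-name, value) pairs of the kind arg_types holds.

def has_duplicate_optionals(optional_types):
    # Optional (non-multiple) args must be unique; duplicates fail the signature.
    return len(optional_types) != len(set(optional_types))

assert has_duplicate_optionals([("loc", ""), ("loc", "")])
assert not has_duplicate_optionals([("loc", ""), ("fragment", "")])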
belbio/bel
bel/lang/semantics.py
validate_arg_values
def validate_arg_values(ast, bo): """Recursively validate arg (NSArg and StrArg) values Check that NSArgs are found in BELbio API and match appropriate entity_type. Check that StrArgs match their value - either default namespace or regex string Generate a WARNING if not. Args: bo: bel object Returns: bel object """ if not bo.api_url: log.info("No API endpoint defined") return bo log.debug(f"AST: {ast}") # Test NSArg terms if isinstance(ast, NSArg): term_id = "{}:{}".format(ast.namespace, ast.value) value_types = ast.value_types log.debug(f"Value types: {value_types} AST value: {ast.value}") # Default namespaces are defined in the bel_specification file if ast.namespace == "DEFAULT": # may use the DEFAULT namespace or not for value_type in value_types: default_namespace = [ ns["name"] for ns in bo.spec["namespaces"][value_type]["info"] ] + [ ns["abbreviation"] for ns in bo.spec["namespaces"][value_type]["info"] ] if ast.value in default_namespace: log.debug("Default namespace valid term: {}".format(term_id)) break else: # if for loop doesn't hit the break, run this else log.debug("Default namespace invalid term: {}".format(term_id)) bo.validation_messages.append( ("WARNING", f"Default Term: {term_id} not found") ) # Process normal, non-default-namespace terms else: request_url = bo.api_url + "/terms/{}".format( url_path_param_quoting(term_id) ) log.info(f"Validate Arg Values url {request_url}") r = get_url(request_url) if r and r.status_code == 200: result = r.json() # function signature term value_types doesn't match up with API term entity_types log.debug( f'AST.value_types {ast.value_types} Entity types {result.get("entity_types", [])}' ) # Check that entity types match if ( len( set(ast.value_types).intersection( result.get("entity_types", []) ) ) == 0 ): log.debug( "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format( term_id, ast.value_types, result.get("entity_types", []) ) ) bo.validation_messages.append( ( "WARNING", "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format( term_id, ast.value_types, result.get("entity_types", []) ), ) ) if term_id in result.get("obsolete_ids", []): bo.validation_messages.append( ( "WARNING", f'Obsolete term: {term_id} Current term: {result["id"]}', ) ) elif r.status_code == 404: bo.validation_messages.append( ("WARNING", f"Term: {term_id} not found in namespace") ) else: log.error(f"Status {r.status_code} - Bad URL: {request_url}") # Process StrArgs if isinstance(ast, StrArg): log.debug(f" Check String Arg: {ast.value} {ast.value_types}") for value_type in ast.value_types: # Is this a regex to match against if re.match("/", value_type): value_type = re.sub("^/", "", value_type) value_type = re.sub("/$", "", value_type) match = re.match(value_type, ast.value) if match: break if value_type in bo.spec["namespaces"]: default_namespace = [ ns["name"] for ns in bo.spec["namespaces"][value_type]["info"] ] + [ ns["abbreviation"] for ns in bo.spec["namespaces"][value_type]["info"] ] if ast.value in default_namespace: break else: # If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad bo.validation_messages.append( ( "WARNING", f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}", ) ) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: validate_arg_values(arg, bo) return bo
python
def validate_arg_values(ast, bo): """Recursively validate arg (NSArg and StrArg) values Check that NSArgs are found in BELbio API and match appropriate entity_type. Check that StrArgs match their value - either default namespace or regex string Generate a WARNING if not. Args: bo: bel object Returns: bel object """ if not bo.api_url: log.info("No API endpoint defined") return bo log.debug(f"AST: {ast}") # Test NSArg terms if isinstance(ast, NSArg): term_id = "{}:{}".format(ast.namespace, ast.value) value_types = ast.value_types log.debug(f"Value types: {value_types} AST value: {ast.value}") # Default namespaces are defined in the bel_specification file if ast.namespace == "DEFAULT": # may use the DEFAULT namespace or not for value_type in value_types: default_namespace = [ ns["name"] for ns in bo.spec["namespaces"][value_type]["info"] ] + [ ns["abbreviation"] for ns in bo.spec["namespaces"][value_type]["info"] ] if ast.value in default_namespace: log.debug("Default namespace valid term: {}".format(term_id)) break else: # if for loop doesn't hit the break, run this else log.debug("Default namespace invalid term: {}".format(term_id)) bo.validation_messages.append( ("WARNING", f"Default Term: {term_id} not found") ) # Process normal, non-default-namespace terms else: request_url = bo.api_url + "/terms/{}".format( url_path_param_quoting(term_id) ) log.info(f"Validate Arg Values url {request_url}") r = get_url(request_url) if r and r.status_code == 200: result = r.json() # function signature term value_types doesn't match up with API term entity_types log.debug( f'AST.value_types {ast.value_types} Entity types {result.get("entity_types", [])}' ) # Check that entity types match if ( len( set(ast.value_types).intersection( result.get("entity_types", []) ) ) == 0 ): log.debug( "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format( term_id, ast.value_types, result.get("entity_types", []) ) ) bo.validation_messages.append( ( "WARNING", "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format( term_id, ast.value_types, result.get("entity_types", []) ), ) ) if term_id in result.get("obsolete_ids", []): bo.validation_messages.append( ( "WARNING", f'Obsolete term: {term_id} Current term: {result["id"]}', ) ) elif r.status_code == 404: bo.validation_messages.append( ("WARNING", f"Term: {term_id} not found in namespace") ) else: log.error(f"Status {r.status_code} - Bad URL: {request_url}") # Process StrArgs if isinstance(ast, StrArg): log.debug(f" Check String Arg: {ast.value} {ast.value_types}") for value_type in ast.value_types: # Is this a regex to match against if re.match("/", value_type): value_type = re.sub("^/", "", value_type) value_type = re.sub("/$", "", value_type) match = re.match(value_type, ast.value) if match: break if value_type in bo.spec["namespaces"]: default_namespace = [ ns["name"] for ns in bo.spec["namespaces"][value_type]["info"] ] + [ ns["abbreviation"] for ns in bo.spec["namespaces"][value_type]["info"] ] if ast.value in default_namespace: break else: # If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad bo.validation_messages.append( ( "WARNING", f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}", ) ) # Recursively process every NSArg by processing BELAst and Functions if hasattr(ast, "args"): for arg in ast.args: validate_arg_values(arg, bo) return bo
[ "def", "validate_arg_values", "(", "ast", ",", "bo", ")", ":", "if", "not", "bo", ".", "api_url", ":", "log", ".", "info", "(", "\"No API endpoint defined\"", ")", "return", "bo", "log", ".", "debug", "(", "f\"AST: {ast}\"", ")", "# Test NSArg terms", "if", "isinstance", "(", "ast", ",", "NSArg", ")", ":", "term_id", "=", "\"{}:{}\"", ".", "format", "(", "ast", ".", "namespace", ",", "ast", ".", "value", ")", "value_types", "=", "ast", ".", "value_types", "log", ".", "debug", "(", "f\"Value types: {value_types} AST value: {ast.value}\"", ")", "# Default namespaces are defined in the bel_specification file", "if", "ast", ".", "namespace", "==", "\"DEFAULT\"", ":", "# may use the DEFAULT namespace or not", "for", "value_type", "in", "value_types", ":", "default_namespace", "=", "[", "ns", "[", "\"name\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "+", "[", "ns", "[", "\"abbreviation\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "if", "ast", ".", "value", "in", "default_namespace", ":", "log", ".", "debug", "(", "\"Default namespace valid term: {}\"", ".", "format", "(", "term_id", ")", ")", "break", "else", ":", "# if for loop doesn't hit the break, run this else", "log", ".", "debug", "(", "\"Default namespace invalid term: {}\"", ".", "format", "(", "term_id", ")", ")", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f\"Default Term: {term_id} not found\"", ")", ")", "# Process normal, non-default-namespace terms", "else", ":", "request_url", "=", "bo", ".", "api_url", "+", "\"/terms/{}\"", ".", "format", "(", "url_path_param_quoting", "(", "term_id", ")", ")", "log", ".", "info", "(", "f\"Validate Arg Values url {request_url}\"", ")", "r", "=", "get_url", "(", "request_url", ")", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "result", "=", "r", ".", "json", "(", ")", "# function signature term value_types doesn't match up with API term entity_types", "log", ".", "debug", "(", "f'AST.value_types {ast.value_types} Entity types {result.get(\"entity_types\", [])}'", ")", "# Check that entity types match", "if", "(", "len", "(", "set", "(", "ast", ".", "value_types", ")", ".", "intersection", "(", "result", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ")", "==", "0", ")", ":", "log", ".", "debug", "(", "\"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}\"", ".", "format", "(", "term_id", ",", "ast", ".", "value_types", ",", "result", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ")", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "\"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}\"", ".", "format", "(", "term_id", ",", "ast", ".", "value_types", ",", "result", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ",", ")", ")", "if", "term_id", "in", "result", ".", "get", "(", "\"obsolete_ids\"", ",", "[", "]", ")", ":", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f'Obsolete term: {term_id} Current term: {result[\"id\"]}'", ",", ")", ")", "elif", "r", ".", "status_code", "==", "404", ":", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f\"Term: {term_id} not found in namespace\"", ")", ")", "else", ":", "log", ".", "error", "(", "f\"Status 
{r.status_code} - Bad URL: {request_url}\"", ")", "# Process StrArgs", "if", "isinstance", "(", "ast", ",", "StrArg", ")", ":", "log", ".", "debug", "(", "f\" Check String Arg: {ast.value} {ast.value_types}\"", ")", "for", "value_type", "in", "ast", ".", "value_types", ":", "# Is this a regex to match against", "if", "re", ".", "match", "(", "\"/\"", ",", "value_type", ")", ":", "value_type", "=", "re", ".", "sub", "(", "\"^/\"", ",", "\"\"", ",", "value_type", ")", "value_type", "=", "re", ".", "sub", "(", "\"/$\"", ",", "\"\"", ",", "value_type", ")", "match", "=", "re", ".", "match", "(", "value_type", ",", "ast", ".", "value", ")", "if", "match", ":", "break", "if", "value_type", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", ":", "default_namespace", "=", "[", "ns", "[", "\"name\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "+", "[", "ns", "[", "\"abbreviation\"", "]", "for", "ns", "in", "bo", ".", "spec", "[", "\"namespaces\"", "]", "[", "value_type", "]", "[", "\"info\"", "]", "]", "if", "ast", ".", "value", "in", "default_namespace", ":", "break", "else", ":", "# If for loop doesn't hit the break, no matches found, therefore for StrArg value is bad", "bo", ".", "validation_messages", ".", "append", "(", "(", "\"WARNING\"", ",", "f\"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}\"", ",", ")", ")", "# Recursively process every NSArg by processing BELAst and Functions", "if", "hasattr", "(", "ast", ",", "\"args\"", ")", ":", "for", "arg", "in", "ast", ".", "args", ":", "validate_arg_values", "(", "arg", ",", "bo", ")", "return", "bo" ]
Recursively validate arg (NSArg and StrArg) values. Check that NSArgs are found in the BELbio API and match the appropriate entity_type. Check that StrArgs match their value - either a default namespace value or a regex pattern. Generate a WARNING if not. Args: bo: bel object Returns: bel object
[ "Recursively", "validate", "arg", "(", "NSArg", "and", "StrArg", ")", "values" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/semantics.py#L239-L370
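A minimal, self-contained sketch of the recursive-descent pattern the function above relies on, where any node exposing an args attribute is walked depth-first; the Node class here is hypothetical and stands in for the BELAst/Function/NSArg objects of the source:

# Hedged sketch: recursive walk over a toy AST (Node is a made-up stand-in class).
class Node:
    def __init__(self, name, args=None):
        self.name = name
        self.args = args or []

def walk(ast, visit):
    visit(ast)
    # Recurse into any node that carries child arguments, as validate_arg_values does.
    if hasattr(ast, "args"):
        for arg in ast.args:
            walk(arg, visit)

tree = Node("p", [Node("HGNC:AKT1"), Node("pmod", [Node("Ph")])])
walk(tree, lambda n: print(n.name))  # prints: p, HGNC:AKT1, pmod, Ph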
belbio/bel
bel/Config.py
get_belbio_conf_files
def get_belbio_conf_files(): """Get belbio configuration from files """ home = os.path.expanduser("~") cwd = os.getcwd() belbio_conf_fp, belbio_secrets_fp = "", "" env_conf_dir = os.getenv("BELBIO_CONF", "").rstrip("/") conf_paths = [ f"{cwd}/belbio_conf.yaml", f"{cwd}/belbio_conf.yml", f"{env_conf_dir}/belbio_conf.yaml", f"{env_conf_dir}/belbio_conf.yml", f"{home}/.belbio/conf", ] secret_paths = [ f"{cwd}/belbio_secrets.yaml", f"{cwd}/belbio_secrets.yml", f"{env_conf_dir}/belbio_secrets.yaml", f"{env_conf_dir}/belbio_secrets.yml", f"{home}/.belbio/secrets", ] for fn in conf_paths: if os.path.exists(fn): belbio_conf_fp = fn break else: log.error( "No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)" ) for fn in secret_paths: if os.path.exists(fn): belbio_secrets_fp = fn break return (belbio_conf_fp, belbio_secrets_fp)
python
def get_belbio_conf_files(): """Get belbio configuration from files """ home = os.path.expanduser("~") cwd = os.getcwd() belbio_conf_fp, belbio_secrets_fp = "", "" env_conf_dir = os.getenv("BELBIO_CONF", "").rstrip("/") conf_paths = [ f"{cwd}/belbio_conf.yaml", f"{cwd}/belbio_conf.yml", f"{env_conf_dir}/belbio_conf.yaml", f"{env_conf_dir}/belbio_conf.yml", f"{home}/.belbio/conf", ] secret_paths = [ f"{cwd}/belbio_secrets.yaml", f"{cwd}/belbio_secrets.yml", f"{env_conf_dir}/belbio_secrets.yaml", f"{env_conf_dir}/belbio_secrets.yml", f"{home}/.belbio/secrets", ] for fn in conf_paths: if os.path.exists(fn): belbio_conf_fp = fn break else: log.error( "No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)" ) for fn in secret_paths: if os.path.exists(fn): belbio_secrets_fp = fn break return (belbio_conf_fp, belbio_secrets_fp)
[ "def", "get_belbio_conf_files", "(", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "cwd", "=", "os", ".", "getcwd", "(", ")", "belbio_conf_fp", ",", "belbio_secrets_fp", "=", "\"\"", ",", "\"\"", "env_conf_dir", "=", "os", ".", "getenv", "(", "\"BELBIO_CONF\"", ",", "\"\"", ")", ".", "rstrip", "(", "\"/\"", ")", "conf_paths", "=", "[", "f\"{cwd}/belbio_conf.yaml\"", ",", "f\"{cwd}/belbio_conf.yml\"", ",", "f\"{env_conf_dir}/belbio_conf.yaml\"", ",", "f\"{env_conf_dir}/belbio_conf.yml\"", ",", "f\"{home}/.belbio/conf\"", ",", "]", "secret_paths", "=", "[", "f\"{cwd}/belbio_secrets.yaml\"", ",", "f\"{cwd}/belbio_secrets.yml\"", ",", "f\"{env_conf_dir}/belbio_secrets.yaml\"", ",", "f\"{env_conf_dir}/belbio_secrets.yml\"", ",", "f\"{home}/.belbio/secrets\"", ",", "]", "for", "fn", "in", "conf_paths", ":", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "belbio_conf_fp", "=", "fn", "break", "else", ":", "log", ".", "error", "(", "\"No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)\"", ")", "for", "fn", "in", "secret_paths", ":", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "belbio_secrets_fp", "=", "fn", "break", "return", "(", "belbio_conf_fp", ",", "belbio_secrets_fp", ")" ]
Get belbio configuration from files
[ "Get", "belbio", "configuration", "from", "files" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L26-L66
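A minimal sketch of the candidate-path search idiom used above, built on Python's for/else: the else branch runs only when the loop never hits break, i.e. no path existed. Path names here are illustrative:

import os

def find_first(paths):
    found = ""
    for fn in paths:
        if os.path.exists(fn):
            found = fn
            break
    else:
        # Runs only if no candidate matched (loop completed without break).
        print("no configuration file found")
    return found

print(find_first(["./belbio_conf.yaml", os.path.expanduser("~/.belbio/conf")]))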
belbio/bel
bel/Config.py
load_configuration
def load_configuration(): """Load the configuration""" (belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files() log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ") config = {} if belbio_conf_fp: with open(belbio_conf_fp, "r") as f: config = yaml.load(f, Loader=yaml.SafeLoader) config["source_files"] = {} config["source_files"]["conf"] = belbio_conf_fp if belbio_secrets_fp: with open(belbio_secrets_fp, "r") as f: secrets = yaml.load(f, Loader=yaml.SafeLoader) config["secrets"] = copy.deepcopy(secrets) if "source_files" in config: config["source_files"]["secrets"] = belbio_secrets_fp get_versions(config) # TODO - needs to be completed # add_environment_vars(config) return config
python
def load_configuration(): """Load the configuration""" (belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files() log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ") config = {} if belbio_conf_fp: with open(belbio_conf_fp, "r") as f: config = yaml.load(f, Loader=yaml.SafeLoader) config["source_files"] = {} config["source_files"]["conf"] = belbio_conf_fp if belbio_secrets_fp: with open(belbio_secrets_fp, "r") as f: secrets = yaml.load(f, Loader=yaml.SafeLoader) config["secrets"] = copy.deepcopy(secrets) if "source_files" in config: config["source_files"]["secrets"] = belbio_secrets_fp get_versions(config) # TODO - needs to be completed # add_environment_vars(config) return config
[ "def", "load_configuration", "(", ")", ":", "(", "belbio_conf_fp", ",", "belbio_secrets_fp", ")", "=", "get_belbio_conf_files", "(", ")", "log", ".", "info", "(", "f\"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} \"", ")", "config", "=", "{", "}", "if", "belbio_conf_fp", ":", "with", "open", "(", "belbio_conf_fp", ",", "\"r\"", ")", "as", "f", ":", "config", "=", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")", "config", "[", "\"source_files\"", "]", "=", "{", "}", "config", "[", "\"source_files\"", "]", "[", "\"conf\"", "]", "=", "belbio_conf_fp", "if", "belbio_secrets_fp", ":", "with", "open", "(", "belbio_secrets_fp", ",", "\"r\"", ")", "as", "f", ":", "secrets", "=", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")", "config", "[", "\"secrets\"", "]", "=", "copy", ".", "deepcopy", "(", "secrets", ")", "if", "\"source_files\"", "in", "config", ":", "config", "[", "\"source_files\"", "]", "[", "\"secrets\"", "]", "=", "belbio_secrets_fp", "get_versions", "(", "config", ")", "# TODO - needs to be completed", "# add_environment_vars(config)", "return", "config" ]
Load the configuration
[ "Load", "the", "configuration" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L69-L94
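A minimal sketch of the YAML loading step, assuming PyYAML is installed; SafeLoader is used as in the source because it blocks construction of arbitrary Python objects. The inline YAML string is illustrative:

import yaml

raw = "bel_api:\n  servers:\n    api_url: http://example.org\n"
config = yaml.load(raw, Loader=yaml.SafeLoader)  # SafeLoader parses plain data only
print(config["bel_api"]["servers"]["api_url"])   # http://example.org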
belbio/bel
bel/Config.py
get_versions
def get_versions(config) -> dict: """Get versions of bel modules and tools""" # Collect bel package version try: import bel.__version__ config["bel"]["version"] = bel.__version__.__version__ except KeyError: config["bel"] = {"version": bel.__version__.__version__} except ModuleNotFoundError: pass # Collect bel_resources version try: import tools.__version__ config["bel_resources"]["version"] = tools.__version__.__version__ except KeyError: config["bel_resources"] = {"version": tools.__version__.__version__} except ModuleNotFoundError: pass # Collect bel_api version try: import __version__ if __version__.__name__ == "BELBIO API": config["bel_api"]["version"] = __version__.__version__ except KeyError: if __version__.__name__ == "BELBIO API": config["bel_api"] = {"version": __version__.__version__} except ModuleNotFoundError: pass
python
def get_versions(config) -> dict: """Get versions of bel modules and tools""" # Collect bel package version try: import bel.__version__ config["bel"]["version"] = bel.__version__.__version__ except KeyError: config["bel"] = {"version": bel.__version__.__version__} except ModuleNotFoundError: pass # Collect bel_resources version try: import tools.__version__ config["bel_resources"]["version"] = tools.__version__.__version__ except KeyError: config["bel_resources"] = {"version": tools.__version__.__version__} except ModuleNotFoundError: pass # Collect bel_api version try: import __version__ if __version__.__name__ == "BELBIO API": config["bel_api"]["version"] = __version__.__version__ except KeyError: if __version__.__name__ == "BELBIO API": config["bel_api"] = {"version": __version__.__version__} except ModuleNotFoundError: pass
[ "def", "get_versions", "(", "config", ")", "->", "dict", ":", "# Collect bel package version", "try", ":", "import", "bel", ".", "__version__", "config", "[", "\"bel\"", "]", "[", "\"version\"", "]", "=", "bel", ".", "__version__", ".", "__version__", "except", "KeyError", ":", "config", "[", "\"bel\"", "]", "=", "{", "\"version\"", ":", "bel", ".", "__version__", ".", "__version__", "}", "except", "ModuleNotFoundError", ":", "pass", "# Collect bel_resources version", "try", ":", "import", "tools", ".", "__version__", "config", "[", "\"bel_resources\"", "]", "[", "\"version\"", "]", "=", "tools", ".", "__version__", ".", "__version__", "except", "KeyError", ":", "config", "[", "\"bel_resources\"", "]", "=", "{", "\"version\"", ":", "tools", ".", "__version__", ".", "__version__", "}", "except", "ModuleNotFoundError", ":", "pass", "# Collect bel_api version", "try", ":", "import", "__version__", "if", "__version__", ".", "__name__", "==", "\"BELBIO API\"", ":", "config", "[", "\"bel_api\"", "]", "[", "\"version\"", "]", "=", "__version__", ".", "__version__", "except", "KeyError", ":", "if", "__version__", ".", "__name__", "==", "\"BELBIO API\"", ":", "config", "[", "\"bel_api\"", "]", "=", "{", "\"version\"", ":", "__version__", ".", "__version__", "}", "except", "ModuleNotFoundError", ":", "pass" ]
Get versions of bel modules and tools
[ "Get", "versions", "of", "bel", "modules", "and", "tools" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L97-L130
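A sketch of the double-except idiom above, which tolerates both a missing parent key in the config dict (KeyError) and an uninstalled package (ModuleNotFoundError):

config = {}
try:
    import bel.__version__
    config["bel"]["version"] = bel.__version__.__version__
except KeyError:
    # Parent entry absent: create it together with the version in one step.
    config["bel"] = {"version": bel.__version__.__version__}
except ModuleNotFoundError:
    pass  # package not installed; leave config untouched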
belbio/bel
bel/Config.py
add_environment_vars
def add_environment_vars(config: MutableMapping[str, Any]): """Override config with environment variables Environment variables have to be prefixed with BELBIO_ which will be stripped before splitting on '__' and lower-casing the environment variable name that is left into keys for the config dictionary. Example: BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio 1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL 2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url 3. bel_api__servers__api_url ==> [bel_api, servers, api_url] 4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio """ # TODO need to redo config - can't add value to dictionary without recursively building up the dict # check into config libraries again for e in os.environ: if re.match("BELBIO_", e): val = os.environ.get(e) if val: e.replace("BELBIO_", "") env_keys = e.lower().split("__") if len(env_keys) > 1: joined = '"]["'.join(env_keys) eval_config = f'config["{joined}"] = val' try: eval(eval_config) except Exception as exc: log.warn("Cannot process {e} into config") else: config[env_keys[0]] = val
python
def add_environment_vars(config: MutableMapping[str, Any]): """Override config with environment variables Environment variables have to be prefixed with BELBIO_ which will be stripped before splitting on '__' and lower-casing the environment variable name that is left into keys for the config dictionary. Example: BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio 1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL 2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url 3. bel_api__servers__api_url ==> [bel_api, servers, api_url] 4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio """ # TODO need to redo config - can't add value to dictionary without recursively building up the dict # check into config libraries again for e in os.environ: if re.match("BELBIO_", e): val = os.environ.get(e) if val: e.replace("BELBIO_", "") env_keys = e.lower().split("__") if len(env_keys) > 1: joined = '"]["'.join(env_keys) eval_config = f'config["{joined}"] = val' try: eval(eval_config) except Exception as exc: log.warn("Cannot process {e} into config") else: config[env_keys[0]] = val
[ "def", "add_environment_vars", "(", "config", ":", "MutableMapping", "[", "str", ",", "Any", "]", ")", ":", "# TODO need to redo config - can't add value to dictionary without recursively building up the dict", "# check into config libraries again", "for", "e", "in", "os", ".", "environ", ":", "if", "re", ".", "match", "(", "\"BELBIO_\"", ",", "e", ")", ":", "val", "=", "os", ".", "environ", ".", "get", "(", "e", ")", "if", "val", ":", "e", ".", "replace", "(", "\"BELBIO_\"", ",", "\"\"", ")", "env_keys", "=", "e", ".", "lower", "(", ")", ".", "split", "(", "\"__\"", ")", "if", "len", "(", "env_keys", ")", ">", "1", ":", "joined", "=", "'\"][\"'", ".", "join", "(", "env_keys", ")", "eval_config", "=", "f'config[\"{joined}\"] = val'", "try", ":", "eval", "(", "eval_config", ")", "except", "Exception", "as", "exc", ":", "log", ".", "warn", "(", "\"Cannot process {e} into config\"", ")", "else", ":", "config", "[", "env_keys", "[", "0", "]", "]", "=", "val" ]
Override config with environment variables Environment variables have to be prefixed with BELBIO_ which will be stripped before splitting on '__' and lower-casing the environment variable name that is left into keys for the config dictionary. Example: BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio 1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL 2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url 3. bel_api__servers__api_url ==> [bel_api, servers, api_url] 4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio
[ "Override", "config", "with", "environment", "variables" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L134-L168
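The eval-based assignment above is flagged TODO in the source, and eval() cannot execute an assignment statement in any case (it evaluates expressions only, so that string would raise a SyntaxError). A minimal sketch of the same nested-override idea that descends the dict explicitly instead; the prefix and key names are illustrative:

import os

def env_overrides(config, prefix="BELBIO_"):
    for name, val in os.environ.items():
        if not name.startswith(prefix) or not val:
            continue
        keys = name[len(prefix):].lower().split("__")
        node = config
        for key in keys[:-1]:
            node = node.setdefault(key, {})  # create intermediate dicts as needed
        node[keys[-1]] = val
    return config

# BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio
#   -> config["bel_api"]["servers"]["api_url"] = "http://api.bel.bio"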
belbio/bel
bel/Config.py
merge_config
def merge_config( config: Mapping[str, Any], override_config: Mapping[str, Any] = None, override_config_fn: str = None, ) -> Mapping[str, Any]: """Override config with additional configuration in override_config or override_config_fn Used in script to merge CLI options with Config Args: config: original configuration override_config: new configuration to override/extend current config override_config_fn: new configuration filename as YAML file """ if override_config_fn: with open(override_config_fn, "r") as f: override_config = yaml.load(f, Loader=yaml.SafeLoader) if not override_config: log.info("Missing override_config") return functools.reduce(rec_merge, (config, override_config))
python
def merge_config( config: Mapping[str, Any], override_config: Mapping[str, Any] = None, override_config_fn: str = None, ) -> Mapping[str, Any]: """Override config with additional configuration in override_config or override_config_fn Used in script to merge CLI options with Config Args: config: original configuration override_config: new configuration to override/extend current config override_config_fn: new configuration filename as YAML file """ if override_config_fn: with open(override_config_fn, "r") as f: override_config = yaml.load(f, Loader=yaml.SafeLoader) if not override_config: log.info("Missing override_config") return functools.reduce(rec_merge, (config, override_config))
[ "def", "merge_config", "(", "config", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "override_config", ":", "Mapping", "[", "str", ",", "Any", "]", "=", "None", ",", "override_config_fn", ":", "str", "=", "None", ",", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "if", "override_config_fn", ":", "with", "open", "(", "override_config_fn", ",", "\"r\"", ")", "as", "f", ":", "override_config", "=", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "yaml", ".", "SafeLoader", ")", "if", "not", "override_config", ":", "log", ".", "info", "(", "\"Missing override_config\"", ")", "return", "functools", ".", "reduce", "(", "rec_merge", ",", "(", "config", ",", "override_config", ")", ")" ]
Override config with additional configuration in override_config or override_config_fn Used in script to merge CLI options with Config Args: config: original configuration override_config: new configuration to override/extend current config override_config_fn: new configuration filename as YAML file
[ "Override", "config", "with", "additional", "configuration", "in", "override_config", "or", "override_config_fn" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L171-L193
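A sketch of the reduce-over-merge idiom merge_config relies on, with a simplified local deep_merge standing in for rec_merge:

import functools

def deep_merge(d1, d2):
    out = dict(d1)
    for k, v in d2.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = deep_merge(out[k], v)  # merge nested dicts recursively
        else:
            out[k] = v  # second mapping's leaf wins
    return out

config = functools.reduce(deep_merge, ({"a": {"x": 1}}, {"a": {"y": 2}}, {"b": 3}))
print(config)  # {'a': {'x': 1, 'y': 2}, 'b': 3}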
belbio/bel
bel/Config.py
rec_merge
def rec_merge(d1, d2): """ Recursively merge two dictionaries Update two dicts of dicts recursively, if either mapping has leaves that are non-dicts, the second's leaf overwrites the first's. import collections import functools e.g. functools.reduce(rec_merge, (d1, d2, d3, d4)) """ for k, v in d1.items(): if k in d2: # this next check is the only difference! if all(isinstance(e, collections.MutableMapping) for e in (v, d2[k])): d2[k] = rec_merge(v, d2[k]) # we could further check types and merge as appropriate here. d3 = d1.copy() d3.update(d2) return d3
python
def rec_merge(d1, d2): """ Recursively merge two dictionaries Update two dicts of dicts recursively, if either mapping has leaves that are non-dicts, the second's leaf overwrites the first's. import collections import functools e.g. functools.reduce(rec_merge, (d1, d2, d3, d4)) """ for k, v in d1.items(): if k in d2: # this next check is the only difference! if all(isinstance(e, collections.MutableMapping) for e in (v, d2[k])): d2[k] = rec_merge(v, d2[k]) # we could further check types and merge as appropriate here. d3 = d1.copy() d3.update(d2) return d3
[ "def", "rec_merge", "(", "d1", ",", "d2", ")", ":", "for", "k", ",", "v", "in", "d1", ".", "items", "(", ")", ":", "if", "k", "in", "d2", ":", "# this next check is the only difference!", "if", "all", "(", "isinstance", "(", "e", ",", "collections", ".", "MutableMapping", ")", "for", "e", "in", "(", "v", ",", "d2", "[", "k", "]", ")", ")", ":", "d2", "[", "k", "]", "=", "rec_merge", "(", "v", ",", "d2", "[", "k", "]", ")", "# we could further check types and merge as appropriate here.", "d3", "=", "d1", ".", "copy", "(", ")", "d3", ".", "update", "(", "d2", ")", "return", "d3" ]
Recursively merge two dictionaries Update two dicts of dicts recursively, if either mapping has leaves that are non-dicts, the second's leaf overwrites the first's. import collections import functools e.g. functools.reduce(rec_merge, (d1, d2, d3, d4))
[ "Recursively", "merge", "two", "dictionaries" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/Config.py#L197-L218
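A usage sketch of rec_merge; note that the bare collections.MutableMapping alias used above was removed in Python 3.10, so a current runtime needs the collections.abc import shown here:

import collections.abc
import functools

def rec_merge(d1, d2):
    for k, v in d1.items():
        if k in d2 and all(isinstance(e, collections.abc.MutableMapping) for e in (v, d2[k])):
            d2[k] = rec_merge(v, d2[k])  # both sides are dicts: merge recursively
    d3 = d1.copy()
    d3.update(d2)  # non-dict leaves from d2 overwrite d1
    return d3

print(functools.reduce(rec_merge, ({"a": {"x": 1}}, {"a": {"y": 2}})))  # {'a': {'x': 1, 'y': 2}}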
belbio/bel
bel/resources/namespace.py
load_terms
def load_terms(fo: IO, metadata: dict, forceupdate: bool): """Load terms into Elasticsearch and ArangoDB Forceupdate will create a new index in Elasticsearch regardless of whether an index with the resource version already exists. Args: fo: file obj - terminology file metadata: dict containing the metadata for terminology forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches """ version = metadata["metadata"]["version"] # LOAD TERMS INTO Elasticsearch with timy.Timer("Load Terms") as timer: es = bel.db.elasticsearch.get_client() es_version = version.replace("T", "").replace("-", "").replace(":", "") index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}" index_name = f"{index_prefix}_{es_version}" # Create index with mapping if not elasticsearch.index_exists(es, index_name): elasticsearch.create_terms_index(es, index_name) elif forceupdate: # force an update to the index index_name += "_alt" elasticsearch.create_terms_index(es, index_name) else: return # Skip loading if not forced and not a new namespace terms_iterator = terms_iterator_for_elasticsearch(fo, index_name) elasticsearch.bulk_load_docs(es, terms_iterator) # Remove old namespace index index_names = elasticsearch.get_all_index_names(es) for name in index_names: if name != index_name and index_prefix in name: elasticsearch.delete_index(es, name) # Add terms_alias to this index elasticsearch.add_index_alias(es, index_name, terms_alias) log.info( "Load namespace terms", elapsed=timer.elapsed, namespace=metadata["metadata"]["namespace"], ) # LOAD EQUIVALENCES INTO ArangoDB with timy.Timer("Load Term Equivalences") as timer: arango_client = arangodb.get_client() belns_db = arangodb.get_belns_handle(arango_client) arangodb.batch_load_docs( belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate="update" ) log.info( "Loaded namespace equivalences", elapsed=timer.elapsed, namespace=metadata["metadata"]["namespace"], ) # Clean up old entries remove_old_equivalence_edges = f""" FOR edge in equivalence_edges FILTER edge.source == "{metadata["metadata"]["namespace"]}" FILTER edge.version != "{version}" REMOVE edge IN equivalence_edges """ remove_old_equivalence_nodes = f""" FOR node in equivalence_nodes FILTER node.source == "{metadata["metadata"]["namespace"]}" FILTER node.version != "{version}" REMOVE node IN equivalence_nodes """ arangodb.aql_query(belns_db, remove_old_equivalence_edges) arangodb.aql_query(belns_db, remove_old_equivalence_nodes) # Add metadata to resource metadata collection metadata["_key"] = f"Namespace_{metadata['metadata']['namespace']}" try: belns_db.collection(arangodb.belns_metadata_name).insert(metadata) except ArangoError as ae: belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
python
def load_terms(fo: IO, metadata: dict, forceupdate: bool): """Load terms into Elasticsearch and ArangoDB Forceupdate will create a new index in Elasticsearch regardless of whether an index with the resource version already exists. Args: fo: file obj - terminology file metadata: dict containing the metadata for terminology forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches """ version = metadata["metadata"]["version"] # LOAD TERMS INTO Elasticsearch with timy.Timer("Load Terms") as timer: es = bel.db.elasticsearch.get_client() es_version = version.replace("T", "").replace("-", "").replace(":", "") index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}" index_name = f"{index_prefix}_{es_version}" # Create index with mapping if not elasticsearch.index_exists(es, index_name): elasticsearch.create_terms_index(es, index_name) elif forceupdate: # force an update to the index index_name += "_alt" elasticsearch.create_terms_index(es, index_name) else: return # Skip loading if not forced and not a new namespace terms_iterator = terms_iterator_for_elasticsearch(fo, index_name) elasticsearch.bulk_load_docs(es, terms_iterator) # Remove old namespace index index_names = elasticsearch.get_all_index_names(es) for name in index_names: if name != index_name and index_prefix in name: elasticsearch.delete_index(es, name) # Add terms_alias to this index elasticsearch.add_index_alias(es, index_name, terms_alias) log.info( "Load namespace terms", elapsed=timer.elapsed, namespace=metadata["metadata"]["namespace"], ) # LOAD EQUIVALENCES INTO ArangoDB with timy.Timer("Load Term Equivalences") as timer: arango_client = arangodb.get_client() belns_db = arangodb.get_belns_handle(arango_client) arangodb.batch_load_docs( belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate="update" ) log.info( "Loaded namespace equivalences", elapsed=timer.elapsed, namespace=metadata["metadata"]["namespace"], ) # Clean up old entries remove_old_equivalence_edges = f""" FOR edge in equivalence_edges FILTER edge.source == "{metadata["metadata"]["namespace"]}" FILTER edge.version != "{version}" REMOVE edge IN equivalence_edges """ remove_old_equivalence_nodes = f""" FOR node in equivalence_nodes FILTER node.source == "{metadata["metadata"]["namespace"]}" FILTER node.version != "{version}" REMOVE node IN equivalence_nodes """ arangodb.aql_query(belns_db, remove_old_equivalence_edges) arangodb.aql_query(belns_db, remove_old_equivalence_nodes) # Add metadata to resource metadata collection metadata["_key"] = f"Namespace_{metadata['metadata']['namespace']}" try: belns_db.collection(arangodb.belns_metadata_name).insert(metadata) except ArangoError as ae: belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
[ "def", "load_terms", "(", "fo", ":", "IO", ",", "metadata", ":", "dict", ",", "forceupdate", ":", "bool", ")", ":", "version", "=", "metadata", "[", "\"metadata\"", "]", "[", "\"version\"", "]", "# LOAD TERMS INTO Elasticsearch", "with", "timy", ".", "Timer", "(", "\"Load Terms\"", ")", "as", "timer", ":", "es", "=", "bel", ".", "db", ".", "elasticsearch", ".", "get_client", "(", ")", "es_version", "=", "version", ".", "replace", "(", "\"T\"", ",", "\"\"", ")", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "replace", "(", "\":\"", ",", "\"\"", ")", "index_prefix", "=", "f\"terms_{metadata['metadata']['namespace'].lower()}\"", "index_name", "=", "f\"{index_prefix}_{es_version}\"", "# Create index with mapping", "if", "not", "elasticsearch", ".", "index_exists", "(", "es", ",", "index_name", ")", ":", "elasticsearch", ".", "create_terms_index", "(", "es", ",", "index_name", ")", "elif", "forceupdate", ":", "# force an update to the index", "index_name", "+=", "\"_alt\"", "elasticsearch", ".", "create_terms_index", "(", "es", ",", "index_name", ")", "else", ":", "return", "# Skip loading if not forced and not a new namespace", "terms_iterator", "=", "terms_iterator_for_elasticsearch", "(", "fo", ",", "index_name", ")", "elasticsearch", ".", "bulk_load_docs", "(", "es", ",", "terms_iterator", ")", "# Remove old namespace index", "index_names", "=", "elasticsearch", ".", "get_all_index_names", "(", "es", ")", "for", "name", "in", "index_names", ":", "if", "name", "!=", "index_name", "and", "index_prefix", "in", "name", ":", "elasticsearch", ".", "delete_index", "(", "es", ",", "name", ")", "# Add terms_alias to this index", "elasticsearch", ".", "add_index_alias", "(", "es", ",", "index_name", ",", "terms_alias", ")", "log", ".", "info", "(", "\"Load namespace terms\"", ",", "elapsed", "=", "timer", ".", "elapsed", ",", "namespace", "=", "metadata", "[", "\"metadata\"", "]", "[", "\"namespace\"", "]", ",", ")", "# LOAD EQUIVALENCES INTO ArangoDB", "with", "timy", ".", "Timer", "(", "\"Load Term Equivalences\"", ")", "as", "timer", ":", "arango_client", "=", "arangodb", ".", "get_client", "(", ")", "belns_db", "=", "arangodb", ".", "get_belns_handle", "(", "arango_client", ")", "arangodb", ".", "batch_load_docs", "(", "belns_db", ",", "terms_iterator_for_arangodb", "(", "fo", ",", "version", ")", ",", "on_duplicate", "=", "\"update\"", ")", "log", ".", "info", "(", "\"Loaded namespace equivalences\"", ",", "elapsed", "=", "timer", ".", "elapsed", ",", "namespace", "=", "metadata", "[", "\"metadata\"", "]", "[", "\"namespace\"", "]", ",", ")", "# Clean up old entries", "remove_old_equivalence_edges", "=", "f\"\"\"\n FOR edge in equivalence_edges\n FILTER edge.source == \"{metadata[\"metadata\"][\"namespace\"]}\"\n FILTER edge.version != \"{version}\"\n REMOVE edge IN equivalence_edges\n \"\"\"", "remove_old_equivalence_nodes", "=", "f\"\"\"\n FOR node in equivalence_nodes\n FILTER node.source == \"{metadata[\"metadata\"][\"namespace\"]}\"\n FILTER node.version != \"{version}\"\n REMOVE node IN equivalence_nodes\n \"\"\"", "arangodb", ".", "aql_query", "(", "belns_db", ",", "remove_old_equivalence_edges", ")", "arangodb", ".", "aql_query", "(", "belns_db", ",", "remove_old_equivalence_nodes", ")", "# Add metadata to resource metadata collection", "metadata", "[", "\"_key\"", "]", "=", "f\"Namespace_{metadata['metadata']['namespace']}\"", "try", ":", "belns_db", ".", "collection", "(", "arangodb", ".", "belns_metadata_name", ")", ".", "insert", "(", "metadata", ")", "except", 
"ArangoError", "as", "ae", ":", "belns_db", ".", "collection", "(", "arangodb", ".", "belns_metadata_name", ")", ".", "replace", "(", "metadata", ")" ]
Load terms into Elasticsearch and ArangoDB Forceupdate will create a new index in Elasticsearch regardless of whether an index with the resource version already exists. Args: fo: file obj - terminology file metadata: dict containing the metadata for terminology forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
[ "Load", "terms", "into", "Elasticsearch", "and", "ArangoDB" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/namespace.py#L27-L112
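A sketch of the versioned index naming used above: an ISO-style version timestamp is collapsed to digits for the index suffix, so each load gets a distinct Elasticsearch index that an alias can later be pointed at. The version string and namespace are illustrative values:

version = "2019-01-01T12:00:00"
es_version = version.replace("T", "").replace("-", "").replace(":", "")
index_prefix = "terms_" + "hgnc"  # namespace, lowercased
index_name = f"{index_prefix}_{es_version}"
print(index_name)  # terms_hgnc20190101120000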
belbio/bel
bel/resources/namespace.py
terms_iterator_for_elasticsearch
def terms_iterator_for_elasticsearch(fo: IO, index_name: str): """Add index_name to term documents for bulk load""" species_list = config["bel_resources"].get("species_list", []) fo.seek(0) # Seek back to beginning of file with gzip.open(fo, "rt") as f: for line in f: term = json.loads(line) # skip if not term record (e.g. is a metadata record) if "term" not in term: continue term = term["term"] # Filter species if enabled in config species_id = term.get("species_id", None) if species_list and species_id and species_id not in species_list: continue all_term_ids = set() for term_id in [term["id"]] + term.get("alt_ids", []): all_term_ids.add(term_id) all_term_ids.add(lowercase_term_id(term_id)) term["alt_ids"] = copy.copy(list(all_term_ids)) yield { "_op_type": "index", "_index": index_name, "_type": "term", "_id": term["id"], "_source": copy.deepcopy(term), }
python
def terms_iterator_for_elasticsearch(fo: IO, index_name: str): """Add index_name to term documents for bulk load""" species_list = config["bel_resources"].get("species_list", []) fo.seek(0) # Seek back to beginning of file with gzip.open(fo, "rt") as f: for line in f: term = json.loads(line) # skip if not term record (e.g. is a metadata record) if "term" not in term: continue term = term["term"] # Filter species if enabled in config species_id = term.get("species_id", None) if species_list and species_id and species_id not in species_list: continue all_term_ids = set() for term_id in [term["id"]] + term.get("alt_ids", []): all_term_ids.add(term_id) all_term_ids.add(lowercase_term_id(term_id)) term["alt_ids"] = copy.copy(list(all_term_ids)) yield { "_op_type": "index", "_index": index_name, "_type": "term", "_id": term["id"], "_source": copy.deepcopy(term), }
[ "def", "terms_iterator_for_elasticsearch", "(", "fo", ":", "IO", ",", "index_name", ":", "str", ")", ":", "species_list", "=", "config", "[", "\"bel_resources\"", "]", ".", "get", "(", "\"species_list\"", ",", "[", "]", ")", "fo", ".", "seek", "(", "0", ")", "# Seek back to beginning of file", "with", "gzip", ".", "open", "(", "fo", ",", "\"rt\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "term", "=", "json", ".", "loads", "(", "line", ")", "# skip if not term record (e.g. is a metadata record)", "if", "\"term\"", "not", "in", "term", ":", "continue", "term", "=", "term", "[", "\"term\"", "]", "# Filter species if enabled in config", "species_id", "=", "term", ".", "get", "(", "\"species_id\"", ",", "None", ")", "if", "species_list", "and", "species_id", "and", "species_id", "not", "in", "species_list", ":", "continue", "all_term_ids", "=", "set", "(", ")", "for", "term_id", "in", "[", "term", "[", "\"id\"", "]", "]", "+", "term", ".", "get", "(", "\"alt_ids\"", ",", "[", "]", ")", ":", "all_term_ids", ".", "add", "(", "term_id", ")", "all_term_ids", ".", "add", "(", "lowercase_term_id", "(", "term_id", ")", ")", "term", "[", "\"alt_ids\"", "]", "=", "copy", ".", "copy", "(", "list", "(", "all_term_ids", ")", ")", "yield", "{", "\"_op_type\"", ":", "\"index\"", ",", "\"_index\"", ":", "index_name", ",", "\"_type\"", ":", "\"term\"", ",", "\"_id\"", ":", "term", "[", "\"id\"", "]", ",", "\"_source\"", ":", "copy", ".", "deepcopy", "(", "term", ")", ",", "}" ]
Add index_name to term documents for bulk load
[ "Add", "index_name", "to", "term", "documents", "for", "bulk", "load" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/namespace.py#L206-L238
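A minimal sketch of streaming Elasticsearch bulk actions from a gzipped JSONL terminology file, mirroring the generator above; the file path is hypothetical and the "_type" field is omitted here since recent Elasticsearch versions dropped mapping types. A generator like this could be fed to elasticsearch.helpers.bulk:

import gzip
import json

def bulk_actions(path, index_name):
    with gzip.open(path, "rt") as f:
        for line in f:
            record = json.loads(line)
            if "term" not in record:
                continue  # skip metadata records, as the source does
            term = record["term"]
            yield {"_op_type": "index", "_index": index_name, "_id": term["id"], "_source": term}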
belbio/bel
bel/resources/namespace.py
lowercase_term_id
def lowercase_term_id(term_id: str) -> str: """Lowercase the term value (not the namespace prefix) Args: term_id (str): term identifier with namespace prefix, e.g. MESH:Atherosclerosis Returns: str: lowercased, e.g. MESH:atherosclerosis """ (ns, val) = term_id.split(":", maxsplit=1) term_id = f"{ns}:{val.lower()}" return term_id
python
def lowercase_term_id(term_id: str) -> str: """Lowercase the term value (not the namespace prefix) Args: term_id (str): term identifier with namespace prefix, e.g. MESH:Atherosclerosis Returns: str: lowercased, e.g. MESH:atherosclerosis """ (ns, val) = term_id.split(":", maxsplit=1) term_id = f"{ns}:{val.lower()}" return term_id
[ "def", "lowercase_term_id", "(", "term_id", ":", "str", ")", "->", "str", ":", "(", "ns", ",", "val", ")", "=", "term_id", ".", "split", "(", "\":\"", ",", "maxsplit", "=", "1", ")", "term_id", "=", "f\"{ns}:{val.lower()}\"", "return", "term_id" ]
Lowercase the term value (not the namespace prefix) Args: term_id (str): term identifier with namespace prefix, e.g. MESH:Atherosclerosis Returns: str: lowercased, e.g. MESH:atherosclerosis
[ "Lowercase", "the", "term", "value", "(", "not", "the", "namespace", "prefix", ")" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/resources/namespace.py#L241-L253
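A direct usage sketch of the function above; maxsplit=1 matters because it leaves any colons inside the value untouched:

def lowercase_term_id(term_id: str) -> str:
    ns, val = term_id.split(":", maxsplit=1)
    return f"{ns}:{val.lower()}"

print(lowercase_term_id("MESH:Atherosclerosis"))  # MESH:atherosclerosis
print(lowercase_term_id("NS:Some:Value"))         # NS:some:value (only the prefix split)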
belbio/bel
bel/nanopub/pubmed.py
node_text
def node_text(node): """Needed for things like abstracts which have internal tags (see PMID:27822475)""" if node.text: result = node.text else: result = "" for child in node: if child.tail is not None: result += child.tail return result
python
def node_text(node): """Needed for things like abstracts which have internal tags (see PMID:27822475)""" if node.text: result = node.text else: result = "" for child in node: if child.tail is not None: result += child.tail return result
[ "def", "node_text", "(", "node", ")", ":", "if", "node", ".", "text", ":", "result", "=", "node", ".", "text", "else", ":", "result", "=", "\"\"", "for", "child", "in", "node", ":", "if", "child", ".", "tail", "is", "not", "None", ":", "result", "+=", "child", ".", "tail", "return", "result" ]
Needed for things like abstracts which have internal tags (see PMID:27822475)
[ "Needed", "for", "things", "like", "abstracts", "which", "have", "internal", "tags", "(", "see", "PMID", ":", "27822475", ")" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L49-L59
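A demo of why node_text exists, assuming lxml is installed: element.text stops at the first inline child tag, while collecting the children's tails recovers the rest of the surrounding text (the child's own text is deliberately dropped):

from lxml import etree

def node_text(node):
    result = node.text or ""
    for child in node:
        if child.tail is not None:
            result += child.tail
    return result

node = etree.fromstring("<AbstractText>Levels of <sup>18</sup>F were high</AbstractText>")
print(node.text)        # "Levels of " - truncated at the first inline tag
print(node_text(node))  # "Levels of F were high" - tails appended after node.text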
belbio/bel
bel/nanopub/pubmed.py
get_pubtator
def get_pubtator(pmid): """Get Pubtator Bioconcepts from Pubmed Abstract Re-configure the denotations into an annotation dictionary format and collapse duplicate terms so that their spans are in a list. """ r = get_url(PUBTATOR_TMPL.replace("PMID", pmid), timeout=10) if r and r.status_code == 200: pubtator = r.json()[0] else: log.error( f"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}" ) return None known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"] for idx, anno in enumerate(pubtator["denotations"]): s_match = re.match(r"(\w+):(\w+)", anno["obj"]) c_match = re.match(r"(\w+):(\w+):(\w+)", anno["obj"]) if c_match: (ctype, namespace, cid) = ( c_match.group(1), c_match.group(2), c_match.group(3), ) if ctype not in known_types: log.info(f"{ctype} not in known_types for Pubtator") if namespace not in known_types: log.info(f"{namespace} not in known_types for Pubtator") pubtator["denotations"][idx][ "obj" ] = f'{pubtator_ns_convert.get(namespace, "UNKNOWN")}:{cid}' pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get( ctype, None ) pubtator["denotations"][idx][ "annotation_type" ] = pubtator_annotation_convert.get(ctype, None) elif s_match: (ctype, cid) = (s_match.group(1), s_match.group(2)) if ctype not in known_types: log.info(f"{ctype} not in known_types for Pubtator") pubtator["denotations"][idx][ "obj" ] = f'{pubtator_ns_convert.get(ctype, "UNKNOWN")}:{cid}' pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get( ctype, None ) pubtator["denotations"][idx][ "annotation_type" ] = pubtator_annotation_convert.get(ctype, None) annotations = {} for anno in pubtator["denotations"]: log.info(anno) if anno["obj"] not in annotations: annotations[anno["obj"]] = {"spans": [anno["span"]]} annotations[anno["obj"]]["entity_types"] = [anno.get("entity_type", [])] annotations[anno["obj"]]["annotation_types"] = [ anno.get("annotation_type", []) ] else: annotations[anno["obj"]]["spans"].append(anno["span"]) del pubtator["denotations"] pubtator["annotations"] = copy.deepcopy(annotations) return pubtator
python
def get_pubtator(pmid): """Get Pubtator Bioconcepts from Pubmed Abstract Re-configure the denotations into an annotation dictionary format and collapse duplicate terms so that their spans are in a list. """ r = get_url(PUBTATOR_TMPL.replace("PMID", pmid), timeout=10) if r and r.status_code == 200: pubtator = r.json()[0] else: log.error( f"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}" ) return None known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"] for idx, anno in enumerate(pubtator["denotations"]): s_match = re.match(r"(\w+):(\w+)", anno["obj"]) c_match = re.match(r"(\w+):(\w+):(\w+)", anno["obj"]) if c_match: (ctype, namespace, cid) = ( c_match.group(1), c_match.group(2), c_match.group(3), ) if ctype not in known_types: log.info(f"{ctype} not in known_types for Pubtator") if namespace not in known_types: log.info(f"{namespace} not in known_types for Pubtator") pubtator["denotations"][idx][ "obj" ] = f'{pubtator_ns_convert.get(namespace, "UNKNOWN")}:{cid}' pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get( ctype, None ) pubtator["denotations"][idx][ "annotation_type" ] = pubtator_annotation_convert.get(ctype, None) elif s_match: (ctype, cid) = (s_match.group(1), s_match.group(2)) if ctype not in known_types: log.info(f"{ctype} not in known_types for Pubtator") pubtator["denotations"][idx][ "obj" ] = f'{pubtator_ns_convert.get(ctype, "UNKNOWN")}:{cid}' pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get( ctype, None ) pubtator["denotations"][idx][ "annotation_type" ] = pubtator_annotation_convert.get(ctype, None) annotations = {} for anno in pubtator["denotations"]: log.info(anno) if anno["obj"] not in annotations: annotations[anno["obj"]] = {"spans": [anno["span"]]} annotations[anno["obj"]]["entity_types"] = [anno.get("entity_type", [])] annotations[anno["obj"]]["annotation_types"] = [ anno.get("annotation_type", []) ] else: annotations[anno["obj"]]["spans"].append(anno["span"]) del pubtator["denotations"] pubtator["annotations"] = copy.deepcopy(annotations) return pubtator
[ "def", "get_pubtator", "(", "pmid", ")", ":", "r", "=", "get_url", "(", "PUBTATOR_TMPL", ".", "replace", "(", "\"PMID\"", ",", "pmid", ")", ",", "timeout", "=", "10", ")", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "pubtator", "=", "r", ".", "json", "(", ")", "[", "0", "]", "else", ":", "log", ".", "error", "(", "f\"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}\"", ")", "return", "None", "known_types", "=", "[", "\"CHEBI\"", ",", "\"Chemical\"", ",", "\"Disease\"", ",", "\"Gene\"", ",", "\"Species\"", "]", "for", "idx", ",", "anno", "in", "enumerate", "(", "pubtator", "[", "\"denotations\"", "]", ")", ":", "s_match", "=", "re", ".", "match", "(", "r\"(\\w+):(\\w+)\"", ",", "anno", "[", "\"obj\"", "]", ")", "c_match", "=", "re", ".", "match", "(", "r\"(\\w+):(\\w+):(\\w+)\"", ",", "anno", "[", "\"obj\"", "]", ")", "if", "c_match", ":", "(", "ctype", ",", "namespace", ",", "cid", ")", "=", "(", "c_match", ".", "group", "(", "1", ")", ",", "c_match", ".", "group", "(", "2", ")", ",", "c_match", ".", "group", "(", "3", ")", ",", ")", "if", "ctype", "not", "in", "known_types", ":", "log", ".", "info", "(", "f\"{ctype} not in known_types for Pubtator\"", ")", "if", "namespace", "not", "in", "known_types", ":", "log", ".", "info", "(", "f\"{namespace} not in known_types for Pubtator\"", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"obj\"", "]", "=", "f'{pubtator_ns_convert.get(namespace, \"UNKNOWN\")}:{cid}'", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"entity_type\"", "]", "=", "pubtator_entity_convert", ".", "get", "(", "ctype", ",", "None", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"annotation_type\"", "]", "=", "pubtator_annotation_convert", ".", "get", "(", "ctype", ",", "None", ")", "elif", "s_match", ":", "(", "ctype", ",", "cid", ")", "=", "(", "s_match", ".", "group", "(", "1", ")", ",", "s_match", ".", "group", "(", "2", ")", ")", "if", "ctype", "not", "in", "known_types", ":", "log", ".", "info", "(", "f\"{ctype} not in known_types for Pubtator\"", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"obj\"", "]", "=", "f'{pubtator_ns_convert.get(ctype, \"UNKNOWN\")}:{cid}'", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"entity_type\"", "]", "=", "pubtator_entity_convert", ".", "get", "(", "ctype", ",", "None", ")", "pubtator", "[", "\"denotations\"", "]", "[", "idx", "]", "[", "\"annotation_type\"", "]", "=", "pubtator_annotation_convert", ".", "get", "(", "ctype", ",", "None", ")", "annotations", "=", "{", "}", "for", "anno", "in", "pubtator", "[", "\"denotations\"", "]", ":", "log", ".", "info", "(", "anno", ")", "if", "anno", "[", "\"obj\"", "]", "not", "in", "annotations", ":", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "=", "{", "\"spans\"", ":", "[", "anno", "[", "\"span\"", "]", "]", "}", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "[", "\"entity_types\"", "]", "=", "[", "anno", ".", "get", "(", "\"entity_type\"", ",", "[", "]", ")", "]", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "[", "\"annotation_types\"", "]", "=", "[", "anno", ".", "get", "(", "\"annotation_type\"", ",", "[", "]", ")", "]", "else", ":", "annotations", "[", "anno", "[", "\"obj\"", "]", "]", "[", "\"spans\"", "]", ".", "append", "(", "anno", "[", "\"span\"", "]", ")", "del", "pubtator", "[", "\"denotations\"", "]", "pubtator", "[", "\"annotations\"", "]", "=", "copy", ".", "deepcopy", "(", 
"annotations", ")", "return", "pubtator" ]
Get Pubtator Bioconcepts from Pubmed Abstract Re-configure the denotations into an annotation dictionary format and collapse duplicate terms so that their spans are in a list.
[ "Get", "Pubtator", "Bioconcepts", "from", "Pubmed", "Abstract" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L62-L135
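A simplified, self-contained sketch of the collapsing step above: duplicate denotations for the same term are folded into one annotation whose spans field is a list. The sample denotations are illustrative:

denotations = [
    {"obj": "HGNC:AKT1", "span": {"begin": 0, "end": 4}},
    {"obj": "HGNC:AKT1", "span": {"begin": 40, "end": 44}},
    {"obj": "TAX:9606", "span": {"begin": 10, "end": 15}},
]
annotations = {}
for anno in denotations:
    # First occurrence creates the entry; later ones only extend the span list.
    annotations.setdefault(anno["obj"], {"spans": []})["spans"].append(anno["span"])
print(annotations["HGNC:AKT1"]["spans"])  # [{'begin': 0, 'end': 4}, {'begin': 40, 'end': 44}]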
belbio/bel
bel/nanopub/pubmed.py
process_pub_date
def process_pub_date(year, mon, day): """Create pub_date from what Pubmed provides in Journal PubDate entry """ pub_date = None if year and re.match("[a-zA-Z]+", mon): pub_date = datetime.datetime.strptime( f"{year}-{mon}-{day}", "%Y-%b-%d" ).strftime("%Y-%m-%d") elif year: pub_date = f"{year}-{mon}-{day}" return pub_date
python
def process_pub_date(year, mon, day): """Create pub_date from what Pubmed provides in Journal PubDate entry """ pub_date = None if year and re.match("[a-zA-Z]+", mon): pub_date = datetime.datetime.strptime( f"{year}-{mon}-{day}", "%Y-%b-%d" ).strftime("%Y-%m-%d") elif year: pub_date = f"{year}-{mon}-{day}" return pub_date
[ "def", "process_pub_date", "(", "year", ",", "mon", ",", "day", ")", ":", "pub_date", "=", "None", "if", "year", "and", "re", ".", "match", "(", "\"[a-zA-Z]+\"", ",", "mon", ")", ":", "pub_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "f\"{year}-{mon}-{day}\"", ",", "\"%Y-%b-%d\"", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "elif", "year", ":", "pub_date", "=", "f\"{year}-{mon}-{day}\"", "return", "pub_date" ]
Create pub_date from what Pubmed provides in Journal PubDate entry
[ "Create", "pub_date", "from", "what", "Pubmed", "provides", "in", "Journal", "PubDate", "entry" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L138-L150
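A quick demo of the strptime branch above, which handles the case where Pubmed's PubDate gives the month as an abbreviated English name (the %b directive, assuming an English locale):

import datetime

print(datetime.datetime.strptime("2016-Nov-03", "%Y-%b-%d").strftime("%Y-%m-%d"))  # 2016-11-03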
belbio/bel
bel/nanopub/pubmed.py
get_pubmed
def get_pubmed(pmid: str) -> Mapping[str, Any]: """Get pubmed xml for pmid and convert to JSON Remove MESH terms if they are duplicated in the compound term set ArticleDate vs PubDate gets complicated: https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html see <ArticleDate> and <PubDate> Only getting pub_year at this point from the <PubDate> element. Args: pmid: pubmed id number as a string Returns: pubmed json """ pubmed_url = PUBMED_TMPL.replace("PMID", str(pmid)) r = get_url(pubmed_url) log.info(f"Getting Pubmed URL {pubmed_url}") try: root = etree.fromstring(r.content) doc = {"abstract": ""} doc["pmid"] = root.xpath("//PMID/text()")[0] doc["title"] = next(iter(root.xpath("//ArticleTitle/text()")), "") # TODO https://stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element atext = next(iter(root.xpath("//Abstract/AbstractText/text()")), "") print("Text", atext) for abstracttext in root.xpath("//Abstract/AbstractText"): abstext = node_text(abstracttext) label = abstracttext.get("Label", None) if label: doc["abstract"] += f"{label}: {abstext}\n" else: doc["abstract"] += f"{abstext}\n" doc["abstract"] = doc["abstract"].rstrip() doc["authors"] = [] for author in root.xpath("//Author"): last_name = next(iter(author.xpath("LastName/text()")), "") first_name = next(iter(author.xpath("ForeName/text()")), "") initials = next(iter(author.xpath("Initials/text()")), "") if not first_name and initials: first_name = initials doc["authors"].append(f"{last_name}, {first_name}") pub_year = next( iter(root.xpath("//Journal/JournalIssue/PubDate/Year/text()")), None ) pub_mon = next( iter(root.xpath("//Journal/JournalIssue/PubDate/Month/text()")), "Jan" ) pub_day = next( iter(root.xpath("//Journal/JournalIssue/PubDate/Day/text()")), "01" ) pub_date = process_pub_date(pub_year, pub_mon, pub_day) doc["pub_date"] = pub_date doc["journal_title"] = next(iter(root.xpath("//Journal/Title/text()")), "") doc["joural_iso_title"] = next( iter(root.xpath("//Journal/ISOAbbreviation/text()")), "" ) doc["doi"] = next(iter(root.xpath('//ArticleId[@IdType="doi"]/text()')), None) doc["compounds"] = [] for chem in root.xpath("//ChemicalList/Chemical/NameOfSubstance"): chem_id = chem.get("UI") doc["compounds"].append({"id": f"MESH:{chem_id}", "name": chem.text}) compounds = [cmpd["id"] for cmpd in doc["compounds"]] doc["mesh"] = [] for mesh in root.xpath("//MeshHeading/DescriptorName"): mesh_id = f"MESH:{mesh.get('UI')}" if mesh_id in compounds: continue doc["mesh"].append({"id": mesh_id, "name": mesh.text}) return doc except Exception as e: log.error( f"Bad Pubmed request, status: {r.status_code} error: {e}", url=f'{PUBMED_TMPL.replace("PMID", pmid)}', ) return {"message": f"Cannot get PMID: {pubmed_url}"}
python
def get_pubmed(pmid: str) -> Mapping[str, Any]: """Get pubmed xml for pmid and convert to JSON Remove MESH terms if they are duplicated in the compound term set ArticleDate vs PubDate gets complicated: https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html see <ArticleDate> and <PubDate> Only getting pub_year at this point from the <PubDate> element. Args: pmid: pubmed id number as a string Returns: pubmed json """ pubmed_url = PUBMED_TMPL.replace("PMID", str(pmid)) r = get_url(pubmed_url) log.info(f"Getting Pubmed URL {pubmed_url}") try: root = etree.fromstring(r.content) doc = {"abstract": ""} doc["pmid"] = root.xpath("//PMID/text()")[0] doc["title"] = next(iter(root.xpath("//ArticleTitle/text()")), "") # TODO https://stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element atext = next(iter(root.xpath("//Abstract/AbstractText/text()")), "") print("Text", atext) for abstracttext in root.xpath("//Abstract/AbstractText"): abstext = node_text(abstracttext) label = abstracttext.get("Label", None) if label: doc["abstract"] += f"{label}: {abstext}\n" else: doc["abstract"] += f"{abstext}\n" doc["abstract"] = doc["abstract"].rstrip() doc["authors"] = [] for author in root.xpath("//Author"): last_name = next(iter(author.xpath("LastName/text()")), "") first_name = next(iter(author.xpath("ForeName/text()")), "") initials = next(iter(author.xpath("Initials/text()")), "") if not first_name and initials: first_name = initials doc["authors"].append(f"{last_name}, {first_name}") pub_year = next( iter(root.xpath("//Journal/JournalIssue/PubDate/Year/text()")), None ) pub_mon = next( iter(root.xpath("//Journal/JournalIssue/PubDate/Month/text()")), "Jan" ) pub_day = next( iter(root.xpath("//Journal/JournalIssue/PubDate/Day/text()")), "01" ) pub_date = process_pub_date(pub_year, pub_mon, pub_day) doc["pub_date"] = pub_date doc["journal_title"] = next(iter(root.xpath("//Journal/Title/text()")), "") doc["joural_iso_title"] = next( iter(root.xpath("//Journal/ISOAbbreviation/text()")), "" ) doc["doi"] = next(iter(root.xpath('//ArticleId[@IdType="doi"]/text()')), None) doc["compounds"] = [] for chem in root.xpath("//ChemicalList/Chemical/NameOfSubstance"): chem_id = chem.get("UI") doc["compounds"].append({"id": f"MESH:{chem_id}", "name": chem.text}) compounds = [cmpd["id"] for cmpd in doc["compounds"]] doc["mesh"] = [] for mesh in root.xpath("//MeshHeading/DescriptorName"): mesh_id = f"MESH:{mesh.get('UI')}" if mesh_id in compounds: continue doc["mesh"].append({"id": mesh_id, "name": mesh.text}) return doc except Exception as e: log.error( f"Bad Pubmed request, status: {r.status_code} error: {e}", url=f'{PUBMED_TMPL.replace("PMID", pmid)}', ) return {"message": f"Cannot get PMID: {pubmed_url}"}
[ "def", "get_pubmed", "(", "pmid", ":", "str", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "pubmed_url", "=", "PUBMED_TMPL", ".", "replace", "(", "\"PMID\"", ",", "str", "(", "pmid", ")", ")", "r", "=", "get_url", "(", "pubmed_url", ")", "log", ".", "info", "(", "f\"Getting Pubmed URL {pubmed_url}\"", ")", "try", ":", "root", "=", "etree", ".", "fromstring", "(", "r", ".", "content", ")", "doc", "=", "{", "\"abstract\"", ":", "\"\"", "}", "doc", "[", "\"pmid\"", "]", "=", "root", ".", "xpath", "(", "\"//PMID/text()\"", ")", "[", "0", "]", "doc", "[", "\"title\"", "]", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//ArticleTitle/text()\"", ")", ")", ",", "\"\"", ")", "# TODO https://stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element", "atext", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//Abstract/AbstractText/text()\"", ")", ")", ",", "\"\"", ")", "print", "(", "\"Text\"", ",", "atext", ")", "for", "abstracttext", "in", "root", ".", "xpath", "(", "\"//Abstract/AbstractText\"", ")", ":", "abstext", "=", "node_text", "(", "abstracttext", ")", "label", "=", "abstracttext", ".", "get", "(", "\"Label\"", ",", "None", ")", "if", "label", ":", "doc", "[", "\"abstract\"", "]", "+=", "f\"{label}: {abstext}\\n\"", "else", ":", "doc", "[", "\"abstract\"", "]", "+=", "f\"{abstext}\\n\"", "doc", "[", "\"abstract\"", "]", "=", "doc", "[", "\"abstract\"", "]", ".", "rstrip", "(", ")", "doc", "[", "\"authors\"", "]", "=", "[", "]", "for", "author", "in", "root", ".", "xpath", "(", "\"//Author\"", ")", ":", "last_name", "=", "next", "(", "iter", "(", "author", ".", "xpath", "(", "\"LastName/text()\"", ")", ")", ",", "\"\"", ")", "first_name", "=", "next", "(", "iter", "(", "author", ".", "xpath", "(", "\"ForeName/text()\"", ")", ")", ",", "\"\"", ")", "initials", "=", "next", "(", "iter", "(", "author", ".", "xpath", "(", "\"Initials/text()\"", ")", ")", ",", "\"\"", ")", "if", "not", "first_name", "and", "initials", ":", "first_name", "=", "initials", "doc", "[", "\"authors\"", "]", ".", "append", "(", "f\"{last_name}, {first_name}\"", ")", "pub_year", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//Journal/JournalIssue/PubDate/Year/text()\"", ")", ")", ",", "None", ")", "pub_mon", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//Journal/JournalIssue/PubDate/Month/text()\"", ")", ")", ",", "\"Jan\"", ")", "pub_day", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//Journal/JournalIssue/PubDate/Day/text()\"", ")", ")", ",", "\"01\"", ")", "pub_date", "=", "process_pub_date", "(", "pub_year", ",", "pub_mon", ",", "pub_day", ")", "doc", "[", "\"pub_date\"", "]", "=", "pub_date", "doc", "[", "\"journal_title\"", "]", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//Journal/Title/text()\"", ")", ")", ",", "\"\"", ")", "doc", "[", "\"joural_iso_title\"", "]", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "\"//Journal/ISOAbbreviation/text()\"", ")", ")", ",", "\"\"", ")", "doc", "[", "\"doi\"", "]", "=", "next", "(", "iter", "(", "root", ".", "xpath", "(", "'//ArticleId[@IdType=\"doi\"]/text()'", ")", ")", ",", "None", ")", "doc", "[", "\"compounds\"", "]", "=", "[", "]", "for", "chem", "in", "root", ".", "xpath", "(", "\"//ChemicalList/Chemical/NameOfSubstance\"", ")", ":", "chem_id", "=", "chem", ".", "get", "(", "\"UI\"", ")", "doc", "[", "\"compounds\"", "]", ".", "append", "(", "{", "\"id\"", ":", "f\"MESH:{chem_id}\"", ",", "\"name\"", ":", 
"chem", ".", "text", "}", ")", "compounds", "=", "[", "cmpd", "[", "\"id\"", "]", "for", "cmpd", "in", "doc", "[", "\"compounds\"", "]", "]", "doc", "[", "\"mesh\"", "]", "=", "[", "]", "for", "mesh", "in", "root", ".", "xpath", "(", "\"//MeshHeading/DescriptorName\"", ")", ":", "mesh_id", "=", "f\"MESH:{mesh.get('UI')}\"", "if", "mesh_id", "in", "compounds", ":", "continue", "doc", "[", "\"mesh\"", "]", ".", "append", "(", "{", "\"id\"", ":", "mesh_id", ",", "\"name\"", ":", "mesh", ".", "text", "}", ")", "return", "doc", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Bad Pubmed request, status: {r.status_code} error: {e}\"", ",", "url", "=", "f'{PUBMED_TMPL.replace(\"PMID\", pmid)}'", ",", ")", "return", "{", "\"message\"", ":", "f\"Cannot get PMID: {pubmed_url}\"", "}" ]
Get pubmed xml for pmid and convert to JSON Remove MESH terms if they are duplicated in the compound term set ArticleDate vs PubDate gets complicated: https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html see <ArticleDate> and <PubDate> Only getting pub_year at this point from the <PubDate> element. Args: pmid: pubmed id number as a string Returns: pubmed json
[ "Get", "pubmed", "xml", "for", "pmid", "and", "convert", "to", "JSON" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L153-L239
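A sketch of the next(iter(...), default) idiom used throughout the function above, assuming lxml: it returns the first xpath hit, or the supplied default when the element is absent, without indexing errors. The XML snippet is illustrative:

from lxml import etree

root = etree.fromstring("<PubmedArticle><ArticleTitle>Example title</ArticleTitle></PubmedArticle>")
title = next(iter(root.xpath("//ArticleTitle/text()")), "")
doi = next(iter(root.xpath('//ArticleId[@IdType="doi"]/text()')), None)
print(title, doi)  # Example title None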
belbio/bel
bel/nanopub/pubmed.py
enhance_pubmed_annotations
def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]: """Enhance pubmed namespace IDs Add additional entity and annotation types to annotations Use preferred id for namespaces as needed Add strings from Title, Abstract matching Pubtator BioConcept spans NOTE - basically duplicated code with bel_api:api.services.pubmed Args: pubmed Returns: pubmed object """ text = pubmed["title"] + pubmed["abstract"] annotations = {} for nsarg in pubmed["annotations"]: url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{url_path_param_quoting(nsarg)}' log.info(f"URL: {url}") r = get_url(url) log.info(f"Result: {r}") new_nsarg = "" if r and r.status_code == 200: term = r.json() new_nsarg = bel_utils.convert_nsarg(term["id"], decanonicalize=True) pubmed["annotations"][nsarg]["name"] = term["name"] pubmed["annotations"][nsarg]["label"] = term["label"] pubmed["annotations"][nsarg]["entity_types"] = list( set( pubmed["annotations"][nsarg]["entity_types"] + term.get("entity_types", []) ) ) pubmed["annotations"][nsarg]["annotation_types"] = list( set( pubmed["annotations"][nsarg]["annotation_types"] + term.get("annotation_types", []) ) ) if new_nsarg != nsarg: annotations[new_nsarg] = copy.deepcopy(pubmed["annotations"][nsarg]) else: annotations[nsarg] = copy.deepcopy(pubmed["annotations"][nsarg]) for nsarg in annotations: for idx, span in enumerate(annotations[nsarg]["spans"]): string = text[span["begin"] - 1 : span["end"] - 1] annotations[nsarg]["spans"][idx]["text"] = string pubmed["annotations"] = copy.deepcopy(annotations) return pubmed
python
def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]: """Enhance pubmed namespace IDs Add additional entity and annotation types to annotations Use preferred id for namespaces as needed Add strings from Title, Abstract matching Pubtator BioConcept spans NOTE - basically duplicated code with bel_api:api.services.pubmed Args: pubmed Returns: pubmed object """ text = pubmed["title"] + pubmed["abstract"] annotations = {} for nsarg in pubmed["annotations"]: url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{url_path_param_quoting(nsarg)}' log.info(f"URL: {url}") r = get_url(url) log.info(f"Result: {r}") new_nsarg = "" if r and r.status_code == 200: term = r.json() new_nsarg = bel_utils.convert_nsarg(term["id"], decanonicalize=True) pubmed["annotations"][nsarg]["name"] = term["name"] pubmed["annotations"][nsarg]["label"] = term["label"] pubmed["annotations"][nsarg]["entity_types"] = list( set( pubmed["annotations"][nsarg]["entity_types"] + term.get("entity_types", []) ) ) pubmed["annotations"][nsarg]["annotation_types"] = list( set( pubmed["annotations"][nsarg]["annotation_types"] + term.get("annotation_types", []) ) ) if new_nsarg != nsarg: annotations[new_nsarg] = copy.deepcopy(pubmed["annotations"][nsarg]) else: annotations[nsarg] = copy.deepcopy(pubmed["annotations"][nsarg]) for nsarg in annotations: for idx, span in enumerate(annotations[nsarg]["spans"]): string = text[span["begin"] - 1 : span["end"] - 1] annotations[nsarg]["spans"][idx]["text"] = string pubmed["annotations"] = copy.deepcopy(annotations) return pubmed
[ "def", "enhance_pubmed_annotations", "(", "pubmed", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "text", "=", "pubmed", "[", "\"title\"", "]", "+", "pubmed", "[", "\"abstract\"", "]", "annotations", "=", "{", "}", "for", "nsarg", "in", "pubmed", "[", "\"annotations\"", "]", ":", "url", "=", "f'{config[\"bel_api\"][\"servers\"][\"api_url\"]}/terms/{url_path_param_quoting(nsarg)}'", "log", ".", "info", "(", "f\"URL: {url}\"", ")", "r", "=", "get_url", "(", "url", ")", "log", ".", "info", "(", "f\"Result: {r}\"", ")", "new_nsarg", "=", "\"\"", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "term", "=", "r", ".", "json", "(", ")", "new_nsarg", "=", "bel_utils", ".", "convert_nsarg", "(", "term", "[", "\"id\"", "]", ",", "decanonicalize", "=", "True", ")", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"name\"", "]", "=", "term", "[", "\"name\"", "]", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"label\"", "]", "=", "term", "[", "\"label\"", "]", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"entity_types\"", "]", "=", "list", "(", "set", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"entity_types\"", "]", "+", "term", ".", "get", "(", "\"entity_types\"", ",", "[", "]", ")", ")", ")", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"annotation_types\"", "]", "=", "list", "(", "set", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", "[", "\"annotation_types\"", "]", "+", "term", ".", "get", "(", "\"annotation_types\"", ",", "[", "]", ")", ")", ")", "if", "new_nsarg", "!=", "nsarg", ":", "annotations", "[", "new_nsarg", "]", "=", "copy", ".", "deepcopy", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", ")", "else", ":", "annotations", "[", "nsarg", "]", "=", "copy", ".", "deepcopy", "(", "pubmed", "[", "\"annotations\"", "]", "[", "nsarg", "]", ")", "for", "nsarg", "in", "annotations", ":", "for", "idx", ",", "span", "in", "enumerate", "(", "annotations", "[", "nsarg", "]", "[", "\"spans\"", "]", ")", ":", "string", "=", "text", "[", "span", "[", "\"begin\"", "]", "-", "1", ":", "span", "[", "\"end\"", "]", "-", "1", "]", "annotations", "[", "nsarg", "]", "[", "\"spans\"", "]", "[", "idx", "]", "[", "\"text\"", "]", "=", "string", "pubmed", "[", "\"annotations\"", "]", "=", "copy", ".", "deepcopy", "(", "annotations", ")", "return", "pubmed" ]
Enhance pubmed namespace IDs Add additional entity and annotation types to annotations Use preferred id for namespaces as needed Add strings from Title, Abstract matching Pubtator BioConcept spans NOTE - basically duplicated code with bel_api:api.services.pubmed Args: pubmed Returns: pubmed object
[ "Enhance", "pubmed", "namespace", "IDs" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L242-L299
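Editor's note: the span handling at the end of enhance_pubmed_annotations() is easy to miss. It concatenates title and abstract, then slices each Pubtator span out of the combined text, treating the offsets as 1-based (hence the -1 on both bounds). A sketch with made-up title, abstract, and span values:

# Made-up data, purely to show the slicing step used by the function above.
pubmed = {
    "title": "TP53 mutations in cancer. ",
    "abstract": "We review the role of TP53 in tumor suppression.",
    "annotations": {"HGNC:TP53": {"spans": [{"begin": 1, "end": 5}]}},
}

text = pubmed["title"] + pubmed["abstract"]
for nsarg, annotation in pubmed["annotations"].items():
    for span in annotation["spans"]:
        # 1-based offsets, end-exclusive after the -1 adjustment on both bounds
        span["text"] = text[span["begin"] - 1 : span["end"] - 1]

print(pubmed["annotations"]["HGNC:TP53"]["spans"][0]["text"])  # -> "TP53"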
belbio/bel
bel/nanopub/pubmed.py
get_pubmed_for_beleditor
def get_pubmed_for_beleditor(pmid: str) -> Mapping[str, Any]: """Get fully annotated pubmed doc with Pubtator and full entity/annotation_types Args: pmid: Pubmed PMID Returns: Mapping[str, Any]: pubmed dictionary """ pubmed = get_pubmed(pmid) pubtator = get_pubtator(pmid) pubmed["annotations"] = copy.deepcopy(pubtator["annotations"]) # Add entity types and annotation types to annotations pubmed = enhance_pubmed_annotations(pubmed) return pubmed
python
def get_pubmed_for_beleditor(pmid: str) -> Mapping[str, Any]: """Get fully annotated pubmed doc with Pubtator and full entity/annotation_types Args: pmid: Pubmed PMID Returns: Mapping[str, Any]: pubmed dictionary """ pubmed = get_pubmed(pmid) pubtator = get_pubtator(pmid) pubmed["annotations"] = copy.deepcopy(pubtator["annotations"]) # Add entity types and annotation types to annotations pubmed = enhance_pubmed_annotations(pubmed) return pubmed
[ "def", "get_pubmed_for_beleditor", "(", "pmid", ":", "str", ")", "->", "Mapping", "[", "str", ",", "Any", "]", ":", "pubmed", "=", "get_pubmed", "(", "pmid", ")", "pubtator", "=", "get_pubtator", "(", "pmid", ")", "pubmed", "[", "\"annotations\"", "]", "=", "copy", ".", "deepcopy", "(", "pubtator", "[", "\"annotations\"", "]", ")", "# Add entity types and annotation types to annotations", "pubmed", "=", "enhance_pubmed_annotations", "(", "pubmed", ")", "return", "pubmed" ]
Get fully annotated pubmed doc with Pubtator and full entity/annotation_types Args: pmid: Pubmed PMID Returns: Mapping[str, Any]: pubmed dictionary
[ "Get", "fully", "annotated", "pubmed", "doc", "with", "Pubtator", "and", "full", "entity", "/", "annotation_types" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/pubmed.py#L302-L319
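Editor's note: a hedged usage sketch for the function above. It assumes a configured bel installation whose API and Pubtator endpoints are reachable; the PMID is an arbitrary example value.

from bel.nanopub.pubmed import get_pubmed_for_beleditor

doc = get_pubmed_for_beleditor("10022765")  # example PMID
print(doc["title"])
for nsarg, annotation in doc["annotations"].items():
    print(nsarg, annotation.get("entity_types"), annotation.get("annotation_types"))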
belbio/bel
bel/terms/orthologs.py
get_orthologs
def get_orthologs(canonical_gene_id: str, species: list = []) -> dict: """Get orthologs for given gene_id and species Canonicalize prior to ortholog query and decanonicalize the resulting ortholog Args: canonical_gene_id: canonical gene_id for which to retrieve ortholog species: target species for ortholog - tax id format TAX:<number> Returns: dict: {<tax_id>: {'canonical': canonical_id, 'decanonical': decanonical_id}} """ gene_id_key = bel.db.arangodb.arango_id_to_key(canonical_gene_id) orthologs = {} query_filter = "" if species: query_filter = f"FILTER vertex.tax_id IN {species}" query = f""" LET start = ( FOR vertex in ortholog_nodes FILTER vertex._key == "{gene_id_key}" RETURN {{ "name": vertex.name, "tax_id": vertex.tax_id }} ) LET orthologs = ( FOR vertex IN 1..3 ANY "ortholog_nodes/{gene_id_key}" ortholog_edges OPTIONS {{ bfs: true, uniqueVertices : 'global' }} {query_filter} RETURN DISTINCT {{ "name": vertex.name, "tax_id": vertex.tax_id }} ) RETURN {{ 'orthologs': FLATTEN(UNION(start, orthologs)) }} """ cursor = belns_db.aql.execute(query, batch_size=20) results = cursor.pop() for ortholog in results["orthologs"]: norms = bel.terms.terms.get_normalized_terms(ortholog["name"]) orthologs[ortholog["tax_id"]] = { "canonical": norms["canonical"], "decanonical": norms["decanonical"], } return orthologs
python
def get_orthologs(canonical_gene_id: str, species: list = []) -> dict: """Get orthologs for given gene_id and species Canonicalize prior to ortholog query and decanonicalize the resulting ortholog Args: canonical_gene_id: canonical gene_id for which to retrieve ortholog species: target species for ortholog - tax id format TAX:<number> Returns: dict: {<tax_id>: {'canonical': canonical_id, 'decanonical': decanonical_id}} """ gene_id_key = bel.db.arangodb.arango_id_to_key(canonical_gene_id) orthologs = {} query_filter = "" if species: query_filter = f"FILTER vertex.tax_id IN {species}" query = f""" LET start = ( FOR vertex in ortholog_nodes FILTER vertex._key == "{gene_id_key}" RETURN {{ "name": vertex.name, "tax_id": vertex.tax_id }} ) LET orthologs = ( FOR vertex IN 1..3 ANY "ortholog_nodes/{gene_id_key}" ortholog_edges OPTIONS {{ bfs: true, uniqueVertices : 'global' }} {query_filter} RETURN DISTINCT {{ "name": vertex.name, "tax_id": vertex.tax_id }} ) RETURN {{ 'orthologs': FLATTEN(UNION(start, orthologs)) }} """ cursor = belns_db.aql.execute(query, batch_size=20) results = cursor.pop() for ortholog in results["orthologs"]: norms = bel.terms.terms.get_normalized_terms(ortholog["name"]) orthologs[ortholog["tax_id"]] = { "canonical": norms["canonical"], "decanonical": norms["decanonical"], } return orthologs
[ "def", "get_orthologs", "(", "canonical_gene_id", ":", "str", ",", "species", ":", "list", "=", "[", "]", ")", "->", "dict", ":", "gene_id_key", "=", "bel", ".", "db", ".", "arangodb", ".", "arango_id_to_key", "(", "canonical_gene_id", ")", "orthologs", "=", "{", "}", "query_filter", "=", "\"\"", "if", "species", ":", "query_filter", "=", "f\"FILTER vertex.tax_id IN {species}\"", "query", "=", "f\"\"\"\n    LET start = (\n        FOR vertex in ortholog_nodes\n            FILTER vertex._key == \"{gene_id_key}\"\n            RETURN {{ \"name\": vertex.name, \"tax_id\": vertex.tax_id }}\n    )\n\n    LET orthologs = (\n        FOR vertex IN 1..3\n            ANY \"ortholog_nodes/{gene_id_key}\" ortholog_edges\n            OPTIONS {{ bfs: true, uniqueVertices : 'global' }}\n            {query_filter}\n            RETURN DISTINCT {{ \"name\": vertex.name, \"tax_id\": vertex.tax_id }}\n    )\n\n    RETURN {{ 'orthologs': FLATTEN(UNION(start, orthologs)) }}\n    \"\"\"", "cursor", "=", "belns_db", ".", "aql", ".", "execute", "(", "query", ",", "batch_size", "=", "20", ")", "results", "=", "cursor", ".", "pop", "(", ")", "for", "ortholog", "in", "results", "[", "\"orthologs\"", "]", ":", "norms", "=", "bel", ".", "terms", ".", "terms", ".", "get_normalized_terms", "(", "ortholog", "[", "\"name\"", "]", ")", "orthologs", "[", "ortholog", "[", "\"tax_id\"", "]", "]", "=", "{", "\"canonical\"", ":", "norms", "[", "\"canonical\"", "]", ",", "\"decanonical\"", ":", "norms", "[", "\"decanonical\"", "]", ",", "}", "return", "orthologs" ]
Get orthologs for given gene_id and species Canonicalize prior to ortholog query and decanonicalize the resulting ortholog Args: canonical_gene_id: canonical gene_id for which to retrieve ortholog species: target species for ortholog - tax id format TAX:<number> Returns: dict: {<tax_id>: {'canonical': canonical_id, 'decanonical': decanonical_id}}
[ "Get", "orthologs", "for", "given", "gene_id", "and", "species" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/terms/orthologs.py#L16-L63
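Editor's note: the record above is shown with two fixes — query_filter now defaults to an empty string (the original raised a NameError whenever species was left empty, which is the default), and the return annotation is dict rather than List[dict], since the function returns a mapping keyed by tax id. A hedged usage sketch follows; it assumes an ArangoDB instance populated with the ortholog_nodes/ortholog_edges graph behind belns_db, and the gene id (human AKT1, Entrez Gene) and tax ids are example values only.

import bel.terms.orthologs

# Request mouse (TAX:10090) and rat (TAX:10116) orthologs of human AKT1.
# Passing species=[] would return orthologs for every species within 3 hops.
orthologs = bel.terms.orthologs.get_orthologs(
    "EG:207", species=["TAX:10090", "TAX:10116"]
)
for tax_id, ids in orthologs.items():
    print(tax_id, ids["canonical"], ids["decanonical"])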
PayEx/pypayex
payex/utils.py
normalize_value
def normalize_value(val): """ Normalize strings with booleans into Python types. """ if val is not None: if val.lower() == 'false': val = False elif val.lower() == 'true': val = True return val
python
def normalize_value(val): """ Normalize strings with booleans into Python types. """ if val is not None: if val.lower() == 'false': val = False elif val.lower() == 'true': val = True return val
[ "def", "normalize_value", "(", "val", ")", ":", "if", "val", "is", "not", "None", ":", "if", "val", ".", "lower", "(", ")", "==", "'false'", ":", "val", "=", "False", "elif", "val", ".", "lower", "(", ")", "==", "'true'", ":", "val", "=", "True", "return", "val" ]
Normalize strings with booleans into Python types.
[ "Normalize", "strings", "with", "booleans", "into", "Python", "types", "." ]
train
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/utils.py#L12-L23
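Editor's note: the behavior is narrower than "strings with booleans" might suggest — only the exact strings 'true' and 'false' (case-insensitively) are converted, and None is passed through untouched. A few illustrative assertions:

from payex.utils import normalize_value

assert normalize_value("True") is True
assert normalize_value("FALSE") is False
assert normalize_value("42") == "42"  # non-boolean strings are left alone
assert normalize_value(None) is None  # None short-circuits the lower() call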
PayEx/pypayex
payex/utils.py
normalize_dictionary_values
def normalize_dictionary_values(dictionary): """ Normalizes the values in a dictionary recursively. """ for key, val in dictionary.items(): if isinstance(val, dict): dictionary[key] = normalize_dictionary_values(val) elif isinstance(val, list): dictionary[key] = list(val) else: dictionary[key] = normalize_value(val) return dictionary
python
def normalize_dictionary_values(dictionary): """ Normalizes the values in a dictionary recursively. """ for key, val in dictionary.items(): if isinstance(val, dict): dictionary[key] = normalize_dictionary_values(val) elif isinstance(val, list): dictionary[key] = list(val) else: dictionary[key] = normalize_value(val) return dictionary
[ "def", "normalize_dictionary_values", "(", "dictionary", ")", ":", "for", "key", ",", "val", "in", "dictionary", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "dictionary", "[", "key", "]", "=", "normalize_dictionary_values", "(", "val", ")", "elif", "isinstance", "(", "val", ",", "list", ")", ":", "dictionary", "[", "key", "]", "=", "list", "(", "val", ")", "else", ":", "dictionary", "[", "key", "]", "=", "normalize_value", "(", "val", ")", "return", "dictionary" ]
Normalizes the values in a dictionary recursively.
[ "Normalizes", "the", "values", "in", "a", "dictionary", "recursively", "." ]
train
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/utils.py#L25-L38
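Editor's note: the record above is shown with iteritems() replaced by items() (the original was Python 2-only; items() works in both) and the docstring typo "recursivly" corrected. Also worth noting: nested dicts are normalized recursively, but list values are only shallow-copied — their elements are not normalized. A quick sketch:

from payex.utils import normalize_dictionary_values

data = {
    "paid": "true",
    "details": {"refunded": "false"},
    "flags": ["true", "false"],  # list elements stay as strings
}
result = normalize_dictionary_values(data)
print(result["paid"])                 # -> True
print(result["details"]["refunded"])  # -> False
print(result["flags"])                # -> ['true', 'false']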
belbio/bel
bel/utils.py
get_url
def get_url(url: str, params: dict = {}, timeout: float = 5.0, cache: bool = True): """Wrapper for requests.get(url) Args: url: url to retrieve params: query string parameters timeout: allow this much time for the request and time it out if over cache: Cache for up to a day unless this is false Returns: Requests Result obj or None if timed out """ try: if not cache: with requests_cache.disabled(): r = requests.get(url, params=params, timeout=timeout) else: r = requests.get(url, params=params, timeout=timeout) log.debug(f"Response headers {r.headers} From cache {r.from_cache}") return r except requests.exceptions.Timeout: log.warn(f"Timed out getting url in get_url: {url}") return None except Exception as e: log.warn(f"Error getting url: {url} error: {e}") return None
python
def get_url(url: str, params: dict = {}, timeout: float = 5.0, cache: bool = True): """Wrapper for requests.get(url) Args: url: url to retrieve params: query string parameters timeout: allow this much time for the request and time it out if over cache: Cache for up to a day unless this is false Returns: Requests Result obj or None if timed out """ try: if not cache: with requests_cache.disabled(): r = requests.get(url, params=params, timeout=timeout) else: r = requests.get(url, params=params, timeout=timeout) log.debug(f"Response headers {r.headers} From cache {r.from_cache}") return r except requests.exceptions.Timeout: log.warn(f"Timed out getting url in get_url: {url}") return None except Exception as e: log.warn(f"Error getting url: {url} error: {e}") return None
[ "def", "get_url", "(", "url", ":", "str", ",", "params", ":", "dict", "=", "{", "}", ",", "timeout", ":", "float", "=", "5.0", ",", "cache", ":", "bool", "=", "True", ")", ":", "try", ":", "if", "not", "cache", ":", "with", "requests_cache", ".", "disabled", "(", ")", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "timeout", "=", "timeout", ")", "else", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "timeout", "=", "timeout", ")", "log", ".", "debug", "(", "f\"Response headers {r.headers} From cache {r.from_cache}\"", ")", "return", "r", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "log", ".", "warn", "(", "f\"Timed out getting url in get_url: {url}\"", ")", "return", "None", "except", "Exception", "as", "e", ":", "log", ".", "warn", "(", "f\"Error getting url: {url} error: {e}\"", ")", "return", "None" ]
Wrapper for requests.get(url) Args: url: url to retrieve params: query string parameters timeout: allow this much time for the request and time it out if over cache: Cache for up to a day unless this is false Returns: Requests Result obj or None if timed out
[ "Wrapper", "for", "requests", ".", "get", "(", "url", ")" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L27-L56
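Editor's note: a hedged usage sketch for the wrapper above. It assumes requests and requests_cache are installed and that a cache has been installed elsewhere in the application (the debug line reads r.from_cache, which only exists on cached sessions); the endpoint is an arbitrary example.

import requests_cache

from bel.utils import get_url

requests_cache.install_cache("demo_cache")  # enables r.from_cache on responses

r = get_url("https://httpbin.org/get", params={"q": "bel"}, timeout=2.0)
if r is not None and r.status_code == 200:
    print(r.json()["args"])  # -> {'q': 'bel'}
else:
    print("request failed or timed out")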
belbio/bel
bel/utils.py
timespan
def timespan(start_time): """Return time in milliseconds from start_time""" timespan = datetime.datetime.now() - start_time timespan_ms = timespan.total_seconds() * 1000 return timespan_ms
python
def timespan(start_time): """Return time in milliseconds from start_time""" timespan = datetime.datetime.now() - start_time timespan_ms = timespan.total_seconds() * 1000 return timespan_ms
[ "def", "timespan", "(", "start_time", ")", ":", "timespan", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "start_time", "timespan_ms", "=", "timespan", ".", "total_seconds", "(", ")", "*", "1000", "return", "timespan_ms" ]
Return time in milliseconds from start_time
[ "Return", "time", "in", "milliseconds", "from", "start_time" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L59-L64
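Editor's note: a quick sketch of the intended call pattern for timespan() — capture datetime.datetime.now() before the work, pass it in afterward:

import datetime
import time

from bel.utils import timespan

start = datetime.datetime.now()
time.sleep(0.05)  # stand-in for real work
print(f"{timespan(start):.1f} ms")  # roughly 50 ms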