Dataset schema (one field per line in each record below; column type and observed size range):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: sequence, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: sequence, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
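As a minimal sketch of how a dump like this is typically loaded and inspected (assuming the Hugging Face datasets library; the dataset ID below is hypothetical, since none is given here):

from datasets import load_dataset

# Hypothetical dataset ID; the real identifier is not stated in this dump.
ds = load_dataset("someuser/bel-python-functions", split="train")
row = ds[0]
print(row["repository_name"], row["func_path_in_repository"], row["func_name"])
print(row["func_documentation_string"])
print(row["func_code_url"])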
belbio/bel
bel/utils.py
download_file
def download_file(url): """Download file""" response = requests.get(url, stream=True) fp = tempfile.NamedTemporaryFile() for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks fp.write(chunk) # log.info(f'Download file - tmp file: {fp.name} size: {fp.tell()}') return fp
python
def download_file(url): """Download file""" response = requests.get(url, stream=True) fp = tempfile.NamedTemporaryFile() for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks fp.write(chunk) # log.info(f'Download file - tmp file: {fp.name} size: {fp.tell()}') return fp
[ "def", "download_file", "(", "url", ")", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "fp", "=", "tempfile", ".", "NamedTemporaryFile", "(", ")", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "fp", ".", "write", "(", "chunk", ")", "# log.info(f'Download file - tmp file: {fp.name} size: {fp.tell()}')", "return", "fp" ]
Download file
[ "Download", "file" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L67-L77
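A brief usage sketch for download_file (the URL is a placeholder; requests and tempfile are imported in bel/utils.py, as the code implies):

# Hypothetical usage: the function returns an open NamedTemporaryFile positioned at end-of-file,
# so rewind before reading; the temporary file is removed when the handle is closed.
fp = download_file("https://example.com/resource.json")  # placeholder URL
fp.seek(0)
content = fp.read()
fp.close()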
belbio/bel
bel/utils.py
first_true
def first_true(iterable, default=False, pred=None): """Returns the first true value in the iterable. If no true value is found, returns *default* If *pred* is not None, returns the first item for which pred(item) is true. """ # first_true([a,b,c], x) --> a or b or c or x # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x return next(filter(pred, iterable), default)
python
def first_true(iterable, default=False, pred=None): """Returns the first true value in the iterable. If no true value is found, returns *default* If *pred* is not None, returns the first item for which pred(item) is true. """ # first_true([a,b,c], x) --> a or b or c or x # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x return next(filter(pred, iterable), default)
[ "def", "first_true", "(", "iterable", ",", "default", "=", "False", ",", "pred", "=", "None", ")", ":", "# first_true([a,b,c], x) --> a or b or c or x", "# first_true([a,b], x, f) --> a if f(a) else b if f(b) else x", "return", "next", "(", "filter", "(", "pred", ",", "iterable", ")", ",", "default", ")" ]
Returns the first true value in the iterable. If no true value is found, returns *default* If *pred* is not None, returns the first item for which pred(item) is true.
[ "Returns", "the", "first", "true", "value", "in", "the", "iterable", "." ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L90-L101
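Because first_true is self-contained, a few concrete calls illustrate the documented behavior:

# First truthy value, or the default when none is truthy
first_true([0, None, "", 42, 7])                       # -> 42
first_true([0, None, ""], default="fallback")          # -> "fallback"
# With pred: first item for which pred(item) is true
first_true([3, 8, 5, 12], default=None, pred=lambda n: n % 2 == 0)  # -> 8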
belbio/bel
bel/utils.py
_create_hash_from_doc
def _create_hash_from_doc(doc: Mapping[str, Any]) -> str: """Create hash Id from edge record Args: edge (Mapping[str, Any]): edge record to create hash from Returns: str: Murmur3 128 bit hash """ doc_string = json.dumps(doc, sort_keys=True) return _create_hash(doc_string)
python
def _create_hash_from_doc(doc: Mapping[str, Any]) -> str: """Create hash Id from edge record Args: edge (Mapping[str, Any]): edge record to create hash from Returns: str: Murmur3 128 bit hash """ doc_string = json.dumps(doc, sort_keys=True) return _create_hash(doc_string)
[ "def", "_create_hash_from_doc", "(", "doc", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "str", ":", "doc_string", "=", "json", ".", "dumps", "(", "doc", ",", "sort_keys", "=", "True", ")", "return", "_create_hash", "(", "doc_string", ")" ]
Create hash Id from edge record Args: edge (Mapping[str, Any]): edge record to create hash from Returns: str: Murmur3 128 bit hash
[ "Create", "hash", "Id", "from", "edge", "record" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L104-L115
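The _create_hash helper it calls is not part of this dump; per the docstring it returns a Murmur3 128-bit hash of the JSON-serialized document. A rough stand-in, assuming the mmh3 package (not the project's actual implementation):

import json
import mmh3  # assumption: any Murmur3 binding would do; bel's own helper is not shown here

def _create_hash(doc_string: str) -> str:
    # 128-bit Murmur3 hash of the serialized document, stringified for use as a key
    return str(mmh3.hash128(doc_string))

doc = {"relation": "increases", "subject": "p(HGNC:AKT1)", "object": "p(HGNC:EGF)"}
key = _create_hash(json.dumps(doc, sort_keys=True))  # sort_keys makes the hash key-order independent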
belbio/bel
bel/utils.py
Timer.elapsed
def elapsed(self): """ Return the current elapsed time since start If the `elapsed` property is called in the context manager scope, the elapsed time bewteen start and property access is returned. However, if it is accessed outside of the context manager scope, it returns the elapsed time bewteen entering and exiting the scope. The `elapsed` property can thus be accessed at different points within the context manager scope, to time different parts of the block. """ if self.end is None: # if elapsed is called in the context manager scope return (self() - self.start) * self.factor else: # if elapsed is called out of the context manager scope return (self.end - self.start) * self.factor
python
def elapsed(self): """ Return the current elapsed time since start If the `elapsed` property is called in the context manager scope, the elapsed time bewteen start and property access is returned. However, if it is accessed outside of the context manager scope, it returns the elapsed time bewteen entering and exiting the scope. The `elapsed` property can thus be accessed at different points within the context manager scope, to time different parts of the block. """ if self.end is None: # if elapsed is called in the context manager scope return (self() - self.start) * self.factor else: # if elapsed is called out of the context manager scope return (self.end - self.start) * self.factor
[ "def", "elapsed", "(", "self", ")", ":", "if", "self", ".", "end", "is", "None", ":", "# if elapsed is called in the context manager scope", "return", "(", "self", "(", ")", "-", "self", ".", "start", ")", "*", "self", ".", "factor", "else", ":", "# if elapsed is called out of the context manager scope", "return", "(", "self", ".", "end", "-", "self", ".", "start", ")", "*", "self", ".", "factor" ]
Return the current elapsed time since start If the `elapsed` property is called in the context manager scope, the elapsed time bewteen start and property access is returned. However, if it is accessed outside of the context manager scope, it returns the elapsed time bewteen entering and exiting the scope. The `elapsed` property can thus be accessed at different points within the context manager scope, to time different parts of the block.
[ "Return", "the", "current", "elapsed", "time", "since", "start", "If", "the", "elapsed", "property", "is", "called", "in", "the", "context", "manager", "scope", "the", "elapsed", "time", "bewteen", "start", "and", "property", "access", "is", "returned", ".", "However", "if", "it", "is", "accessed", "outside", "of", "the", "context", "manager", "scope", "it", "returns", "the", "elapsed", "time", "bewteen", "entering", "and", "exiting", "the", "scope", ".", "The", "elapsed", "property", "can", "thus", "be", "accessed", "at", "different", "points", "within", "the", "context", "manager", "scope", "to", "time", "different", "parts", "of", "the", "block", "." ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L245-L259
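elapsed belongs to a Timer context manager whose class body is not included in this dump; a usage sketch based only on the docstring:

import time
from bel.utils import Timer  # the class containing the elapsed property shown above

with Timer() as t:
    time.sleep(0.1)
    mid = t.elapsed    # inside the block: time from start to this access
    time.sleep(0.1)
total = t.elapsed      # after the block: time between entering and exiting the scope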
belbio/bel
bel/edge/pipeline.py
load_edges_into_db
def load_edges_into_db( nanopub_id: str, nanopub_url: str, edges: list = [], edges_coll_name: str = edges_coll_name, nodes_coll_name: str = nodes_coll_name, ): """Load edges into Edgestore""" start_time = datetime.datetime.now() # Clean out edges for nanopub in edgestore query = f""" FOR edge IN {edges_coll_name} FILTER edge.nanopub_id == "{nanopub_id}" REMOVE edge IN edges """ try: edgestore_db.aql.execute(query) except Exception as e: log.debug(f"Could not remove nanopub-related edges: {query} msg: {e}") end_time1 = datetime.datetime.now() delta_ms = f"{(end_time1 - start_time).total_seconds() * 1000:.1f}" log.info("Timing - Delete edges for nanopub", delta_ms=delta_ms) # Clean out errors for nanopub in pipeline_errors query = f""" FOR e IN pipeline_errors FILTER e.nanopub_id == "{nanopub_id}" REMOVE e IN pipeline_errors """ try: edgestore_db.aql.execute(query) except Exception as e: log.debug(f"Could not remove nanopub-related errors: {query} msg: {e}") end_time2 = datetime.datetime.now() delta_ms = f"{(end_time2 - end_time1).total_seconds() * 1000:.1f}" log.info("Timing - Delete pipeline errors for nanopub", delta_ms=delta_ms) # Collect edges and nodes to load into arangodb node_list, edge_list = [], [] for doc in edge_iterator(edges=edges): if doc[0] == "nodes": node_list.append(doc[1]) else: edge_list.append(doc[1]) end_time3 = datetime.datetime.now() delta_ms = f"{(end_time3 - end_time2).total_seconds() * 1000:.1f}" log.info("Timing - Collect edges and nodes", delta_ms=delta_ms) try: results = edgestore_db.collection(edges_coll_name).import_bulk( edge_list, on_duplicate="replace", halt_on_error=False ) except Exception as e: log.error(f"Could not load edges msg: {e}") end_time4 = datetime.datetime.now() delta_ms = f"{(end_time4 - end_time3).total_seconds() * 1000:.1f}" log.info("Timing - Load edges into edgestore", delta_ms=delta_ms) try: results = edgestore_db.collection(nodes_coll_name).import_bulk( node_list, on_duplicate="replace", halt_on_error=False ) except Exception as e: log.error(f"Could not load nodes msg: {e}") end_time5 = datetime.datetime.now() delta_ms = f"{(end_time5 - end_time4).total_seconds() * 1000:.1f}" log.info("Timing - Load nodes into edgestore", delta_ms=delta_ms)
python
def load_edges_into_db( nanopub_id: str, nanopub_url: str, edges: list = [], edges_coll_name: str = edges_coll_name, nodes_coll_name: str = nodes_coll_name, ): """Load edges into Edgestore""" start_time = datetime.datetime.now() # Clean out edges for nanopub in edgestore query = f""" FOR edge IN {edges_coll_name} FILTER edge.nanopub_id == "{nanopub_id}" REMOVE edge IN edges """ try: edgestore_db.aql.execute(query) except Exception as e: log.debug(f"Could not remove nanopub-related edges: {query} msg: {e}") end_time1 = datetime.datetime.now() delta_ms = f"{(end_time1 - start_time).total_seconds() * 1000:.1f}" log.info("Timing - Delete edges for nanopub", delta_ms=delta_ms) # Clean out errors for nanopub in pipeline_errors query = f""" FOR e IN pipeline_errors FILTER e.nanopub_id == "{nanopub_id}" REMOVE e IN pipeline_errors """ try: edgestore_db.aql.execute(query) except Exception as e: log.debug(f"Could not remove nanopub-related errors: {query} msg: {e}") end_time2 = datetime.datetime.now() delta_ms = f"{(end_time2 - end_time1).total_seconds() * 1000:.1f}" log.info("Timing - Delete pipeline errors for nanopub", delta_ms=delta_ms) # Collect edges and nodes to load into arangodb node_list, edge_list = [], [] for doc in edge_iterator(edges=edges): if doc[0] == "nodes": node_list.append(doc[1]) else: edge_list.append(doc[1]) end_time3 = datetime.datetime.now() delta_ms = f"{(end_time3 - end_time2).total_seconds() * 1000:.1f}" log.info("Timing - Collect edges and nodes", delta_ms=delta_ms) try: results = edgestore_db.collection(edges_coll_name).import_bulk( edge_list, on_duplicate="replace", halt_on_error=False ) except Exception as e: log.error(f"Could not load edges msg: {e}") end_time4 = datetime.datetime.now() delta_ms = f"{(end_time4 - end_time3).total_seconds() * 1000:.1f}" log.info("Timing - Load edges into edgestore", delta_ms=delta_ms) try: results = edgestore_db.collection(nodes_coll_name).import_bulk( node_list, on_duplicate="replace", halt_on_error=False ) except Exception as e: log.error(f"Could not load nodes msg: {e}") end_time5 = datetime.datetime.now() delta_ms = f"{(end_time5 - end_time4).total_seconds() * 1000:.1f}" log.info("Timing - Load nodes into edgestore", delta_ms=delta_ms)
[ "def", "load_edges_into_db", "(", "nanopub_id", ":", "str", ",", "nanopub_url", ":", "str", ",", "edges", ":", "list", "=", "[", "]", ",", "edges_coll_name", ":", "str", "=", "edges_coll_name", ",", "nodes_coll_name", ":", "str", "=", "nodes_coll_name", ",", ")", ":", "start_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Clean out edges for nanopub in edgestore", "query", "=", "f\"\"\"\n FOR edge IN {edges_coll_name}\n FILTER edge.nanopub_id == \"{nanopub_id}\"\n REMOVE edge IN edges\n \"\"\"", "try", ":", "edgestore_db", ".", "aql", ".", "execute", "(", "query", ")", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "f\"Could not remove nanopub-related edges: {query} msg: {e}\"", ")", "end_time1", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time1 - start_time).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - Delete edges for nanopub\"", ",", "delta_ms", "=", "delta_ms", ")", "# Clean out errors for nanopub in pipeline_errors", "query", "=", "f\"\"\"\n FOR e IN pipeline_errors\n FILTER e.nanopub_id == \"{nanopub_id}\"\n REMOVE e IN pipeline_errors\n \"\"\"", "try", ":", "edgestore_db", ".", "aql", ".", "execute", "(", "query", ")", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "f\"Could not remove nanopub-related errors: {query} msg: {e}\"", ")", "end_time2", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time2 - end_time1).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - Delete pipeline errors for nanopub\"", ",", "delta_ms", "=", "delta_ms", ")", "# Collect edges and nodes to load into arangodb", "node_list", ",", "edge_list", "=", "[", "]", ",", "[", "]", "for", "doc", "in", "edge_iterator", "(", "edges", "=", "edges", ")", ":", "if", "doc", "[", "0", "]", "==", "\"nodes\"", ":", "node_list", ".", "append", "(", "doc", "[", "1", "]", ")", "else", ":", "edge_list", ".", "append", "(", "doc", "[", "1", "]", ")", "end_time3", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time3 - end_time2).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - Collect edges and nodes\"", ",", "delta_ms", "=", "delta_ms", ")", "try", ":", "results", "=", "edgestore_db", ".", "collection", "(", "edges_coll_name", ")", ".", "import_bulk", "(", "edge_list", ",", "on_duplicate", "=", "\"replace\"", ",", "halt_on_error", "=", "False", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not load edges msg: {e}\"", ")", "end_time4", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time4 - end_time3).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - Load edges into edgestore\"", ",", "delta_ms", "=", "delta_ms", ")", "try", ":", "results", "=", "edgestore_db", ".", "collection", "(", "nodes_coll_name", ")", ".", "import_bulk", "(", "node_list", ",", "on_duplicate", "=", "\"replace\"", ",", "halt_on_error", "=", "False", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "f\"Could not load nodes msg: {e}\"", ")", "end_time5", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta_ms", "=", "f\"{(end_time5 - end_time4).total_seconds() * 1000:.1f}\"", "log", ".", "info", "(", "\"Timing - Load nodes into edgestore\"", ",", "delta_ms", "=", "delta_ms", ")" ]
Load edges into Edgestore
[ "Load", "edges", "into", "Edgestore" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/pipeline.py#L141-L215
belbio/bel
bel/edge/pipeline.py
edge_iterator
def edge_iterator(edges=[], edges_fn=None): """Yield documents from edge for loading into ArangoDB""" for edge in itertools.chain(edges, files.read_edges(edges_fn)): subj = copy.deepcopy(edge["edge"]["subject"]) subj_id = str(utils._create_hash_from_doc(subj)) subj["_key"] = subj_id obj = copy.deepcopy(edge["edge"]["object"]) obj_id = str(utils._create_hash_from_doc(obj)) obj["_key"] = obj_id relation = copy.deepcopy(edge["edge"]["relation"]) relation["_from"] = f"nodes/{subj_id}" relation["_to"] = f"nodes/{obj_id}" # Create edge _key relation_hash = copy.deepcopy(relation) relation_hash.pop("edge_dt", None) relation_hash.pop("edge_hash", None) relation_hash.pop("nanopub_dt", None) relation_hash.pop("nanopub_url", None) relation_hash.pop("subject_canon", None) relation_hash.pop("object_canon", None) relation_hash.pop("public_flag", None) relation_hash.pop("metadata", None) relation_id = str(utils._create_hash_from_doc(relation_hash)) relation["_key"] = relation_id if edge.get("nanopub_id", None): if "metadata" not in relation: relation["metadata"] = {} relation["metadata"]["nanopub_id"] = edge["nanopub_id"] yield ("nodes", subj) yield ("nodes", obj) yield ("edges", relation)
python
def edge_iterator(edges=[], edges_fn=None): """Yield documents from edge for loading into ArangoDB""" for edge in itertools.chain(edges, files.read_edges(edges_fn)): subj = copy.deepcopy(edge["edge"]["subject"]) subj_id = str(utils._create_hash_from_doc(subj)) subj["_key"] = subj_id obj = copy.deepcopy(edge["edge"]["object"]) obj_id = str(utils._create_hash_from_doc(obj)) obj["_key"] = obj_id relation = copy.deepcopy(edge["edge"]["relation"]) relation["_from"] = f"nodes/{subj_id}" relation["_to"] = f"nodes/{obj_id}" # Create edge _key relation_hash = copy.deepcopy(relation) relation_hash.pop("edge_dt", None) relation_hash.pop("edge_hash", None) relation_hash.pop("nanopub_dt", None) relation_hash.pop("nanopub_url", None) relation_hash.pop("subject_canon", None) relation_hash.pop("object_canon", None) relation_hash.pop("public_flag", None) relation_hash.pop("metadata", None) relation_id = str(utils._create_hash_from_doc(relation_hash)) relation["_key"] = relation_id if edge.get("nanopub_id", None): if "metadata" not in relation: relation["metadata"] = {} relation["metadata"]["nanopub_id"] = edge["nanopub_id"] yield ("nodes", subj) yield ("nodes", obj) yield ("edges", relation)
[ "def", "edge_iterator", "(", "edges", "=", "[", "]", ",", "edges_fn", "=", "None", ")", ":", "for", "edge", "in", "itertools", ".", "chain", "(", "edges", ",", "files", ".", "read_edges", "(", "edges_fn", ")", ")", ":", "subj", "=", "copy", ".", "deepcopy", "(", "edge", "[", "\"edge\"", "]", "[", "\"subject\"", "]", ")", "subj_id", "=", "str", "(", "utils", ".", "_create_hash_from_doc", "(", "subj", ")", ")", "subj", "[", "\"_key\"", "]", "=", "subj_id", "obj", "=", "copy", ".", "deepcopy", "(", "edge", "[", "\"edge\"", "]", "[", "\"object\"", "]", ")", "obj_id", "=", "str", "(", "utils", ".", "_create_hash_from_doc", "(", "obj", ")", ")", "obj", "[", "\"_key\"", "]", "=", "obj_id", "relation", "=", "copy", ".", "deepcopy", "(", "edge", "[", "\"edge\"", "]", "[", "\"relation\"", "]", ")", "relation", "[", "\"_from\"", "]", "=", "f\"nodes/{subj_id}\"", "relation", "[", "\"_to\"", "]", "=", "f\"nodes/{obj_id}\"", "# Create edge _key", "relation_hash", "=", "copy", ".", "deepcopy", "(", "relation", ")", "relation_hash", ".", "pop", "(", "\"edge_dt\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"edge_hash\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"nanopub_dt\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"nanopub_url\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"subject_canon\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"object_canon\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"public_flag\"", ",", "None", ")", "relation_hash", ".", "pop", "(", "\"metadata\"", ",", "None", ")", "relation_id", "=", "str", "(", "utils", ".", "_create_hash_from_doc", "(", "relation_hash", ")", ")", "relation", "[", "\"_key\"", "]", "=", "relation_id", "if", "edge", ".", "get", "(", "\"nanopub_id\"", ",", "None", ")", ":", "if", "\"metadata\"", "not", "in", "relation", ":", "relation", "[", "\"metadata\"", "]", "=", "{", "}", "relation", "[", "\"metadata\"", "]", "[", "\"nanopub_id\"", "]", "=", "edge", "[", "\"nanopub_id\"", "]", "yield", "(", "\"nodes\"", ",", "subj", ")", "yield", "(", "\"nodes\"", ",", "obj", ")", "yield", "(", "\"edges\"", ",", "relation", ")" ]
Yield documents from edge for loading into ArangoDB
[ "Yield", "documents", "from", "edge", "for", "loading", "into", "ArangoDB" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/pipeline.py#L218-L256
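Consuming edge_iterator follows the pattern already used in load_edges_into_db above; each yielded pair names the target collection and the document:

# Split the iterator output into node and edge documents for bulk import
# (`edges` here is a list of edge records, as passed by load_edges_into_db).
node_list, edge_list = [], []
for coll, doc in edge_iterator(edges=edges):
    if coll == "nodes":
        node_list.append(doc)
    else:
        edge_list.append(doc)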
belbio/bel
bel/nanopub/nanopubstore.py
update_nanopubstore_start_dt
def update_nanopubstore_start_dt(url: str, start_dt: str): """Add nanopubstore start_dt to belapi.state_mgmt collection Args: url: url of nanopubstore start_dt: datetime of last query against nanopubstore for new ID's """ hostname = urllib.parse.urlsplit(url)[1] start_dates_doc = state_mgmt.get(start_dates_doc_key) if not start_dates_doc: start_dates_doc = { "_key": start_dates_doc_key, "start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}], } state_mgmt.insert(start_dates_doc) else: for idx, start_date in enumerate(start_dates_doc["start_dates"]): if start_date["nanopubstore"] == hostname: start_dates_doc["start_dates"][idx]["start_dt"] = start_dt break else: start_dates_doc["start_dates"].append( {"nanopubstore": hostname, "start_dt": start_dt} ) state_mgmt.replace(start_dates_doc)
python
def update_nanopubstore_start_dt(url: str, start_dt: str): """Add nanopubstore start_dt to belapi.state_mgmt collection Args: url: url of nanopubstore start_dt: datetime of last query against nanopubstore for new ID's """ hostname = urllib.parse.urlsplit(url)[1] start_dates_doc = state_mgmt.get(start_dates_doc_key) if not start_dates_doc: start_dates_doc = { "_key": start_dates_doc_key, "start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}], } state_mgmt.insert(start_dates_doc) else: for idx, start_date in enumerate(start_dates_doc["start_dates"]): if start_date["nanopubstore"] == hostname: start_dates_doc["start_dates"][idx]["start_dt"] = start_dt break else: start_dates_doc["start_dates"].append( {"nanopubstore": hostname, "start_dt": start_dt} ) state_mgmt.replace(start_dates_doc)
[ "def", "update_nanopubstore_start_dt", "(", "url", ":", "str", ",", "start_dt", ":", "str", ")", ":", "hostname", "=", "urllib", ".", "parse", ".", "urlsplit", "(", "url", ")", "[", "1", "]", "start_dates_doc", "=", "state_mgmt", ".", "get", "(", "start_dates_doc_key", ")", "if", "not", "start_dates_doc", ":", "start_dates_doc", "=", "{", "\"_key\"", ":", "start_dates_doc_key", ",", "\"start_dates\"", ":", "[", "{", "\"nanopubstore\"", ":", "hostname", ",", "\"start_dt\"", ":", "start_dt", "}", "]", ",", "}", "state_mgmt", ".", "insert", "(", "start_dates_doc", ")", "else", ":", "for", "idx", ",", "start_date", "in", "enumerate", "(", "start_dates_doc", "[", "\"start_dates\"", "]", ")", ":", "if", "start_date", "[", "\"nanopubstore\"", "]", "==", "hostname", ":", "start_dates_doc", "[", "\"start_dates\"", "]", "[", "idx", "]", "[", "\"start_dt\"", "]", "=", "start_dt", "break", "else", ":", "start_dates_doc", "[", "\"start_dates\"", "]", ".", "append", "(", "{", "\"nanopubstore\"", ":", "hostname", ",", "\"start_dt\"", ":", "start_dt", "}", ")", "state_mgmt", ".", "replace", "(", "start_dates_doc", ")" ]
Add nanopubstore start_dt to belapi.state_mgmt collection Args: url: url of nanopubstore start_dt: datetime of last query against nanopubstore for new ID's
[ "Add", "nanopubstore", "start_dt", "to", "belapi", ".", "state_mgmt", "collection" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L23-L50
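For reference, the hostname comes from urlsplit's netloc, and the state_mgmt document the function maintains has roughly this shape (the URL and the value of start_dates_doc_key are illustrative assumptions):

import urllib.parse

url = "https://nanopubstore.example.com"              # placeholder URL
hostname = urllib.parse.urlsplit(url)[1]              # netloc, e.g. "nanopubstore.example.com"

start_dates_doc = {
    "_key": "start_dates",                            # assumed value of start_dates_doc_key
    "start_dates": [{"nanopubstore": hostname, "start_dt": "2019-01-01T00:00:00.000Z"}],
}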
belbio/bel
bel/nanopub/nanopubstore.py
get_nanopubstore_start_dt
def get_nanopubstore_start_dt(url: str): """Get last start_dt recorded for getting new nanopub ID's""" hostname = urllib.parse.urlsplit(url)[1] start_dates_doc = state_mgmt.get(start_dates_doc_key) if start_dates_doc and start_dates_doc.get("start_dates"): date = [ dt["start_dt"] for dt in start_dates_doc["start_dates"] if dt["nanopubstore"] == hostname ] log.info(f"Selected start_dt: {date} len: {len(date)}") if len(date) == 1: return date[0] return "1900-01-01T00:00:00.000Z"
python
def get_nanopubstore_start_dt(url: str): """Get last start_dt recorded for getting new nanopub ID's""" hostname = urllib.parse.urlsplit(url)[1] start_dates_doc = state_mgmt.get(start_dates_doc_key) if start_dates_doc and start_dates_doc.get("start_dates"): date = [ dt["start_dt"] for dt in start_dates_doc["start_dates"] if dt["nanopubstore"] == hostname ] log.info(f"Selected start_dt: {date} len: {len(date)}") if len(date) == 1: return date[0] return "1900-01-01T00:00:00.000Z"
[ "def", "get_nanopubstore_start_dt", "(", "url", ":", "str", ")", ":", "hostname", "=", "urllib", ".", "parse", ".", "urlsplit", "(", "url", ")", "[", "1", "]", "start_dates_doc", "=", "state_mgmt", ".", "get", "(", "start_dates_doc_key", ")", "if", "start_dates_doc", "and", "start_dates_doc", ".", "get", "(", "\"start_dates\"", ")", ":", "date", "=", "[", "dt", "[", "\"start_dt\"", "]", "for", "dt", "in", "start_dates_doc", "[", "\"start_dates\"", "]", "if", "dt", "[", "\"nanopubstore\"", "]", "==", "hostname", "]", "log", ".", "info", "(", "f\"Selected start_dt: {date} len: {len(date)}\"", ")", "if", "len", "(", "date", ")", "==", "1", ":", "return", "date", "[", "0", "]", "return", "\"1900-01-01T00:00:00.000Z\"" ]
Get last start_dt recorded for getting new nanopub ID's
[ "Get", "last", "start_dt", "recorded", "for", "getting", "new", "nanopub", "ID", "s" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L53-L69
belbio/bel
bel/nanopub/nanopubstore.py
get_nanopub_urls
def get_nanopub_urls(ns_root_url: str = None, start_dt: str = None) -> dict: """Get modified and deleted nanopub urls Limited by last datetime retrieved (start_dt). Modified includes new and updated nanopubs Returns: dict: {'modified': [], 'deleted': []} """ if not ns_root_url: ns_root_url = config["bel_api"]["servers"]["nanopubstore"] url = f"{ns_root_url}/nanopubs/timed" if not start_dt: start_dt = get_nanopubstore_start_dt(ns_root_url) params = {"startTime": start_dt, "published": True} # TODO - this is coming back without a status code in some cases - why? r = bel.utils.get_url(url, params=params, cache=False) if r and r.status_code == 200: data = r.json() new_start_dt = data["queryTime"] update_nanopubstore_start_dt(ns_root_url, new_start_dt) nanopub_urls = {"modified": [], "deleted": []} # Deleted nanopubs for nid in data["deleteddata"]: nanopub_urls["deleted"].append(f"{ns_root_url}/nanopubs/{nid}") # Modified nanopubs for nid in data["data"]: nanopub_urls["modified"].append(f"{ns_root_url}/nanopubs/{nid}") return nanopub_urls else: log.error( f"Bad request to Nanopubstore", url=url, status=r.status_code, type="api_request", ) return {}
python
def get_nanopub_urls(ns_root_url: str = None, start_dt: str = None) -> dict: """Get modified and deleted nanopub urls Limited by last datetime retrieved (start_dt). Modified includes new and updated nanopubs Returns: dict: {'modified': [], 'deleted': []} """ if not ns_root_url: ns_root_url = config["bel_api"]["servers"]["nanopubstore"] url = f"{ns_root_url}/nanopubs/timed" if not start_dt: start_dt = get_nanopubstore_start_dt(ns_root_url) params = {"startTime": start_dt, "published": True} # TODO - this is coming back without a status code in some cases - why? r = bel.utils.get_url(url, params=params, cache=False) if r and r.status_code == 200: data = r.json() new_start_dt = data["queryTime"] update_nanopubstore_start_dt(ns_root_url, new_start_dt) nanopub_urls = {"modified": [], "deleted": []} # Deleted nanopubs for nid in data["deleteddata"]: nanopub_urls["deleted"].append(f"{ns_root_url}/nanopubs/{nid}") # Modified nanopubs for nid in data["data"]: nanopub_urls["modified"].append(f"{ns_root_url}/nanopubs/{nid}") return nanopub_urls else: log.error( f"Bad request to Nanopubstore", url=url, status=r.status_code, type="api_request", ) return {}
[ "def", "get_nanopub_urls", "(", "ns_root_url", ":", "str", "=", "None", ",", "start_dt", ":", "str", "=", "None", ")", "->", "dict", ":", "if", "not", "ns_root_url", ":", "ns_root_url", "=", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", "[", "\"nanopubstore\"", "]", "url", "=", "f\"{ns_root_url}/nanopubs/timed\"", "if", "not", "start_dt", ":", "start_dt", "=", "get_nanopubstore_start_dt", "(", "ns_root_url", ")", "params", "=", "{", "\"startTime\"", ":", "start_dt", ",", "\"published\"", ":", "True", "}", "# TODO - this is coming back without a status code in some cases - why?", "r", "=", "bel", ".", "utils", ".", "get_url", "(", "url", ",", "params", "=", "params", ",", "cache", "=", "False", ")", "if", "r", "and", "r", ".", "status_code", "==", "200", ":", "data", "=", "r", ".", "json", "(", ")", "new_start_dt", "=", "data", "[", "\"queryTime\"", "]", "update_nanopubstore_start_dt", "(", "ns_root_url", ",", "new_start_dt", ")", "nanopub_urls", "=", "{", "\"modified\"", ":", "[", "]", ",", "\"deleted\"", ":", "[", "]", "}", "# Deleted nanopubs", "for", "nid", "in", "data", "[", "\"deleteddata\"", "]", ":", "nanopub_urls", "[", "\"deleted\"", "]", ".", "append", "(", "f\"{ns_root_url}/nanopubs/{nid}\"", ")", "# Modified nanopubs", "for", "nid", "in", "data", "[", "\"data\"", "]", ":", "nanopub_urls", "[", "\"modified\"", "]", ".", "append", "(", "f\"{ns_root_url}/nanopubs/{nid}\"", ")", "return", "nanopub_urls", "else", ":", "log", ".", "error", "(", "f\"Bad request to Nanopubstore\"", ",", "url", "=", "url", ",", "status", "=", "r", ".", "status_code", ",", "type", "=", "\"api_request\"", ",", ")", "return", "{", "}" ]
Get modified and deleted nanopub urls Limited by last datetime retrieved (start_dt). Modified includes new and updated nanopubs Returns: dict: {'modified': [], 'deleted': []}
[ "Get", "modified", "and", "deleted", "nanopub", "urls" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L72-L115
belbio/bel
bel/nanopub/nanopubstore.py
get_nanopub
def get_nanopub(url): """Get Nanopub from nanopubstore given url""" r = bel.utils.get_url(url, cache=False) if r and r.json(): return r.json() else: return {}
python
def get_nanopub(url): """Get Nanopub from nanopubstore given url""" r = bel.utils.get_url(url, cache=False) if r and r.json(): return r.json() else: return {}
[ "def", "get_nanopub", "(", "url", ")", ":", "r", "=", "bel", ".", "utils", ".", "get_url", "(", "url", ",", "cache", "=", "False", ")", "if", "r", "and", "r", ".", "json", "(", ")", ":", "return", "r", ".", "json", "(", ")", "else", ":", "return", "{", "}" ]
Get Nanopub from nanopubstore given url
[ "Get", "Nanopub", "from", "nanopubstore", "given", "url" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubstore.py#L118-L125
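get_nanopub_urls and get_nanopub combine into a simple polling loop (a sketch; process() is a placeholder for downstream handling, not a bel function):

# Fetch URLs of nanopubs modified since the last recorded start_dt, then pull each one.
urls = get_nanopub_urls()                    # {'modified': [...], 'deleted': [...]} or {} on error
for url in urls.get("modified", []):
    nanopub = get_nanopub(url)               # {} if the request returned nothing usable
    process(nanopub)                         # placeholder for downstream handling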
belbio/bel
bel/scripts.py
pipeline
def pipeline( ctx, input_fn, db_save, db_delete, output_fn, rules, species, namespace_targets, version, api, config_fn, ): """BEL Pipeline - BEL Nanopubs into BEL Edges This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested), canonicalizing, and then computing the BEL Edges based on the given rule_set. \b input_fn: If input fn has *.gz, will read as a gzip file If input fn has *.jsonl*, will parsed as a JSONLines file IF input fn has *.json*, will be parsed as a JSON file If input fn has *.yaml* or *.yml*, will be parsed as a YAML file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file If output fn has *.jgf, will be written as JSON Graph Formatted file """ if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) if rules: rules = rules.replace(" ", "").split(",") namespace_targets = utils.first_true( [namespace_targets, config["bel"]["lang"].get("canonical")], None ) rules = utils.first_true( [rules, config["bel"]["nanopub"].get("pipeline_edge_rules", False)], False ) api = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) version = utils.first_true( [version, config["bel"]["lang"].get("default_bel_version", None)], None ) n = bnn.Nanopub() try: json_flag, jsonl_flag, yaml_flag, jgf_flag = False, False, False, False all_bel_edges = [] fout = None if db_save or db_delete: if db_delete: arango_client = bel.db.arangodb.get_client() bel.db.arangodb.delete_database(arango_client, "edgestore") else: arango_client = bel.db.arangodb.get_client() edgestore_handle = bel.db.arangodb.get_edgestore_handle(arango_client) elif re.search("ya?ml", output_fn): yaml_flag = True elif "jsonl" in output_fn: jsonl_flag = True elif "json" in output_fn: json_flag = True elif "jgf" in output_fn: jgf_flag = True if db_save: pass elif "gz" in output_fn: fout = gzip.open(output_fn, "wt") else: fout = open(output_fn, "wt") nanopub_cnt = 0 with timy.Timer() as timer: for np in bnf.read_nanopubs(input_fn): # print('Nanopub:\n', json.dumps(np, indent=4)) nanopub_cnt += 1 if nanopub_cnt % 100 == 0: timer.track(f"{nanopub_cnt} Nanopubs processed into Edges") bel_edges = n.bel_edges( np, namespace_targets=namespace_targets, orthologize_target=species, rules=rules, ) if db_save: bel.edge.edges.load_edges_into_db(edgestore_handle, edges=bel_edges) elif jsonl_flag: fout.write("{}\n".format(json.dumps(bel_edges))) else: all_bel_edges.extend(bel_edges) if db_save: pass elif yaml_flag: fout.write("{}\n".format(yaml.dumps(all_bel_edges))) elif json_flag: fout.write("{}\n".format(json.dumps(all_bel_edges))) elif jgf_flag: bnf.edges_to_jgf(output_fn, all_bel_edges) finally: if fout: fout.close()
python
def pipeline( ctx, input_fn, db_save, db_delete, output_fn, rules, species, namespace_targets, version, api, config_fn, ): """BEL Pipeline - BEL Nanopubs into BEL Edges This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested), canonicalizing, and then computing the BEL Edges based on the given rule_set. \b input_fn: If input fn has *.gz, will read as a gzip file If input fn has *.jsonl*, will parsed as a JSONLines file IF input fn has *.json*, will be parsed as a JSON file If input fn has *.yaml* or *.yml*, will be parsed as a YAML file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file If output fn has *.jgf, will be written as JSON Graph Formatted file """ if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) if rules: rules = rules.replace(" ", "").split(",") namespace_targets = utils.first_true( [namespace_targets, config["bel"]["lang"].get("canonical")], None ) rules = utils.first_true( [rules, config["bel"]["nanopub"].get("pipeline_edge_rules", False)], False ) api = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) version = utils.first_true( [version, config["bel"]["lang"].get("default_bel_version", None)], None ) n = bnn.Nanopub() try: json_flag, jsonl_flag, yaml_flag, jgf_flag = False, False, False, False all_bel_edges = [] fout = None if db_save or db_delete: if db_delete: arango_client = bel.db.arangodb.get_client() bel.db.arangodb.delete_database(arango_client, "edgestore") else: arango_client = bel.db.arangodb.get_client() edgestore_handle = bel.db.arangodb.get_edgestore_handle(arango_client) elif re.search("ya?ml", output_fn): yaml_flag = True elif "jsonl" in output_fn: jsonl_flag = True elif "json" in output_fn: json_flag = True elif "jgf" in output_fn: jgf_flag = True if db_save: pass elif "gz" in output_fn: fout = gzip.open(output_fn, "wt") else: fout = open(output_fn, "wt") nanopub_cnt = 0 with timy.Timer() as timer: for np in bnf.read_nanopubs(input_fn): # print('Nanopub:\n', json.dumps(np, indent=4)) nanopub_cnt += 1 if nanopub_cnt % 100 == 0: timer.track(f"{nanopub_cnt} Nanopubs processed into Edges") bel_edges = n.bel_edges( np, namespace_targets=namespace_targets, orthologize_target=species, rules=rules, ) if db_save: bel.edge.edges.load_edges_into_db(edgestore_handle, edges=bel_edges) elif jsonl_flag: fout.write("{}\n".format(json.dumps(bel_edges))) else: all_bel_edges.extend(bel_edges) if db_save: pass elif yaml_flag: fout.write("{}\n".format(yaml.dumps(all_bel_edges))) elif json_flag: fout.write("{}\n".format(json.dumps(all_bel_edges))) elif jgf_flag: bnf.edges_to_jgf(output_fn, all_bel_edges) finally: if fout: fout.close()
[ "def", "pipeline", "(", "ctx", ",", "input_fn", ",", "db_save", ",", "db_delete", ",", "output_fn", ",", "rules", ",", "species", ",", "namespace_targets", ",", "version", ",", "api", ",", "config_fn", ",", ")", ":", "if", "config_fn", ":", "config", "=", "bel", ".", "db", ".", "Config", ".", "merge_config", "(", "ctx", ".", "config", ",", "override_config_fn", "=", "config_fn", ")", "else", ":", "config", "=", "ctx", ".", "config", "# Configuration - will return the first truthy result in list else the default option", "if", "namespace_targets", ":", "namespace_targets", "=", "json", ".", "loads", "(", "namespace_targets", ")", "if", "rules", ":", "rules", "=", "rules", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "namespace_targets", "=", "utils", ".", "first_true", "(", "[", "namespace_targets", ",", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", ".", "get", "(", "\"canonical\"", ")", "]", ",", "None", ")", "rules", "=", "utils", ".", "first_true", "(", "[", "rules", ",", "config", "[", "\"bel\"", "]", "[", "\"nanopub\"", "]", ".", "get", "(", "\"pipeline_edge_rules\"", ",", "False", ")", "]", ",", "False", ")", "api", "=", "utils", ".", "first_true", "(", "[", "api", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", ".", "get", "(", "\"api_url\"", ",", "None", ")", "]", ",", "None", ")", "version", "=", "utils", ".", "first_true", "(", "[", "version", ",", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", ".", "get", "(", "\"default_bel_version\"", ",", "None", ")", "]", ",", "None", ")", "n", "=", "bnn", ".", "Nanopub", "(", ")", "try", ":", "json_flag", ",", "jsonl_flag", ",", "yaml_flag", ",", "jgf_flag", "=", "False", ",", "False", ",", "False", ",", "False", "all_bel_edges", "=", "[", "]", "fout", "=", "None", "if", "db_save", "or", "db_delete", ":", "if", "db_delete", ":", "arango_client", "=", "bel", ".", "db", ".", "arangodb", ".", "get_client", "(", ")", "bel", ".", "db", ".", "arangodb", ".", "delete_database", "(", "arango_client", ",", "\"edgestore\"", ")", "else", ":", "arango_client", "=", "bel", ".", "db", ".", "arangodb", ".", "get_client", "(", ")", "edgestore_handle", "=", "bel", ".", "db", ".", "arangodb", ".", "get_edgestore_handle", "(", "arango_client", ")", "elif", "re", ".", "search", "(", "\"ya?ml\"", ",", "output_fn", ")", ":", "yaml_flag", "=", "True", "elif", "\"jsonl\"", "in", "output_fn", ":", "jsonl_flag", "=", "True", "elif", "\"json\"", "in", "output_fn", ":", "json_flag", "=", "True", "elif", "\"jgf\"", "in", "output_fn", ":", "jgf_flag", "=", "True", "if", "db_save", ":", "pass", "elif", "\"gz\"", "in", "output_fn", ":", "fout", "=", "gzip", ".", "open", "(", "output_fn", ",", "\"wt\"", ")", "else", ":", "fout", "=", "open", "(", "output_fn", ",", "\"wt\"", ")", "nanopub_cnt", "=", "0", "with", "timy", ".", "Timer", "(", ")", "as", "timer", ":", "for", "np", "in", "bnf", ".", "read_nanopubs", "(", "input_fn", ")", ":", "# print('Nanopub:\\n', json.dumps(np, indent=4))", "nanopub_cnt", "+=", "1", "if", "nanopub_cnt", "%", "100", "==", "0", ":", "timer", ".", "track", "(", "f\"{nanopub_cnt} Nanopubs processed into Edges\"", ")", "bel_edges", "=", "n", ".", "bel_edges", "(", "np", ",", "namespace_targets", "=", "namespace_targets", ",", "orthologize_target", "=", "species", ",", "rules", "=", "rules", ",", ")", "if", "db_save", ":", "bel", ".", "edge", ".", "edges", ".", "load_edges_into_db", "(", "edgestore_handle", ",", "edges", "=", "bel_edges", ")", "elif", "jsonl_flag", ":", "fout", ".", 
"write", "(", "\"{}\\n\"", ".", "format", "(", "json", ".", "dumps", "(", "bel_edges", ")", ")", ")", "else", ":", "all_bel_edges", ".", "extend", "(", "bel_edges", ")", "if", "db_save", ":", "pass", "elif", "yaml_flag", ":", "fout", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "yaml", ".", "dumps", "(", "all_bel_edges", ")", ")", ")", "elif", "json_flag", ":", "fout", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "json", ".", "dumps", "(", "all_bel_edges", ")", ")", ")", "elif", "jgf_flag", ":", "bnf", ".", "edges_to_jgf", "(", "output_fn", ",", "all_bel_edges", ")", "finally", ":", "if", "fout", ":", "fout", ".", "close", "(", ")" ]
BEL Pipeline - BEL Nanopubs into BEL Edges This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested), canonicalizing, and then computing the BEL Edges based on the given rule_set. \b input_fn: If input fn has *.gz, will read as a gzip file If input fn has *.jsonl*, will parsed as a JSONLines file IF input fn has *.json*, will be parsed as a JSON file If input fn has *.yaml* or *.yml*, will be parsed as a YAML file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file If output fn has *.jgf, will be written as JSON Graph Formatted file
[ "BEL", "Pipeline", "-", "BEL", "Nanopubs", "into", "BEL", "Edges" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L86-L210
belbio/bel
bel/scripts.py
nanopub_validate
def nanopub_validate(ctx, input_fn, output_fn, api, config_fn): """Validate nanopubs""" if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config api = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) print(f"Running validate nanopubs using {api}")
python
def nanopub_validate(ctx, input_fn, output_fn, api, config_fn): """Validate nanopubs""" if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config api = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) print(f"Running validate nanopubs using {api}")
[ "def", "nanopub_validate", "(", "ctx", ",", "input_fn", ",", "output_fn", ",", "api", ",", "config_fn", ")", ":", "if", "config_fn", ":", "config", "=", "bel", ".", "db", ".", "Config", ".", "merge_config", "(", "ctx", ".", "config", ",", "override_config_fn", "=", "config_fn", ")", "else", ":", "config", "=", "ctx", ".", "config", "api", "=", "utils", ".", "first_true", "(", "[", "api", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", ".", "get", "(", "\"api_url\"", ",", "None", ")", "]", ",", "None", ")", "print", "(", "f\"Running validate nanopubs using {api}\"", ")" ]
Validate nanopubs
[ "Validate", "nanopubs" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L227-L239
belbio/bel
bel/scripts.py
convert_belscript
def convert_belscript(ctx, input_fn, output_fn): """Convert belscript to nanopubs_bel format This will convert the OpenBEL BELScript file format to nanopub_bel-1.0.0 format. \b input_fn: If input fn has *.gz, will read as a gzip file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file """ try: ( out_fh, yaml_flag, jsonl_flag, json_flag, ) = bel.nanopub.files.create_nanopubs_fh(output_fn) if yaml_flag or json_flag: docs = [] # input file if re.search("gz$", input_fn): f = gzip.open(input_fn, "rt") else: f = open(input_fn, "rt") # process belscript for doc in bel.nanopub.belscripts.parse_belscript(f): if yaml_flag or json_flag: docs.append(doc) elif jsonl_flag: out_fh.write("{}\n".format(json.dumps(doc))) if yaml_flag: yaml.dump(docs, out_fh) elif json_flag: json.dump(docs, out_fh, indent=4) finally: f.close() out_fh.close()
python
def convert_belscript(ctx, input_fn, output_fn): """Convert belscript to nanopubs_bel format This will convert the OpenBEL BELScript file format to nanopub_bel-1.0.0 format. \b input_fn: If input fn has *.gz, will read as a gzip file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file """ try: ( out_fh, yaml_flag, jsonl_flag, json_flag, ) = bel.nanopub.files.create_nanopubs_fh(output_fn) if yaml_flag or json_flag: docs = [] # input file if re.search("gz$", input_fn): f = gzip.open(input_fn, "rt") else: f = open(input_fn, "rt") # process belscript for doc in bel.nanopub.belscripts.parse_belscript(f): if yaml_flag or json_flag: docs.append(doc) elif jsonl_flag: out_fh.write("{}\n".format(json.dumps(doc))) if yaml_flag: yaml.dump(docs, out_fh) elif json_flag: json.dump(docs, out_fh, indent=4) finally: f.close() out_fh.close()
[ "def", "convert_belscript", "(", "ctx", ",", "input_fn", ",", "output_fn", ")", ":", "try", ":", "(", "out_fh", ",", "yaml_flag", ",", "jsonl_flag", ",", "json_flag", ",", ")", "=", "bel", ".", "nanopub", ".", "files", ".", "create_nanopubs_fh", "(", "output_fn", ")", "if", "yaml_flag", "or", "json_flag", ":", "docs", "=", "[", "]", "# input file", "if", "re", ".", "search", "(", "\"gz$\"", ",", "input_fn", ")", ":", "f", "=", "gzip", ".", "open", "(", "input_fn", ",", "\"rt\"", ")", "else", ":", "f", "=", "open", "(", "input_fn", ",", "\"rt\"", ")", "# process belscript", "for", "doc", "in", "bel", ".", "nanopub", ".", "belscripts", ".", "parse_belscript", "(", "f", ")", ":", "if", "yaml_flag", "or", "json_flag", ":", "docs", ".", "append", "(", "doc", ")", "elif", "jsonl_flag", ":", "out_fh", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "json", ".", "dumps", "(", "doc", ")", ")", ")", "if", "yaml_flag", ":", "yaml", ".", "dump", "(", "docs", ",", "out_fh", ")", "elif", "json_flag", ":", "json", ".", "dump", "(", "docs", ",", "out_fh", ",", "indent", "=", "4", ")", "finally", ":", "f", ".", "close", "(", ")", "out_fh", ".", "close", "(", ")" ]
Convert belscript to nanopubs_bel format This will convert the OpenBEL BELScript file format to nanopub_bel-1.0.0 format. \b input_fn: If input fn has *.gz, will read as a gzip file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file
[ "Convert", "belscript", "to", "nanopubs_bel", "format" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L252-L302
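The same helpers can be driven from Python rather than the CLI; a sketch based on the body of convert_belscript above (file names are placeholders):

import json
import bel.nanopub.files
import bel.nanopub.belscripts

# create_nanopubs_fh returns the output handle plus format flags, as unpacked above.
out_fh, yaml_flag, jsonl_flag, json_flag = bel.nanopub.files.create_nanopubs_fh("nanopubs.jsonl")  # placeholder name
with open("statements.bel", "rt") as f:  # placeholder name
    for doc in bel.nanopub.belscripts.parse_belscript(f):
        if jsonl_flag:
            out_fh.write("{}\n".format(json.dumps(doc)))
out_fh.close()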
belbio/bel
bel/scripts.py
reformat
def reformat(ctx, input_fn, output_fn): """Reformat between JSON, YAML, JSONLines formats \b input_fn: If input fn has *.gz, will read as a gzip file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file """ try: ( out_fh, yaml_flag, jsonl_flag, json_flag, ) = bel.nanopub.files.create_nanopubs_fh(output_fn) if yaml_flag or json_flag: docs = [] # input file if re.search("gz$", input_fn): f = gzip.open(input_fn, "rt") else: f = open(input_fn, "rt") for np in bnf.read_nanopubs(input_fn): if yaml_flag or json_flag: docs.append(np) elif jsonl_flag: out_fh.write("{}\n".format(json.dumps(np))) if yaml_flag: yaml.dump(docs, out_fh) elif json_flag: json.dump(docs, out_fh, indent=4) finally: f.close() out_fh.close()
python
def reformat(ctx, input_fn, output_fn): """Reformat between JSON, YAML, JSONLines formats \b input_fn: If input fn has *.gz, will read as a gzip file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file """ try: ( out_fh, yaml_flag, jsonl_flag, json_flag, ) = bel.nanopub.files.create_nanopubs_fh(output_fn) if yaml_flag or json_flag: docs = [] # input file if re.search("gz$", input_fn): f = gzip.open(input_fn, "rt") else: f = open(input_fn, "rt") for np in bnf.read_nanopubs(input_fn): if yaml_flag or json_flag: docs.append(np) elif jsonl_flag: out_fh.write("{}\n".format(json.dumps(np))) if yaml_flag: yaml.dump(docs, out_fh) elif json_flag: json.dump(docs, out_fh, indent=4) finally: f.close() out_fh.close()
[ "def", "reformat", "(", "ctx", ",", "input_fn", ",", "output_fn", ")", ":", "try", ":", "(", "out_fh", ",", "yaml_flag", ",", "jsonl_flag", ",", "json_flag", ",", ")", "=", "bel", ".", "nanopub", ".", "files", ".", "create_nanopubs_fh", "(", "output_fn", ")", "if", "yaml_flag", "or", "json_flag", ":", "docs", "=", "[", "]", "# input file", "if", "re", ".", "search", "(", "\"gz$\"", ",", "input_fn", ")", ":", "f", "=", "gzip", ".", "open", "(", "input_fn", ",", "\"rt\"", ")", "else", ":", "f", "=", "open", "(", "input_fn", ",", "\"rt\"", ")", "for", "np", "in", "bnf", ".", "read_nanopubs", "(", "input_fn", ")", ":", "if", "yaml_flag", "or", "json_flag", ":", "docs", ".", "append", "(", "np", ")", "elif", "jsonl_flag", ":", "out_fh", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "json", ".", "dumps", "(", "np", ")", ")", ")", "if", "yaml_flag", ":", "yaml", ".", "dump", "(", "docs", ",", "out_fh", ")", "elif", "json_flag", ":", "json", ".", "dump", "(", "docs", ",", "out_fh", ",", "indent", "=", "4", ")", "finally", ":", "f", ".", "close", "(", ")", "out_fh", ".", "close", "(", ")" ]
Reformat between JSON, YAML, JSONLines formats \b input_fn: If input fn has *.gz, will read as a gzip file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file
[ "Reformat", "between", "JSON", "YAML", "JSONLines", "formats" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L309-L355
belbio/bel
bel/scripts.py
nanopub_stats
def nanopub_stats(ctx, input_fn): """Collect statistics on nanopub file input_fn can be json, jsonl or yaml and additionally gzipped """ counts = { "nanopubs": 0, "assertions": {"total": 0, "subject_only": 0, "nested": 0, "relations": {}}, } for np in bnf.read_nanopubs(input_fn): if "nanopub" in np: counts["nanopubs"] += 1 counts["assertions"]["total"] += len(np["nanopub"]["assertions"]) for assertion in np["nanopub"]["assertions"]: if assertion["relation"] is None: counts["assertions"]["subject_only"] += 1 else: if re.match("\s*\(", assertion["object"]): counts["assertions"]["nested"] += 1 if ( not assertion.get("relation") in counts["assertions"]["relations"] ): counts["assertions"]["relations"][assertion.get("relation")] = 1 else: counts["assertions"]["relations"][ assertion.get("relation") ] += 1 counts["assertions"]["relations"] = sorted(counts["assertions"]["relations"]) print("DumpVar:\n", json.dumps(counts, indent=4))
python
def nanopub_stats(ctx, input_fn): """Collect statistics on nanopub file input_fn can be json, jsonl or yaml and additionally gzipped """ counts = { "nanopubs": 0, "assertions": {"total": 0, "subject_only": 0, "nested": 0, "relations": {}}, } for np in bnf.read_nanopubs(input_fn): if "nanopub" in np: counts["nanopubs"] += 1 counts["assertions"]["total"] += len(np["nanopub"]["assertions"]) for assertion in np["nanopub"]["assertions"]: if assertion["relation"] is None: counts["assertions"]["subject_only"] += 1 else: if re.match("\s*\(", assertion["object"]): counts["assertions"]["nested"] += 1 if ( not assertion.get("relation") in counts["assertions"]["relations"] ): counts["assertions"]["relations"][assertion.get("relation")] = 1 else: counts["assertions"]["relations"][ assertion.get("relation") ] += 1 counts["assertions"]["relations"] = sorted(counts["assertions"]["relations"]) print("DumpVar:\n", json.dumps(counts, indent=4))
[ "def", "nanopub_stats", "(", "ctx", ",", "input_fn", ")", ":", "counts", "=", "{", "\"nanopubs\"", ":", "0", ",", "\"assertions\"", ":", "{", "\"total\"", ":", "0", ",", "\"subject_only\"", ":", "0", ",", "\"nested\"", ":", "0", ",", "\"relations\"", ":", "{", "}", "}", ",", "}", "for", "np", "in", "bnf", ".", "read_nanopubs", "(", "input_fn", ")", ":", "if", "\"nanopub\"", "in", "np", ":", "counts", "[", "\"nanopubs\"", "]", "+=", "1", "counts", "[", "\"assertions\"", "]", "[", "\"total\"", "]", "+=", "len", "(", "np", "[", "\"nanopub\"", "]", "[", "\"assertions\"", "]", ")", "for", "assertion", "in", "np", "[", "\"nanopub\"", "]", "[", "\"assertions\"", "]", ":", "if", "assertion", "[", "\"relation\"", "]", "is", "None", ":", "counts", "[", "\"assertions\"", "]", "[", "\"subject_only\"", "]", "+=", "1", "else", ":", "if", "re", ".", "match", "(", "\"\\s*\\(\"", ",", "assertion", "[", "\"object\"", "]", ")", ":", "counts", "[", "\"assertions\"", "]", "[", "\"nested\"", "]", "+=", "1", "if", "(", "not", "assertion", ".", "get", "(", "\"relation\"", ")", "in", "counts", "[", "\"assertions\"", "]", "[", "\"relations\"", "]", ")", ":", "counts", "[", "\"assertions\"", "]", "[", "\"relations\"", "]", "[", "assertion", ".", "get", "(", "\"relation\"", ")", "]", "=", "1", "else", ":", "counts", "[", "\"assertions\"", "]", "[", "\"relations\"", "]", "[", "assertion", ".", "get", "(", "\"relation\"", ")", "]", "+=", "1", "counts", "[", "\"assertions\"", "]", "[", "\"relations\"", "]", "=", "sorted", "(", "counts", "[", "\"assertions\"", "]", "[", "\"relations\"", "]", ")", "print", "(", "\"DumpVar:\\n\"", ",", "json", ".", "dumps", "(", "counts", ",", "indent", "=", "4", ")", ")" ]
Collect statistics on nanopub file input_fn can be json, jsonl or yaml and additionally gzipped
[ "Collect", "statistics", "on", "nanopub", "file" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L361-L395
belbio/bel
bel/scripts.py
canonicalize
def canonicalize(ctx, statement, namespace_targets, version, api, config_fn): """Canonicalize statement Target namespaces can be provided in the following manner: bel stmt canonicalize "<BELStmt>" --namespace_targets '{"HGNC": ["EG", "SP"], "CHEMBL": ["CHEBI"]}' the value of target_namespaces must be JSON and embedded in single quotes reserving double quotes for the dictionary elements """ if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) namespace_targets = utils.first_true( [namespace_targets, config.get("canonical")], None ) api = utils.first_true([api, config.get("api", None)], None) version = utils.first_true([version, config.get("bel_version", None)], None) print("------------------------------") print("BEL version: {}".format(version)) print("API Endpoint: {}".format(api)) print("------------------------------") bo = BEL(version=version, endpoint=api) bo.parse(statement).canonicalize(namespace_targets=namespace_targets) if bo.ast is None: print(bo.original_bel_stmt) print(bo.parse_visualize_error) print(bo.validation_messages) else: print("ORIGINAL ", bo.original_bel_stmt) print("CANONICAL", bo.ast) if bo.validation_messages: print(bo.validation_messages) else: print("No problems found") return
python
def canonicalize(ctx, statement, namespace_targets, version, api, config_fn): """Canonicalize statement Target namespaces can be provided in the following manner: bel stmt canonicalize "<BELStmt>" --namespace_targets '{"HGNC": ["EG", "SP"], "CHEMBL": ["CHEBI"]}' the value of target_namespaces must be JSON and embedded in single quotes reserving double quotes for the dictionary elements """ if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) namespace_targets = utils.first_true( [namespace_targets, config.get("canonical")], None ) api = utils.first_true([api, config.get("api", None)], None) version = utils.first_true([version, config.get("bel_version", None)], None) print("------------------------------") print("BEL version: {}".format(version)) print("API Endpoint: {}".format(api)) print("------------------------------") bo = BEL(version=version, endpoint=api) bo.parse(statement).canonicalize(namespace_targets=namespace_targets) if bo.ast is None: print(bo.original_bel_stmt) print(bo.parse_visualize_error) print(bo.validation_messages) else: print("ORIGINAL ", bo.original_bel_stmt) print("CANONICAL", bo.ast) if bo.validation_messages: print(bo.validation_messages) else: print("No problems found") return
[ "def", "canonicalize", "(", "ctx", ",", "statement", ",", "namespace_targets", ",", "version", ",", "api", ",", "config_fn", ")", ":", "if", "config_fn", ":", "config", "=", "bel", ".", "db", ".", "Config", ".", "merge_config", "(", "ctx", ".", "config", ",", "override_config_fn", "=", "config_fn", ")", "else", ":", "config", "=", "ctx", ".", "config", "# Configuration - will return the first truthy result in list else the default option", "if", "namespace_targets", ":", "namespace_targets", "=", "json", ".", "loads", "(", "namespace_targets", ")", "namespace_targets", "=", "utils", ".", "first_true", "(", "[", "namespace_targets", ",", "config", ".", "get", "(", "\"canonical\"", ")", "]", ",", "None", ")", "api", "=", "utils", ".", "first_true", "(", "[", "api", ",", "config", ".", "get", "(", "\"api\"", ",", "None", ")", "]", ",", "None", ")", "version", "=", "utils", ".", "first_true", "(", "[", "version", ",", "config", ".", "get", "(", "\"bel_version\"", ",", "None", ")", "]", ",", "None", ")", "print", "(", "\"------------------------------\"", ")", "print", "(", "\"BEL version: {}\"", ".", "format", "(", "version", ")", ")", "print", "(", "\"API Endpoint: {}\"", ".", "format", "(", "api", ")", ")", "print", "(", "\"------------------------------\"", ")", "bo", "=", "BEL", "(", "version", "=", "version", ",", "endpoint", "=", "api", ")", "bo", ".", "parse", "(", "statement", ")", ".", "canonicalize", "(", "namespace_targets", "=", "namespace_targets", ")", "if", "bo", ".", "ast", "is", "None", ":", "print", "(", "bo", ".", "original_bel_stmt", ")", "print", "(", "bo", ".", "parse_visualize_error", ")", "print", "(", "bo", ".", "validation_messages", ")", "else", ":", "print", "(", "\"ORIGINAL \"", ",", "bo", ".", "original_bel_stmt", ")", "print", "(", "\"CANONICAL\"", ",", "bo", ".", "ast", ")", "if", "bo", ".", "validation_messages", ":", "print", "(", "bo", ".", "validation_messages", ")", "else", ":", "print", "(", "\"No problems found\"", ")", "return" ]
Canonicalize statement Target namespaces can be provided in the following manner: bel stmt canonicalize "<BELStmt>" --namespace_targets '{"HGNC": ["EG", "SP"], "CHEMBL": ["CHEBI"]}' the value of target_namespaces must be JSON and embedded in single quotes reserving double quotes for the dictionary elements
[ "Canonicalize", "statement" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L464-L508
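The single-quote/double-quote convention for --namespace_targets boils down to handing json.loads a valid JSON string; a tiny sketch of that parse step, using the same example targets mentioned in the docstring:

import json

# Shell single quotes protect the inner double quotes, so the option value
# arrives as a JSON string that json.loads can turn into a dict.
raw = '{"HGNC": ["EG", "SP"], "CHEMBL": ["CHEBI"]}'
namespace_targets = json.loads(raw)
print(namespace_targets["HGNC"])  # ['EG', 'SP']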
belbio/bel
bel/scripts.py
edges
def edges(ctx, statement, rules, species, namespace_targets, version, api, config_fn): """Create BEL Edges from BEL Statement""" if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) if rules: rules = rules.replace(" ", "").split(",") namespace_targets = utils.first_true( [namespace_targets, config["bel"]["lang"].get("canonical")], None ) api_url = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) version = utils.first_true( [version, config["bel"]["lang"].get("default_bel_version", None)], None ) print("------------------------------") print("BEL version: {}".format(version)) print("API Endpoint: {}".format(api)) print("------------------------------") bo = BEL(version=version, endpoint=api_url) if species: edges = ( bo.parse(statement) .orthologize(species) .canonicalize(namespace_targets=namespace_targets) .compute_edges(rules=rules) ) else: edges = ( bo.parse(statement) .canonicalize(namespace_targets=namespace_targets) .compute_edges(rules=rules) ) if edges is None: print(bo.original_bel_stmt) print(bo.parse_visualize_error) print(bo.validation_messages) else: print(json.dumps(edges, indent=4)) if bo.validation_messages: print(bo.validation_messages) else: print("No problems found") return
python
def edges(ctx, statement, rules, species, namespace_targets, version, api, config_fn): """Create BEL Edges from BEL Statement""" if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) if rules: rules = rules.replace(" ", "").split(",") namespace_targets = utils.first_true( [namespace_targets, config["bel"]["lang"].get("canonical")], None ) api_url = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) version = utils.first_true( [version, config["bel"]["lang"].get("default_bel_version", None)], None ) print("------------------------------") print("BEL version: {}".format(version)) print("API Endpoint: {}".format(api)) print("------------------------------") bo = BEL(version=version, endpoint=api_url) if species: edges = ( bo.parse(statement) .orthologize(species) .canonicalize(namespace_targets=namespace_targets) .compute_edges(rules=rules) ) else: edges = ( bo.parse(statement) .canonicalize(namespace_targets=namespace_targets) .compute_edges(rules=rules) ) if edges is None: print(bo.original_bel_stmt) print(bo.parse_visualize_error) print(bo.validation_messages) else: print(json.dumps(edges, indent=4)) if bo.validation_messages: print(bo.validation_messages) else: print("No problems found") return
[ "def", "edges", "(", "ctx", ",", "statement", ",", "rules", ",", "species", ",", "namespace_targets", ",", "version", ",", "api", ",", "config_fn", ")", ":", "if", "config_fn", ":", "config", "=", "bel", ".", "db", ".", "Config", ".", "merge_config", "(", "ctx", ".", "config", ",", "override_config_fn", "=", "config_fn", ")", "else", ":", "config", "=", "ctx", ".", "config", "# Configuration - will return the first truthy result in list else the default option", "if", "namespace_targets", ":", "namespace_targets", "=", "json", ".", "loads", "(", "namespace_targets", ")", "if", "rules", ":", "rules", "=", "rules", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "namespace_targets", "=", "utils", ".", "first_true", "(", "[", "namespace_targets", ",", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", ".", "get", "(", "\"canonical\"", ")", "]", ",", "None", ")", "api_url", "=", "utils", ".", "first_true", "(", "[", "api", ",", "config", "[", "\"bel_api\"", "]", "[", "\"servers\"", "]", ".", "get", "(", "\"api_url\"", ",", "None", ")", "]", ",", "None", ")", "version", "=", "utils", ".", "first_true", "(", "[", "version", ",", "config", "[", "\"bel\"", "]", "[", "\"lang\"", "]", ".", "get", "(", "\"default_bel_version\"", ",", "None", ")", "]", ",", "None", ")", "print", "(", "\"------------------------------\"", ")", "print", "(", "\"BEL version: {}\"", ".", "format", "(", "version", ")", ")", "print", "(", "\"API Endpoint: {}\"", ".", "format", "(", "api", ")", ")", "print", "(", "\"------------------------------\"", ")", "bo", "=", "BEL", "(", "version", "=", "version", ",", "endpoint", "=", "api_url", ")", "if", "species", ":", "edges", "=", "(", "bo", ".", "parse", "(", "statement", ")", ".", "orthologize", "(", "species", ")", ".", "canonicalize", "(", "namespace_targets", "=", "namespace_targets", ")", ".", "compute_edges", "(", "rules", "=", "rules", ")", ")", "else", ":", "edges", "=", "(", "bo", ".", "parse", "(", "statement", ")", ".", "canonicalize", "(", "namespace_targets", "=", "namespace_targets", ")", ".", "compute_edges", "(", "rules", "=", "rules", ")", ")", "if", "edges", "is", "None", ":", "print", "(", "bo", ".", "original_bel_stmt", ")", "print", "(", "bo", ".", "parse_visualize_error", ")", "print", "(", "bo", ".", "validation_messages", ")", "else", ":", "print", "(", "json", ".", "dumps", "(", "edges", ",", "indent", "=", "4", ")", ")", "if", "bo", ".", "validation_messages", ":", "print", "(", "bo", ".", "validation_messages", ")", "else", ":", "print", "(", "\"No problems found\"", ")", "return" ]
Create BEL Edges from BEL Statement
[ "Create", "BEL", "Edges", "from", "BEL", "Statement" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L582-L637
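Outside the CLI, the edge computation is just the chained calls shown above. This is a sketch under assumptions: the version string, endpoint URL, species id, and statement are placeholders, and a live BEL terminology API must be reachable for parse/canonicalize to resolve terms.

from bel.lang.belobj import BEL

bo = BEL(version="2.0.0", endpoint="https://example.org/bel/api")  # placeholder endpoint
edges = (
    bo.parse("p(HGNC:AKT1) increases p(HGNC:EGF)")            # placeholder statement
      .orthologize("TAX:10090")                               # optional species step
      .canonicalize(namespace_targets={"HGNC": ["EG", "SP"]})
      .compute_edges(rules=None)
)
print(edges if edges is not None else bo.validation_messages)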
belbio/bel
bel/scripts.py
elasticsearch
def elasticsearch(delete, index_name): """Setup Elasticsearch namespace indexes This will by default only create the indexes and run the namespace index mapping if the indexes don't exist. The --delete option will force removal of the index if it exists. The index_name should be aliased to the index 'terms' when it's ready""" if delete: bel.db.elasticsearch.get_client(delete=True) else: bel.db.elasticsearch.get_client()
python
def elasticsearch(delete, index_name): """Setup Elasticsearch namespace indexes This will by default only create the indexes and run the namespace index mapping if the indexes don't exist. The --delete option will force removal of the index if it exists. The index_name should be aliased to the index 'terms' when it's ready""" if delete: bel.db.elasticsearch.get_client(delete=True) else: bel.db.elasticsearch.get_client()
[ "def", "elasticsearch", "(", "delete", ",", "index_name", ")", ":", "if", "delete", ":", "bel", ".", "db", ".", "elasticsearch", ".", "get_client", "(", "delete", "=", "True", ")", "else", ":", "bel", ".", "db", ".", "elasticsearch", ".", "get_client", "(", ")" ]
Setup Elasticsearch namespace indexes This will by default only create the indexes and run the namespace index mapping if the indexes don't exist. The --delete option will force removal of the index if it exists. The index_name should be aliased to the index 'terms' when it's ready
[ "Setup", "Elasticsearch", "namespace", "indexes" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L655-L667
belbio/bel
bel/scripts.py
arangodb
def arangodb(delete, db_name): """Setup ArangoDB database db_name: Either 'belns' or 'edgestore' - must be one or the other This will create the database, collections and indexes on the collection if it doesn't exist. The --delete option will force removal of the database if it exists.""" if delete: client = bel.db.arangodb.get_client() bel.db.arangodb.delete_database(client, db_name) if db_name == "belns": bel.db.arangodb.get_belns_handle(client) elif db_name == "edgestore": bel.db.arangodb.get_edgestore_handle(client)
python
def arangodb(delete, db_name): """Setup ArangoDB database db_name: Either 'belns' or 'edgestore' - must be one or the other This will create the database, collections and indexes on the collection if it doesn't exist. The --delete option will force removal of the database if it exists.""" if delete: client = bel.db.arangodb.get_client() bel.db.arangodb.delete_database(client, db_name) if db_name == "belns": bel.db.arangodb.get_belns_handle(client) elif db_name == "edgestore": bel.db.arangodb.get_edgestore_handle(client)
[ "def", "arangodb", "(", "delete", ",", "db_name", ")", ":", "if", "delete", ":", "client", "=", "bel", ".", "db", ".", "arangodb", ".", "get_client", "(", ")", "bel", ".", "db", ".", "arangodb", ".", "delete_database", "(", "client", ",", "db_name", ")", "if", "db_name", "==", "\"belns\"", ":", "bel", ".", "db", ".", "arangodb", ".", "get_belns_handle", "(", "client", ")", "elif", "db_name", "==", "\"edgestore\"", ":", "bel", ".", "db", ".", "arangodb", ".", "get_edgestore_handle", "(", "client", ")" ]
Setup ArangoDB database db_name: Either 'belns' or 'edgestore' - must be one or the other This will create the database, collections and indexes on the collection if it doesn't exist. The --delete option will force removal of the database if it exists.
[ "Setup", "ArangoDB", "database" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/scripts.py#L675-L691
belbio/bel
bel/nanopub/nanopubs.py
validate_to_schema
def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]: """Validate nanopub against jsonschema for nanopub Args: nanopub (Mapping[str, Any]): nanopub dict schema (Mapping[str, Any]): nanopub schema Returns: Tuple[bool, List[str]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg) e.g. [('ERROR', "'subject' is a required property")] """ v = jsonschema.Draft4Validator(schema) messages = [] errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path) for error in errors: for suberror in sorted(error.context, key=lambda e: e.schema_path): print(list(suberror.schema_path), suberror.message, sep=", ") messages.append(("ERROR", suberror.message)) is_valid = True if errors: is_valid = False return (is_valid, messages)
python
def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]: """Validate nanopub against jsonschema for nanopub Args: nanopub (Mapping[str, Any]): nanopub dict schema (Mapping[str, Any]): nanopub schema Returns: Tuple[bool, List[str]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg) e.g. [('ERROR', "'subject' is a required property")] """ v = jsonschema.Draft4Validator(schema) messages = [] errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path) for error in errors: for suberror in sorted(error.context, key=lambda e: e.schema_path): print(list(suberror.schema_path), suberror.message, sep=", ") messages.append(("ERROR", suberror.message)) is_valid = True if errors: is_valid = False return (is_valid, messages)
[ "def", "validate_to_schema", "(", "nanopub", ",", "schema", ")", "->", "Tuple", "[", "bool", ",", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", "]", ":", "v", "=", "jsonschema", ".", "Draft4Validator", "(", "schema", ")", "messages", "=", "[", "]", "errors", "=", "sorted", "(", "v", ".", "iter_errors", "(", "nanopub", ")", ",", "key", "=", "lambda", "e", ":", "e", ".", "path", ")", "for", "error", "in", "errors", ":", "for", "suberror", "in", "sorted", "(", "error", ".", "context", ",", "key", "=", "lambda", "e", ":", "e", ".", "schema_path", ")", ":", "print", "(", "list", "(", "suberror", ".", "schema_path", ")", ",", "suberror", ".", "message", ",", "sep", "=", "\", \"", ")", "messages", ".", "append", "(", "(", "\"ERROR\"", ",", "suberror", ".", "message", ")", ")", "is_valid", "=", "True", "if", "errors", ":", "is_valid", "=", "False", "return", "(", "is_valid", ",", "messages", ")" ]
Validate nanopub against jsonschema for nanopub Args: nanopub (Mapping[str, Any]): nanopub dict schema (Mapping[str, Any]): nanopub schema Returns: Tuple[bool, List[str]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg) e.g. [('ERROR', "'subject' is a required property")]
[ "Validate", "nanopub", "against", "jsonschema", "for", "nanopub" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L141-L167
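validate_to_schema leans entirely on jsonschema's Draft4Validator; here is a self-contained sketch of that flow with a hypothetical minimal schema. It collects top-level error messages directly rather than walking error.context, which only matters for anyOf/oneOf schemas.

import jsonschema

schema = {
    "type": "object",
    "required": ["subject"],
    "properties": {"subject": {"type": "string"}},
}
doc = {"relation": "increases"}  # missing the required "subject" key

validator = jsonschema.Draft4Validator(schema)
messages = [("ERROR", err.message) for err in validator.iter_errors(doc)]
print((not messages, messages))
# (False, [('ERROR', "'subject' is a required property")])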
belbio/bel
bel/nanopub/nanopubs.py
hash_nanopub
def hash_nanopub(nanopub: Mapping[str, Any]) -> str: """Create CityHash64 from nanopub for duplicate check TODO - check that this hash value is consistent between C# and Python running on laptop and server Build string to hash Collect flat array of (all values.strip()): nanopub.type.name nanopub.type.version One of: nanopub.citation.database.name nanopub.citation.database.id OR nanopub.citation.database.uri OR nanopub.citation.database.reference Extend with sorted list of assertions (SRO as single string with space between S, R and O) Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id) Convert array to string by joining array elements separated by a space Create CityHash64(str) and return """ hash_list = [] # Type hash_list.append(nanopub["nanopub"]["type"].get("name", "").strip()) hash_list.append(nanopub["nanopub"]["type"].get("version", "").strip()) # Citation if nanopub["nanopub"]["citation"].get("database", False): hash_list.append( nanopub["nanopub"]["citation"]["database"].get("name", "").strip() ) hash_list.append( nanopub["nanopub"]["citation"]["database"].get("id", "").strip() ) elif nanopub["nanopub"]["citation"].get("uri", False): hash_list.append(nanopub["nanopub"]["citation"].get("uri", "").strip()) elif nanopub["nanopub"]["citation"].get("reference", False): hash_list.append(nanopub["nanopub"]["citation"].get("reference", "").strip()) # Assertions assertions = [] for assertion in nanopub["nanopub"]["assertions"]: if assertion.get("relation") is None: assertion["relation"] = "" if assertion.get("object") is None: assertion["object"] = "" assertions.append( " ".join( ( assertion["subject"].strip(), assertion.get("relation", "").strip(), assertion.get("object", "").strip(), ) ).strip() ) assertions = sorted(assertions) hash_list.extend(assertions) # Annotations annotations = [] for anno in nanopub["nanopub"]["annotations"]: annotations.append( " ".join((anno.get("type", "").strip(), anno.get("id", "").strip())).strip() ) annotations = sorted(annotations) hash_list.extend(annotations) np_string = " ".join([l.lower() for l in hash_list]) return "{:x}".format(CityHash64(np_string))
python
def hash_nanopub(nanopub: Mapping[str, Any]) -> str: """Create CityHash64 from nanopub for duplicate check TODO - check that this hash value is consistent between C# and Python running on laptop and server Build string to hash Collect flat array of (all values.strip()): nanopub.type.name nanopub.type.version One of: nanopub.citation.database.name nanopub.citation.database.id OR nanopub.citation.database.uri OR nanopub.citation.database.reference Extend with sorted list of assertions (SRO as single string with space between S, R and O) Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id) Convert array to string by joining array elements separated by a space Create CityHash64(str) and return """ hash_list = [] # Type hash_list.append(nanopub["nanopub"]["type"].get("name", "").strip()) hash_list.append(nanopub["nanopub"]["type"].get("version", "").strip()) # Citation if nanopub["nanopub"]["citation"].get("database", False): hash_list.append( nanopub["nanopub"]["citation"]["database"].get("name", "").strip() ) hash_list.append( nanopub["nanopub"]["citation"]["database"].get("id", "").strip() ) elif nanopub["nanopub"]["citation"].get("uri", False): hash_list.append(nanopub["nanopub"]["citation"].get("uri", "").strip()) elif nanopub["nanopub"]["citation"].get("reference", False): hash_list.append(nanopub["nanopub"]["citation"].get("reference", "").strip()) # Assertions assertions = [] for assertion in nanopub["nanopub"]["assertions"]: if assertion.get("relation") is None: assertion["relation"] = "" if assertion.get("object") is None: assertion["object"] = "" assertions.append( " ".join( ( assertion["subject"].strip(), assertion.get("relation", "").strip(), assertion.get("object", "").strip(), ) ).strip() ) assertions = sorted(assertions) hash_list.extend(assertions) # Annotations annotations = [] for anno in nanopub["nanopub"]["annotations"]: annotations.append( " ".join((anno.get("type", "").strip(), anno.get("id", "").strip())).strip() ) annotations = sorted(annotations) hash_list.extend(annotations) np_string = " ".join([l.lower() for l in hash_list]) return "{:x}".format(CityHash64(np_string))
[ "def", "hash_nanopub", "(", "nanopub", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "str", ":", "hash_list", "=", "[", "]", "# Type", "hash_list", ".", "append", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", ".", "get", "(", "\"name\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "hash_list", ".", "append", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", ".", "get", "(", "\"version\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "# Citation", "if", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", ".", "get", "(", "\"database\"", ",", "False", ")", ":", "hash_list", ".", "append", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", "[", "\"database\"", "]", ".", "get", "(", "\"name\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "hash_list", ".", "append", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", "[", "\"database\"", "]", ".", "get", "(", "\"id\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "elif", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", ".", "get", "(", "\"uri\"", ",", "False", ")", ":", "hash_list", ".", "append", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", ".", "get", "(", "\"uri\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "elif", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", ".", "get", "(", "\"reference\"", ",", "False", ")", ":", "hash_list", ".", "append", "(", "nanopub", "[", "\"nanopub\"", "]", "[", "\"citation\"", "]", ".", "get", "(", "\"reference\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "# Assertions", "assertions", "=", "[", "]", "for", "assertion", "in", "nanopub", "[", "\"nanopub\"", "]", "[", "\"assertions\"", "]", ":", "if", "assertion", ".", "get", "(", "\"relation\"", ")", "is", "None", ":", "assertion", "[", "\"relation\"", "]", "=", "\"\"", "if", "assertion", ".", "get", "(", "\"object\"", ")", "is", "None", ":", "assertion", "[", "\"object\"", "]", "=", "\"\"", "assertions", ".", "append", "(", "\" \"", ".", "join", "(", "(", "assertion", "[", "\"subject\"", "]", ".", "strip", "(", ")", ",", "assertion", ".", "get", "(", "\"relation\"", ",", "\"\"", ")", ".", "strip", "(", ")", ",", "assertion", ".", "get", "(", "\"object\"", ",", "\"\"", ")", ".", "strip", "(", ")", ",", ")", ")", ".", "strip", "(", ")", ")", "assertions", "=", "sorted", "(", "assertions", ")", "hash_list", ".", "extend", "(", "assertions", ")", "# Annotations", "annotations", "=", "[", "]", "for", "anno", "in", "nanopub", "[", "\"nanopub\"", "]", "[", "\"annotations\"", "]", ":", "annotations", ".", "append", "(", "\" \"", ".", "join", "(", "(", "anno", ".", "get", "(", "\"type\"", ",", "\"\"", ")", ".", "strip", "(", ")", ",", "anno", ".", "get", "(", "\"id\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", ")", ".", "strip", "(", ")", ")", "annotations", "=", "sorted", "(", "annotations", ")", "hash_list", ".", "extend", "(", "annotations", ")", "np_string", "=", "\" \"", ".", "join", "(", "[", "l", ".", "lower", "(", ")", "for", "l", "in", "hash_list", "]", ")", "return", "\"{:x}\"", ".", "format", "(", "CityHash64", "(", "np_string", ")", ")" ]
Create CityHash64 from nanopub for duplicate check TODO - check that this hash value is consistent between C# and Python running on laptop and server Build string to hash Collect flat array of (all values.strip()): nanopub.type.name nanopub.type.version One of: nanopub.citation.database.name nanopub.citation.database.id OR nanopub.citation.database.uri OR nanopub.citation.database.reference Extend with sorted list of assertions (SRO as single string with space between S, R and O) Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id) Convert array to string by joining array elements separated by a space Create CityHash64(str) and return
[ "Create", "CityHash64", "from", "nanopub", "for", "duplicate", "check" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L171-L256
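The hashing step itself is tiny once the fields are flattened. A toy sketch of the recipe described above, assuming the cityhash package (which provides CityHash64) is installed; the field values are invented, not a real nanopub.

from cityhash import CityHash64  # pip package "cityhash" (assumed available)

# Flatten selected fields, lowercase, join on spaces, hash to a hex string.
parts = [
    "BEL", "2.0.0",                              # type name / version
    "PubMed", "12345",                           # citation database name / id
    "p(HGNC:AKT1) increases p(HGNC:EGF)",        # sorted assertions
    "Species TAX:9606",                          # sorted annotations (type + id)
]
np_string = " ".join(p.strip().lower() for p in parts)
print("{:x}".format(CityHash64(np_string)))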
belbio/bel
bel/nanopub/nanopubs.py
Nanopub.validate
def validate( self, nanopub: Mapping[str, Any] ) -> Tuple[bool, List[Tuple[str, str]]]: """Validates using the nanopub schema Args: nanopub (Mapping[str, Any]): nanopub dict Returns: Tuple[bool, List[Tuple[str, str]]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg) e.g. [('WARNING', "Context ID not found")] """ # Validate nanopub (is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema) if not is_valid: return messages # Extract BEL Version if nanopub["nanopub"]["type"]["name"].upper() == "BEL": bel_version = nanopub["nanopub"]["type"]["version"] else: is_valid = False return ( is_valid, f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}", ) all_messages = [] # Validate BEL Statements bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint) for edge in nanopub["nanopub"]["edges"]: bel_statement = f"{edge['subject']} {edge['relation']} {edge['object']}" parse_obj = bel_obj.parse(bel_statement) if not parse_obj.valid: all_messages.extend( ( "ERROR", f"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}", ) ) # Validate nanopub.context for context in nanopub["nanopub"]["context"]: (is_valid, messages) = self.validate_context(context) all_messages.extend(messages) is_valid = True for _type, msg in all_messages: if _type == "ERROR": is_valid = False return (is_valid, all_messages)
python
def validate( self, nanopub: Mapping[str, Any] ) -> Tuple[bool, List[Tuple[str, str]]]: """Validates using the nanopub schema Args: nanopub (Mapping[str, Any]): nanopub dict Returns: Tuple[bool, List[Tuple[str, str]]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg) e.g. [('WARNING', "Context ID not found")] """ # Validate nanopub (is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema) if not is_valid: return messages # Extract BEL Version if nanopub["nanopub"]["type"]["name"].upper() == "BEL": bel_version = nanopub["nanopub"]["type"]["version"] else: is_valid = False return ( is_valid, f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}", ) all_messages = [] # Validate BEL Statements bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint) for edge in nanopub["nanopub"]["edges"]: bel_statement = f"{edge['subject']} {edge['relation']} {edge['object']}" parse_obj = bel_obj.parse(bel_statement) if not parse_obj.valid: all_messages.extend( ( "ERROR", f"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}", ) ) # Validate nanopub.context for context in nanopub["nanopub"]["context"]: (is_valid, messages) = self.validate_context(context) all_messages.extend(messages) is_valid = True for _type, msg in all_messages: if _type == "ERROR": is_valid = False return (is_valid, all_messages)
[ "def", "validate", "(", "self", ",", "nanopub", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Tuple", "[", "bool", ",", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", "]", ":", "# Validate nanopub", "(", "is_valid", ",", "messages", ")", "=", "validate_to_schema", "(", "nanopub", ",", "self", ".", "nanopub_schema", ")", "if", "not", "is_valid", ":", "return", "messages", "# Extract BEL Version", "if", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", "[", "\"name\"", "]", ".", "upper", "(", ")", "==", "\"BEL\"", ":", "bel_version", "=", "nanopub", "[", "\"nanopub\"", "]", "[", "\"type\"", "]", "[", "\"version\"", "]", "else", ":", "is_valid", "=", "False", "return", "(", "is_valid", ",", "f\"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}\"", ",", ")", "all_messages", "=", "[", "]", "# Validate BEL Statements", "bel_obj", "=", "bel", ".", "lang", ".", "belobj", ".", "BEL", "(", "bel_version", ",", "self", ".", "endpoint", ")", "for", "edge", "in", "nanopub", "[", "\"nanopub\"", "]", "[", "\"edges\"", "]", ":", "bel_statement", "=", "f\"{edge['subject']} {edge['relation']} {edge['object']}\"", "parse_obj", "=", "bel_obj", ".", "parse", "(", "bel_statement", ")", "if", "not", "parse_obj", ".", "valid", ":", "all_messages", ".", "extend", "(", "(", "\"ERROR\"", ",", "f\"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}\"", ",", ")", ")", "# Validate nanopub.context", "for", "context", "in", "nanopub", "[", "\"nanopub\"", "]", "[", "\"context\"", "]", ":", "(", "is_valid", ",", "messages", ")", "=", "self", ".", "validate_context", "(", "context", ")", "all_messages", ".", "extend", "(", "messages", ")", "is_valid", "=", "True", "for", "_type", ",", "msg", "in", "all_messages", ":", "if", "_type", "==", "\"ERROR\"", ":", "is_valid", "=", "False", "return", "(", "is_valid", ",", "all_messages", ")" ]
Validates using the nanopub schema Args: nanopub (Mapping[str, Any]): nanopub dict Returns: Tuple[bool, List[Tuple[str, str]]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg) e.g. [('WARNING', "Context ID not found")]
[ "Validates", "using", "the", "nanopub", "schema" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L30-L83
belbio/bel
bel/nanopub/nanopubs.py
Nanopub.validate_context
def validate_context( self, context: Mapping[str, Any] ) -> Tuple[bool, List[Tuple[str, str]]]: """ Validate context Args: context (Mapping[str, Any]): context dictionary of type, id and label Returns: Tuple[bool, List[Tuple[str, str]]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg) e.g. [('WARNING', "Context ID not found")] """ url = f'{self.endpoint}/terms/{context["id"]}' res = requests.get(url) if res.status_code == 200: return (True, []) else: return (False, [("WARNING", f'Context {context["id"]} not found at {url}')])
python
def validate_context( self, context: Mapping[str, Any] ) -> Tuple[bool, List[Tuple[str, str]]]: """ Validate context Args: context (Mapping[str, Any]): context dictionary of type, id and label Returns: Tuple[bool, List[Tuple[str, str]]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg) e.g. [('WARNING', "Context ID not found")] """ url = f'{self.endpoint}/terms/{context["id"]}' res = requests.get(url) if res.status_code == 200: return (True, []) else: return (False, [("WARNING", f'Context {context["id"]} not found at {url}')])
[ "def", "validate_context", "(", "self", ",", "context", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "Tuple", "[", "bool", ",", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", "]", ":", "url", "=", "f'{self.endpoint}/terms/{context[\"id\"]}'", "res", "=", "requests", ".", "get", "(", "url", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "(", "True", ",", "[", "]", ")", "else", ":", "return", "(", "False", ",", "[", "(", "\"WARNING\"", ",", "f'Context {context[\"id\"]} not found at {url}'", ")", "]", ")" ]
Validate context Args: context (Mapping[str, Any]): context dictionary of type, id and label Returns: Tuple[bool, List[Tuple[str, str]]]: bool: Is valid? Yes = True, No = False List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg) e.g. [('WARNING', "Context ID not found")]
[ "Validate", "context" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L85-L106
belbio/bel
bel/nanopub/nanopubs.py
Nanopub.bel_edges
def bel_edges( self, nanopub: Mapping[str, Any], namespace_targets: Mapping[str, List[str]] = {}, rules: List[str] = [], orthologize_target: str = None, ) -> List[Mapping[str, Any]]: """Create BEL Edges from BEL nanopub Args: nanopub (Mapping[str, Any]): bel nanopub namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize rules (List[str]): which computed edge rules to process, default is all, look at BEL Specification yaml file for computed edge signature keys, e.g. degradation, if any rule in list is 'skip', then skip computing edges just return primary_edge orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize Returns: List[Mapping[str, Any]]: edge list with edge attributes (e.g. context) """ edges = bel.edge.edges.create_edges( nanopub, self.endpoint, namespace_targets=namespace_targets, rules=rules, orthologize_target=orthologize_target, ) return edges
python
def bel_edges( self, nanopub: Mapping[str, Any], namespace_targets: Mapping[str, List[str]] = {}, rules: List[str] = [], orthologize_target: str = None, ) -> List[Mapping[str, Any]]: """Create BEL Edges from BEL nanopub Args: nanopub (Mapping[str, Any]): bel nanopub namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize rules (List[str]): which computed edge rules to process, default is all, look at BEL Specification yaml file for computed edge signature keys, e.g. degradation, if any rule in list is 'skip', then skip computing edges just return primary_edge orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize Returns: List[Mapping[str, Any]]: edge list with edge attributes (e.g. context) """ edges = bel.edge.edges.create_edges( nanopub, self.endpoint, namespace_targets=namespace_targets, rules=rules, orthologize_target=orthologize_target, ) return edges
[ "def", "bel_edges", "(", "self", ",", "nanopub", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "namespace_targets", ":", "Mapping", "[", "str", ",", "List", "[", "str", "]", "]", "=", "{", "}", ",", "rules", ":", "List", "[", "str", "]", "=", "[", "]", ",", "orthologize_target", ":", "str", "=", "None", ",", ")", "->", "List", "[", "Mapping", "[", "str", ",", "Any", "]", "]", ":", "edges", "=", "bel", ".", "edge", ".", "edges", ".", "create_edges", "(", "nanopub", ",", "self", ".", "endpoint", ",", "namespace_targets", "=", "namespace_targets", ",", "rules", "=", "rules", ",", "orthologize_target", "=", "orthologize_target", ",", ")", "return", "edges" ]
Create BEL Edges from BEL nanopub Args: nanopub (Mapping[str, Any]): bel nanopub namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize rules (List[str]): which computed edge rules to process, default is all, look at BEL Specification yaml file for computed edge signature keys, e.g. degradation, if any rule in list is 'skip', then skip computing edges just return primary_edge orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize Returns: List[Mapping[str, Any]]: edge list with edge attributes (e.g. context)
[ "Create", "BEL", "Edges", "from", "BEL", "nanopub" ]
train
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L108-L138
RockFeng0/rtsf-http
httpdriver/cli.py
main_hrun
def main_hrun(): """ parse command line options and run commands.""" parser = argparse.ArgumentParser(description="Tools for http(s) test. Base on rtsf.") parser.add_argument( '--log-level', default='INFO', help="Specify logging level, default is INFO.") parser.add_argument( '--log-file', help="Write logs to specified file path.") parser.add_argument( 'case_file', help="yaml testcase file") color_print("httpdriver {}".format(__version__), "GREEN") args = parser.parse_args() logger.setup_logger(args.log_level, args.log_file) runner = TestRunner(runner = HttpDriver).run(args.case_file) html_report = runner.gen_html_report() color_print("report: {}".format(html_report))
python
def main_hrun(): """ parse command line options and run commands.""" parser = argparse.ArgumentParser(description="Tools for http(s) test. Base on rtsf.") parser.add_argument( '--log-level', default='INFO', help="Specify logging level, default is INFO.") parser.add_argument( '--log-file', help="Write logs to specified file path.") parser.add_argument( 'case_file', help="yaml testcase file") color_print("httpdriver {}".format(__version__), "GREEN") args = parser.parse_args() logger.setup_logger(args.log_level, args.log_file) runner = TestRunner(runner = HttpDriver).run(args.case_file) html_report = runner.gen_html_report() color_print("report: {}".format(html_report))
[ "def", "main_hrun", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Tools for http(s) test. Base on rtsf.\"", ")", "parser", ".", "add_argument", "(", "'--log-level'", ",", "default", "=", "'INFO'", ",", "help", "=", "\"Specify logging level, default is INFO.\"", ")", "parser", ".", "add_argument", "(", "'--log-file'", ",", "help", "=", "\"Write logs to specified file path.\"", ")", "parser", ".", "add_argument", "(", "'case_file'", ",", "help", "=", "\"yaml testcase file\"", ")", "color_print", "(", "\"httpdriver {}\"", ".", "format", "(", "__version__", ")", ",", "\"GREEN\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "logger", ".", "setup_logger", "(", "args", ".", "log_level", ",", "args", ".", "log_file", ")", "runner", "=", "TestRunner", "(", "runner", "=", "HttpDriver", ")", ".", "run", "(", "args", ".", "case_file", ")", "html_report", "=", "runner", ".", "gen_html_report", "(", ")", "color_print", "(", "\"report: {}\"", ".", "format", "(", "html_report", ")", ")" ]
parse command line options and run commands.
[ "parse", "command", "line", "options", "and", "run", "commands", "." ]
train
https://github.com/RockFeng0/rtsf-http/blob/3280cc9a01b0c92c52d699b0ebc29e55e62611a0/httpdriver/cli.py#L28-L51
malja/zroya
setup.py
find_pyd_file
def find_pyd_file(): """ Return path to .pyd after successful build command. :return: Path to .pyd file or None. """ if not os.path.isdir("./build"): raise NotADirectoryError for path, dirs, files in os.walk("./build"): for file_name in files: file_name_parts = os.path.splitext(file_name) if file_name_parts[1] == ".pyd": return path return None
python
def find_pyd_file(): """ Return path to .pyd after successful build command. :return: Path to .pyd file or None. """ if not os.path.isdir("./build"): raise NotADirectoryError for path, dirs, files in os.walk("./build"): for file_name in files: file_name_parts = os.path.splitext(file_name) if file_name_parts[1] == ".pyd": return path return None
[ "def", "find_pyd_file", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "\"./build\"", ")", ":", "raise", "NotADirectoryError", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "\"./build\"", ")", ":", "for", "file_name", "in", "files", ":", "file_name_parts", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "if", "file_name_parts", "[", "1", "]", "==", "\".pyd\"", ":", "return", "path", "return", "None" ]
Return path to .pyd after successful build command. :return: Path to .pyd file or None.
[ "Return", "path", "to", ".", "pyd", "after", "successful", "build", "command", ".", ":", "return", ":", "Path", "to", ".", "pyd", "file", "or", "None", "." ]
train
https://github.com/malja/zroya/blob/41830133a54528e9cd9ef43d9637a576ac849c11/setup.py#L53-L67
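A small variant for comparison (hypothetical, not part of zroya's setup.py): the same os.walk scan, but returning the full file path of the first built extension instead of just its containing directory.

import os

def find_built_extension(root="./build", ext=".pyd"):
    """Return the full path of the first file under root with the given extension."""
    if not os.path.isdir(root):
        raise NotADirectoryError(root)
    for path, _dirs, files in os.walk(root):
        for file_name in files:
            if os.path.splitext(file_name)[1] == ext:
                return os.path.join(path, file_name)
    return None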
urschrei/simplification
simplification/util.py
_void_array_to_nested_list
def _void_array_to_nested_list(res, _func, _args): """ Dereference the FFI result to a list of coordinates """ try: shape = res.coords.len, 2 ptr = cast(res.coords.data, POINTER(c_double)) array = np.ctypeslib.as_array(ptr, shape) return array.tolist() finally: drop_array(res.coords)
python
def _void_array_to_nested_list(res, _func, _args): """ Dereference the FFI result to a list of coordinates """ try: shape = res.coords.len, 2 ptr = cast(res.coords.data, POINTER(c_double)) array = np.ctypeslib.as_array(ptr, shape) return array.tolist() finally: drop_array(res.coords)
[ "def", "_void_array_to_nested_list", "(", "res", ",", "_func", ",", "_args", ")", ":", "try", ":", "shape", "=", "res", ".", "coords", ".", "len", ",", "2", "ptr", "=", "cast", "(", "res", ".", "coords", ".", "data", ",", "POINTER", "(", "c_double", ")", ")", "array", "=", "np", ".", "ctypeslib", ".", "as_array", "(", "ptr", ",", "shape", ")", "return", "array", ".", "tolist", "(", ")", "finally", ":", "drop_array", "(", "res", ".", "coords", ")" ]
Dereference the FFI result to a list of coordinates
[ "Dereference", "the", "FFI", "result", "to", "a", "list", "of", "coordinates" ]
train
https://github.com/urschrei/simplification/blob/58491fc08cffa2fab5fe19d17c2ceb9d442530c3/simplification/util.py#L92-L100
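The interesting part of _void_array_to_nested_list is the zero-copy view of an FFI buffer; a standalone sketch of that dereference step, using a plain ctypes array in place of the FFI-allocated coords.

import numpy as np
from ctypes import POINTER, c_double, cast

# Three coordinate pairs laid out as a flat C double buffer.
buf = (c_double * 6)(0.0, 0.0, 1.0, 1.5, 2.0, 3.0)
ptr = cast(buf, POINTER(c_double))

# View the buffer as a (3, 2) array without copying, then materialise a list.
array = np.ctypeslib.as_array(ptr, (3, 2))
print(array.tolist())  # [[0.0, 0.0], [1.0, 1.5], [2.0, 3.0]]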
MacHu-GWU/dataIO-project
dataIO/js.py
is_json_file
def is_json_file(abspath): """Parse file extension. - *.json: uncompressed, utf-8 encode json file - *.gz: compressed, utf-8 encode json file """ abspath = abspath.lower() fname, ext = os.path.splitext(abspath) if ext in [".json", ".js"]: is_json = True elif ext == ".gz": is_json = False elif ext == ".tmp": return is_json_file(fname) else: raise JsonExtError( "'%s' is not a valid json file. " "extension has to be '.json' for uncompressed, '.gz' " "for compressed." % abspath) return is_json
python
def is_json_file(abspath): """Parse file extension. - *.json: uncompressed, utf-8 encode json file - *.gz: compressed, utf-8 encode json file """ abspath = abspath.lower() fname, ext = os.path.splitext(abspath) if ext in [".json", ".js"]: is_json = True elif ext == ".gz": is_json = False elif ext == ".tmp": return is_json_file(fname) else: raise JsonExtError( "'%s' is not a valid json file. " "extension has to be '.json' for uncompressed, '.gz' " "for compressed." % abspath) return is_json
[ "def", "is_json_file", "(", "abspath", ")", ":", "abspath", "=", "abspath", ".", "lower", "(", ")", "fname", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "abspath", ")", "if", "ext", "in", "[", "\".json\"", ",", "\".js\"", "]", ":", "is_json", "=", "True", "elif", "ext", "==", "\".gz\"", ":", "is_json", "=", "False", "elif", "ext", "==", "\".tmp\"", ":", "return", "is_json_file", "(", "fname", ")", "else", ":", "raise", "JsonExtError", "(", "\"'%s' is not a valid json file. \"", "\"extension has to be '.json' for uncompressed, '.gz' \"", "\"for compressed.\"", "%", "abspath", ")", "return", "is_json" ]
Parse file extension. - *.json: uncompressed, utf-8 encoded json file - *.gz: compressed, utf-8 encoded json file
[ "Parse", "file", "extension", "." ]
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L49-L68
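The extension dispatch above is easiest to read from a few concrete calls. Assuming the dataIO package is importable, these reflect the branches in the code, including the .tmp recursion used later by safe_dump.

from dataIO import js

print(js.is_json_file("data.JSON"))      # True  - uncompressed json (path is lowercased first)
print(js.is_json_file("data.json.gz"))   # False - gzip-compressed json
print(js.is_json_file("data.json.tmp"))  # True  - ".tmp" defers to the real extension
# js.is_json_file("data.csv") raises JsonExtError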
MacHu-GWU/dataIO-project
dataIO/js.py
lower_ext
def lower_ext(abspath): """Convert file extension to lowercase. """ fname, ext = os.path.splitext(abspath) return fname + ext.lower()
python
def lower_ext(abspath): """Convert file extension to lowercase. """ fname, ext = os.path.splitext(abspath) return fname + ext.lower()
[ "def", "lower_ext", "(", "abspath", ")", ":", "fname", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "abspath", ")", "return", "fname", "+", "ext", ".", "lower", "(", ")" ]
Convert file extension to lowercase.
[ "Convert", "file", "extension", "to", "lowercase", "." ]
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L71-L75
MacHu-GWU/dataIO-project
dataIO/js.py
load
def load(abspath, default=None, enable_verbose=True): """Load Json from file. If file are not exists, returns ``default``. :param abspath: file path. use absolute path as much as you can. extension has to be ``.json`` or ``.gz`` (for compressed Json). :type abspath: string :param default: default ``dict()``, if ``abspath`` not exists, return the default Python object instead. :param enable_verbose: default ``True``, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> js.load("test.json") # if you have a json file Load from 'test.json' ... Complete! Elapse 0.000432 sec. {'a': 1, 'b': 2} **中文文档** 从Json文件中读取数据 :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值 :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值`` """ if default is None: default = dict() prt("\nLoad from '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_json = is_json_file(abspath) if not os.path.exists(abspath): prt(" File not found, use default value: %r" % default, enable_verbose) return default st = time.clock() if is_json: data = json.loads(textfile.read(abspath, encoding="utf-8")) else: data = json.loads(compress.read_gzip(abspath).decode("utf-8")) prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) return data
python
def load(abspath, default=None, enable_verbose=True): """Load Json from file. If file are not exists, returns ``default``. :param abspath: file path. use absolute path as much as you can. extension has to be ``.json`` or ``.gz`` (for compressed Json). :type abspath: string :param default: default ``dict()``, if ``abspath`` not exists, return the default Python object instead. :param enable_verbose: default ``True``, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> js.load("test.json") # if you have a json file Load from 'test.json' ... Complete! Elapse 0.000432 sec. {'a': 1, 'b': 2} **中文文档** 从Json文件中读取数据 :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值 :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值`` """ if default is None: default = dict() prt("\nLoad from '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_json = is_json_file(abspath) if not os.path.exists(abspath): prt(" File not found, use default value: %r" % default, enable_verbose) return default st = time.clock() if is_json: data = json.loads(textfile.read(abspath, encoding="utf-8")) else: data = json.loads(compress.read_gzip(abspath).decode("utf-8")) prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) return data
[ "def", "load", "(", "abspath", ",", "default", "=", "None", ",", "enable_verbose", "=", "True", ")", ":", "if", "default", "is", "None", ":", "default", "=", "dict", "(", ")", "prt", "(", "\"\\nLoad from '%s' ...\"", "%", "abspath", ",", "enable_verbose", ")", "abspath", "=", "lower_ext", "(", "str", "(", "abspath", ")", ")", "is_json", "=", "is_json_file", "(", "abspath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "abspath", ")", ":", "prt", "(", "\" File not found, use default value: %r\"", "%", "default", ",", "enable_verbose", ")", "return", "default", "st", "=", "time", ".", "clock", "(", ")", "if", "is_json", ":", "data", "=", "json", ".", "loads", "(", "textfile", ".", "read", "(", "abspath", ",", "encoding", "=", "\"utf-8\"", ")", ")", "else", ":", "data", "=", "json", ".", "loads", "(", "compress", ".", "read_gzip", "(", "abspath", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", "prt", "(", "\" Complete! Elapse %.6f sec.\"", "%", "(", "time", ".", "clock", "(", ")", "-", "st", ")", ",", "enable_verbose", ")", "return", "data" ]
Load Json from file. If file are not exists, returns ``default``. :param abspath: file path. use absolute path as much as you can. extension has to be ``.json`` or ``.gz`` (for compressed Json). :type abspath: string :param default: default ``dict()``, if ``abspath`` not exists, return the default Python object instead. :param enable_verbose: default ``True``, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> js.load("test.json") # if you have a json file Load from 'test.json' ... Complete! Elapse 0.000432 sec. {'a': 1, 'b': 2} **中文文档** 从Json文件中读取数据 :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值 :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值``
[ "Load", "Json", "from", "file", ".", "If", "file", "are", "not", "exists", "returns", "default", "." ]
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L78-L132
MacHu-GWU/dataIO-project
dataIO/js.py
dump
def dump(data, abspath, indent_format=False, float_precision=None, ensure_ascii=True, overwrite=False, enable_verbose=True): """Dump Json serializable object to file. Provides multiple choice to customize the behavior. :param data: Serializable python object. :type data: dict or list :param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz`` (for compressed Json) :type abspath: string :param indent_format: default ``False``, If ``True``, then dump to human readable format, but it's slower, the file is larger :type indent_format: boolean :param float_precision: default ``None``, limit flotas to N-decimal points. :type float_precision: integer :param overwrite: default ``False``, If ``True``, when you dump to existing file, it silently overwrite it. If ``False``, an alert message is shown. Default setting ``False`` is to prevent overwrite file by mistake. :type overwrite: boolean :param enable_verbose: default True, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> data = {"a": 1, "b": 2} >>> dump(data, "test.json", overwrite=True) Dumping to 'test.json'... Complete! Elapse 0.002432 sec **中文文档** 将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件 文件 参数列表 :param js: 可Json化的Python对象 :type js: ``字典`` 或 ``列表`` :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param indent_format: 默认 ``False``, 当为 ``True`` 时, Json编码时会对Key进行 排序, 并进行缩进排版。但是这样写入速度较慢, 文件体积也更大。 :type indent_format: "布尔值" :param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会 自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。 :type overwrite: "布尔值" :param float_precision: 默认 ``None``, 当为任意整数时, 则会保留小数点后N位 :type float_precision: "整数" :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值`` """ prt("\nDump to '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_json = is_json_file(abspath) if os.path.exists(abspath): if not overwrite: # 存在, 并且overwrite=False prt(" Stop! File exists and overwrite is not allowed", enable_verbose) return if float_precision is not None: encoder.FLOAT_REPR = lambda x: format(x, ".%sf" % float_precision) indent_format = True else: encoder.FLOAT_REPR = repr if indent_format: sort_keys = True indent = 4 else: sort_keys = False indent = None st = time.clock() js = json.dumps(data, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii) content = js.encode("utf-8") if is_json: textfile.writebytes(content, abspath) else: compress.write_gzip(content, abspath) prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose)
python
def dump(data, abspath, indent_format=False, float_precision=None, ensure_ascii=True, overwrite=False, enable_verbose=True): """Dump Json serializable object to file. Provides multiple choice to customize the behavior. :param data: Serializable python object. :type data: dict or list :param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz`` (for compressed Json) :type abspath: string :param indent_format: default ``False``, If ``True``, then dump to human readable format, but it's slower, the file is larger :type indent_format: boolean :param float_precision: default ``None``, limit flotas to N-decimal points. :type float_precision: integer :param overwrite: default ``False``, If ``True``, when you dump to existing file, it silently overwrite it. If ``False``, an alert message is shown. Default setting ``False`` is to prevent overwrite file by mistake. :type overwrite: boolean :param enable_verbose: default True, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> data = {"a": 1, "b": 2} >>> dump(data, "test.json", overwrite=True) Dumping to 'test.json'... Complete! Elapse 0.002432 sec **中文文档** 将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件 文件 参数列表 :param js: 可Json化的Python对象 :type js: ``字典`` 或 ``列表`` :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param indent_format: 默认 ``False``, 当为 ``True`` 时, Json编码时会对Key进行 排序, 并进行缩进排版。但是这样写入速度较慢, 文件体积也更大。 :type indent_format: "布尔值" :param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会 自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。 :type overwrite: "布尔值" :param float_precision: 默认 ``None``, 当为任意整数时, 则会保留小数点后N位 :type float_precision: "整数" :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值`` """ prt("\nDump to '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_json = is_json_file(abspath) if os.path.exists(abspath): if not overwrite: # 存在, 并且overwrite=False prt(" Stop! File exists and overwrite is not allowed", enable_verbose) return if float_precision is not None: encoder.FLOAT_REPR = lambda x: format(x, ".%sf" % float_precision) indent_format = True else: encoder.FLOAT_REPR = repr if indent_format: sort_keys = True indent = 4 else: sort_keys = False indent = None st = time.clock() js = json.dumps(data, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii) content = js.encode("utf-8") if is_json: textfile.writebytes(content, abspath) else: compress.write_gzip(content, abspath) prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose)
[ "def", "dump", "(", "data", ",", "abspath", ",", "indent_format", "=", "False", ",", "float_precision", "=", "None", ",", "ensure_ascii", "=", "True", ",", "overwrite", "=", "False", ",", "enable_verbose", "=", "True", ")", ":", "prt", "(", "\"\\nDump to '%s' ...\"", "%", "abspath", ",", "enable_verbose", ")", "abspath", "=", "lower_ext", "(", "str", "(", "abspath", ")", ")", "is_json", "=", "is_json_file", "(", "abspath", ")", "if", "os", ".", "path", ".", "exists", "(", "abspath", ")", ":", "if", "not", "overwrite", ":", "# 存在, 并且overwrite=False", "prt", "(", "\" Stop! File exists and overwrite is not allowed\"", ",", "enable_verbose", ")", "return", "if", "float_precision", "is", "not", "None", ":", "encoder", ".", "FLOAT_REPR", "=", "lambda", "x", ":", "format", "(", "x", ",", "\".%sf\"", "%", "float_precision", ")", "indent_format", "=", "True", "else", ":", "encoder", ".", "FLOAT_REPR", "=", "repr", "if", "indent_format", ":", "sort_keys", "=", "True", "indent", "=", "4", "else", ":", "sort_keys", "=", "False", "indent", "=", "None", "st", "=", "time", ".", "clock", "(", ")", "js", "=", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "sort_keys", ",", "indent", "=", "indent", ",", "ensure_ascii", "=", "ensure_ascii", ")", "content", "=", "js", ".", "encode", "(", "\"utf-8\"", ")", "if", "is_json", ":", "textfile", ".", "writebytes", "(", "content", ",", "abspath", ")", "else", ":", "compress", ".", "write_gzip", "(", "content", ",", "abspath", ")", "prt", "(", "\" Complete! Elapse %.6f sec.\"", "%", "(", "time", ".", "clock", "(", ")", "-", "st", ")", ",", "enable_verbose", ")" ]
Dump Json serializable object to file. Provides multiple choice to customize the behavior. :param data: Serializable python object. :type data: dict or list :param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz`` (for compressed Json) :type abspath: string :param indent_format: default ``False``, If ``True``, then dump to human readable format, but it's slower, the file is larger :type indent_format: boolean :param float_precision: default ``None``, limit flotas to N-decimal points. :type float_precision: integer :param overwrite: default ``False``, If ``True``, when you dump to existing file, it silently overwrite it. If ``False``, an alert message is shown. Default setting ``False`` is to prevent overwrite file by mistake. :type overwrite: boolean :param enable_verbose: default True, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> data = {"a": 1, "b": 2} >>> dump(data, "test.json", overwrite=True) Dumping to 'test.json'... Complete! Elapse 0.002432 sec **中文文档** 将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件 文件 参数列表 :param js: 可Json化的Python对象 :type js: ``字典`` 或 ``列表`` :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param indent_format: 默认 ``False``, 当为 ``True`` 时, Json编码时会对Key进行 排序, 并进行缩进排版。但是这样写入速度较慢, 文件体积也更大。 :type indent_format: "布尔值" :param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会 自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。 :type overwrite: "布尔值" :param float_precision: 默认 ``None``, 当为任意整数时, 则会保留小数点后N位 :type float_precision: "整数" :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值``
[ "Dump", "Json", "serializable", "object", "to", "file", ".", "Provides", "multiple", "choice", "to", "customize", "the", "behavior", "." ]
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L135-L235
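One caveat worth illustrating: patching json.encoder.FLOAT_REPR (the float_precision mechanism above) is unreliable across Python 3 versions, so a common workaround -- shown here as a sketch, not as dataIO's own behavior -- is to round floats before dumping.

import json

def round_floats(obj, ndigits=3):
    """Recursively round floats in dicts/lists before json.dumps."""
    if isinstance(obj, float):
        return round(obj, ndigits)
    if isinstance(obj, dict):
        return {k: round_floats(v, ndigits) for k, v in obj.items()}
    if isinstance(obj, list):
        return [round_floats(v, ndigits) for v in obj]
    return obj

print(json.dumps(round_floats({"pi": 3.14159265, "e": 2.71828}), sort_keys=True, indent=4))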
MacHu-GWU/dataIO-project
dataIO/js.py
safe_dump
def safe_dump(data, abspath, indent_format=False, float_precision=None, ensure_ascii=True, enable_verbose=True): """A stable version of :func:`dump`, this method will silently overwrite existing file. There's a issue with :func:`dump`: If your program is interrupted while writing, you got an incomplete file, and you also lose the original file. So this method write json to a temporary file first, then rename to what you expect, and silently overwrite old one. This way can guarantee atomic write. **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了覆盖式 写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证写操作的原子性 (要么全部完成, 要么全部都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个 未完成的临时文件而已, 不会影响原文件。 """ abspath = lower_ext(str(abspath)) abspath_temp = "%s.tmp" % abspath dump(data, abspath_temp, indent_format=indent_format, float_precision=float_precision, ensure_ascii=ensure_ascii, overwrite=True, enable_verbose=enable_verbose) shutil.move(abspath_temp, abspath)
python
def safe_dump(data, abspath, indent_format=False, float_precision=None, ensure_ascii=True, enable_verbose=True): """A stable version of :func:`dump`, this method will silently overwrite existing file. There's an issue with :func:`dump`: If your program is interrupted while writing, you get an incomplete file, and you also lose the original file. So this method writes json to a temporary file first, then renames it to what you expect, silently overwriting the old one. This way guarantees an atomic write. **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了覆盖式 写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证写操作的原子性 (要么全部完成, 要么全部都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个 未完成的临时文件而已, 不会影响原文件。 """ abspath = lower_ext(str(abspath)) abspath_temp = "%s.tmp" % abspath dump(data, abspath_temp, indent_format=indent_format, float_precision=float_precision, ensure_ascii=ensure_ascii, overwrite=True, enable_verbose=enable_verbose) shutil.move(abspath_temp, abspath)
[ "def", "safe_dump", "(", "data", ",", "abspath", ",", "indent_format", "=", "False", ",", "float_precision", "=", "None", ",", "ensure_ascii", "=", "True", ",", "enable_verbose", "=", "True", ")", ":", "abspath", "=", "lower_ext", "(", "str", "(", "abspath", ")", ")", "abspath_temp", "=", "\"%s.tmp\"", "%", "abspath", "dump", "(", "data", ",", "abspath_temp", ",", "indent_format", "=", "indent_format", ",", "float_precision", "=", "float_precision", ",", "ensure_ascii", "=", "ensure_ascii", ",", "overwrite", "=", "True", ",", "enable_verbose", "=", "enable_verbose", ")", "shutil", ".", "move", "(", "abspath_temp", ",", "abspath", ")" ]
A stable version of :func:`dump`, this method will silently overwrite existing file. There's an issue with :func:`dump`: If your program is interrupted while writing, you get an incomplete file, and you also lose the original file. So this method writes json to a temporary file first, then renames it to what you expect, silently overwriting the old one. This way guarantees an atomic write. **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了覆盖式 写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证写操作的原子性 (要么全部完成, 要么全部都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个 未完成的临时文件而已, 不会影响原文件。
[ "A", "stable", "version", "of", ":", "func", ":", "dump", "this", "method", "will", "silently", "overwrite", "existing", "file", "." ]
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L238-L266
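The write-to-temp-then-rename pattern that `safe_dump` relies on can be illustrated without the dataIO helpers. The sketch below is a minimal standalone version using only the standard library; the helper name and file paths are made up for the example, and `os.replace` is used instead of `shutil.move` so the final step is an atomic overwrite on the same filesystem.

import json
import os
import tempfile

def atomic_json_dump(data, path):
    # Write to a temporary file in the target directory first, then swap it
    # into place, so readers never observe a half-written file.
    directory = os.path.dirname(os.path.abspath(path))
    fd, tmp_path = tempfile.mkstemp(dir=directory, suffix=".tmp")
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as handle:
            json.dump(data, handle, sort_keys=True)
        os.replace(tmp_path, path)  # atomic rename over the old file
    except BaseException:
        os.remove(tmp_path)
        raise

atomic_json_dump({"a": 1, "b": 2}, "test.json")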
MacHu-GWU/dataIO-project
dataIO/js.py
pretty_dumps
def pretty_dumps(data): """Return json string in pretty format. **中文文档** 将字典转化成格式化后的字符串。 """ try: return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False) except: return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True)
python
def pretty_dumps(data): """Return json string in pretty format. **中文文档** 将字典转化成格式化后的字符串。 """ try: return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False) except: return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True)
[ "def", "pretty_dumps", "(", "data", ")", ":", "try", ":", "return", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "ensure_ascii", "=", "False", ")", "except", ":", "return", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "ensure_ascii", "=", "True", ")" ]
Return json string in pretty format. **中文文档** 将字典转化成格式化后的字符串。
[ "Return", "json", "string", "in", "pretty", "format", "." ]
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L269-L279
RI-imaging/nrefocus
nrefocus/pad.py
_get_pad_left_right
def _get_pad_left_right(small, large): """ Compute left and right padding values. Here we use the convention that if the padding size is odd, we pad the odd part to the right and the even part to the left. Parameters ---------- small : int Old size of original 1D array large : int New size off padded 1D array Returns ------- (padleft, padright) : tuple The proposed padding sizes. """ assert small < large, "Can only pad when new size larger than old size" padsize = large - small if padsize % 2 != 0: leftpad = (padsize - 1)/2 else: leftpad = padsize/2 rightpad = padsize-leftpad return int(leftpad), int(rightpad)
python
def _get_pad_left_right(small, large): """ Compute left and right padding values. Here we use the convention that if the padding size is odd, we pad the odd part to the right and the even part to the left. Parameters ---------- small : int Old size of original 1D array large : int New size off padded 1D array Returns ------- (padleft, padright) : tuple The proposed padding sizes. """ assert small < large, "Can only pad when new size larger than old size" padsize = large - small if padsize % 2 != 0: leftpad = (padsize - 1)/2 else: leftpad = padsize/2 rightpad = padsize-leftpad return int(leftpad), int(rightpad)
[ "def", "_get_pad_left_right", "(", "small", ",", "large", ")", ":", "assert", "small", "<", "large", ",", "\"Can only pad when new size larger than old size\"", "padsize", "=", "large", "-", "small", "if", "padsize", "%", "2", "!=", "0", ":", "leftpad", "=", "(", "padsize", "-", "1", ")", "/", "2", "else", ":", "leftpad", "=", "padsize", "/", "2", "rightpad", "=", "padsize", "-", "leftpad", "return", "int", "(", "leftpad", ")", ",", "int", "(", "rightpad", ")" ]
Compute left and right padding values. Here we use the convention that if the padding size is odd, we pad the odd part to the right and the even part to the left. Parameters ---------- small : int Old size of original 1D array large : int New size off padded 1D array Returns ------- (padleft, padright) : tuple The proposed padding sizes.
[ "Compute", "left", "and", "right", "padding", "values", "." ]
train
https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L12-L40
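A quick standalone check of the split convention above (the even share of the padding goes to the left, any odd remainder to the right); the helper name is hypothetical and the numbers are arbitrary.

def split_padding(small, large):
    # Same arithmetic as _get_pad_left_right, using integer division.
    padsize = large - small
    left = (padsize - 1) // 2 if padsize % 2 else padsize // 2
    return left, padsize - left

print(split_padding(5, 10))   # (2, 3): the odd element is padded on the right
print(split_padding(4, 10))   # (3, 3): an even padding is split equally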
RI-imaging/nrefocus
nrefocus/pad.py
pad_add
def pad_add(av, size=None, stlen=10): """ Perform linear padding for complex array The input array `av` is padded with a linear ramp starting at the edges and going outwards to an average value computed from a band of thickness `stlen` at the outer boundary of the array. Pads will only be appended, not prepended to the array. If the input array is complex, pads will be complex numbers The average is computed for phase and amplitude separately. Parameters ---------- av : complex 1D or 2D ndarray The array that will be padded. size : int or tuple of length 1 (1D) or tuple of length 2 (2D), optional The final size of the padded array. Defaults to double the size of the input array. stlen : int, optional The thickness of the frame within `av` that will be used to compute an average value for padding. Returns ------- pv : complex 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. """ if size is None: size = list() for s in av.shape: size.append(int(2*s)) elif not hasattr(size, "__len__"): size = [size] assert len(av.shape) in [1, 2], "Only 1D and 2D arrays!" assert len(av.shape) == len( size), "`size` must have same length as `av.shape`!" if len(av.shape) == 2: return _pad_add_2d(av, size, stlen) else: return _pad_add_1d(av, size, stlen)
python
def pad_add(av, size=None, stlen=10): """ Perform linear padding for complex array The input array `av` is padded with a linear ramp starting at the edges and going outwards to an average value computed from a band of thickness `stlen` at the outer boundary of the array. Pads will only be appended, not prepended to the array. If the input array is complex, pads will be complex numbers The average is computed for phase and amplitude separately. Parameters ---------- av : complex 1D or 2D ndarray The array that will be padded. size : int or tuple of length 1 (1D) or tuple of length 2 (2D), optional The final size of the padded array. Defaults to double the size of the input array. stlen : int, optional The thickness of the frame within `av` that will be used to compute an average value for padding. Returns ------- pv : complex 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. """ if size is None: size = list() for s in av.shape: size.append(int(2*s)) elif not hasattr(size, "__len__"): size = [size] assert len(av.shape) in [1, 2], "Only 1D and 2D arrays!" assert len(av.shape) == len( size), "`size` must have same length as `av.shape`!" if len(av.shape) == 2: return _pad_add_2d(av, size, stlen) else: return _pad_add_1d(av, size, stlen)
[ "def", "pad_add", "(", "av", ",", "size", "=", "None", ",", "stlen", "=", "10", ")", ":", "if", "size", "is", "None", ":", "size", "=", "list", "(", ")", "for", "s", "in", "av", ".", "shape", ":", "size", ".", "append", "(", "int", "(", "2", "*", "s", ")", ")", "elif", "not", "hasattr", "(", "size", ",", "\"__len__\"", ")", ":", "size", "=", "[", "size", "]", "assert", "len", "(", "av", ".", "shape", ")", "in", "[", "1", ",", "2", "]", ",", "\"Only 1D and 2D arrays!\"", "assert", "len", "(", "av", ".", "shape", ")", "==", "len", "(", "size", ")", ",", "\"`size` must have same length as `av.shape`!\"", "if", "len", "(", "av", ".", "shape", ")", "==", "2", ":", "return", "_pad_add_2d", "(", "av", ",", "size", ",", "stlen", ")", "else", ":", "return", "_pad_add_1d", "(", "av", ",", "size", ",", "stlen", ")" ]
Perform linear padding for complex array The input array `av` is padded with a linear ramp starting at the edges and going outwards to an average value computed from a band of thickness `stlen` at the outer boundary of the array. Pads will only be appended, not prepended to the array. If the input array is complex, pads will be complex numbers The average is computed for phase and amplitude separately. Parameters ---------- av : complex 1D or 2D ndarray The array that will be padded. size : int or tuple of length 1 (1D) or tuple of length 2 (2D), optional The final size of the padded array. Defaults to double the size of the input array. stlen : int, optional The thickness of the frame within `av` that will be used to compute an average value for padding. Returns ------- pv : complex 1D or 2D ndarray Padded array `av` with pads appended to right and bottom.
[ "Perform", "linear", "padding", "for", "complex", "array" ]
train
https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L43-L86
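A minimal usage sketch, assuming the nrefocus package is installed and that `pad_add` is importable from `nrefocus.pad` as the file path above suggests; the array contents are arbitrary.

import numpy as np
from nrefocus.pad import pad_add  # assumed import path

field = np.ones((32, 32), dtype=complex)
padded = pad_add(field)           # no size given: each dimension is doubled
print(padded.shape)               # (64, 64), pads appended to right and bottom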
RI-imaging/nrefocus
nrefocus/pad.py
_pad_add_1d
def _pad_add_1d(av, size, stlen): """ 1D component of `pad_add` """ assert len(size) == 1 padx = _get_pad_left_right(av.shape[0], size[0]) mask = np.zeros(av.shape, dtype=bool) mask[stlen:-stlen] = True border = av[~mask] if av.dtype.name.count("complex"): padval = np.average(np.abs(border)) * \ np.exp(1j*np.average(np.angle(border))) else: padval = np.average(border) if np.__version__[:3] in ["1.7", "1.8", "1.9"]: end_values = ((padval, padval),) else: end_values = (padval,) bv = np.pad(av, padx, mode="linear_ramp", end_values=end_values) # roll the array so that the padding values are on the right bv = np.roll(bv, -padx[0], 0) return bv
python
def _pad_add_1d(av, size, stlen): """ 1D component of `pad_add` """ assert len(size) == 1 padx = _get_pad_left_right(av.shape[0], size[0]) mask = np.zeros(av.shape, dtype=bool) mask[stlen:-stlen] = True border = av[~mask] if av.dtype.name.count("complex"): padval = np.average(np.abs(border)) * \ np.exp(1j*np.average(np.angle(border))) else: padval = np.average(border) if np.__version__[:3] in ["1.7", "1.8", "1.9"]: end_values = ((padval, padval),) else: end_values = (padval,) bv = np.pad(av, padx, mode="linear_ramp", end_values=end_values) # roll the array so that the padding values are on the right bv = np.roll(bv, -padx[0], 0) return bv
[ "def", "_pad_add_1d", "(", "av", ",", "size", ",", "stlen", ")", ":", "assert", "len", "(", "size", ")", "==", "1", "padx", "=", "_get_pad_left_right", "(", "av", ".", "shape", "[", "0", "]", ",", "size", "[", "0", "]", ")", "mask", "=", "np", ".", "zeros", "(", "av", ".", "shape", ",", "dtype", "=", "bool", ")", "mask", "[", "stlen", ":", "-", "stlen", "]", "=", "True", "border", "=", "av", "[", "~", "mask", "]", "if", "av", ".", "dtype", ".", "name", ".", "count", "(", "\"complex\"", ")", ":", "padval", "=", "np", ".", "average", "(", "np", ".", "abs", "(", "border", ")", ")", "*", "np", ".", "exp", "(", "1j", "*", "np", ".", "average", "(", "np", ".", "angle", "(", "border", ")", ")", ")", "else", ":", "padval", "=", "np", ".", "average", "(", "border", ")", "if", "np", ".", "__version__", "[", ":", "3", "]", "in", "[", "\"1.7\"", ",", "\"1.8\"", ",", "\"1.9\"", "]", ":", "end_values", "=", "(", "(", "padval", ",", "padval", ")", ",", ")", "else", ":", "end_values", "=", "(", "padval", ",", ")", "bv", "=", "np", ".", "pad", "(", "av", ",", "padx", ",", "mode", "=", "\"linear_ramp\"", ",", "end_values", "=", "end_values", ")", "# roll the array so that the padding values are on the right", "bv", "=", "np", ".", "roll", "(", "bv", ",", "-", "padx", "[", "0", "]", ",", "0", ")", "return", "bv" ]
1D component of `pad_add`
[ "2D", "component", "of", "pad_add" ]
train
https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L89-L114
RI-imaging/nrefocus
nrefocus/pad.py
pad_rem
def pad_rem(pv, size=None): """ Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. """ if size is None: size = list() for s in pv.shape: assert s % 2 == 0, "Uneven size; specify correct size of output!" size.append(int(s/2)) elif not hasattr(size, "__len__"): size = [size] assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!" assert len(pv.shape) == len( size), "`size` must have same length as `av.shape`!" if len(pv.shape) == 2: return pv[:size[0], :size[1]] else: return pv[:size[0]]
python
def pad_rem(pv, size=None): """ Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. """ if size is None: size = list() for s in pv.shape: assert s % 2 == 0, "Uneven size; specify correct size of output!" size.append(int(s/2)) elif not hasattr(size, "__len__"): size = [size] assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!" assert len(pv.shape) == len( size), "`size` must have same length as `av.shape`!" if len(pv.shape) == 2: return pv[:size[0], :size[1]] else: return pv[:size[0]]
[ "def", "pad_rem", "(", "pv", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "list", "(", ")", "for", "s", "in", "pv", ".", "shape", ":", "assert", "s", "%", "2", "==", "0", ",", "\"Uneven size; specify correct size of output!\"", "size", ".", "append", "(", "int", "(", "s", "/", "2", ")", ")", "elif", "not", "hasattr", "(", "size", ",", "\"__len__\"", ")", ":", "size", "=", "[", "size", "]", "assert", "len", "(", "pv", ".", "shape", ")", "in", "[", "1", ",", "2", "]", ",", "\"Only 1D and 2D arrays!\"", "assert", "len", "(", "pv", ".", "shape", ")", "==", "len", "(", "size", ")", ",", "\"`size` must have same length as `av.shape`!\"", "if", "len", "(", "pv", ".", "shape", ")", "==", "2", ":", "return", "pv", "[", ":", "size", "[", "0", "]", ",", ":", "size", "[", "1", "]", "]", "else", ":", "return", "pv", "[", ":", "size", "[", "0", "]", "]" ]
Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom.
[ "Removes", "linear", "padding", "from", "array" ]
train
https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/pad.py#L147-L182
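Because `pad_add` only appends pads (the original data stays in the top-left corner), `pad_rem` with default arguments should give back the original array exactly. A hedged round-trip sketch, again assuming the `nrefocus.pad` import path:

import numpy as np
from nrefocus.pad import pad_add, pad_rem  # assumed import path

field = np.random.rand(20, 20) + 1j * np.random.rand(20, 20)
padded = pad_add(field)        # (40, 40)
restored = pad_rem(padded)     # defaults to half the padded size
print(np.allclose(restored, field))   # expected True: only the appended pads are removed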
anteater/anteater
anteater/src/virus_total.py
VirusTotal.rate_limit
def rate_limit(self): """ Simple rate limit function using redis """ rate_limited_msg = False while True: is_rate_limited = self.limit.is_rate_limited(uuid) if is_rate_limited: time.sleep(0.3) # save hammering redis if not rate_limited_msg: self.logger.info('Rate limit active..please wait...') rate_limited_msg = True if not is_rate_limited: self.logger.info('Rate limit clear.') self.limit.attempt(uuid) return True
python
def rate_limit(self): """ Simple rate limit function using redis """ rate_limited_msg = False while True: is_rate_limited = self.limit.is_rate_limited(uuid) if is_rate_limited: time.sleep(0.3) # save hammering redis if not rate_limited_msg: self.logger.info('Rate limit active..please wait...') rate_limited_msg = True if not is_rate_limited: self.logger.info('Rate limit clear.') self.limit.attempt(uuid) return True
[ "def", "rate_limit", "(", "self", ")", ":", "rate_limited_msg", "=", "False", "while", "True", ":", "is_rate_limited", "=", "self", ".", "limit", ".", "is_rate_limited", "(", "uuid", ")", "if", "is_rate_limited", ":", "time", ".", "sleep", "(", "0.3", ")", "# save hammering redis", "if", "not", "rate_limited_msg", ":", "self", ".", "logger", ".", "info", "(", "'Rate limit active..please wait...'", ")", "rate_limited_msg", "=", "True", "if", "not", "is_rate_limited", ":", "self", ".", "logger", ".", "info", "(", "'Rate limit clear.'", ")", "self", ".", "limit", ".", "attempt", "(", "uuid", ")", "return", "True" ]
Simple rate limit function using redis
[ "Simple", "rate", "limit", "function", "using", "redis" ]
train
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L60-L77
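The redis-backed limiter object (`self.limit`) and the module-level `uuid` key are not shown in this record, so the busy-wait pattern is easiest to see with an in-process stand-in. The class below is purely illustrative and is not the library the code above uses.

import time

class SimpleLimiter:
    # In-process stand-in for the redis-backed limiter (illustrative only).
    def __init__(self, max_calls, period):
        self.max_calls = max_calls
        self.period = period
        self.calls = []

    def is_rate_limited(self, key):
        now = time.monotonic()
        self.calls = [t for t in self.calls if now - t < self.period]
        return len(self.calls) >= self.max_calls

    def attempt(self, key):
        self.calls.append(time.monotonic())

limit = SimpleLimiter(max_calls=4, period=60)   # e.g. four requests per minute
while limit.is_rate_limited("virustotal"):
    time.sleep(0.3)                             # same back-off as the method above
limit.attempt("virustotal")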
anteater/anteater
anteater/src/virus_total.py
VirusTotal.scan_file
def scan_file(self, filename, apikey): """ Sends a file to virus total for assessment """ url = self.base_url + "file/scan" params = {'apikey': apikey} scanfile = {"file": open(filename, 'rb')} response = requests.post(url, files=scanfile, params=params) rate_limit_clear = self.rate_limit() if rate_limit_clear: if response.status_code == self.HTTP_OK: json_response = response.json() return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", filename, response.status_code)
python
def scan_file(self, filename, apikey): """ Sends a file to virus total for assessment """ url = self.base_url + "file/scan" params = {'apikey': apikey} scanfile = {"file": open(filename, 'rb')} response = requests.post(url, files=scanfile, params=params) rate_limit_clear = self.rate_limit() if rate_limit_clear: if response.status_code == self.HTTP_OK: json_response = response.json() return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", filename, response.status_code)
[ "def", "scan_file", "(", "self", ",", "filename", ",", "apikey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"file/scan\"", "params", "=", "{", "'apikey'", ":", "apikey", "}", "scanfile", "=", "{", "\"file\"", ":", "open", "(", "filename", ",", "'rb'", ")", "}", "response", "=", "requests", ".", "post", "(", "url", ",", "files", "=", "scanfile", ",", "params", "=", "params", ")", "rate_limit_clear", "=", "self", ".", "rate_limit", "(", ")", "if", "rate_limit_clear", ":", "if", "response", ".", "status_code", "==", "self", ".", "HTTP_OK", ":", "json_response", "=", "response", ".", "json", "(", ")", "return", "json_response", "elif", "response", ".", "status_code", "==", "self", ".", "HTTP_RATE_EXCEEDED", ":", "time", ".", "sleep", "(", "20", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"sent: %s, HTTP: %d\"", ",", "filename", ",", "response", ".", "status_code", ")" ]
Sends a file to virus total for assessment
[ "Sends", "a", "file", "to", "virus", "total", "for", "assessment" ]
train
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L79-L95
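The `base_url` attribute is not shown in this record; the standalone sketch below assumes the VirusTotal v2 public endpoint and uses a context manager so the file handle is closed after the upload. Treat the URL and error handling as assumptions rather than the class's exact behaviour.

import requests

VT_API = "https://www.virustotal.com/vtapi/v2/"   # assumed base URL

def scan_file(path, apikey):
    # Upload the file for scanning and return the parsed JSON response.
    with open(path, "rb") as handle:
        response = requests.post(
            VT_API + "file/scan",
            files={"file": handle},
            params={"apikey": apikey},
        )
    response.raise_for_status()
    return response.json()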
anteater/anteater
anteater/src/virus_total.py
VirusTotal.rescan_file
def rescan_file(self, filename, sha256hash, apikey): """ just send the hash, check the date """ url = self.base_url + "file/rescan" params = { 'apikey': apikey, 'resource': sha256hash } rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.post(url, params=params) if response.status_code == self.HTTP_OK: self.logger.info("sent: %s, HTTP: %d, content: %s", os.path.basename(filename), response.status_code, response.text) elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", os.path.basename(filename), response.status_code) return response
python
def rescan_file(self, filename, sha256hash, apikey): """ just send the hash, check the date """ url = self.base_url + "file/rescan" params = { 'apikey': apikey, 'resource': sha256hash } rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.post(url, params=params) if response.status_code == self.HTTP_OK: self.logger.info("sent: %s, HTTP: %d, content: %s", os.path.basename(filename), response.status_code, response.text) elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", os.path.basename(filename), response.status_code) return response
[ "def", "rescan_file", "(", "self", ",", "filename", ",", "sha256hash", ",", "apikey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"file/rescan\"", "params", "=", "{", "'apikey'", ":", "apikey", ",", "'resource'", ":", "sha256hash", "}", "rate_limit_clear", "=", "self", ".", "rate_limit", "(", ")", "if", "rate_limit_clear", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "params", "=", "params", ")", "if", "response", ".", "status_code", "==", "self", ".", "HTTP_OK", ":", "self", ".", "logger", ".", "info", "(", "\"sent: %s, HTTP: %d, content: %s\"", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "response", ".", "status_code", ",", "response", ".", "text", ")", "elif", "response", ".", "status_code", "==", "self", ".", "HTTP_RATE_EXCEEDED", ":", "time", ".", "sleep", "(", "20", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"sent: %s, HTTP: %d\"", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "response", ".", "status_code", ")", "return", "response" ]
just send the hash, check the date
[ "just", "send", "the", "hash", "check", "the", "date" ]
train
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L97-L116
anteater/anteater
anteater/src/virus_total.py
VirusTotal.binary_report
def binary_report(self, sha256sum, apikey): """ retrieve report from file scan """ url = self.base_url + "file/report" params = {"apikey": apikey, "resource": sha256sum} rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.post(url, data=params) if response.status_code == self.HTTP_OK: json_response = response.json() response_code = json_response['response_code'] return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.warning("retrieve report: %s, HTTP code: %d", sha256sum, response.status_code)
python
def binary_report(self, sha256sum, apikey): """ retrieve report from file scan """ url = self.base_url + "file/report" params = {"apikey": apikey, "resource": sha256sum} rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.post(url, data=params) if response.status_code == self.HTTP_OK: json_response = response.json() response_code = json_response['response_code'] return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.warning("retrieve report: %s, HTTP code: %d", sha256sum, response.status_code)
[ "def", "binary_report", "(", "self", ",", "sha256sum", ",", "apikey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"file/report\"", "params", "=", "{", "\"apikey\"", ":", "apikey", ",", "\"resource\"", ":", "sha256sum", "}", "rate_limit_clear", "=", "self", ".", "rate_limit", "(", ")", "if", "rate_limit_clear", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "params", ")", "if", "response", ".", "status_code", "==", "self", ".", "HTTP_OK", ":", "json_response", "=", "response", ".", "json", "(", ")", "response_code", "=", "json_response", "[", "'response_code'", "]", "return", "json_response", "elif", "response", ".", "status_code", "==", "self", ".", "HTTP_RATE_EXCEEDED", ":", "time", ".", "sleep", "(", "20", ")", "else", ":", "self", ".", "logger", ".", "warning", "(", "\"retrieve report: %s, HTTP code: %d\"", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "response", ".", "status_code", ")" ]
retrieve report from file scan
[ "retrieve", "report", "from", "file", "scan" ]
train
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L118-L136
anteater/anteater
anteater/src/virus_total.py
VirusTotal.send_ip
def send_ip(self, ipaddr, apikey): """ Send IP address for list of past malicious domain associations """ url = self.base_url + "ip-address/report" parameters = {"ip": ipaddr, "apikey": apikey} rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.get(url, params=parameters) if response.status_code == self.HTTP_OK: json_response = response.json() return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", ipaddr, response.status_code) time.sleep(self.public_api_sleep_time)
python
def send_ip(self, ipaddr, apikey): """ Send IP address for list of past malicious domain associations """ url = self.base_url + "ip-address/report" parameters = {"ip": ipaddr, "apikey": apikey} rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.get(url, params=parameters) if response.status_code == self.HTTP_OK: json_response = response.json() return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", ipaddr, response.status_code) time.sleep(self.public_api_sleep_time)
[ "def", "send_ip", "(", "self", ",", "ipaddr", ",", "apikey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"ip-address/report\"", "parameters", "=", "{", "\"ip\"", ":", "ipaddr", ",", "\"apikey\"", ":", "apikey", "}", "rate_limit_clear", "=", "self", ".", "rate_limit", "(", ")", "if", "rate_limit_clear", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "parameters", ")", "if", "response", ".", "status_code", "==", "self", ".", "HTTP_OK", ":", "json_response", "=", "response", ".", "json", "(", ")", "return", "json_response", "elif", "response", ".", "status_code", "==", "self", ".", "HTTP_RATE_EXCEEDED", ":", "time", ".", "sleep", "(", "20", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"sent: %s, HTTP: %d\"", ",", "ipaddr", ",", "response", ".", "status_code", ")", "time", ".", "sleep", "(", "self", ".", "public_api_sleep_time", ")" ]
Send IP address for list of past malicious domain associations
[ "Send", "IP", "address", "for", "list", "of", "past", "malicous", "domain", "associations" ]
train
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L138-L154
anteater/anteater
anteater/src/virus_total.py
VirusTotal.url_report
def url_report(self, scan_url, apikey): """ Send URLs for list of past malicious associations """ url = self.base_url + "url/report" params = {"apikey": apikey, 'resource': scan_url} rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.post(url, params=params, headers=self.headers) if response.status_code == self.HTTP_OK: json_response = response.json() return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code) time.sleep(self.public_api_sleep_time)
python
def url_report(self, scan_url, apikey): """ Send URLs for list of past malicious associations """ url = self.base_url + "url/report" params = {"apikey": apikey, 'resource': scan_url} rate_limit_clear = self.rate_limit() if rate_limit_clear: response = requests.post(url, params=params, headers=self.headers) if response.status_code == self.HTTP_OK: json_response = response.json() return json_response elif response.status_code == self.HTTP_RATE_EXCEEDED: time.sleep(20) else: self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code) time.sleep(self.public_api_sleep_time)
[ "def", "url_report", "(", "self", ",", "scan_url", ",", "apikey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"url/report\"", "params", "=", "{", "\"apikey\"", ":", "apikey", ",", "'resource'", ":", "scan_url", "}", "rate_limit_clear", "=", "self", ".", "rate_limit", "(", ")", "if", "rate_limit_clear", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "params", "=", "params", ",", "headers", "=", "self", ".", "headers", ")", "if", "response", ".", "status_code", "==", "self", ".", "HTTP_OK", ":", "json_response", "=", "response", ".", "json", "(", ")", "return", "json_response", "elif", "response", ".", "status_code", "==", "self", ".", "HTTP_RATE_EXCEEDED", ":", "time", ".", "sleep", "(", "20", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"sent: %s, HTTP: %d\"", ",", "scan_url", ",", "response", ".", "status_code", ")", "time", ".", "sleep", "(", "self", ".", "public_api_sleep_time", ")" ]
Send URLs for list of past malicious associations
[ "Send", "URLS", "for", "list", "of", "past", "malicous", "associations" ]
train
https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/virus_total.py#L156-L172
rapidpro/expressions
python/setup.py
_read_requirements
def _read_requirements(filename, extra_packages): """Returns a list of package requirements read from the file.""" requirements_file = open(filename).read() hard_requirements = [] for line in requirements_file.splitlines(): if _is_requirement(line): if line.find(';') > -1: dep, condition = tuple(line.split(';')) extra_packages[condition.strip()].append(dep.strip()) else: hard_requirements.append(line.strip()) return hard_requirements, extra_packages
python
def _read_requirements(filename, extra_packages): """Returns a list of package requirements read from the file.""" requirements_file = open(filename).read() hard_requirements = [] for line in requirements_file.splitlines(): if _is_requirement(line): if line.find(';') > -1: dep, condition = tuple(line.split(';')) extra_packages[condition.strip()].append(dep.strip()) else: hard_requirements.append(line.strip()) return hard_requirements, extra_packages
[ "def", "_read_requirements", "(", "filename", ",", "extra_packages", ")", ":", "requirements_file", "=", "open", "(", "filename", ")", ".", "read", "(", ")", "hard_requirements", "=", "[", "]", "for", "line", "in", "requirements_file", ".", "splitlines", "(", ")", ":", "if", "_is_requirement", "(", "line", ")", ":", "if", "line", ".", "find", "(", "';'", ")", ">", "-", "1", ":", "dep", ",", "condition", "=", "tuple", "(", "line", ".", "split", "(", "';'", ")", ")", "extra_packages", "[", "condition", ".", "strip", "(", ")", "]", ".", "append", "(", "dep", ".", "strip", "(", ")", ")", "else", ":", "hard_requirements", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "return", "hard_requirements", ",", "extra_packages" ]
Returns a list of package requirements read from the file.
[ "Returns", "a", "list", "of", "package", "requirements", "read", "from", "the", "file", "." ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/setup.py#L14-L26
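The splitting of environment markers into extras can be seen with a small inline example. The `_is_requirement` helper is not shown in this record, so the filter below is a plausible stand-in, and the requirements text is made up.

from collections import defaultdict

requirements_txt = """\
requests>=2.0
enum34; python_version < "3.4"
"""

hard, extras = [], defaultdict(list)
for line in requirements_txt.splitlines():
    if line and not line.startswith(("#", "-")):   # stand-in for _is_requirement
        if ";" in line:
            dep, condition = line.split(";")
            extras[condition.strip()].append(dep.strip())
        else:
            hard.append(line.strip())

print(hard)          # ['requests>=2.0']
print(dict(extras))  # {'python_version < "3.4"': ['enum34']}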
rapidpro/expressions
python/temba_expressions/functions/custom.py
field
def field(ctx, text, index, delimiter=' '): """ Reference a field in string separated by a delimiter """ splits = text.split(delimiter) # remove our delimiters and whitespace splits = [f for f in splits if f != delimiter and len(f.strip()) > 0] index = conversions.to_integer(index, ctx) if index < 1: raise ValueError('Field index cannot be less than 1') if index <= len(splits): return splits[index-1] else: return ''
python
def field(ctx, text, index, delimiter=' '): """ Reference a field in string separated by a delimiter """ splits = text.split(delimiter) # remove our delimiters and whitespace splits = [f for f in splits if f != delimiter and len(f.strip()) > 0] index = conversions.to_integer(index, ctx) if index < 1: raise ValueError('Field index cannot be less than 1') if index <= len(splits): return splits[index-1] else: return ''
[ "def", "field", "(", "ctx", ",", "text", ",", "index", ",", "delimiter", "=", "' '", ")", ":", "splits", "=", "text", ".", "split", "(", "delimiter", ")", "# remove our delimiters and whitespace", "splits", "=", "[", "f", "for", "f", "in", "splits", "if", "f", "!=", "delimiter", "and", "len", "(", "f", ".", "strip", "(", ")", ")", ">", "0", "]", "index", "=", "conversions", ".", "to_integer", "(", "index", ",", "ctx", ")", "if", "index", "<", "1", ":", "raise", "ValueError", "(", "'Field index cannot be less than 1'", ")", "if", "index", "<=", "len", "(", "splits", ")", ":", "return", "splits", "[", "index", "-", "1", "]", "else", ":", "return", "''" ]
Reference a field in string separated by a delimiter
[ "Reference", "a", "field", "in", "string", "separated", "by", "a", "delimiter" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L10-L26
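A standalone sketch of the same field-splitting behaviour, with the expression-context machinery stripped out (`conversions.to_integer` is replaced by a plain int); the function name is hypothetical.

def field_value(text, index, delimiter=" "):
    # Split, drop empty fields, then index from 1.
    splits = [f for f in text.split(delimiter) if f != delimiter and f.strip()]
    if index < 1:
        raise ValueError("Field index cannot be less than 1")
    return splits[index - 1] if index <= len(splits) else ""

print(field_value("hello world there", 2))   # 'world'
print(field_value("a,,b,c", 3, ","))         # 'c' (empty fields are dropped)
print(field_value("a b", 5))                 # '' (out of range)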
rapidpro/expressions
python/temba_expressions/functions/custom.py
epoch
def epoch(ctx, datetime): """ Converts the given date to the number of seconds since January 1st, 1970 UTC """ return conversions.to_decimal(str(conversions.to_datetime(datetime, ctx).timestamp()), ctx)
python
def epoch(ctx, datetime): """ Converts the given date to the number of seconds since January 1st, 1970 UTC """ return conversions.to_decimal(str(conversions.to_datetime(datetime, ctx).timestamp()), ctx)
[ "def", "epoch", "(", "ctx", ",", "datetime", ")", ":", "return", "conversions", ".", "to_decimal", "(", "str", "(", "conversions", ".", "to_datetime", "(", "datetime", ",", "ctx", ")", ".", "timestamp", "(", ")", ")", ",", "ctx", ")" ]
Converts the given date to the number of seconds since January 1st, 1970 UTC
[ "Converts", "the", "given", "date", "to", "the", "number", "of", "seconds", "since", "January", "1st", "1970", "UTC" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L44-L48
rapidpro/expressions
python/temba_expressions/functions/custom.py
read_digits
def read_digits(ctx, text): """ Formats digits in text for reading in TTS """ def chunk(value, chunk_size): return [value[i: i + chunk_size] for i in range(0, len(value), chunk_size)] text = conversions.to_string(text, ctx).strip() if not text: return '' # trim off the plus for phone numbers if text[0] == '+': text = text[1:] length = len(text) # ssn if length == 9: result = ' '.join(text[:3]) result += ' , ' + ' '.join(text[3:5]) result += ' , ' + ' '.join(text[5:]) return result # triplets, most international phone numbers if length % 3 == 0 and length > 3: chunks = chunk(text, 3) return ' '.join(','.join(chunks)) # quads, credit cards if length % 4 == 0: chunks = chunk(text, 4) return ' '.join(','.join(chunks)) # otherwise, just put a comma between each number return ','.join(text)
python
def read_digits(ctx, text): """ Formats digits in text for reading in TTS """ def chunk(value, chunk_size): return [value[i: i + chunk_size] for i in range(0, len(value), chunk_size)] text = conversions.to_string(text, ctx).strip() if not text: return '' # trim off the plus for phone numbers if text[0] == '+': text = text[1:] length = len(text) # ssn if length == 9: result = ' '.join(text[:3]) result += ' , ' + ' '.join(text[3:5]) result += ' , ' + ' '.join(text[5:]) return result # triplets, most international phone numbers if length % 3 == 0 and length > 3: chunks = chunk(text, 3) return ' '.join(','.join(chunks)) # quads, credit cards if length % 4 == 0: chunks = chunk(text, 4) return ' '.join(','.join(chunks)) # otherwise, just put a comma between each number return ','.join(text)
[ "def", "read_digits", "(", "ctx", ",", "text", ")", ":", "def", "chunk", "(", "value", ",", "chunk_size", ")", ":", "return", "[", "value", "[", "i", ":", "i", "+", "chunk_size", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "value", ")", ",", "chunk_size", ")", "]", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", ".", "strip", "(", ")", "if", "not", "text", ":", "return", "''", "# trim off the plus for phone numbers", "if", "text", "[", "0", "]", "==", "'+'", ":", "text", "=", "text", "[", "1", ":", "]", "length", "=", "len", "(", "text", ")", "# ssn", "if", "length", "==", "9", ":", "result", "=", "' '", ".", "join", "(", "text", "[", ":", "3", "]", ")", "result", "+=", "' , '", "+", "' '", ".", "join", "(", "text", "[", "3", ":", "5", "]", ")", "result", "+=", "' , '", "+", "' '", ".", "join", "(", "text", "[", "5", ":", "]", ")", "return", "result", "# triplets, most international phone numbers", "if", "length", "%", "3", "==", "0", "and", "length", ">", "3", ":", "chunks", "=", "chunk", "(", "text", ",", "3", ")", "return", "' '", ".", "join", "(", "','", ".", "join", "(", "chunks", ")", ")", "# quads, credit cards", "if", "length", "%", "4", "==", "0", ":", "chunks", "=", "chunk", "(", "text", ",", "4", ")", "return", "' '", ".", "join", "(", "','", ".", "join", "(", "chunks", ")", ")", "# otherwise, just put a comma between each number", "return", "','", ".", "join", "(", "text", ")" ]
Formats digits in text for reading in TTS
[ "Formats", "digits", "in", "text", "for", "reading", "in", "TTS" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L51-L86
rapidpro/expressions
python/temba_expressions/functions/custom.py
remove_first_word
def remove_first_word(ctx, text): """ Removes the first word from the given text string """ text = conversions.to_string(text, ctx).lstrip() first = first_word(ctx, text) return text[len(first):].lstrip() if first else ''
python
def remove_first_word(ctx, text): """ Removes the first word from the given text string """ text = conversions.to_string(text, ctx).lstrip() first = first_word(ctx, text) return text[len(first):].lstrip() if first else ''
[ "def", "remove_first_word", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", ".", "lstrip", "(", ")", "first", "=", "first_word", "(", "ctx", ",", "text", ")", "return", "text", "[", "len", "(", "first", ")", ":", "]", ".", "lstrip", "(", ")", "if", "first", "else", "''" ]
Removes the first word from the given text string
[ "Removes", "the", "first", "word", "from", "the", "given", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L89-L95
rapidpro/expressions
python/temba_expressions/functions/custom.py
word
def word(ctx, text, number, by_spaces=False): """ Extracts the nth word from the given text string """ return word_slice(ctx, text, number, conversions.to_integer(number, ctx) + 1, by_spaces)
python
def word(ctx, text, number, by_spaces=False): """ Extracts the nth word from the given text string """ return word_slice(ctx, text, number, conversions.to_integer(number, ctx) + 1, by_spaces)
[ "def", "word", "(", "ctx", ",", "text", ",", "number", ",", "by_spaces", "=", "False", ")", ":", "return", "word_slice", "(", "ctx", ",", "text", ",", "number", ",", "conversions", ".", "to_integer", "(", "number", ",", "ctx", ")", "+", "1", ",", "by_spaces", ")" ]
Extracts the nth word from the given text string
[ "Extracts", "the", "nth", "word", "from", "the", "given", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L98-L102
rapidpro/expressions
python/temba_expressions/functions/custom.py
word_count
def word_count(ctx, text, by_spaces=False): """ Returns the number of words in the given text string """ text = conversions.to_string(text, ctx) by_spaces = conversions.to_boolean(by_spaces, ctx) return len(__get_words(text, by_spaces))
python
def word_count(ctx, text, by_spaces=False): """ Returns the number of words in the given text string """ text = conversions.to_string(text, ctx) by_spaces = conversions.to_boolean(by_spaces, ctx) return len(__get_words(text, by_spaces))
[ "def", "word_count", "(", "ctx", ",", "text", ",", "by_spaces", "=", "False", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "by_spaces", "=", "conversions", ".", "to_boolean", "(", "by_spaces", ",", "ctx", ")", "return", "len", "(", "__get_words", "(", "text", ",", "by_spaces", ")", ")" ]
Returns the number of words in the given text string
[ "Returns", "the", "number", "of", "words", "in", "the", "given", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L105-L111
rapidpro/expressions
python/temba_expressions/functions/custom.py
word_slice
def word_slice(ctx, text, start, stop=0, by_spaces=False): """ Extracts a substring spanning from start up to but not-including stop """ text = conversions.to_string(text, ctx) start = conversions.to_integer(start, ctx) stop = conversions.to_integer(stop, ctx) by_spaces = conversions.to_boolean(by_spaces, ctx) if start == 0: raise ValueError("Start word cannot be zero") elif start > 0: start -= 1 # convert to a zero-based offset if stop == 0: # zero is treated as no end stop = None elif stop > 0: stop -= 1 # convert to a zero-based offset words = __get_words(text, by_spaces) selection = operator.getitem(words, slice(start, stop)) # re-combine selected words with a single space return ' '.join(selection)
python
def word_slice(ctx, text, start, stop=0, by_spaces=False): """ Extracts a substring spanning from start up to but not-including stop """ text = conversions.to_string(text, ctx) start = conversions.to_integer(start, ctx) stop = conversions.to_integer(stop, ctx) by_spaces = conversions.to_boolean(by_spaces, ctx) if start == 0: raise ValueError("Start word cannot be zero") elif start > 0: start -= 1 # convert to a zero-based offset if stop == 0: # zero is treated as no end stop = None elif stop > 0: stop -= 1 # convert to a zero-based offset words = __get_words(text, by_spaces) selection = operator.getitem(words, slice(start, stop)) # re-combine selected words with a single space return ' '.join(selection)
[ "def", "word_slice", "(", "ctx", ",", "text", ",", "start", ",", "stop", "=", "0", ",", "by_spaces", "=", "False", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "start", "=", "conversions", ".", "to_integer", "(", "start", ",", "ctx", ")", "stop", "=", "conversions", ".", "to_integer", "(", "stop", ",", "ctx", ")", "by_spaces", "=", "conversions", ".", "to_boolean", "(", "by_spaces", ",", "ctx", ")", "if", "start", "==", "0", ":", "raise", "ValueError", "(", "\"Start word cannot be zero\"", ")", "elif", "start", ">", "0", ":", "start", "-=", "1", "# convert to a zero-based offset", "if", "stop", "==", "0", ":", "# zero is treated as no end", "stop", "=", "None", "elif", "stop", ">", "0", ":", "stop", "-=", "1", "# convert to a zero-based offset", "words", "=", "__get_words", "(", "text", ",", "by_spaces", ")", "selection", "=", "operator", ".", "getitem", "(", "words", ",", "slice", "(", "start", ",", "stop", ")", ")", "# re-combine selected words with a single space", "return", "' '", ".", "join", "(", "selection", ")" ]
Extracts a substring spanning from start up to but not-including stop
[ "Extracts", "a", "substring", "spanning", "from", "start", "up", "to", "but", "not", "-", "including", "stop" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L114-L138
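The index handling (1-based start, exclusive stop, 0 meaning "to the end", negatives counted from the back) is the subtle part of `word_slice`. A standalone sketch of just that arithmetic, skipping the tokenisation step:

def word_slice_demo(words, start, stop=0):
    if start == 0:
        raise ValueError("Start word cannot be zero")
    start = start - 1 if start > 0 else start                  # 1-based -> 0-based
    stop = None if stop == 0 else (stop - 1 if stop > 0 else stop)
    return " ".join(words[start:stop])

words = "the quick brown fox jumps".split()
print(word_slice_demo(words, 2, 4))   # 'quick brown'
print(word_slice_demo(words, 3))      # 'brown fox jumps'
print(word_slice_demo(words, -2))     # 'fox jumps'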
rapidpro/expressions
python/temba_expressions/functions/custom.py
format_date
def format_date(ctx, text): """ Takes a single parameter (date as string) and returns it in the format defined by the org """ dt = conversions.to_datetime(text, ctx) return dt.astimezone(ctx.timezone).strftime(ctx.get_date_format(True))
python
def format_date(ctx, text): """ Takes a single parameter (date as string) and returns it in the format defined by the org """ dt = conversions.to_datetime(text, ctx) return dt.astimezone(ctx.timezone).strftime(ctx.get_date_format(True))
[ "def", "format_date", "(", "ctx", ",", "text", ")", ":", "dt", "=", "conversions", ".", "to_datetime", "(", "text", ",", "ctx", ")", "return", "dt", ".", "astimezone", "(", "ctx", ".", "timezone", ")", ".", "strftime", "(", "ctx", ".", "get_date_format", "(", "True", ")", ")" ]
Takes a single parameter (date as string) and returns it in the format defined by the org
[ "Takes", "a", "single", "parameter", "(", "date", "as", "string", ")", "and", "returns", "it", "in", "the", "format", "defined", "by", "the", "org" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L141-L146
rapidpro/expressions
python/temba_expressions/functions/custom.py
format_location
def format_location(ctx, text): """ Takes a single parameter (administrative boundary as a string) and returns the name of the leaf boundary """ text = conversions.to_string(text, ctx) return text.split(">")[-1].strip()
python
def format_location(ctx, text): """ Takes a single parameter (administrative boundary as a string) and returns the name of the leaf boundary """ text = conversions.to_string(text, ctx) return text.split(">")[-1].strip()
[ "def", "format_location", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "return", "text", ".", "split", "(", "\">\"", ")", "[", "-", "1", "]", ".", "strip", "(", ")" ]
Takes a single parameter (administrative boundary as a string) and returns the name of the leaf boundary
[ "Takes", "a", "single", "parameter", "(", "administrative", "boundary", "as", "a", "string", ")", "and", "returns", "the", "name", "of", "the", "leaf", "boundary" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L149-L154
rapidpro/expressions
python/temba_expressions/functions/custom.py
regex_group
def regex_group(ctx, text, pattern, group_num): """ Tries to match the text with the given pattern and returns the value of matching group """ text = conversions.to_string(text, ctx) pattern = conversions.to_string(pattern, ctx) group_num = conversions.to_integer(group_num, ctx) expression = regex.compile(pattern, regex.UNICODE | regex.IGNORECASE | regex.MULTILINE | regex.V0) match = expression.search(text) if not match: return "" if group_num < 0 or group_num > len(match.groups()): raise ValueError("No such matching group %d" % group_num) return match.group(group_num)
python
def regex_group(ctx, text, pattern, group_num): """ Tries to match the text with the given pattern and returns the value of matching group """ text = conversions.to_string(text, ctx) pattern = conversions.to_string(pattern, ctx) group_num = conversions.to_integer(group_num, ctx) expression = regex.compile(pattern, regex.UNICODE | regex.IGNORECASE | regex.MULTILINE | regex.V0) match = expression.search(text) if not match: return "" if group_num < 0 or group_num > len(match.groups()): raise ValueError("No such matching group %d" % group_num) return match.group(group_num)
[ "def", "regex_group", "(", "ctx", ",", "text", ",", "pattern", ",", "group_num", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "pattern", "=", "conversions", ".", "to_string", "(", "pattern", ",", "ctx", ")", "group_num", "=", "conversions", ".", "to_integer", "(", "group_num", ",", "ctx", ")", "expression", "=", "regex", ".", "compile", "(", "pattern", ",", "regex", ".", "UNICODE", "|", "regex", ".", "IGNORECASE", "|", "regex", ".", "MULTILINE", "|", "regex", ".", "V0", ")", "match", "=", "expression", ".", "search", "(", "text", ")", "if", "not", "match", ":", "return", "\"\"", "if", "group_num", "<", "0", "or", "group_num", ">", "len", "(", "match", ".", "groups", "(", ")", ")", ":", "raise", "ValueError", "(", "\"No such matching group %d\"", "%", "group_num", ")", "return", "match", ".", "group", "(", "group_num", ")" ]
Tries to match the text with the given pattern and returns the value of matching group
[ "Tries", "to", "match", "the", "text", "with", "the", "given", "pattern", "and", "returns", "the", "value", "of", "matching", "group" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L157-L174
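The group-extraction behaviour can be checked with the standard library's `re` module instead of the third-party `regex` module used above (the `regex.V0` flag has no `re` equivalent and is simply dropped here):

import re

def regex_group_demo(text, pattern, group_num):
    match = re.search(pattern, text, re.IGNORECASE | re.MULTILINE)
    if not match:
        return ""
    if group_num < 0 or group_num > len(match.groups()):
        raise ValueError("No such matching group %d" % group_num)
    return match.group(group_num)

print(regex_group_demo("Isaac Newton", r"(\w+) (\w+)", 2))   # 'Newton'
print(regex_group_demo("Isaac Newton", r"(\w+) (\w+)", 0))   # 'Isaac Newton'
print(regex_group_demo("no digits here", r"(\d+)", 1))       # ''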
rapidpro/expressions
python/temba_expressions/functions/custom.py
__get_words
def __get_words(text, by_spaces): """ Helper function which splits the given text string into words. If by_spaces is false, then text like '01-02-2014' will be split into 3 separate words. For backwards compatibility, this is the default for all expression functions. :param text: the text to split :param by_spaces: whether words should be split only by spaces or by punctuation like '-', '.' etc """ if by_spaces: splits = regex.split(r'\s+', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0) return [split for split in splits if split] # return only non-empty else: return tokenize(text)
python
def __get_words(text, by_spaces): """ Helper function which splits the given text string into words. If by_spaces is false, then text like '01-02-2014' will be split into 3 separate words. For backwards compatibility, this is the default for all expression functions. :param text: the text to split :param by_spaces: whether words should be split only by spaces or by punctuation like '-', '.' etc """ if by_spaces: splits = regex.split(r'\s+', text, flags=regex.MULTILINE | regex.UNICODE | regex.V0) return [split for split in splits if split] # return only non-empty else: return tokenize(text)
[ "def", "__get_words", "(", "text", ",", "by_spaces", ")", ":", "if", "by_spaces", ":", "splits", "=", "regex", ".", "split", "(", "r'\\s+'", ",", "text", ",", "flags", "=", "regex", ".", "MULTILINE", "|", "regex", ".", "UNICODE", "|", "regex", ".", "V0", ")", "return", "[", "split", "for", "split", "in", "splits", "if", "split", "]", "# return only non-empty", "else", ":", "return", "tokenize", "(", "text", ")" ]
Helper function which splits the given text string into words. If by_spaces is false, then text like '01-02-2014' will be split into 3 separate words. For backwards compatibility, this is the default for all expression functions. :param text: the text to split :param by_spaces: whether words should be split only by spaces or by punctuation like '-', '.' etc
[ "Helper", "function", "which", "splits", "the", "given", "text", "string", "into", "words", ".", "If", "by_spaces", "is", "false", "then", "text", "like", "01", "-", "02", "-", "2014", "will", "be", "split", "into", "3", "separate", "words", ".", "For", "backwards", "compatibility", "this", "is", "the", "default", "for", "all", "expression", "functions", ".", ":", "param", "text", ":", "the", "text", "to", "split", ":", "param", "by_spaces", ":", "whether", "words", "should", "be", "split", "only", "by", "spaces", "or", "by", "punctuation", "like", "-", ".", "etc" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L179-L191
rapidpro/expressions
python/temba_expressions/utils.py
decimal_round
def decimal_round(number, num_digits, rounding=ROUND_HALF_UP): """ Rounding for decimals with support for negative digits """ exp = Decimal(10) ** -num_digits if num_digits >= 0: return number.quantize(exp, rounding) else: return exp * (number / exp).to_integral_value(rounding)
python
def decimal_round(number, num_digits, rounding=ROUND_HALF_UP): """ Rounding for decimals with support for negative digits """ exp = Decimal(10) ** -num_digits if num_digits >= 0: return number.quantize(exp, rounding) else: return exp * (number / exp).to_integral_value(rounding)
[ "def", "decimal_round", "(", "number", ",", "num_digits", ",", "rounding", "=", "ROUND_HALF_UP", ")", ":", "exp", "=", "Decimal", "(", "10", ")", "**", "-", "num_digits", "if", "num_digits", ">=", "0", ":", "return", "number", ".", "quantize", "(", "exp", ",", "rounding", ")", "else", ":", "return", "exp", "*", "(", "number", "/", "exp", ")", ".", "to_integral_value", "(", "rounding", ")" ]
Rounding for decimals with support for negative digits
[ "Rounding", "for", "decimals", "with", "support", "for", "negative", "digits" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/utils.py#L22-L31
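A worked example of the negative-digits branch, using only the standard library's decimal module. With num_digits = -2 the exponent becomes 10**2 = 100, so 1250 is divided down to 12.5, rounded half-up to 13, and scaled back to 1300.

from decimal import Decimal, ROUND_HALF_UP

number, num_digits = Decimal("1250"), -2
exp = Decimal(10) ** -num_digits                                  # Decimal('100')
print(exp * (number / exp).to_integral_value(ROUND_HALF_UP))      # 1300

# The non-negative branch quantizes directly: two decimal places, half-up.
print(Decimal("2.675").quantize(Decimal("0.01"), ROUND_HALF_UP))  # 2.68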
rapidpro/expressions
python/temba_expressions/utils.py
parse_json_date
def parse_json_date(value): """ Parses an ISO8601 formatted datetime from a string value """ if not value: return None return datetime.datetime.strptime(value, JSON_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
python
def parse_json_date(value): """ Parses an ISO8601 formatted datetime from a string value """ if not value: return None return datetime.datetime.strptime(value, JSON_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
[ "def", "parse_json_date", "(", "value", ")", ":", "if", "not", "value", ":", "return", "None", "return", "datetime", ".", "datetime", ".", "strptime", "(", "value", ",", "JSON_DATETIME_FORMAT", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "UTC", ")" ]
Parses an ISO8601 formatted datetime from a string value
[ "Parses", "an", "ISO8601", "formatted", "datetime", "from", "a", "string", "value" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/utils.py#L50-L57
rapidpro/expressions
python/temba_expressions/utils.py
format_json_date
def format_json_date(value): """ Formats a datetime as ISO8601 in UTC with millisecond precision, e.g. "2014-10-03T09:41:12.790Z" """ if not value: return None # %f will include 6 microsecond digits micro_precision = value.astimezone(pytz.UTC).strftime(JSON_DATETIME_FORMAT) # only keep the milliseconds portion of the second fraction return micro_precision[:-4] + 'Z'
python
def format_json_date(value): """ Formats a datetime as ISO8601 in UTC with millisecond precision, e.g. "2014-10-03T09:41:12.790Z" """ if not value: return None # %f will include 6 microsecond digits micro_precision = value.astimezone(pytz.UTC).strftime(JSON_DATETIME_FORMAT) # only keep the milliseconds portion of the second fraction return micro_precision[:-4] + 'Z'
[ "def", "format_json_date", "(", "value", ")", ":", "if", "not", "value", ":", "return", "None", "# %f will include 6 microsecond digits", "micro_precision", "=", "value", ".", "astimezone", "(", "pytz", ".", "UTC", ")", ".", "strftime", "(", "JSON_DATETIME_FORMAT", ")", "# only keep the milliseconds portion of the second fraction", "return", "micro_precision", "[", ":", "-", "4", "]", "+", "'Z'" ]
Formats a datetime as ISO8601 in UTC with millisecond precision, e.g. "2014-10-03T09:41:12.790Z"
[ "Formats", "a", "datetime", "as", "ISO8601", "in", "UTC", "with", "millisecond", "precision", "e", ".", "g", ".", "2014", "-", "10", "-", "03T09", ":", "41", ":", "12", ".", "790Z" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/utils.py#L60-L71
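The module-level `JSON_DATETIME_FORMAT` constant is not included in this record; the round-trip sketch below assumes the "%Y-%m-%dT%H:%M:%S.%fZ" pattern implied by the docstring's example output.

import datetime
import pytz

JSON_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"   # assumed definition

dt = datetime.datetime(2014, 10, 3, 9, 41, 12, 790000, tzinfo=pytz.UTC)
micro = dt.astimezone(pytz.UTC).strftime(JSON_DATETIME_FORMAT)
print(micro[:-4] + "Z")                          # 2014-10-03T09:41:12.790Z

parsed = datetime.datetime.strptime("2014-10-03T09:41:12.790Z", JSON_DATETIME_FORMAT)
print(parsed.replace(tzinfo=pytz.UTC))           # 2014-10-03 09:41:12.790000+00:00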
rapidpro/expressions
python/temba_expressions/functions/excel.py
clean
def clean(ctx, text): """ Removes all non-printable characters from a text string """ text = conversions.to_string(text, ctx) return ''.join([c for c in text if ord(c) >= 32])
python
def clean(ctx, text): """ Removes all non-printable characters from a text string """ text = conversions.to_string(text, ctx) return ''.join([c for c in text if ord(c) >= 32])
[ "def", "clean", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "return", "''", ".", "join", "(", "[", "c", "for", "c", "in", "text", "if", "ord", "(", "c", ")", ">=", "32", "]", ")" ]
Removes all non-printable characters from a text string
[ "Removes", "all", "non", "-", "printable", "characters", "from", "a", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L22-L27
rapidpro/expressions
python/temba_expressions/functions/excel.py
concatenate
def concatenate(ctx, *text): """ Joins text strings into one text string """ result = '' for arg in text: result += conversions.to_string(arg, ctx) return result
python
def concatenate(ctx, *text): """ Joins text strings into one text string """ result = '' for arg in text: result += conversions.to_string(arg, ctx) return result
[ "def", "concatenate", "(", "ctx", ",", "*", "text", ")", ":", "result", "=", "''", "for", "arg", "in", "text", ":", "result", "+=", "conversions", ".", "to_string", "(", "arg", ",", "ctx", ")", "return", "result" ]
Joins text strings into one text string
[ "Joins", "text", "strings", "into", "one", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L37-L44
rapidpro/expressions
python/temba_expressions/functions/excel.py
fixed
def fixed(ctx, number, decimals=2, no_commas=False): """ Formats the given number in decimal format using a period and commas """ value = _round(ctx, number, decimals) format_str = '{:f}' if no_commas else '{:,f}' return format_str.format(value)
python
def fixed(ctx, number, decimals=2, no_commas=False): """ Formats the given number in decimal format using a period and commas """ value = _round(ctx, number, decimals) format_str = '{:f}' if no_commas else '{:,f}' return format_str.format(value)
[ "def", "fixed", "(", "ctx", ",", "number", ",", "decimals", "=", "2", ",", "no_commas", "=", "False", ")", ":", "value", "=", "_round", "(", "ctx", ",", "number", ",", "decimals", ")", "format_str", "=", "'{:f}'", "if", "no_commas", "else", "'{:,f}'", "return", "format_str", ".", "format", "(", "value", ")" ]
Formats the given number in decimal format using a period and commas
[ "Formats", "the", "given", "number", "in", "decimal", "format", "using", "a", "period", "and", "commas" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L47-L53
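fixed() leans on Decimal's format mini-language, where ',' adds thousands separators and 'f' with no explicit precision keeps exactly the digits the rounded Decimal already has. A ctx-free sketch in which the record's _round(ctx, number, decimals) is replaced by a plain quantize (that substitution is an assumption for illustration and matches the record only for non-negative decimals):

```python
from decimal import Decimal, ROUND_HALF_UP

def fixed(number, decimals=2, no_commas=False):
    # Stand-in for _round(ctx, number, decimals) from the record above
    value = Decimal(str(number)).quantize(Decimal(10) ** -decimals, ROUND_HALF_UP)
    format_str = '{:f}' if no_commas else '{:,f}'
    return format_str.format(value)

print(fixed(1234567.891))           # 1,234,567.89
print(fixed(1234567.891, 0))        # 1,234,568
print(fixed(1234567.891, 2, True))  # 1234567.89
```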
rapidpro/expressions
python/temba_expressions/functions/excel.py
left
def left(ctx, text, num_chars): """ Returns the first characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") return conversions.to_string(text, ctx)[0:num_chars]
python
def left(ctx, text, num_chars): """ Returns the first characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") return conversions.to_string(text, ctx)[0:num_chars]
[ "def", "left", "(", "ctx", ",", "text", ",", "num_chars", ")", ":", "num_chars", "=", "conversions", ".", "to_integer", "(", "num_chars", ",", "ctx", ")", "if", "num_chars", "<", "0", ":", "raise", "ValueError", "(", "\"Number of chars can't be negative\"", ")", "return", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "[", "0", ":", "num_chars", "]" ]
Returns the first characters in a text string
[ "Returns", "the", "first", "characters", "in", "a", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L56-L63
rapidpro/expressions
python/temba_expressions/functions/excel.py
rept
def rept(ctx, text, number_times): """ Repeats text a given number of times """ if number_times < 0: raise ValueError("Number of times can't be negative") return conversions.to_string(text, ctx) * conversions.to_integer(number_times, ctx)
python
def rept(ctx, text, number_times): """ Repeats text a given number of times """ if number_times < 0: raise ValueError("Number of times can't be negative") return conversions.to_string(text, ctx) * conversions.to_integer(number_times, ctx)
[ "def", "rept", "(", "ctx", ",", "text", ",", "number_times", ")", ":", "if", "number_times", "<", "0", ":", "raise", "ValueError", "(", "\"Number of times can't be negative\"", ")", "return", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "*", "conversions", ".", "to_integer", "(", "number_times", ",", "ctx", ")" ]
Repeats text a given number of times
[ "Repeats", "text", "a", "given", "number", "of", "times" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L87-L93
rapidpro/expressions
python/temba_expressions/functions/excel.py
right
def right(ctx, text, num_chars): """ Returns the last characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") elif num_chars == 0: return '' else: return conversions.to_string(text, ctx)[-num_chars:]
python
def right(ctx, text, num_chars): """ Returns the last characters in a text string """ num_chars = conversions.to_integer(num_chars, ctx) if num_chars < 0: raise ValueError("Number of chars can't be negative") elif num_chars == 0: return '' else: return conversions.to_string(text, ctx)[-num_chars:]
[ "def", "right", "(", "ctx", ",", "text", ",", "num_chars", ")", ":", "num_chars", "=", "conversions", ".", "to_integer", "(", "num_chars", ",", "ctx", ")", "if", "num_chars", "<", "0", ":", "raise", "ValueError", "(", "\"Number of chars can't be negative\"", ")", "elif", "num_chars", "==", "0", ":", "return", "''", "else", ":", "return", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "[", "-", "num_chars", ":", "]" ]
Returns the last characters in a text string
[ "Returns", "the", "last", "characters", "in", "a", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L96-L106
rapidpro/expressions
python/temba_expressions/functions/excel.py
substitute
def substitute(ctx, text, old_text, new_text, instance_num=-1): """ Substitutes new_text for old_text in a text string """ text = conversions.to_string(text, ctx) old_text = conversions.to_string(old_text, ctx) new_text = conversions.to_string(new_text, ctx) if instance_num < 0: return text.replace(old_text, new_text) else: splits = text.split(old_text) output = splits[0] instance = 1 for split in splits[1:]: sep = new_text if instance == instance_num else old_text output += sep + split instance += 1 return output
python
def substitute(ctx, text, old_text, new_text, instance_num=-1): """ Substitutes new_text for old_text in a text string """ text = conversions.to_string(text, ctx) old_text = conversions.to_string(old_text, ctx) new_text = conversions.to_string(new_text, ctx) if instance_num < 0: return text.replace(old_text, new_text) else: splits = text.split(old_text) output = splits[0] instance = 1 for split in splits[1:]: sep = new_text if instance == instance_num else old_text output += sep + split instance += 1 return output
[ "def", "substitute", "(", "ctx", ",", "text", ",", "old_text", ",", "new_text", ",", "instance_num", "=", "-", "1", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "old_text", "=", "conversions", ".", "to_string", "(", "old_text", ",", "ctx", ")", "new_text", "=", "conversions", ".", "to_string", "(", "new_text", ",", "ctx", ")", "if", "instance_num", "<", "0", ":", "return", "text", ".", "replace", "(", "old_text", ",", "new_text", ")", "else", ":", "splits", "=", "text", ".", "split", "(", "old_text", ")", "output", "=", "splits", "[", "0", "]", "instance", "=", "1", "for", "split", "in", "splits", "[", "1", ":", "]", ":", "sep", "=", "new_text", "if", "instance", "==", "instance_num", "else", "old_text", "output", "+=", "sep", "+", "split", "instance", "+=", "1", "return", "output" ]
Substitutes new_text for old_text in a text string
[ "Substitutes", "new_text", "for", "old_text", "in", "a", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L109-L127
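The non-obvious part of substitute() is the instance_num branch: the text is split on old_text and re-joined, and only the requested occurrence gets new_text as its separator. A ctx-free sketch of the same logic:

```python
def substitute(text, old_text, new_text, instance_num=-1):
    if instance_num < 0:
        return text.replace(old_text, new_text)
    splits = text.split(old_text)
    output = splits[0]
    # occurrence k of old_text sits between splits[k-1] and splits[k]
    for instance, split in enumerate(splits[1:], start=1):
        sep = new_text if instance == instance_num else old_text
        output += sep + split
    return output

print(substitute("a-b-c-d", "-", "+"))     # a+b+c+d   (all occurrences)
print(substitute("a-b-c-d", "-", "+", 2))  # a-b+c-d   (second occurrence only)
```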
rapidpro/expressions
python/temba_expressions/functions/excel.py
_unicode
def _unicode(ctx, text): """ Returns a numeric code for the first character in a text string """ text = conversions.to_string(text, ctx) if len(text) == 0: raise ValueError("Text can't be empty") return ord(text[0])
python
def _unicode(ctx, text): """ Returns a numeric code for the first character in a text string """ text = conversions.to_string(text, ctx) if len(text) == 0: raise ValueError("Text can't be empty") return ord(text[0])
[ "def", "_unicode", "(", "ctx", ",", "text", ")", ":", "text", "=", "conversions", ".", "to_string", "(", "text", ",", "ctx", ")", "if", "len", "(", "text", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Text can't be empty\"", ")", "return", "ord", "(", "text", "[", "0", "]", ")" ]
Returns a numeric code for the first character in a text string
[ "Returns", "a", "numeric", "code", "for", "the", "first", "character", "in", "a", "text", "string" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L137-L144
rapidpro/expressions
python/temba_expressions/functions/excel.py
date
def date(ctx, year, month, day): """ Defines a date value """ return _date(conversions.to_integer(year, ctx), conversions.to_integer(month, ctx), conversions.to_integer(day, ctx))
python
def date(ctx, year, month, day): """ Defines a date value """ return _date(conversions.to_integer(year, ctx), conversions.to_integer(month, ctx), conversions.to_integer(day, ctx))
[ "def", "date", "(", "ctx", ",", "year", ",", "month", ",", "day", ")", ":", "return", "_date", "(", "conversions", ".", "to_integer", "(", "year", ",", "ctx", ")", ",", "conversions", ".", "to_integer", "(", "month", ",", "ctx", ")", ",", "conversions", ".", "to_integer", "(", "day", ",", "ctx", ")", ")" ]
Defines a date value
[ "Defines", "a", "date", "value" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L157-L161
rapidpro/expressions
python/temba_expressions/functions/excel.py
datedif
def datedif(ctx, start_date, end_date, unit): """ Calculates the number of days, months, or years between two dates. """ start_date = conversions.to_date(start_date, ctx) end_date = conversions.to_date(end_date, ctx) unit = conversions.to_string(unit, ctx).lower() if start_date > end_date: raise ValueError("Start date cannot be after end date") if unit == 'y': return relativedelta(end_date, start_date).years elif unit == 'm': delta = relativedelta(end_date, start_date) return 12 * delta.years + delta.months elif unit == 'd': return (end_date - start_date).days elif unit == 'md': return relativedelta(end_date, start_date).days elif unit == 'ym': return relativedelta(end_date, start_date).months elif unit == 'yd': return (end_date - start_date.replace(year=end_date.year)).days raise ValueError("Invalid unit value: %s" % unit)
python
def datedif(ctx, start_date, end_date, unit): """ Calculates the number of days, months, or years between two dates. """ start_date = conversions.to_date(start_date, ctx) end_date = conversions.to_date(end_date, ctx) unit = conversions.to_string(unit, ctx).lower() if start_date > end_date: raise ValueError("Start date cannot be after end date") if unit == 'y': return relativedelta(end_date, start_date).years elif unit == 'm': delta = relativedelta(end_date, start_date) return 12 * delta.years + delta.months elif unit == 'd': return (end_date - start_date).days elif unit == 'md': return relativedelta(end_date, start_date).days elif unit == 'ym': return relativedelta(end_date, start_date).months elif unit == 'yd': return (end_date - start_date.replace(year=end_date.year)).days raise ValueError("Invalid unit value: %s" % unit)
[ "def", "datedif", "(", "ctx", ",", "start_date", ",", "end_date", ",", "unit", ")", ":", "start_date", "=", "conversions", ".", "to_date", "(", "start_date", ",", "ctx", ")", "end_date", "=", "conversions", ".", "to_date", "(", "end_date", ",", "ctx", ")", "unit", "=", "conversions", ".", "to_string", "(", "unit", ",", "ctx", ")", ".", "lower", "(", ")", "if", "start_date", ">", "end_date", ":", "raise", "ValueError", "(", "\"Start date cannot be after end date\"", ")", "if", "unit", "==", "'y'", ":", "return", "relativedelta", "(", "end_date", ",", "start_date", ")", ".", "years", "elif", "unit", "==", "'m'", ":", "delta", "=", "relativedelta", "(", "end_date", ",", "start_date", ")", "return", "12", "*", "delta", ".", "years", "+", "delta", ".", "months", "elif", "unit", "==", "'d'", ":", "return", "(", "end_date", "-", "start_date", ")", ".", "days", "elif", "unit", "==", "'md'", ":", "return", "relativedelta", "(", "end_date", ",", "start_date", ")", ".", "days", "elif", "unit", "==", "'ym'", ":", "return", "relativedelta", "(", "end_date", ",", "start_date", ")", ".", "months", "elif", "unit", "==", "'yd'", ":", "return", "(", "end_date", "-", "start_date", ".", "replace", "(", "year", "=", "end_date", ".", "year", ")", ")", ".", "days", "raise", "ValueError", "(", "\"Invalid unit value: %s\"", "%", "unit", ")" ]
Calculates the number of days, months, or years between two dates.
[ "Calculates", "the", "number", "of", "days", "months", "or", "years", "between", "two", "dates", "." ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L164-L189
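datedif() maps Excel-style unit codes onto dateutil arithmetic. A quick standalone check of the 'y', 'm' and 'd' branches, with the record's conversions/ctx plumbing omitted:

```python
from datetime import date
from dateutil.relativedelta import relativedelta

start, end = date(2014, 1, 15), date(2016, 3, 1)

delta = relativedelta(end, start)
print(delta.years)                       # 2   -> unit 'y'
print(12 * delta.years + delta.months)   # 25  -> unit 'm'
print((end - start).days)                # 776 -> unit 'd'
```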
rapidpro/expressions
python/temba_expressions/functions/excel.py
edate
def edate(ctx, date, months): """ Moves a date by the given number of months """ return conversions.to_date_or_datetime(date, ctx) + relativedelta(months=conversions.to_integer(months, ctx))
python
def edate(ctx, date, months): """ Moves a date by the given number of months """ return conversions.to_date_or_datetime(date, ctx) + relativedelta(months=conversions.to_integer(months, ctx))
[ "def", "edate", "(", "ctx", ",", "date", ",", "months", ")", ":", "return", "conversions", ".", "to_date_or_datetime", "(", "date", ",", "ctx", ")", "+", "relativedelta", "(", "months", "=", "conversions", ".", "to_integer", "(", "months", ",", "ctx", ")", ")" ]
Moves a date by the given number of months
[ "Moves", "a", "date", "by", "the", "given", "number", "of", "months" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L213-L217
rapidpro/expressions
python/temba_expressions/functions/excel.py
time
def time(ctx, hours, minutes, seconds): """ Defines a time value """ return _time(conversions.to_integer(hours, ctx), conversions.to_integer(minutes, ctx), conversions.to_integer(seconds, ctx))
python
def time(ctx, hours, minutes, seconds): """ Defines a time value """ return _time(conversions.to_integer(hours, ctx), conversions.to_integer(minutes, ctx), conversions.to_integer(seconds, ctx))
[ "def", "time", "(", "ctx", ",", "hours", ",", "minutes", ",", "seconds", ")", ":", "return", "_time", "(", "conversions", ".", "to_integer", "(", "hours", ",", "ctx", ")", ",", "conversions", ".", "to_integer", "(", "minutes", ",", "ctx", ")", ",", "conversions", ".", "to_integer", "(", "seconds", ",", "ctx", ")", ")" ]
Defines a time value
[ "Defines", "a", "time", "value" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L255-L259
rapidpro/expressions
python/temba_expressions/functions/excel.py
_abs
def _abs(ctx, number): """ Returns the absolute value of a number """ return conversions.to_decimal(abs(conversions.to_decimal(number, ctx)), ctx)
python
def _abs(ctx, number): """ Returns the absolute value of a number """ return conversions.to_decimal(abs(conversions.to_decimal(number, ctx)), ctx)
[ "def", "_abs", "(", "ctx", ",", "number", ")", ":", "return", "conversions", ".", "to_decimal", "(", "abs", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ")", ",", "ctx", ")" ]
Returns the absolute value of a number
[ "Returns", "the", "absolute", "value", "of", "a", "number" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L293-L297
rapidpro/expressions
python/temba_expressions/functions/excel.py
_int
def _int(ctx, number): """ Rounds a number down to the nearest integer """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_FLOOR), ctx)
python
def _int(ctx, number): """ Rounds a number down to the nearest integer """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_FLOOR), ctx)
[ "def", "_int", "(", "ctx", ",", "number", ")", ":", "return", "conversions", ".", "to_integer", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ".", "to_integral_value", "(", "ROUND_FLOOR", ")", ",", "ctx", ")" ]
Rounds a number down to the nearest integer
[ "Rounds", "a", "number", "down", "to", "the", "nearest", "integer" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L314-L318
rapidpro/expressions
python/temba_expressions/functions/excel.py
_max
def _max(ctx, *number): """ Returns the maximum value of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = conversions.to_decimal(number[0], ctx) for arg in number[1:]: arg = conversions.to_decimal(arg, ctx) if arg > result: result = arg return result
python
def _max(ctx, *number): """ Returns the maximum value of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = conversions.to_decimal(number[0], ctx) for arg in number[1:]: arg = conversions.to_decimal(arg, ctx) if arg > result: result = arg return result
[ "def", "_max", "(", "ctx", ",", "*", "number", ")", ":", "if", "len", "(", "number", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Wrong number of arguments\"", ")", "result", "=", "conversions", ".", "to_decimal", "(", "number", "[", "0", "]", ",", "ctx", ")", "for", "arg", "in", "number", "[", "1", ":", "]", ":", "arg", "=", "conversions", ".", "to_decimal", "(", "arg", ",", "ctx", ")", "if", "arg", ">", "result", ":", "result", "=", "arg", "return", "result" ]
Returns the maximum value of all arguments
[ "Returns", "the", "maximum", "value", "of", "all", "arguments" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L321-L333
rapidpro/expressions
python/temba_expressions/functions/excel.py
mod
def mod(ctx, number, divisor): """ Returns the remainder after number is divided by divisor """ number = conversions.to_decimal(number, ctx) divisor = conversions.to_decimal(divisor, ctx) return number - divisor * _int(ctx, number / divisor)
python
def mod(ctx, number, divisor): """ Returns the remainder after number is divided by divisor """ number = conversions.to_decimal(number, ctx) divisor = conversions.to_decimal(divisor, ctx) return number - divisor * _int(ctx, number / divisor)
[ "def", "mod", "(", "ctx", ",", "number", ",", "divisor", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "divisor", "=", "conversions", ".", "to_decimal", "(", "divisor", ",", "ctx", ")", "return", "number", "-", "divisor", "*", "_int", "(", "ctx", ",", "number", "/", "divisor", ")" ]
Returns the remainder after number is divided by divisor
[ "Returns", "the", "remainder", "after", "number", "is", "divided", "by", "divisor" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L351-L357
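mod() composes with the _int record above (which floors via ROUND_FLOOR), so the remainder follows the sign of the divisor, as in Excel's MOD. A ctx-free worked example:

```python
from decimal import Decimal, ROUND_FLOOR

def excel_mod(number, divisor):
    # number - divisor * INT(number / divisor), with INT rounding toward -infinity
    n, d = Decimal(str(number)), Decimal(str(divisor))
    return n - d * (n / d).to_integral_value(ROUND_FLOOR)

print(excel_mod(3, 2))    # 1
print(excel_mod(-3, 2))   # 1   (-3 - 2*(-2))
print(excel_mod(3, -2))   # -1  (3 - (-2)*(-2))
```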
rapidpro/expressions
python/temba_expressions/functions/excel.py
_power
def _power(ctx, number, power): """ Returns the result of a number raised to a power """ return decimal_pow(conversions.to_decimal(number, ctx), conversions.to_decimal(power, ctx))
python
def _power(ctx, number, power): """ Returns the result of a number raised to a power """ return decimal_pow(conversions.to_decimal(number, ctx), conversions.to_decimal(power, ctx))
[ "def", "_power", "(", "ctx", ",", "number", ",", "power", ")", ":", "return", "decimal_pow", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ",", "conversions", ".", "to_decimal", "(", "power", ",", "ctx", ")", ")" ]
Returns the result of a number raised to a power
[ "Returns", "the", "result", "of", "a", "number", "raised", "to", "a", "power" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L360-L364
rapidpro/expressions
python/temba_expressions/functions/excel.py
randbetween
def randbetween(ctx, bottom, top): """ Returns a random integer number between the numbers you specify """ bottom = conversions.to_integer(bottom, ctx) top = conversions.to_integer(top, ctx) return random.randint(bottom, top)
python
def randbetween(ctx, bottom, top): """ Returns a random integer number between the numbers you specify """ bottom = conversions.to_integer(bottom, ctx) top = conversions.to_integer(top, ctx) return random.randint(bottom, top)
[ "def", "randbetween", "(", "ctx", ",", "bottom", ",", "top", ")", ":", "bottom", "=", "conversions", ".", "to_integer", "(", "bottom", ",", "ctx", ")", "top", "=", "conversions", ".", "to_integer", "(", "top", ",", "ctx", ")", "return", "random", ".", "randint", "(", "bottom", ",", "top", ")" ]
Returns a random integer number between the numbers you specify
[ "Returns", "a", "random", "integer", "number", "between", "the", "numbers", "you", "specify" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L374-L380
rapidpro/expressions
python/temba_expressions/functions/excel.py
_round
def _round(ctx, number, num_digits): """ Rounds a number to a specified number of digits """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_HALF_UP)
python
def _round(ctx, number, num_digits): """ Rounds a number to a specified number of digits """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_HALF_UP)
[ "def", "_round", "(", "ctx", ",", "number", ",", "num_digits", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "num_digits", "=", "conversions", ".", "to_integer", "(", "num_digits", ",", "ctx", ")", "return", "decimal_round", "(", "number", ",", "num_digits", ",", "ROUND_HALF_UP", ")" ]
Rounds a number to a specified number of digits
[ "Rounds", "a", "number", "to", "a", "specified", "number", "of", "digits" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L383-L390
rapidpro/expressions
python/temba_expressions/functions/excel.py
rounddown
def rounddown(ctx, number, num_digits): """ Rounds a number down, toward zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_DOWN)
python
def rounddown(ctx, number, num_digits): """ Rounds a number down, toward zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_DOWN)
[ "def", "rounddown", "(", "ctx", ",", "number", ",", "num_digits", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "num_digits", "=", "conversions", ".", "to_integer", "(", "num_digits", ",", "ctx", ")", "return", "decimal_round", "(", "number", ",", "num_digits", ",", "ROUND_DOWN", ")" ]
Rounds a number down, toward zero
[ "Rounds", "a", "number", "down", "toward", "zero" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L393-L400
rapidpro/expressions
python/temba_expressions/functions/excel.py
roundup
def roundup(ctx, number, num_digits): """ Rounds a number up, away from zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_UP)
python
def roundup(ctx, number, num_digits): """ Rounds a number up, away from zero """ number = conversions.to_decimal(number, ctx) num_digits = conversions.to_integer(num_digits, ctx) return decimal_round(number, num_digits, ROUND_UP)
[ "def", "roundup", "(", "ctx", ",", "number", ",", "num_digits", ")", ":", "number", "=", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", "num_digits", "=", "conversions", ".", "to_integer", "(", "num_digits", ",", "ctx", ")", "return", "decimal_round", "(", "number", ",", "num_digits", ",", "ROUND_UP", ")" ]
Rounds a number up, away from zero
[ "Rounds", "a", "number", "up", "away", "from", "zero" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L403-L410
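_round, rounddown and roundup differ only in the rounding constant passed to decimal_round. A side-by-side comparison of the three modes using Decimal.quantize directly:

```python
from decimal import Decimal, ROUND_HALF_UP, ROUND_DOWN, ROUND_UP

x, exp = Decimal("2.345"), Decimal("0.01")
print(x.quantize(exp, ROUND_HALF_UP))  # 2.35  (ROUND)
print(x.quantize(exp, ROUND_DOWN))     # 2.34  (ROUNDDOWN: toward zero)
print(x.quantize(exp, ROUND_UP))       # 2.35  (ROUNDUP: away from zero)

y = Decimal("-2.341")
print(y.quantize(exp, ROUND_DOWN))     # -2.34
print(y.quantize(exp, ROUND_UP))       # -2.35
```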
rapidpro/expressions
python/temba_expressions/functions/excel.py
_sum
def _sum(ctx, *number): """ Returns the sum of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = Decimal(0) for arg in number: result += conversions.to_decimal(arg, ctx) return result
python
def _sum(ctx, *number): """ Returns the sum of all arguments """ if len(number) == 0: raise ValueError("Wrong number of arguments") result = Decimal(0) for arg in number: result += conversions.to_decimal(arg, ctx) return result
[ "def", "_sum", "(", "ctx", ",", "*", "number", ")", ":", "if", "len", "(", "number", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Wrong number of arguments\"", ")", "result", "=", "Decimal", "(", "0", ")", "for", "arg", "in", "number", ":", "result", "+=", "conversions", ".", "to_decimal", "(", "arg", ",", "ctx", ")", "return", "result" ]
Returns the sum of all arguments
[ "Returns", "the", "sum", "of", "all", "arguments" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L413-L423
rapidpro/expressions
python/temba_expressions/functions/excel.py
trunc
def trunc(ctx, number): """ Truncates a number to an integer by removing the fractional part of the number """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_DOWN), ctx)
python
def trunc(ctx, number): """ Truncates a number to an integer by removing the fractional part of the number """ return conversions.to_integer(conversions.to_decimal(number, ctx).to_integral_value(ROUND_DOWN), ctx)
[ "def", "trunc", "(", "ctx", ",", "number", ")", ":", "return", "conversions", ".", "to_integer", "(", "conversions", ".", "to_decimal", "(", "number", ",", "ctx", ")", ".", "to_integral_value", "(", "ROUND_DOWN", ")", ",", "ctx", ")" ]
Truncates a number to an integer by removing the fractional part of the number
[ "Truncates", "a", "number", "to", "an", "integer", "by", "removing", "the", "fractional", "part", "of", "the", "number" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L426-L430
rapidpro/expressions
python/temba_expressions/functions/excel.py
_and
def _and(ctx, *logical): """ Returns TRUE if and only if all its arguments evaluate to TRUE """ for arg in logical: if not conversions.to_boolean(arg, ctx): return False return True
python
def _and(ctx, *logical): """ Returns TRUE if and only if all its arguments evaluate to TRUE """ for arg in logical: if not conversions.to_boolean(arg, ctx): return False return True
[ "def", "_and", "(", "ctx", ",", "*", "logical", ")", ":", "for", "arg", "in", "logical", ":", "if", "not", "conversions", ".", "to_boolean", "(", "arg", ",", "ctx", ")", ":", "return", "False", "return", "True" ]
Returns TRUE if and only if all its arguments evaluate to TRUE
[ "Returns", "TRUE", "if", "and", "only", "if", "all", "its", "arguments", "evaluate", "to", "TRUE" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L435-L442
rapidpro/expressions
python/temba_expressions/functions/excel.py
_if
def _if(ctx, logical_test, value_if_true=0, value_if_false=False): """ Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE """ return value_if_true if conversions.to_boolean(logical_test, ctx) else value_if_false
python
def _if(ctx, logical_test, value_if_true=0, value_if_false=False): """ Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE """ return value_if_true if conversions.to_boolean(logical_test, ctx) else value_if_false
[ "def", "_if", "(", "ctx", ",", "logical_test", ",", "value_if_true", "=", "0", ",", "value_if_false", "=", "False", ")", ":", "return", "value_if_true", "if", "conversions", ".", "to_boolean", "(", "logical_test", ",", "ctx", ")", "else", "value_if_false" ]
Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE
[ "Returns", "one", "value", "if", "the", "condition", "evaluates", "to", "TRUE", "and", "another", "value", "if", "it", "evaluates", "to", "FALSE" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L452-L456
rapidpro/expressions
python/temba_expressions/functions/excel.py
_or
def _or(ctx, *logical): """ Returns TRUE if any argument is TRUE """ for arg in logical: if conversions.to_boolean(arg, ctx): return True return False
python
def _or(ctx, *logical): """ Returns TRUE if any argument is TRUE """ for arg in logical: if conversions.to_boolean(arg, ctx): return True return False
[ "def", "_or", "(", "ctx", ",", "*", "logical", ")", ":", "for", "arg", "in", "logical", ":", "if", "conversions", ".", "to_boolean", "(", "arg", ",", "ctx", ")", ":", "return", "True", "return", "False" ]
Returns TRUE if any argument is TRUE
[ "Returns", "TRUE", "if", "any", "argument", "is", "TRUE" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L459-L466
rainwoodman/kdcount
kdcount/correlate.py
compute_sum_values
def compute_sum_values(i, j, data1, data2): """ Return the sum1_ij and sum2_ij values given the input indices and data instances. Notes ----- This is called in `Binning.update_sums` to compute the `sum1` and `sum2` contributions for indices `(i,j)` Parameters ---------- i,j : array_like the bin indices for these pairs data1, data2 : `points`, `field` instances the two `points` or `field` objects Returns ------- sum1_ij, sum2_ij : float, array_like (N,...) contributions to sum1, sum2 -- either a float or array of shape (N, ...) where N is the length of `i`, `j` """ sum1_ij = 1. for idx, d in zip([i,j], [data1, data2]): if isinstance(d, field): sum1_ij *= d.wvalue[idx] elif isinstance(d, points): sum1_ij *= d.weights[idx] else: raise NotImplementedError("data type not recognized") sum2_ij = data1.weights[i] * data2.weights[j] return sum1_ij, sum2_ij
python
def compute_sum_values(i, j, data1, data2): """ Return the sum1_ij and sum2_ij values given the input indices and data instances. Notes ----- This is called in `Binning.update_sums` to compute the `sum1` and `sum2` contributions for indices `(i,j)` Parameters ---------- i,j : array_like the bin indices for these pairs data1, data2 : `points`, `field` instances the two `points` or `field` objects Returns ------- sum1_ij, sum2_ij : float, array_like (N,...) contributions to sum1, sum2 -- either a float or array of shape (N, ...) where N is the length of `i`, `j` """ sum1_ij = 1. for idx, d in zip([i,j], [data1, data2]): if isinstance(d, field): sum1_ij *= d.wvalue[idx] elif isinstance(d, points): sum1_ij *= d.weights[idx] else: raise NotImplementedError("data type not recognized") sum2_ij = data1.weights[i] * data2.weights[j] return sum1_ij, sum2_ij
[ "def", "compute_sum_values", "(", "i", ",", "j", ",", "data1", ",", "data2", ")", ":", "sum1_ij", "=", "1.", "for", "idx", ",", "d", "in", "zip", "(", "[", "i", ",", "j", "]", ",", "[", "data1", ",", "data2", "]", ")", ":", "if", "isinstance", "(", "d", ",", "field", ")", ":", "sum1_ij", "*=", "d", ".", "wvalue", "[", "idx", "]", "elif", "isinstance", "(", "d", ",", "points", ")", ":", "sum1_ij", "*=", "d", ".", "weights", "[", "idx", "]", "else", ":", "raise", "NotImplementedError", "(", "\"data type not recognized\"", ")", "sum2_ij", "=", "data1", ".", "weights", "[", "i", "]", "*", "data2", ".", "weights", "[", "j", "]", "return", "sum1_ij", ",", "sum2_ij" ]
Return the sum1_ij and sum2_ij values given the input indices and data instances. Notes ----- This is called in `Binning.update_sums` to compute the `sum1` and `sum2` contributions for indices `(i,j)` Parameters ---------- i,j : array_like the bin indices for these pairs data1, data2 : `points`, `field` instances the two `points` or `field` objects Returns ------- sum1_ij, sum2_ij : float, array_like (N,...) contributions to sum1, sum2 -- either a float or array of shape (N, ...) where N is the length of `i`, `j`
[ "Return", "the", "sum1_ij", "and", "sum2_ij", "values", "given", "the", "input", "indices", "and", "data", "instances", "." ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L46-L77
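For the points x points case described in compute_sum_values, sum1_ij and sum2_ij are both just the product of the pair weights picked out by fancy indexing. A tiny numpy illustration with hypothetical weights and pair indices:

```python
import numpy

# Hypothetical weights for two small catalogs of points
w1 = numpy.array([1.0, 2.0, 0.5])
w2 = numpy.array([3.0, 1.0])

# Pair indices as the pair enumeration would hand them to the callback
i = numpy.array([0, 1, 2])
j = numpy.array([1, 0, 0])

sum1_ij = w1[i] * w2[j]   # per-pair weight product
sum2_ij = w1[i] * w2[j]   # identical when both datasets are points
print(sum1_ij)            # [1.  6.  1.5]
```

For field data the record multiplies wvalue rather than weights into sum1, which is what makes sum1 and sum2 differ.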
rainwoodman/kdcount
kdcount/correlate.py
Binning._setup
def _setup(self): """ Set the binning info we need from the `edges` """ dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')] dtype = numpy.dtype(dtype) self._info = numpy.empty(self.Ndim, dtype=dtype) self.min = self._info['min'] self.max = self._info['max'] self.N = self._info['N'] self.inv = self._info['inv'] self.spacing = self._info['spacing'] for i, dim in enumerate(self.dims): self.N[i] = len(self.edges[i])-1 self.min[i] = self.edges[i][0] self.max[i] = self.edges[i][-1] # determine the type of spacing self.spacing[i] = None lin_diff = numpy.diff(self.edges[i]) with numpy.errstate(divide='ignore', invalid='ignore'): log_diff = numpy.diff(numpy.log10(self.edges[i])) if numpy.allclose(lin_diff, lin_diff[0]): self.spacing[i] = 'linspace' self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i]) elif numpy.allclose(log_diff, log_diff[0]): self.spacing[i] = 'logspace' self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i]) self.shape = self.N + 2 # store Rmax self.Rmax = self.max[0]
python
def _setup(self): """ Set the binning info we need from the `edges` """ dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')] dtype = numpy.dtype(dtype) self._info = numpy.empty(self.Ndim, dtype=dtype) self.min = self._info['min'] self.max = self._info['max'] self.N = self._info['N'] self.inv = self._info['inv'] self.spacing = self._info['spacing'] for i, dim in enumerate(self.dims): self.N[i] = len(self.edges[i])-1 self.min[i] = self.edges[i][0] self.max[i] = self.edges[i][-1] # determine the type of spacing self.spacing[i] = None lin_diff = numpy.diff(self.edges[i]) with numpy.errstate(divide='ignore', invalid='ignore'): log_diff = numpy.diff(numpy.log10(self.edges[i])) if numpy.allclose(lin_diff, lin_diff[0]): self.spacing[i] = 'linspace' self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i]) elif numpy.allclose(log_diff, log_diff[0]): self.spacing[i] = 'logspace' self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i]) self.shape = self.N + 2 # store Rmax self.Rmax = self.max[0]
[ "def", "_setup", "(", "self", ")", ":", "dtype", "=", "[", "(", "'inv'", ",", "'f8'", ")", ",", "(", "'min'", ",", "'f8'", ")", ",", "(", "'max'", ",", "'f8'", ")", ",", "(", "'N'", ",", "'i4'", ")", ",", "(", "'spacing'", ",", "'object'", ")", "]", "dtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", "self", ".", "_info", "=", "numpy", ".", "empty", "(", "self", ".", "Ndim", ",", "dtype", "=", "dtype", ")", "self", ".", "min", "=", "self", ".", "_info", "[", "'min'", "]", "self", ".", "max", "=", "self", ".", "_info", "[", "'max'", "]", "self", ".", "N", "=", "self", ".", "_info", "[", "'N'", "]", "self", ".", "inv", "=", "self", ".", "_info", "[", "'inv'", "]", "self", ".", "spacing", "=", "self", ".", "_info", "[", "'spacing'", "]", "for", "i", ",", "dim", "in", "enumerate", "(", "self", ".", "dims", ")", ":", "self", ".", "N", "[", "i", "]", "=", "len", "(", "self", ".", "edges", "[", "i", "]", ")", "-", "1", "self", ".", "min", "[", "i", "]", "=", "self", ".", "edges", "[", "i", "]", "[", "0", "]", "self", ".", "max", "[", "i", "]", "=", "self", ".", "edges", "[", "i", "]", "[", "-", "1", "]", "# determine the type of spacing", "self", ".", "spacing", "[", "i", "]", "=", "None", "lin_diff", "=", "numpy", ".", "diff", "(", "self", ".", "edges", "[", "i", "]", ")", "with", "numpy", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ")", ":", "log_diff", "=", "numpy", ".", "diff", "(", "numpy", ".", "log10", "(", "self", ".", "edges", "[", "i", "]", ")", ")", "if", "numpy", ".", "allclose", "(", "lin_diff", ",", "lin_diff", "[", "0", "]", ")", ":", "self", ".", "spacing", "[", "i", "]", "=", "'linspace'", "self", ".", "inv", "[", "i", "]", "=", "self", ".", "N", "[", "i", "]", "*", "1.0", "/", "(", "self", ".", "max", "[", "i", "]", "-", "self", ".", "min", "[", "i", "]", ")", "elif", "numpy", ".", "allclose", "(", "log_diff", ",", "log_diff", "[", "0", "]", ")", ":", "self", ".", "spacing", "[", "i", "]", "=", "'logspace'", "self", ".", "inv", "[", "i", "]", "=", "self", ".", "N", "[", "i", "]", "*", "1.0", "/", "numpy", ".", "log10", "(", "self", ".", "max", "[", "i", "]", "/", "self", ".", "min", "[", "i", "]", ")", "self", ".", "shape", "=", "self", ".", "N", "+", "2", "# store Rmax", "self", ".", "Rmax", "=", "self", ".", "max", "[", "0", "]" ]
Set the binning info we need from the `edges`
[ "Set", "the", "binning", "info", "we", "need", "from", "the", "edges" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L135-L169
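_setup classifies each edge array as 'linspace', 'logspace', or irregular by checking whether successive differences (or differences of log10) are all equal. The check in isolation:

```python
import numpy

def classify(edges):
    lin_diff = numpy.diff(edges)
    with numpy.errstate(divide='ignore', invalid='ignore'):
        log_diff = numpy.diff(numpy.log10(edges))
    if numpy.allclose(lin_diff, lin_diff[0]):
        return 'linspace'
    if numpy.allclose(log_diff, log_diff[0]):
        return 'logspace'
    return None

print(classify(numpy.linspace(0.0, 10.0, 11)))       # linspace
print(classify(numpy.logspace(-1, 2, 10)))           # logspace
print(classify(numpy.array([0.0, 1.0, 3.0, 10.0])))  # None
```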
rainwoodman/kdcount
kdcount/correlate.py
Binning.linear
def linear(self, **paircoords): """ Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index """ N = len(paircoords[list(paircoords.keys())[0]]) integer = numpy.empty(N, ('i8', (self.Ndim,))).T # do each dimension for i, dim in enumerate(self.dims): if self.spacing[i] == 'linspace': x = paircoords[dim] - self.min[i] integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] == 'logspace': x = paircoords[dim].copy() x[x == 0] = self.min[i] * 0.9 x = numpy.log10(x / self.min[i]) integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] is None: edge = self.edges if self.Ndim == 1 else self.edges[i] integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left') return numpy.ravel_multi_index(integer, self.shape, mode='clip')
python
def linear(self, **paircoords): """ Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index """ N = len(paircoords[list(paircoords.keys())[0]]) integer = numpy.empty(N, ('i8', (self.Ndim,))).T # do each dimension for i, dim in enumerate(self.dims): if self.spacing[i] == 'linspace': x = paircoords[dim] - self.min[i] integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] == 'logspace': x = paircoords[dim].copy() x[x == 0] = self.min[i] * 0.9 x = numpy.log10(x / self.min[i]) integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] is None: edge = self.edges if self.Ndim == 1 else self.edges[i] integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left') return numpy.ravel_multi_index(integer, self.shape, mode='clip')
[ "def", "linear", "(", "self", ",", "*", "*", "paircoords", ")", ":", "N", "=", "len", "(", "paircoords", "[", "list", "(", "paircoords", ".", "keys", "(", ")", ")", "[", "0", "]", "]", ")", "integer", "=", "numpy", ".", "empty", "(", "N", ",", "(", "'i8'", ",", "(", "self", ".", "Ndim", ",", ")", ")", ")", ".", "T", "# do each dimension", "for", "i", ",", "dim", "in", "enumerate", "(", "self", ".", "dims", ")", ":", "if", "self", ".", "spacing", "[", "i", "]", "==", "'linspace'", ":", "x", "=", "paircoords", "[", "dim", "]", "-", "self", ".", "min", "[", "i", "]", "integer", "[", "i", "]", "=", "numpy", ".", "ceil", "(", "x", "*", "self", ".", "inv", "[", "i", "]", ")", "elif", "self", ".", "spacing", "[", "i", "]", "==", "'logspace'", ":", "x", "=", "paircoords", "[", "dim", "]", ".", "copy", "(", ")", "x", "[", "x", "==", "0", "]", "=", "self", ".", "min", "[", "i", "]", "*", "0.9", "x", "=", "numpy", ".", "log10", "(", "x", "/", "self", ".", "min", "[", "i", "]", ")", "integer", "[", "i", "]", "=", "numpy", ".", "ceil", "(", "x", "*", "self", ".", "inv", "[", "i", "]", ")", "elif", "self", ".", "spacing", "[", "i", "]", "is", "None", ":", "edge", "=", "self", ".", "edges", "if", "self", ".", "Ndim", "==", "1", "else", "self", ".", "edges", "[", "i", "]", "integer", "[", "i", "]", "=", "numpy", ".", "searchsorted", "(", "edge", ",", "paircoords", "[", "dim", "]", ",", "side", "=", "'left'", ")", "return", "numpy", ".", "ravel_multi_index", "(", "integer", ",", "self", ".", "shape", ",", "mode", "=", "'clip'", ")" ]
Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index
[ "Linearize", "bin", "indices", "." ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L172-L208
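linear() turns per-dimension bin coordinates into a single flat index with ravel_multi_index(mode='clip'), where index 0 and index N+1 act as underflow/overflow bins. A one-dimensional sketch of the 'linspace' branch:

```python
import numpy

edges = numpy.linspace(0.0, 10.0, 6)   # 5 bins of width 2
N = len(edges) - 1
inv = N / (edges[-1] - edges[0])

r = numpy.array([-1.0, 0.5, 3.9, 9.9, 42.0])
integer = numpy.ceil((r - edges[0]) * inv).astype('i8')
dig = numpy.ravel_multi_index([integer], (N + 2,), mode='clip')
print(dig)   # [0 1 2 5 6] -> underflow, bin 1, bin 2, bin 5, overflow
```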
rainwoodman/kdcount
kdcount/correlate.py
Binning.update_sums
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None): """ The main function that digitizes the pair counts, calls bincount for the appropriate `sum1` and `sum2` values, and adds them to the input arrays, will modify sum1, sum2, N, and centers_sum inplace. """ # the summation values for this (r,i,j) sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2) # digitize digr = self.digitize(r, i, j, data1, data2) if len(digr) == 3 and isinstance(digr[1], dict): dig, paircoords, weights = digr elif len(digr) == 2 and isinstance(digr[1], dict): dig, paircoords = digr weights = None else: dig = digr paircoords = None weights = None # sum 1 def add_one_channel(sum1c, sum1_ijc): if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1: sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size) else: for d in range(sum1c.shape[0]): sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size) if self.channels: if weights is None: raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels") sum1_ij = weights * sum1_ij # sum1_ij[ichannel, dig, dim] for ichannel in range(len(self.channels)): add_one_channel(sum1[ichannel], sum1_ij[ichannel]) else: # sum1_ij[dig, dim] add_one_channel(sum1, sum1_ij) # sum 2, if both data are not points if not numpy.isscalar(sum2): sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size) if N is not None: if not paircoords: raise RuntimeError("Bin center is requested but not returned by digitize") # update the mean coords self._update_mean_coords(dig, N, centers_sum, **paircoords)
python
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None): """ The main function that digitizes the pair counts, calls bincount for the appropriate `sum1` and `sum2` values, and adds them to the input arrays, will modify sum1, sum2, N, and centers_sum inplace. """ # the summation values for this (r,i,j) sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2) # digitize digr = self.digitize(r, i, j, data1, data2) if len(digr) == 3 and isinstance(digr[1], dict): dig, paircoords, weights = digr elif len(digr) == 2 and isinstance(digr[1], dict): dig, paircoords = digr weights = None else: dig = digr paircoords = None weights = None # sum 1 def add_one_channel(sum1c, sum1_ijc): if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1: sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size) else: for d in range(sum1c.shape[0]): sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size) if self.channels: if weights is None: raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels") sum1_ij = weights * sum1_ij # sum1_ij[ichannel, dig, dim] for ichannel in range(len(self.channels)): add_one_channel(sum1[ichannel], sum1_ij[ichannel]) else: # sum1_ij[dig, dim] add_one_channel(sum1, sum1_ij) # sum 2, if both data are not points if not numpy.isscalar(sum2): sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size) if N is not None: if not paircoords: raise RuntimeError("Bin center is requested but not returned by digitize") # update the mean coords self._update_mean_coords(dig, N, centers_sum, **paircoords)
[ "def", "update_sums", "(", "self", ",", "r", ",", "i", ",", "j", ",", "data1", ",", "data2", ",", "sum1", ",", "sum2", ",", "N", "=", "None", ",", "centers_sum", "=", "None", ")", ":", "# the summation values for this (r,i,j)", "sum1_ij", ",", "sum2_ij", "=", "compute_sum_values", "(", "i", ",", "j", ",", "data1", ",", "data2", ")", "# digitize", "digr", "=", "self", ".", "digitize", "(", "r", ",", "i", ",", "j", ",", "data1", ",", "data2", ")", "if", "len", "(", "digr", ")", "==", "3", "and", "isinstance", "(", "digr", "[", "1", "]", ",", "dict", ")", ":", "dig", ",", "paircoords", ",", "weights", "=", "digr", "elif", "len", "(", "digr", ")", "==", "2", "and", "isinstance", "(", "digr", "[", "1", "]", ",", "dict", ")", ":", "dig", ",", "paircoords", "=", "digr", "weights", "=", "None", "else", ":", "dig", "=", "digr", "paircoords", "=", "None", "weights", "=", "None", "# sum 1", "def", "add_one_channel", "(", "sum1c", ",", "sum1_ijc", ")", ":", "if", "numpy", ".", "isscalar", "(", "sum1_ijc", ")", "or", "sum1_ijc", ".", "ndim", "==", "1", ":", "sum1c", ".", "flat", "[", ":", "]", "+=", "utils", ".", "bincount", "(", "dig", ",", "sum1_ijc", ",", "minlength", "=", "sum1c", ".", "size", ")", "else", ":", "for", "d", "in", "range", "(", "sum1c", ".", "shape", "[", "0", "]", ")", ":", "sum1c", "[", "d", "]", ".", "flat", "[", ":", "]", "+=", "utils", ".", "bincount", "(", "dig", ",", "sum1_ijc", "[", "...", ",", "d", "]", ",", "minlength", "=", "sum1c", "[", "d", "]", ".", "size", ")", "if", "self", ".", "channels", ":", "if", "weights", "is", "None", ":", "raise", "RuntimeError", "(", "\"`digitize` of multi channel paircount did not return a weight array for the channels\"", ")", "sum1_ij", "=", "weights", "*", "sum1_ij", "# sum1_ij[ichannel, dig, dim]", "for", "ichannel", "in", "range", "(", "len", "(", "self", ".", "channels", ")", ")", ":", "add_one_channel", "(", "sum1", "[", "ichannel", "]", ",", "sum1_ij", "[", "ichannel", "]", ")", "else", ":", "# sum1_ij[dig, dim]", "add_one_channel", "(", "sum1", ",", "sum1_ij", ")", "# sum 2, if both data are not points", "if", "not", "numpy", ".", "isscalar", "(", "sum2", ")", ":", "sum2", ".", "flat", "[", ":", "]", "+=", "utils", ".", "bincount", "(", "dig", ",", "sum2_ij", ",", "minlength", "=", "sum2", ".", "size", ")", "if", "N", "is", "not", "None", ":", "if", "not", "paircoords", ":", "raise", "RuntimeError", "(", "\"Bin center is requested but not returned by digitize\"", ")", "# update the mean coords", "self", ".", "_update_mean_coords", "(", "dig", ",", "N", ",", "centers_sum", ",", "*", "*", "paircoords", ")" ]
The main function that digitizes the pair counts, calls bincount for the appropriate `sum1` and `sum2` values, and adds them to the input arrays, will modify sum1, sum2, N, and centers_sum inplace.
[ "The", "main", "function", "that", "digitizes", "the", "pair", "counts", "calls", "bincount", "for", "the", "appropriate", "sum1", "and", "sum2", "values", "and", "adds", "them", "to", "the", "input", "arrays" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L238-L291
rainwoodman/kdcount
kdcount/correlate.py
Binning.sum_shapes
def sum_shapes(self, data1, data2): """ Return the shapes of the summation arrays, given the input data and shape of the bins """ # the linear shape (put extra dimensions first) linearshape = [-1] + list(self.shape) # determine the full shape subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)] subshape = [] if len(subshapes) == 2: assert subshapes[0] == subshapes[1] subshape = subshapes[0] elif len(subshapes) == 1: subshape = subshapes[0] fullshape = subshape + list(self.shape) # prepend the shape for different channels if self.channels: fullshape = [len(self.channels)] + fullshape return linearshape, fullshape
python
def sum_shapes(self, data1, data2): """ Return the shapes of the summation arrays, given the input data and shape of the bins """ # the linear shape (put extra dimensions first) linearshape = [-1] + list(self.shape) # determine the full shape subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)] subshape = [] if len(subshapes) == 2: assert subshapes[0] == subshapes[1] subshape = subshapes[0] elif len(subshapes) == 1: subshape = subshapes[0] fullshape = subshape + list(self.shape) # prepend the shape for different channels if self.channels: fullshape = [len(self.channels)] + fullshape return linearshape, fullshape
[ "def", "sum_shapes", "(", "self", ",", "data1", ",", "data2", ")", ":", "# the linear shape (put extra dimensions first)", "linearshape", "=", "[", "-", "1", "]", "+", "list", "(", "self", ".", "shape", ")", "# determine the full shape", "subshapes", "=", "[", "list", "(", "d", ".", "subshape", ")", "for", "d", "in", "[", "data1", ",", "data2", "]", "if", "isinstance", "(", "d", ",", "field", ")", "]", "subshape", "=", "[", "]", "if", "len", "(", "subshapes", ")", "==", "2", ":", "assert", "subshapes", "[", "0", "]", "==", "subshapes", "[", "1", "]", "subshape", "=", "subshapes", "[", "0", "]", "elif", "len", "(", "subshapes", ")", "==", "1", ":", "subshape", "=", "subshapes", "[", "0", "]", "fullshape", "=", "subshape", "+", "list", "(", "self", ".", "shape", ")", "# prepend the shape for different channels", "if", "self", ".", "channels", ":", "fullshape", "=", "[", "len", "(", "self", ".", "channels", ")", "]", "+", "fullshape", "return", "linearshape", ",", "fullshape" ]
Return the shapes of the summation arrays, given the input data and shape of the bins
[ "Return", "the", "shapes", "of", "the", "summation", "arrays", "given", "the", "input", "data", "and", "shape", "of", "the", "bins" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L293-L315
rainwoodman/kdcount
kdcount/correlate.py
Binning._update_mean_coords
def _update_mean_coords(self, dig, N, centers_sum, **paircoords): """ Update the mean coordinate sums """ if N is None or centers_sum is None: return N.flat[:] += utils.bincount(dig, 1., minlength=N.size) for i, dim in enumerate(self.dims): size = centers_sum[i].size centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
python
def _update_mean_coords(self, dig, N, centers_sum, **paircoords): """ Update the mean coordinate sums """ if N is None or centers_sum is None: return N.flat[:] += utils.bincount(dig, 1., minlength=N.size) for i, dim in enumerate(self.dims): size = centers_sum[i].size centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
[ "def", "_update_mean_coords", "(", "self", ",", "dig", ",", "N", ",", "centers_sum", ",", "*", "*", "paircoords", ")", ":", "if", "N", "is", "None", "or", "centers_sum", "is", "None", ":", "return", "N", ".", "flat", "[", ":", "]", "+=", "utils", ".", "bincount", "(", "dig", ",", "1.", ",", "minlength", "=", "N", ".", "size", ")", "for", "i", ",", "dim", "in", "enumerate", "(", "self", ".", "dims", ")", ":", "size", "=", "centers_sum", "[", "i", "]", ".", "size", "centers_sum", "[", "i", "]", ".", "flat", "[", ":", "]", "+=", "utils", ".", "bincount", "(", "dig", ",", "paircoords", "[", "dim", "]", ",", "minlength", "=", "size", ")" ]
Update the mean coordinate sums
[ "Update", "the", "mean", "coordinate", "sums" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L317-L326
rainwoodman/kdcount
kdcount/correlate.py
paircount_queue.work
def work(self, i): """ Internal function that performs the pair-counting """ n1, n2 = self.p[i] # initialize the total arrays for this process sum1 = numpy.zeros_like(self.sum1g) sum2 = 1. if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g) if self.compute_mean_coords: N = numpy.zeros_like(self.N) centers_sum = [numpy.zeros_like(c) for c in self.centers] else: N = None; centers_sum = None if self.bins.enable_fast_node_count: # field x points is not supported. # because it is more likely need to deal # with broadcasting sum1attrs = [ d.attr for d in self.data ] counts, sum1c = n1.count(n2, self.bins.edges, attrs=sum1attrs) sum1[..., :-1] = sum1c sum1[..., -1] = 0 else: def callback(r, i, j): # just call the binning function, passing the # sum arrays to fill in self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum) n1.enum(n2, self.bins.Rmax, process=callback) if not self.compute_mean_coords: return sum1, sum2 else: return sum1, sum2, N, centers_sum
python
def work(self, i): """ Internal function that performs the pair-counting """ n1, n2 = self.p[i] # initialize the total arrays for this process sum1 = numpy.zeros_like(self.sum1g) sum2 = 1. if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g) if self.compute_mean_coords: N = numpy.zeros_like(self.N) centers_sum = [numpy.zeros_like(c) for c in self.centers] else: N = None; centers_sum = None if self.bins.enable_fast_node_count: # field x points is not supported. # because it is more likely need to deal # with broadcasting sum1attrs = [ d.attr for d in self.data ] counts, sum1c = n1.count(n2, self.bins.edges, attrs=sum1attrs) sum1[..., :-1] = sum1c sum1[..., -1] = 0 else: def callback(r, i, j): # just call the binning function, passing the # sum arrays to fill in self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum) n1.enum(n2, self.bins.Rmax, process=callback) if not self.compute_mean_coords: return sum1, sum2 else: return sum1, sum2, N, centers_sum
[ "def", "work", "(", "self", ",", "i", ")", ":", "n1", ",", "n2", "=", "self", ".", "p", "[", "i", "]", "# initialize the total arrays for this process", "sum1", "=", "numpy", ".", "zeros_like", "(", "self", ".", "sum1g", ")", "sum2", "=", "1.", "if", "not", "self", ".", "pts_only", ":", "sum2", "=", "numpy", ".", "zeros_like", "(", "self", ".", "sum2g", ")", "if", "self", ".", "compute_mean_coords", ":", "N", "=", "numpy", ".", "zeros_like", "(", "self", ".", "N", ")", "centers_sum", "=", "[", "numpy", ".", "zeros_like", "(", "c", ")", "for", "c", "in", "self", ".", "centers", "]", "else", ":", "N", "=", "None", "centers_sum", "=", "None", "if", "self", ".", "bins", ".", "enable_fast_node_count", ":", "# field x points is not supported.", "# because it is more likely need to deal", "# with broadcasting", "sum1attrs", "=", "[", "d", ".", "attr", "for", "d", "in", "self", ".", "data", "]", "counts", ",", "sum1c", "=", "n1", ".", "count", "(", "n2", ",", "self", ".", "bins", ".", "edges", ",", "attrs", "=", "sum1attrs", ")", "sum1", "[", "...", ",", ":", "-", "1", "]", "=", "sum1c", "sum1", "[", "...", ",", "-", "1", "]", "=", "0", "else", ":", "def", "callback", "(", "r", ",", "i", ",", "j", ")", ":", "# just call the binning function, passing the", "# sum arrays to fill in", "self", ".", "bins", ".", "update_sums", "(", "r", ",", "i", ",", "j", ",", "self", ".", "data", "[", "0", "]", ",", "self", ".", "data", "[", "1", "]", ",", "sum1", ",", "sum2", ",", "N", "=", "N", ",", "centers_sum", "=", "centers_sum", ")", "n1", ".", "enum", "(", "n2", ",", "self", ".", "bins", ".", "Rmax", ",", "process", "=", "callback", ")", "if", "not", "self", ".", "compute_mean_coords", ":", "return", "sum1", ",", "sum2", "else", ":", "return", "sum1", ",", "sum2", ",", "N", ",", "centers_sum" ]
Internal function that performs the pair-counting
[ "Internal", "function", "that", "performs", "the", "pair", "-", "counting" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L739-L778
rainwoodman/kdcount
kdcount/correlate.py
paircount_queue.reduce
def reduce(self, sum1, sum2, *args):
    """ The internal reduce function that sums the results from various processors """
    self.sum1g[...] += sum1
    if not self.pts_only:
        self.sum2g[...] += sum2
    if self.compute_mean_coords:
        N, centers_sum = args
        self.N[...] += N
        for i in range(self.bins.Ndim):
            self.centers[i][...] += centers_sum[i]
python
def reduce(self, sum1, sum2, *args):
    """ The internal reduce function that sums the results from various processors """
    self.sum1g[...] += sum1
    if not self.pts_only:
        self.sum2g[...] += sum2
    if self.compute_mean_coords:
        N, centers_sum = args
        self.N[...] += N
        for i in range(self.bins.Ndim):
            self.centers[i][...] += centers_sum[i]
[ "def", "reduce", "(", "self", ",", "sum1", ",", "sum2", ",", "*", "args", ")", ":", "self", ".", "sum1g", "[", "...", "]", "+=", "sum1", "if", "not", "self", ".", "pts_only", ":", "self", ".", "sum2g", "[", "...", "]", "+=", "sum2", "if", "self", ".", "compute_mean_coords", ":", "N", ",", "centers_sum", "=", "args", "self", ".", "N", "[", "...", "]", "+=", "N", "for", "i", "in", "range", "(", "self", ".", "bins", ".", "Ndim", ")", ":", "self", ".", "centers", "[", "i", "]", "[", "...", "]", "+=", "centers_sum", "[", "i", "]" ]
The internal reduce function that sums the results from various processors
[ "The", "internal", "reduce", "function", "that", "sums", "the", "results", "from", "various", "processors" ]
train
https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L780-L792
hammerlab/stanity
stanity/psis.py
psisloo
def psisloo(log_lik, **kwargs):
    r"""PSIS leave-one-out log predictive densities.

    Computes the log predictive densities given posterior samples of the log
    likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`.
    Returns a sum of the leave-one-out log predictive densities `loo`,
    individual leave-one-out log predictive density terms `loos` and an
    estimate of Pareto tail indeces `ks`. The estimates are unreliable if
    tail index ``k > 0.7`` (see more in the references listed in the module
    docstring).

    Additional keyword arguments are passed to the :meth:`psislw()` function
    (see the corresponding documentation).

    Parameters
    ----------
    log_lik : ndarray
        Array of size n x m containing n posterior samples of the log
        likelihood terms :math:`p(y_i|\theta^s)`.

    Returns
    -------
    loo : scalar
        sum of the leave-one-out log predictive densities

    loos : ndarray
        individual leave-one-out log predictive density terms

    ks : ndarray
        estimated Pareto tail indeces

    """
    # ensure overwrite flag in passed arguments
    kwargs['overwrite_lw'] = True
    # log raw weights from log_lik
    lw = -log_lik
    # compute Pareto smoothed log weights given raw log weights
    lw, ks = psislw(lw, **kwargs)
    # compute
    lw += log_lik
    loos = sumlogs(lw, axis=0)
    loo = loos.sum()
    return loo, loos, ks
python
def psisloo(log_lik, **kwargs): r"""PSIS leave-one-out log predictive densities. Computes the log predictive densities given posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`. Returns a sum of the leave-one-out log predictive densities `loo`, individual leave-one-out log predictive density terms `loos` and an estimate of Pareto tail indeces `ks`. The estimates are unreliable if tail index ``k > 0.7`` (see more in the references listed in the module docstring). Additional keyword arguments are passed to the :meth:`psislw()` function (see the corresponding documentation). Parameters ---------- log_lik : ndarray Array of size n x m containing n posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)`. Returns ------- loo : scalar sum of the leave-one-out log predictive densities loos : ndarray individual leave-one-out log predictive density terms ks : ndarray estimated Pareto tail indeces """ # ensure overwrite flag in passed arguments kwargs['overwrite_lw'] = True # log raw weights from log_lik lw = -log_lik # compute Pareto smoothed log weights given raw log weights lw, ks = psislw(lw, **kwargs) # compute lw += log_lik loos = sumlogs(lw, axis=0) loo = loos.sum() return loo, loos, ks
[ "def", "psisloo", "(", "log_lik", ",", "*", "*", "kwargs", ")", ":", "# ensure overwrite flag in passed arguments", "kwargs", "[", "'overwrite_lw'", "]", "=", "True", "# log raw weights from log_lik", "lw", "=", "-", "log_lik", "# compute Pareto smoothed log weights given raw log weights", "lw", ",", "ks", "=", "psislw", "(", "lw", ",", "*", "*", "kwargs", ")", "# compute", "lw", "+=", "log_lik", "loos", "=", "sumlogs", "(", "lw", ",", "axis", "=", "0", ")", "loo", "=", "loos", ".", "sum", "(", ")", "return", "loo", ",", "loos", ",", "ks" ]
r"""PSIS leave-one-out log predictive densities. Computes the log predictive densities given posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`. Returns a sum of the leave-one-out log predictive densities `loo`, individual leave-one-out log predictive density terms `loos` and an estimate of Pareto tail indeces `ks`. The estimates are unreliable if tail index ``k > 0.7`` (see more in the references listed in the module docstring). Additional keyword arguments are passed to the :meth:`psislw()` function (see the corresponding documentation). Parameters ---------- log_lik : ndarray Array of size n x m containing n posterior samples of the log likelihood terms :math:`p(y_i|\theta^s)`. Returns ------- loo : scalar sum of the leave-one-out log predictive densities loos : ndarray individual leave-one-out log predictive density terms ks : ndarray estimated Pareto tail indeces
[ "r", "PSIS", "leave", "-", "one", "-", "out", "log", "predictive", "densities", "." ]
train
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L69-L110
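Illustrative usage sketch for the psisloo record above (not part of the dataset; it assumes the function is importable as stanity.psis.psisloo and uses made-up log-likelihood draws):

import numpy as np
from stanity.psis import psisloo

# hypothetical pointwise log-likelihoods: 1000 posterior draws x 50 observations
log_lik = np.random.normal(loc=-1.0, scale=0.1, size=(1000, 50))
loo, loos, ks = psisloo(log_lik)
print(loo)                # total leave-one-out log predictive density
print((ks > 0.7).sum())   # number of observations with an unreliable tail index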
hammerlab/stanity
stanity/psis.py
psislw
def psislw(lw, Reff=1.0, overwrite_lw=False):
    """Pareto smoothed importance sampling (PSIS).

    Parameters
    ----------
    lw : ndarray
        Array of size n x m containing m sets of n log weights. It is also
        possible to provide one dimensional array of length n.

    Reff : scalar, optional
        relative MCMC efficiency ``N_eff / N``

    overwrite_lw : bool, optional
        If True, the input array `lw` is smoothed in-place, assuming the array
        is F-contiguous. By default, a new array is allocated.

    Returns
    -------
    lw_out : ndarray
        smoothed log weights
    kss : ndarray
        Pareto tail indices

    """
    if lw.ndim == 2:
        n, m = lw.shape
    elif lw.ndim == 1:
        n = len(lw)
        m = 1
    else:
        raise ValueError("Argument `lw` must be 1 or 2 dimensional.")
    if n <= 1:
        raise ValueError("More than one log-weight needed.")

    if overwrite_lw and lw.flags.f_contiguous:
        # in-place operation
        lw_out = lw
    else:
        # allocate new array for output
        lw_out = np.copy(lw, order='F')

    # allocate output array for kss
    kss = np.empty(m)

    # precalculate constants
    cutoff_ind = - int(np.ceil(min(0.2 * n, 3 * np.sqrt(n / Reff)))) - 1
    cutoffmin = np.log(np.finfo(float).tiny)
    logn = np.log(n)
    k_min = 1/3

    # loop over sets of log weights
    for i, x in enumerate(lw_out.T if lw_out.ndim == 2 else lw_out[None, :]):
        # improve numerical accuracy
        x -= np.max(x)
        # sort the array
        x_sort_ind = np.argsort(x)
        # divide log weights into body and right tail
        xcutoff = max(
            x[x_sort_ind[cutoff_ind]],
            cutoffmin
        )
        expxcutoff = np.exp(xcutoff)
        tailinds, = np.where(x > xcutoff)
        x2 = x[tailinds]
        n2 = len(x2)
        if n2 <= 4:
            # not enough tail samples for gpdfitnew
            k = np.inf
        else:
            # order of tail samples
            x2si = np.argsort(x2)
            # fit generalized Pareto distribution to the right tail samples
            np.exp(x2, out=x2)
            x2 -= expxcutoff
            k, sigma = gpdfitnew(x2, sort=x2si)
        if k >= k_min and not np.isinf(k):
            # no smoothing if short tail or GPD fit failed
            # compute ordered statistic for the fit
            sti = np.arange(0.5, n2)
            sti /= n2
            qq = gpinv(sti, k, sigma)
            qq += expxcutoff
            np.log(qq, out=qq)
            # place the smoothed tail into the output array
            x[tailinds[x2si]] = qq
            # truncate smoothed values to the largest raw weight 0
            x[x > 0] = 0
        # renormalize weights
        x -= sumlogs(x)
        # store tail index k
        kss[i] = k

    # If the provided input array is one dimensional, return kss as scalar.
    if lw_out.ndim == 1:
        kss = kss[0]

    return lw_out, kss
python
def psislw(lw, Reff=1.0, overwrite_lw=False): """Pareto smoothed importance sampling (PSIS). Parameters ---------- lw : ndarray Array of size n x m containing m sets of n log weights. It is also possible to provide one dimensional array of length n. Reff : scalar, optional relative MCMC efficiency ``N_eff / N`` overwrite_lw : bool, optional If True, the input array `lw` is smoothed in-place, assuming the array is F-contiguous. By default, a new array is allocated. Returns ------- lw_out : ndarray smoothed log weights kss : ndarray Pareto tail indices """ if lw.ndim == 2: n, m = lw.shape elif lw.ndim == 1: n = len(lw) m = 1 else: raise ValueError("Argument `lw` must be 1 or 2 dimensional.") if n <= 1: raise ValueError("More than one log-weight needed.") if overwrite_lw and lw.flags.f_contiguous: # in-place operation lw_out = lw else: # allocate new array for output lw_out = np.copy(lw, order='F') # allocate output array for kss kss = np.empty(m) # precalculate constants cutoff_ind = - int(np.ceil(min(0.2 * n, 3 * np.sqrt(n / Reff)))) - 1 cutoffmin = np.log(np.finfo(float).tiny) logn = np.log(n) k_min = 1/3 # loop over sets of log weights for i, x in enumerate(lw_out.T if lw_out.ndim == 2 else lw_out[None, :]): # improve numerical accuracy x -= np.max(x) # sort the array x_sort_ind = np.argsort(x) # divide log weights into body and right tail xcutoff = max( x[x_sort_ind[cutoff_ind]], cutoffmin ) expxcutoff = np.exp(xcutoff) tailinds, = np.where(x > xcutoff) x2 = x[tailinds] n2 = len(x2) if n2 <= 4: # not enough tail samples for gpdfitnew k = np.inf else: # order of tail samples x2si = np.argsort(x2) # fit generalized Pareto distribution to the right tail samples np.exp(x2, out=x2) x2 -= expxcutoff k, sigma = gpdfitnew(x2, sort=x2si) if k >= k_min and not np.isinf(k): # no smoothing if short tail or GPD fit failed # compute ordered statistic for the fit sti = np.arange(0.5, n2) sti /= n2 qq = gpinv(sti, k, sigma) qq += expxcutoff np.log(qq, out=qq) # place the smoothed tail into the output array x[tailinds[x2si]] = qq # truncate smoothed values to the largest raw weight 0 x[x > 0] = 0 # renormalize weights x -= sumlogs(x) # store tail index k kss[i] = k # If the provided input array is one dimensional, return kss as scalar. if lw_out.ndim == 1: kss = kss[0] return lw_out, kss
[ "def", "psislw", "(", "lw", ",", "Reff", "=", "1.0", ",", "overwrite_lw", "=", "False", ")", ":", "if", "lw", ".", "ndim", "==", "2", ":", "n", ",", "m", "=", "lw", ".", "shape", "elif", "lw", ".", "ndim", "==", "1", ":", "n", "=", "len", "(", "lw", ")", "m", "=", "1", "else", ":", "raise", "ValueError", "(", "\"Argument `lw` must be 1 or 2 dimensional.\"", ")", "if", "n", "<=", "1", ":", "raise", "ValueError", "(", "\"More than one log-weight needed.\"", ")", "if", "overwrite_lw", "and", "lw", ".", "flags", ".", "f_contiguous", ":", "# in-place operation", "lw_out", "=", "lw", "else", ":", "# allocate new array for output", "lw_out", "=", "np", ".", "copy", "(", "lw", ",", "order", "=", "'F'", ")", "# allocate output array for kss", "kss", "=", "np", ".", "empty", "(", "m", ")", "# precalculate constants", "cutoff_ind", "=", "-", "int", "(", "np", ".", "ceil", "(", "min", "(", "0.2", "*", "n", ",", "3", "*", "np", ".", "sqrt", "(", "n", "/", "Reff", ")", ")", ")", ")", "-", "1", "cutoffmin", "=", "np", ".", "log", "(", "np", ".", "finfo", "(", "float", ")", ".", "tiny", ")", "logn", "=", "np", ".", "log", "(", "n", ")", "k_min", "=", "1", "/", "3", "# loop over sets of log weights", "for", "i", ",", "x", "in", "enumerate", "(", "lw_out", ".", "T", "if", "lw_out", ".", "ndim", "==", "2", "else", "lw_out", "[", "None", ",", ":", "]", ")", ":", "# improve numerical accuracy", "x", "-=", "np", ".", "max", "(", "x", ")", "# sort the array", "x_sort_ind", "=", "np", ".", "argsort", "(", "x", ")", "# divide log weights into body and right tail", "xcutoff", "=", "max", "(", "x", "[", "x_sort_ind", "[", "cutoff_ind", "]", "]", ",", "cutoffmin", ")", "expxcutoff", "=", "np", ".", "exp", "(", "xcutoff", ")", "tailinds", ",", "=", "np", ".", "where", "(", "x", ">", "xcutoff", ")", "x2", "=", "x", "[", "tailinds", "]", "n2", "=", "len", "(", "x2", ")", "if", "n2", "<=", "4", ":", "# not enough tail samples for gpdfitnew", "k", "=", "np", ".", "inf", "else", ":", "# order of tail samples", "x2si", "=", "np", ".", "argsort", "(", "x2", ")", "# fit generalized Pareto distribution to the right tail samples", "np", ".", "exp", "(", "x2", ",", "out", "=", "x2", ")", "x2", "-=", "expxcutoff", "k", ",", "sigma", "=", "gpdfitnew", "(", "x2", ",", "sort", "=", "x2si", ")", "if", "k", ">=", "k_min", "and", "not", "np", ".", "isinf", "(", "k", ")", ":", "# no smoothing if short tail or GPD fit failed", "# compute ordered statistic for the fit", "sti", "=", "np", ".", "arange", "(", "0.5", ",", "n2", ")", "sti", "/=", "n2", "qq", "=", "gpinv", "(", "sti", ",", "k", ",", "sigma", ")", "qq", "+=", "expxcutoff", "np", ".", "log", "(", "qq", ",", "out", "=", "qq", ")", "# place the smoothed tail into the output array", "x", "[", "tailinds", "[", "x2si", "]", "]", "=", "qq", "# truncate smoothed values to the largest raw weight 0", "x", "[", "x", ">", "0", "]", "=", "0", "# renormalize weights", "x", "-=", "sumlogs", "(", "x", ")", "# store tail index k", "kss", "[", "i", "]", "=", "k", "# If the provided input array is one dimensional, return kss as scalar.", "if", "lw_out", ".", "ndim", "==", "1", ":", "kss", "=", "kss", "[", "0", "]", "return", "lw_out", ",", "kss" ]
Pareto smoothed importance sampling (PSIS).

    Parameters
    ----------
    lw : ndarray
        Array of size n x m containing m sets of n log weights. It is also
        possible to provide one dimensional array of length n.

    Reff : scalar, optional
        relative MCMC efficiency ``N_eff / N``

    overwrite_lw : bool, optional
        If True, the input array `lw` is smoothed in-place, assuming the array
        is F-contiguous. By default, a new array is allocated.

    Returns
    -------
    lw_out : ndarray
        smoothed log weights
    kss : ndarray
        Pareto tail indices
[ "Pareto", "smoothed", "importance", "sampling", "(", "PSIS", ")", "." ]
train
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L113-L209
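Illustrative usage sketch for the psislw record above (not part of the dataset; import path and data are assumptions):

import numpy as np
from stanity.psis import psislw

# hypothetical raw importance log-weights: 2000 draws for a single target
raw_lw = np.random.standard_normal(2000)
smoothed_lw, khat = psislw(raw_lw)   # smoothed, self-normalised log weights
weights = np.exp(smoothed_lw)        # these sum to 1 after the renormalisation step
print(khat)                          # reliability diagnostic; khat > 0.7 is suspect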
hammerlab/stanity
stanity/psis.py
gpdfitnew
def gpdfitnew(x, sort=True, sort_in_place=False, return_quadrature=False):
    """Estimate the paramaters for the Generalized Pareto Distribution (GPD)

    Returns empirical Bayes estimate for the parameters of the two-parameter
    generalized Parato distribution given the data.

    Parameters
    ----------
    x : ndarray
        One dimensional data array

    sort : bool or ndarray, optional
        If known in advance, one can provide an array of indices that would
        sort the input array `x`. If the input array is already sorted, provide
        False. If True (default behaviour), the array is sorted internally.

    sort_in_place : bool, optional
        If `sort` is True and `sort_in_place` is True, the array is sorted
        in-place (False by default).

    return_quadrature : bool, optional
        If True, quadrature points and weight `ks` and `w` of the marginal
        posterior distribution of k are also calculated and returned. False by
        default.

    Returns
    -------
    k, sigma : float
        estimated parameter values

    ks, w : ndarray
        Quadrature points and weights of the marginal posterior distribution
        of `k`. Returned only if `return_quadrature` is True.

    Notes
    -----
    This function returns a negative of Zhang and Stephens's k, because it is
    more common parameterisation.

    """
    if x.ndim != 1 or len(x) <= 1:
        raise ValueError("Invalid input array.")

    # check if x should be sorted
    if sort is True:
        if sort_in_place:
            x.sort()
            xsorted = True
        else:
            sort = np.argsort(x)
            xsorted = False
    elif sort is False:
        xsorted = True
    else:
        xsorted = False

    n = len(x)
    PRIOR = 3
    m = 30 + int(np.sqrt(n))

    bs = np.arange(1, m + 1, dtype=float)
    bs -= 0.5
    np.divide(m, bs, out=bs)
    np.sqrt(bs, out=bs)
    np.subtract(1, bs, out=bs)
    if xsorted:
        bs /= PRIOR * x[int(n/4 + 0.5) - 1]
        bs += 1 / x[-1]
    else:
        bs /= PRIOR * x[sort[int(n/4 + 0.5) - 1]]
        bs += 1 / x[sort[-1]]

    ks = np.negative(bs)
    temp = ks[:,None] * x
    np.log1p(temp, out=temp)
    np.mean(temp, axis=1, out=ks)

    L = bs / ks
    np.negative(L, out=L)
    np.log(L, out=L)
    L -= ks
    L -= 1
    L *= n

    temp = L - L[:,None]
    np.exp(temp, out=temp)
    w = np.sum(temp, axis=1)
    np.divide(1, w, out=w)

    # remove negligible weights
    dii = w >= 10 * np.finfo(float).eps
    if not np.all(dii):
        w = w[dii]
        bs = bs[dii]
    # normalise w
    w /= w.sum()

    # posterior mean for b
    b = np.sum(bs * w)
    # Estimate for k, note that we return a negative of Zhang and
    # Stephens's k, because it is more common parameterisation.
    temp = (-b) * x  # pylint: disable=invalid-unary-operand-type
    np.log1p(temp, out=temp)
    k = np.mean(temp)
    if return_quadrature:
        np.negative(x, out=temp)
        temp = bs[:, None] * temp
        np.log1p(temp, out=temp)
        ks = np.mean(temp, axis=1)

    # estimate for sigma
    sigma = -k / b * n / (n - 0)
    # weakly informative prior for k
    a = 10
    k = k * n / (n+a) + a * 0.5 / (n+a)
    if return_quadrature:
        ks *= n / (n+a)
        ks += a * 0.5 / (n+a)

    if return_quadrature:
        return k, sigma, ks, w
    else:
        return k, sigma
python
def gpdfitnew(x, sort=True, sort_in_place=False, return_quadrature=False): """Estimate the paramaters for the Generalized Pareto Distribution (GPD) Returns empirical Bayes estimate for the parameters of the two-parameter generalized Parato distribution given the data. Parameters ---------- x : ndarray One dimensional data array sort : bool or ndarray, optional If known in advance, one can provide an array of indices that would sort the input array `x`. If the input array is already sorted, provide False. If True (default behaviour), the array is sorted internally. sort_in_place : bool, optional If `sort` is True and `sort_in_place` is True, the array is sorted in-place (False by default). return_quadrature : bool, optional If True, quadrature points and weight `ks` and `w` of the marginal posterior distribution of k are also calculated and returned. False by default. Returns ------- k, sigma : float estimated parameter values ks, w : ndarray Quadrature points and weights of the marginal posterior distribution of `k`. Returned only if `return_quadrature` is True. Notes ----- This function returns a negative of Zhang and Stephens's k, because it is more common parameterisation. """ if x.ndim != 1 or len(x) <= 1: raise ValueError("Invalid input array.") # check if x should be sorted if sort is True: if sort_in_place: x.sort() xsorted = True else: sort = np.argsort(x) xsorted = False elif sort is False: xsorted = True else: xsorted = False n = len(x) PRIOR = 3 m = 30 + int(np.sqrt(n)) bs = np.arange(1, m + 1, dtype=float) bs -= 0.5 np.divide(m, bs, out=bs) np.sqrt(bs, out=bs) np.subtract(1, bs, out=bs) if xsorted: bs /= PRIOR * x[int(n/4 + 0.5) - 1] bs += 1 / x[-1] else: bs /= PRIOR * x[sort[int(n/4 + 0.5) - 1]] bs += 1 / x[sort[-1]] ks = np.negative(bs) temp = ks[:,None] * x np.log1p(temp, out=temp) np.mean(temp, axis=1, out=ks) L = bs / ks np.negative(L, out=L) np.log(L, out=L) L -= ks L -= 1 L *= n temp = L - L[:,None] np.exp(temp, out=temp) w = np.sum(temp, axis=1) np.divide(1, w, out=w) # remove negligible weights dii = w >= 10 * np.finfo(float).eps if not np.all(dii): w = w[dii] bs = bs[dii] # normalise w w /= w.sum() # posterior mean for b b = np.sum(bs * w) # Estimate for k, note that we return a negative of Zhang and # Stephens's k, because it is more common parameterisation. temp = (-b) * x # pylint: disable=invalid-unary-operand-type np.log1p(temp, out=temp) k = np.mean(temp) if return_quadrature: np.negative(x, out=temp) temp = bs[:, None] * temp np.log1p(temp, out=temp) ks = np.mean(temp, axis=1) # estimate for sigma sigma = -k / b * n / (n - 0) # weakly informative prior for k a = 10 k = k * n / (n+a) + a * 0.5 / (n+a) if return_quadrature: ks *= n / (n+a) ks += a * 0.5 / (n+a) if return_quadrature: return k, sigma, ks, w else: return k, sigma
[ "def", "gpdfitnew", "(", "x", ",", "sort", "=", "True", ",", "sort_in_place", "=", "False", ",", "return_quadrature", "=", "False", ")", ":", "if", "x", ".", "ndim", "!=", "1", "or", "len", "(", "x", ")", "<=", "1", ":", "raise", "ValueError", "(", "\"Invalid input array.\"", ")", "# check if x should be sorted", "if", "sort", "is", "True", ":", "if", "sort_in_place", ":", "x", ".", "sort", "(", ")", "xsorted", "=", "True", "else", ":", "sort", "=", "np", ".", "argsort", "(", "x", ")", "xsorted", "=", "False", "elif", "sort", "is", "False", ":", "xsorted", "=", "True", "else", ":", "xsorted", "=", "False", "n", "=", "len", "(", "x", ")", "PRIOR", "=", "3", "m", "=", "30", "+", "int", "(", "np", ".", "sqrt", "(", "n", ")", ")", "bs", "=", "np", ".", "arange", "(", "1", ",", "m", "+", "1", ",", "dtype", "=", "float", ")", "bs", "-=", "0.5", "np", ".", "divide", "(", "m", ",", "bs", ",", "out", "=", "bs", ")", "np", ".", "sqrt", "(", "bs", ",", "out", "=", "bs", ")", "np", ".", "subtract", "(", "1", ",", "bs", ",", "out", "=", "bs", ")", "if", "xsorted", ":", "bs", "/=", "PRIOR", "*", "x", "[", "int", "(", "n", "/", "4", "+", "0.5", ")", "-", "1", "]", "bs", "+=", "1", "/", "x", "[", "-", "1", "]", "else", ":", "bs", "/=", "PRIOR", "*", "x", "[", "sort", "[", "int", "(", "n", "/", "4", "+", "0.5", ")", "-", "1", "]", "]", "bs", "+=", "1", "/", "x", "[", "sort", "[", "-", "1", "]", "]", "ks", "=", "np", ".", "negative", "(", "bs", ")", "temp", "=", "ks", "[", ":", ",", "None", "]", "*", "x", "np", ".", "log1p", "(", "temp", ",", "out", "=", "temp", ")", "np", ".", "mean", "(", "temp", ",", "axis", "=", "1", ",", "out", "=", "ks", ")", "L", "=", "bs", "/", "ks", "np", ".", "negative", "(", "L", ",", "out", "=", "L", ")", "np", ".", "log", "(", "L", ",", "out", "=", "L", ")", "L", "-=", "ks", "L", "-=", "1", "L", "*=", "n", "temp", "=", "L", "-", "L", "[", ":", ",", "None", "]", "np", ".", "exp", "(", "temp", ",", "out", "=", "temp", ")", "w", "=", "np", ".", "sum", "(", "temp", ",", "axis", "=", "1", ")", "np", ".", "divide", "(", "1", ",", "w", ",", "out", "=", "w", ")", "# remove negligible weights", "dii", "=", "w", ">=", "10", "*", "np", ".", "finfo", "(", "float", ")", ".", "eps", "if", "not", "np", ".", "all", "(", "dii", ")", ":", "w", "=", "w", "[", "dii", "]", "bs", "=", "bs", "[", "dii", "]", "# normalise w", "w", "/=", "w", ".", "sum", "(", ")", "# posterior mean for b", "b", "=", "np", ".", "sum", "(", "bs", "*", "w", ")", "# Estimate for k, note that we return a negative of Zhang and", "# Stephens's k, because it is more common parameterisation.", "temp", "=", "(", "-", "b", ")", "*", "x", "# pylint: disable=invalid-unary-operand-type", "np", ".", "log1p", "(", "temp", ",", "out", "=", "temp", ")", "k", "=", "np", ".", "mean", "(", "temp", ")", "if", "return_quadrature", ":", "np", ".", "negative", "(", "x", ",", "out", "=", "temp", ")", "temp", "=", "bs", "[", ":", ",", "None", "]", "*", "temp", "np", ".", "log1p", "(", "temp", ",", "out", "=", "temp", ")", "ks", "=", "np", ".", "mean", "(", "temp", ",", "axis", "=", "1", ")", "# estimate for sigma", "sigma", "=", "-", "k", "/", "b", "*", "n", "/", "(", "n", "-", "0", ")", "# weakly informative prior for k", "a", "=", "10", "k", "=", "k", "*", "n", "/", "(", "n", "+", "a", ")", "+", "a", "*", "0.5", "/", "(", "n", "+", "a", ")", "if", "return_quadrature", ":", "ks", "*=", "n", "/", "(", "n", "+", "a", ")", "ks", "+=", "a", "*", "0.5", "/", "(", "n", "+", "a", ")", "if", "return_quadrature", ":", "return", "k", ",", "sigma", ",", 
"ks", ",", "w", "else", ":", "return", "k", ",", "sigma" ]
Estimate the paramaters for the Generalized Pareto Distribution (GPD)

    Returns empirical Bayes estimate for the parameters of the two-parameter
    generalized Parato distribution given the data.

    Parameters
    ----------
    x : ndarray
        One dimensional data array

    sort : bool or ndarray, optional
        If known in advance, one can provide an array of indices that would
        sort the input array `x`. If the input array is already sorted, provide
        False. If True (default behaviour), the array is sorted internally.

    sort_in_place : bool, optional
        If `sort` is True and `sort_in_place` is True, the array is sorted
        in-place (False by default).

    return_quadrature : bool, optional
        If True, quadrature points and weight `ks` and `w` of the marginal
        posterior distribution of k are also calculated and returned. False by
        default.

    Returns
    -------
    k, sigma : float
        estimated parameter values

    ks, w : ndarray
        Quadrature points and weights of the marginal posterior distribution
        of `k`. Returned only if `return_quadrature` is True.

    Notes
    -----
    This function returns a negative of Zhang and Stephens's k, because it is
    more common parameterisation.
[ "Estimate", "the", "paramaters", "for", "the", "Generalized", "Pareto", "Distribution", "(", "GPD", ")" ]
train
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L212-L332
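Illustrative usage sketch for the gpdfitnew record above (not part of the dataset; import path and data are assumptions):

import numpy as np
from stanity.psis import gpdfitnew

# hypothetical positive tail samples (exceedances over a chosen threshold)
tail = np.random.exponential(scale=2.0, size=500)
k, sigma = gpdfitnew(tail)   # empirical Bayes estimates of the GPD shape and scale
print(k, sigma)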
hammerlab/stanity
stanity/psis.py
gpinv
def gpinv(p, k, sigma):
    """Inverse Generalised Pareto distribution function."""
    x = np.empty(p.shape)
    x.fill(np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)
    if np.all(ok):
        if np.abs(k) < np.finfo(float).eps:
            np.negative(p, out=x)
            np.log1p(x, out=x)
            np.negative(x, out=x)
        else:
            np.negative(p, out=x)
            np.log1p(x, out=x)
            x *= -k
            np.expm1(x, out=x)
            x /= k
        x *= sigma
    else:
        if np.abs(k) < np.finfo(float).eps:
            # x[ok] = - np.log1p(-p[ok])
            temp = p[ok]
            np.negative(temp, out=temp)
            np.log1p(temp, out=temp)
            np.negative(temp, out=temp)
            x[ok] = temp
        else:
            # x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
            temp = p[ok]
            np.negative(temp, out=temp)
            np.log1p(temp, out=temp)
            temp *= -k
            np.expm1(temp, out=temp)
            temp /= k
            x[ok] = temp
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            x[p == 1] = np.inf
        else:
            x[p == 1] = -sigma / k
    return x
python
def gpinv(p, k, sigma): """Inverse Generalised Pareto distribution function.""" x = np.empty(p.shape) x.fill(np.nan) if sigma <= 0: return x ok = (p > 0) & (p < 1) if np.all(ok): if np.abs(k) < np.finfo(float).eps: np.negative(p, out=x) np.log1p(x, out=x) np.negative(x, out=x) else: np.negative(p, out=x) np.log1p(x, out=x) x *= -k np.expm1(x, out=x) x /= k x *= sigma else: if np.abs(k) < np.finfo(float).eps: # x[ok] = - np.log1p(-p[ok]) temp = p[ok] np.negative(temp, out=temp) np.log1p(temp, out=temp) np.negative(temp, out=temp) x[ok] = temp else: # x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k temp = p[ok] np.negative(temp, out=temp) np.log1p(temp, out=temp) temp *= -k np.expm1(temp, out=temp) temp /= k x[ok] = temp x *= sigma x[p == 0] = 0 if k >= 0: x[p == 1] = np.inf else: x[p == 1] = -sigma / k return x
[ "def", "gpinv", "(", "p", ",", "k", ",", "sigma", ")", ":", "x", "=", "np", ".", "empty", "(", "p", ".", "shape", ")", "x", ".", "fill", "(", "np", ".", "nan", ")", "if", "sigma", "<=", "0", ":", "return", "x", "ok", "=", "(", "p", ">", "0", ")", "&", "(", "p", "<", "1", ")", "if", "np", ".", "all", "(", "ok", ")", ":", "if", "np", ".", "abs", "(", "k", ")", "<", "np", ".", "finfo", "(", "float", ")", ".", "eps", ":", "np", ".", "negative", "(", "p", ",", "out", "=", "x", ")", "np", ".", "log1p", "(", "x", ",", "out", "=", "x", ")", "np", ".", "negative", "(", "x", ",", "out", "=", "x", ")", "else", ":", "np", ".", "negative", "(", "p", ",", "out", "=", "x", ")", "np", ".", "log1p", "(", "x", ",", "out", "=", "x", ")", "x", "*=", "-", "k", "np", ".", "expm1", "(", "x", ",", "out", "=", "x", ")", "x", "/=", "k", "x", "*=", "sigma", "else", ":", "if", "np", ".", "abs", "(", "k", ")", "<", "np", ".", "finfo", "(", "float", ")", ".", "eps", ":", "# x[ok] = - np.log1p(-p[ok])", "temp", "=", "p", "[", "ok", "]", "np", ".", "negative", "(", "temp", ",", "out", "=", "temp", ")", "np", ".", "log1p", "(", "temp", ",", "out", "=", "temp", ")", "np", ".", "negative", "(", "temp", ",", "out", "=", "temp", ")", "x", "[", "ok", "]", "=", "temp", "else", ":", "# x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k", "temp", "=", "p", "[", "ok", "]", "np", ".", "negative", "(", "temp", ",", "out", "=", "temp", ")", "np", ".", "log1p", "(", "temp", ",", "out", "=", "temp", ")", "temp", "*=", "-", "k", "np", ".", "expm1", "(", "temp", ",", "out", "=", "temp", ")", "temp", "/=", "k", "x", "[", "ok", "]", "=", "temp", "x", "*=", "sigma", "x", "[", "p", "==", "0", "]", "=", "0", "if", "k", ">=", "0", ":", "x", "[", "p", "==", "1", "]", "=", "np", ".", "inf", "else", ":", "x", "[", "p", "==", "1", "]", "=", "-", "sigma", "/", "k", "return", "x" ]
Inverse Generalised Pareto distribution function.
[ "Inverse", "Generalised", "Pareto", "distribution", "function", "." ]
train
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L335-L377
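Illustrative usage sketch combining the gpinv and gpdfitnew records (not part of the dataset; import path and data are assumptions):

import numpy as np
from stanity.psis import gpdfitnew, gpinv

tail = np.random.exponential(scale=2.0, size=500)
k, sigma = gpdfitnew(tail)
# quantiles of the fitted generalized Pareto distribution at a few probabilities
print(gpinv(np.array([0.1, 0.5, 0.9]), k, sigma))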
hammerlab/stanity
stanity/psis.py
sumlogs
def sumlogs(x, axis=None, out=None):
    """Sum of vector where numbers are represented by their logarithms.

    Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that
    it works even when elements have large magnitude.

    """
    maxx = x.max(axis=axis, keepdims=True)
    xnorm = x - maxx
    np.exp(xnorm, out=xnorm)
    out = np.sum(xnorm, axis=axis, out=out)
    if isinstance(out, np.ndarray):
        np.log(out, out=out)
    else:
        out = np.log(out)
    out += np.squeeze(maxx)
    return out
python
def sumlogs(x, axis=None, out=None):
    """Sum of vector where numbers are represented by their logarithms.

    Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that
    it works even when elements have large magnitude.

    """
    maxx = x.max(axis=axis, keepdims=True)
    xnorm = x - maxx
    np.exp(xnorm, out=xnorm)
    out = np.sum(xnorm, axis=axis, out=out)
    if isinstance(out, np.ndarray):
        np.log(out, out=out)
    else:
        out = np.log(out)
    out += np.squeeze(maxx)
    return out
[ "def", "sumlogs", "(", "x", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "maxx", "=", "x", ".", "max", "(", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "xnorm", "=", "x", "-", "maxx", "np", ".", "exp", "(", "xnorm", ",", "out", "=", "xnorm", ")", "out", "=", "np", ".", "sum", "(", "xnorm", ",", "axis", "=", "axis", ",", "out", "=", "out", ")", "if", "isinstance", "(", "out", ",", "np", ".", "ndarray", ")", ":", "np", ".", "log", "(", "out", ",", "out", "=", "out", ")", "else", ":", "out", "=", "np", ".", "log", "(", "out", ")", "out", "+=", "np", ".", "squeeze", "(", "maxx", ")", "return", "out" ]
Sum of vector where numbers are represented by their logarithms. Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that it works even when elements have large magnitude.
[ "Sum", "of", "vector", "where", "numbers", "are", "represented", "by", "their", "logarithms", "." ]
train
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psis.py#L380-L396
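Illustrative usage sketch for the sumlogs record above (not part of the dataset; import path is an assumption):

import numpy as np
from stanity.psis import sumlogs

x = np.array([1000.0, 1000.0, 1000.0])
print(sumlogs(x))                   # ~1001.0986, computed without overflow
print(np.log(np.sum(np.exp(x))))    # the naive log-sum-exp overflows to inf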
rapidpro/expressions
python/temba_expressions/functions/__init__.py
FunctionManager.add_library
def add_library(self, library):
    """
    Adds functions from a library module
    :param library: the library module
    :return:
    """
    for fn in library.__dict__.copy().values():
        # ignore imported methods and anything beginning __
        if inspect.isfunction(fn) and inspect.getmodule(fn) == library and not fn.__name__.startswith('__'):
            name = fn.__name__.lower()

            # strip preceding _ chars used to avoid conflicts with Java keywords
            if name.startswith('_'):
                name = name[1:]

            self._functions[name] = fn
python
def add_library(self, library):
    """
    Adds functions from a library module
    :param library: the library module
    :return:
    """
    for fn in library.__dict__.copy().values():
        # ignore imported methods and anything beginning __
        if inspect.isfunction(fn) and inspect.getmodule(fn) == library and not fn.__name__.startswith('__'):
            name = fn.__name__.lower()

            # strip preceding _ chars used to avoid conflicts with Java keywords
            if name.startswith('_'):
                name = name[1:]

            self._functions[name] = fn
[ "def", "add_library", "(", "self", ",", "library", ")", ":", "for", "fn", "in", "library", ".", "__dict__", ".", "copy", "(", ")", ".", "values", "(", ")", ":", "# ignore imported methods and anything beginning __", "if", "inspect", ".", "isfunction", "(", "fn", ")", "and", "inspect", ".", "getmodule", "(", "fn", ")", "==", "library", "and", "not", "fn", ".", "__name__", ".", "startswith", "(", "'__'", ")", ":", "name", "=", "fn", ".", "__name__", ".", "lower", "(", ")", "# strip preceding _ chars used to avoid conflicts with Java keywords", "if", "name", ".", "startswith", "(", "'_'", ")", ":", "name", "=", "name", "[", "1", ":", "]", "self", ".", "_functions", "[", "name", "]", "=", "fn" ]
Adds functions from a library module
        :param library: the library module
        :return:
[ "Adds", "functions", "from", "a", "library", "module", ":", "param", "library", ":", "the", "library", "module", ":", "return", ":" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L9-L24
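Illustrative usage sketch for the add_library record above (not part of the dataset; the import path and the my_custom_functions module are assumptions):

from temba_expressions.functions import FunctionManager
import my_custom_functions   # hypothetical module defining e.g. def _sum(a, b): return a + b

manager = FunctionManager()
manager.add_library(my_custom_functions)
# the leading underscore is stripped, so the function above is registered as "sum"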
rapidpro/expressions
python/temba_expressions/functions/__init__.py
FunctionManager.invoke_function
def invoke_function(self, ctx, name, arguments):
    """
    Invokes the given function
    :param ctx: the evaluation context
    :param name: the function name (case insensitive)
    :param arguments: the arguments to be passed to the function
    :return: the function return value
    """
    from temba_expressions import EvaluationError, conversions

    # find function with given name
    func = self.get_function(name)
    if func is None:
        raise EvaluationError("Undefined function: %s" % name)

    args, varargs, defaults = self._get_arg_spec(func)

    call_args = []
    passed_args = list(arguments)

    for arg in args:
        if arg == 'ctx':
            call_args.append(ctx)
        elif passed_args:
            call_args.append(passed_args.pop(0))
        elif arg in defaults:
            call_args.append(defaults[arg])
        else:
            raise EvaluationError("Too few arguments provided for function %s" % name)

    if varargs is not None:
        call_args.extend(passed_args)
        passed_args = []

    # any unused arguments?
    if passed_args:
        raise EvaluationError("Too many arguments provided for function %s" % name)

    try:
        return func(*call_args)
    except Exception as e:
        pretty_args = []
        for arg in arguments:
            if isinstance(arg, str):
                pretty = '"%s"' % arg
            else:
                try:
                    pretty = conversions.to_string(arg, ctx)
                except EvaluationError:
                    pretty = str(arg)
            pretty_args.append(pretty)
        raise EvaluationError("Error calling function %s with arguments %s" % (name, ', '.join(pretty_args)), e)
python
def invoke_function(self, ctx, name, arguments): """ Invokes the given function :param ctx: the evaluation context :param name: the function name (case insensitive) :param arguments: the arguments to be passed to the function :return: the function return value """ from temba_expressions import EvaluationError, conversions # find function with given name func = self.get_function(name) if func is None: raise EvaluationError("Undefined function: %s" % name) args, varargs, defaults = self._get_arg_spec(func) call_args = [] passed_args = list(arguments) for arg in args: if arg == 'ctx': call_args.append(ctx) elif passed_args: call_args.append(passed_args.pop(0)) elif arg in defaults: call_args.append(defaults[arg]) else: raise EvaluationError("Too few arguments provided for function %s" % name) if varargs is not None: call_args.extend(passed_args) passed_args = [] # any unused arguments? if passed_args: raise EvaluationError("Too many arguments provided for function %s" % name) try: return func(*call_args) except Exception as e: pretty_args = [] for arg in arguments: if isinstance(arg, str): pretty = '"%s"' % arg else: try: pretty = conversions.to_string(arg, ctx) except EvaluationError: pretty = str(arg) pretty_args.append(pretty) raise EvaluationError("Error calling function %s with arguments %s" % (name, ', '.join(pretty_args)), e)
[ "def", "invoke_function", "(", "self", ",", "ctx", ",", "name", ",", "arguments", ")", ":", "from", "temba_expressions", "import", "EvaluationError", ",", "conversions", "# find function with given name", "func", "=", "self", ".", "get_function", "(", "name", ")", "if", "func", "is", "None", ":", "raise", "EvaluationError", "(", "\"Undefined function: %s\"", "%", "name", ")", "args", ",", "varargs", ",", "defaults", "=", "self", ".", "_get_arg_spec", "(", "func", ")", "call_args", "=", "[", "]", "passed_args", "=", "list", "(", "arguments", ")", "for", "arg", "in", "args", ":", "if", "arg", "==", "'ctx'", ":", "call_args", ".", "append", "(", "ctx", ")", "elif", "passed_args", ":", "call_args", ".", "append", "(", "passed_args", ".", "pop", "(", "0", ")", ")", "elif", "arg", "in", "defaults", ":", "call_args", ".", "append", "(", "defaults", "[", "arg", "]", ")", "else", ":", "raise", "EvaluationError", "(", "\"Too few arguments provided for function %s\"", "%", "name", ")", "if", "varargs", "is", "not", "None", ":", "call_args", ".", "extend", "(", "passed_args", ")", "passed_args", "=", "[", "]", "# any unused arguments?", "if", "passed_args", ":", "raise", "EvaluationError", "(", "\"Too many arguments provided for function %s\"", "%", "name", ")", "try", ":", "return", "func", "(", "*", "call_args", ")", "except", "Exception", "as", "e", ":", "pretty_args", "=", "[", "]", "for", "arg", "in", "arguments", ":", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "pretty", "=", "'\"%s\"'", "%", "arg", "else", ":", "try", ":", "pretty", "=", "conversions", ".", "to_string", "(", "arg", ",", "ctx", ")", "except", "EvaluationError", ":", "pretty", "=", "str", "(", "arg", ")", "pretty_args", ".", "append", "(", "pretty", ")", "raise", "EvaluationError", "(", "\"Error calling function %s with arguments %s\"", "%", "(", "name", ",", "', '", ".", "join", "(", "pretty_args", ")", ")", ",", "e", ")" ]
Invokes the given function
        :param ctx: the evaluation context
        :param name: the function name (case insensitive)
        :param arguments: the arguments to be passed to the function
        :return: the function return value
[ "Invokes", "the", "given", "function", ":", "param", "ctx", ":", "the", "evaluation", "context", ":", "param", "name", ":", "the", "function", "name", "(", "case", "insensitive", ")", ":", "param", "arguments", ":", "the", "arguments", "to", "be", "passed", "to", "the", "function", ":", "return", ":", "the", "function", "return", "value" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L29-L81
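Illustrative usage sketch for the invoke_function record above (not part of the dataset; the import path and the my_custom_functions module are assumptions, and passing None for ctx only works for registered functions that do not declare a ctx parameter):

from temba_expressions.functions import FunctionManager
import my_custom_functions   # hypothetical module defining e.g. def _title(text): return str(text).title()

manager = FunctionManager()
manager.add_library(my_custom_functions)
# lookup is case insensitive, so "TITLE" and "title" resolve to the same function
result = manager.invoke_function(None, "TITLE", ["hello world"])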
rapidpro/expressions
python/temba_expressions/functions/__init__.py
FunctionManager.build_listing
def build_listing(self):
    """
    Builds a listing of all functions sorted A-Z, with their names and descriptions
    """
    def func_entry(name, func):
        args, varargs, defaults = self._get_arg_spec(func)

        # add regular arguments
        params = [{'name': str(a), 'optional': a in defaults, 'vararg': False} for a in args if a != 'ctx']

        # add possible variable argument
        if varargs:
            params += [{'name': str(varargs), 'optional': False, 'vararg': True}]

        return {'name': str(name.upper()), 'description': str(func.__doc__).strip(), 'params': params}

    listing = [func_entry(f_name, f) for f_name, f in self._functions.items()]

    return sorted(listing, key=lambda l: l['name'])
python
def build_listing(self):
    """
    Builds a listing of all functions sorted A-Z, with their names and descriptions
    """
    def func_entry(name, func):
        args, varargs, defaults = self._get_arg_spec(func)

        # add regular arguments
        params = [{'name': str(a), 'optional': a in defaults, 'vararg': False} for a in args if a != 'ctx']

        # add possible variable argument
        if varargs:
            params += [{'name': str(varargs), 'optional': False, 'vararg': True}]

        return {'name': str(name.upper()), 'description': str(func.__doc__).strip(), 'params': params}

    listing = [func_entry(f_name, f) for f_name, f in self._functions.items()]

    return sorted(listing, key=lambda l: l['name'])
[ "def", "build_listing", "(", "self", ")", ":", "def", "func_entry", "(", "name", ",", "func", ")", ":", "args", ",", "varargs", ",", "defaults", "=", "self", ".", "_get_arg_spec", "(", "func", ")", "# add regular arguments", "params", "=", "[", "{", "'name'", ":", "str", "(", "a", ")", ",", "'optional'", ":", "a", "in", "defaults", ",", "'vararg'", ":", "False", "}", "for", "a", "in", "args", "if", "a", "!=", "'ctx'", "]", "# add possible variable argument", "if", "varargs", ":", "params", "+=", "[", "{", "'name'", ":", "str", "(", "varargs", ")", ",", "'optional'", ":", "False", ",", "'vararg'", ":", "True", "}", "]", "return", "{", "'name'", ":", "str", "(", "name", ".", "upper", "(", ")", ")", ",", "'description'", ":", "str", "(", "func", ".", "__doc__", ")", ".", "strip", "(", ")", ",", "'params'", ":", "params", "}", "listing", "=", "[", "func_entry", "(", "f_name", ",", "f", ")", "for", "f_name", ",", "f", "in", "self", ".", "_functions", ".", "items", "(", ")", "]", "return", "sorted", "(", "listing", ",", "key", "=", "lambda", "l", ":", "l", "[", "'name'", "]", ")" ]
Builds a listing of all functions sorted A-Z, with their names and descriptions
[ "Builds", "a", "listing", "of", "all", "functions", "sorted", "A", "-", "Z", "with", "their", "names", "and", "descriptions" ]
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/__init__.py#L83-L102
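Illustrative usage sketch for the build_listing record above (not part of the dataset; the import path is an assumption):

from temba_expressions.functions import FunctionManager

manager = FunctionManager()
# after libraries have been registered, produce a sorted help listing
for entry in manager.build_listing():
    print(entry['name'], '-', entry['description'])
    for param in entry['params']:
        print('   ', param['name'], '(optional)' if param['optional'] else '')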