Dataset columns (name: type, value-length range):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: sequence, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: sequence, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
Aluriak/tergraw
tergraw/view.py
build
def build(matrix):
    """Yield lines generated from given matrix"""
    max_x = max(matrix, key=lambda t: t[0])[0]
    min_x = min(matrix, key=lambda t: t[0])[0]
    max_y = max(matrix, key=lambda t: t[1])[1]
    min_y = min(matrix, key=lambda t: t[1])[1]
    yield from (
        # '{}:'.format(j).ljust(4) + ''.join(matrix[i, j] for i in range(min_x, max_x+1))
        ''.join(matrix[i, j] for i in range(min_x, max_x+1))
        for j in range(min_y, max_y+1)
    )
python
Yield lines generated from given matrix
[ "Yield", "lines", "generated", "from", "given", "matrix" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/view.py#L19-L29
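A minimal usage sketch for the record above (not part of the dataset): build expects a mapping from (x, y) coordinate tuples to single characters and yields one string per row. The matrix below is invented for illustration, and the snippet runs when pasted after the definition above.

# Hypothetical 3x2 character matrix keyed by (x, y); assumes build() above is in scope.
matrix = {(0, 0): 'a', (1, 0): 'b', (2, 0): 'c',
          (0, 1): 'd', (1, 1): 'e', (2, 1): 'f'}
for line in build(matrix):
    print(line)
# abc
# def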
Aluriak/tergraw
tergraw/view.py
next_unwrittable_on_row
def next_unwrittable_on_row(view, coords):
    """Return position of the next (in row) letter that is unwrittable"""
    x, y = coords
    maxx = max(view.keys(), key=itemgetter(0))[0]
    for offset in range(x + 1, maxx):
        letter = view[offset, y]
        if letter not in REWRITABLE_LETTERS:
            return offset
    return None
python
Return position of the next (in row) letter that is unwrittable
[ "Return", "position", "of", "the", "next", "(", "in", "row", ")", "letter", "that", "is", "unwrittable" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/view.py#L32-L40
Aluriak/tergraw
tergraw/view.py
next_unwrittable_on_col
def next_unwrittable_on_col(view, coords):
    """Return position of the next letter (in column) that is unwrittable"""
    x, y = coords
    maxy = max(view.keys(), key=itemgetter(1))[1]
    for offset in range(y + 1, maxy):
        letter = view[x, offset]
        if letter not in REWRITABLE_LETTERS:
            return offset
    return None
python
Return position of the next letter (in column) that is unwrittable
[ "Return", "position", "of", "the", "next", "letter", "(", "in", "column", ")", "that", "is", "unwrittable" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/view.py#L43-L51
Aluriak/tergraw
tergraw/view.py
previous_unwrittable_on_row
def previous_unwrittable_on_row(view, coords):
    """Return position of the previous (in row) letter that is unwrittable"""
    x, y = coords
    minx = -1
    for offset in range(x - 1, minx, -1):
        letter = view[offset, y]
        if letter not in REWRITABLE_LETTERS:
            return offset
    return None
python
Return position of the previous (in row) letter that is unwrittable
[ "Return", "position", "of", "the", "previous", "(", "in", "row", ")", "letter", "that", "is", "unwrittable" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/view.py#L54-L62
Aluriak/tergraw
tergraw/view.py
previous_unwrittable_on_col
def previous_unwrittable_on_col(view, coords):
    """Return position of the previous (in column) letter that is unwrittable"""
    x, y = coords
    miny = -1
    for offset in range(y - 1, miny, -1):
        letter = view[x, offset]
        if letter not in REWRITABLE_LETTERS:
            return offset
    return None
python
Return position of the previous (in column) letter that is unwrittable
[ "Return", "position", "of", "the", "previous", "(", "in", "column", ")", "letter", "that", "is", "unwrittable" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/view.py#L65-L73
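The four tergraw helpers above share one scanning pattern: walk from the given cell along a row or column and return the coordinate of the first letter that may not be overwritten, or None. Below is a self-contained sketch of that pattern; the value of REWRITABLE_LETTERS is an assumption made for the demo (the real constant lives in tergraw/view.py), and the helper is restated so the snippet runs on its own.

from operator import itemgetter  # also required by the originals above

REWRITABLE_LETTERS = ' '  # assumed: blank cells are the overwritable ones

def next_unwrittable_on_row(view, coords):
    # Standalone copy of the helper above, for demonstration only.
    x, y = coords
    maxx = max(view.keys(), key=itemgetter(0))[0]
    for offset in range(x + 1, maxx):
        letter = view[offset, y]
        if letter not in REWRITABLE_LETTERS:
            return offset
    return None

# One row: 'X' at x=0, a blank, then 'Y' and 'Z'.
view = {(0, 0): 'X', (1, 0): ' ', (2, 0): 'Y', (3, 0): 'Z'}
print(next_unwrittable_on_row(view, (0, 0)))  # 2, the 'Y' (note the cell at maxx is never scanned)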
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient._build_base_url
def _build_base_url(self, host, port):
    """Return the API base URL string based on ``host`` and ``port``.

    It returns a valid URL even when ``host`` isn't one. The trailing
    slash is always removed, so it always needs to be added by the
    consumer.
    """
    parsed = urlparse(host)
    if not parsed.scheme:
        parsed = parsed._replace(scheme="http")
        parsed = parsed._replace(path="")
        netloc, parts = host, host.partition(":")
        if parts[1] == "" and port is not None:
            netloc = "{}:{}".format(parts[0], port)
        parsed = parsed._replace(netloc=netloc)
    parsed = parsed._replace(path=parsed.path.rstrip("/"))
    return parsed.geturl()
python
Return the API base URL string based on ``host`` and ``port``. It returns a valid URL even when ``host`` isn't one. The trailing slash is always removed, so it always needs to be added by the consumer.
[ "Return", "the", "API", "base", "URL", "string", "based", "on", "host", "and", "port", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L86-L101
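To make the URL normalization testable outside the class, here is a standalone restatement of the method above (minus self); treat it as a sketch rather than the package's official API. The nesting mirrors the reconstruction above.

from urllib.parse import urlparse

def build_base_url(host, port):
    # Standalone copy of ArchivesSpaceClient._build_base_url, for demonstration.
    parsed = urlparse(host)
    if not parsed.scheme:
        # A bare "host" ends up in .path, so rebuild the scheme and netloc by hand.
        parsed = parsed._replace(scheme="http")
        parsed = parsed._replace(path="")
        netloc, parts = host, host.partition(":")
        if parts[1] == "" and port is not None:
            netloc = "{}:{}".format(parts[0], port)
        parsed = parsed._replace(netloc=netloc)
    parsed = parsed._replace(path=parsed.path.rstrip("/"))
    return parsed.geturl()

print(build_base_url("localhost", 8089))            # http://localhost:8089
print(build_base_url("http://aspace.test/", None))  # http://aspace.test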
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient._format_notes
def _format_notes(self, record):
    """
    Extracts notes from a record and reformats them in a simplified format.
    """
    notes = []
    for note in record["notes"]:
        if note.get("type"):
            n = {}
            n["type"] = note["type"]
            try:
                if note["jsonmodel_type"] == "note_singlepart":
                    n["content"] = note["content"][0]
                else:
                    n["content"] = note["subnotes"][0]["content"]
            except (IndexError, KeyError):
                n["content"] = ""
            notes.append(n)
    return notes
python
Extracts notes from a record and reformats them in a simplified format.
[ "Extracts", "notes", "from", "a", "record", "and", "reformats", "them", "in", "a", "simplified", "format", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L188-L207
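_format_notes never touches self, so it can be exercised directly with an unbound call. The record below is invented but shaped like the ArchivesSpace note JSON the method expects; the import path is an assumption based on the file path shown in the record.

from agentarchives.archivesspace.client import ArchivesSpaceClient  # assumed import path

record = {"notes": [
    {"type": "odd", "jsonmodel_type": "note_multipart",
     "subnotes": [{"content": "General note text"}]},
    {"type": "accessrestrict", "jsonmodel_type": "note_singlepart",
     "content": ["Restricted until 2030"]},
]}
print(ArchivesSpaceClient._format_notes(None, record))  # self is unused, so None suffices
# [{'type': 'odd', 'content': 'General note text'},
#  {'type': 'accessrestrict', 'content': 'Restricted until 2030'}]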
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient._process_notes
def _process_notes(record, new_record):
    """
    Populate the notes property using the provided new_record.

    If the new_record field was populated, assume that we want to replace the notes.
    If there are valid changes to be made, they will be added to the new_notes list.
    An empty list is counted as a request to delete all notes.

    Returns a boolean indicating whether changes were made.
    """
    if "notes" not in new_record or not new_record["notes"]:
        return False

    # This assumes any notes passed into the edit record are intended to
    # replace the existing set.
    new_notes = []
    for note in new_record["notes"]:
        # Whitelist of supported types of notes to edit
        # A note with an empty string as content is counted as a request to
        # delete the note, and will not be added to the list.
        if note["type"] in ("odd", "accessrestrict") and note.get("content"):
            new_notes.append(
                {
                    "jsonmodel_type": "note_multipart",
                    "publish": True,
                    "subnotes": [
                        {
                            "content": note["content"],
                            "jsonmodel_type": "note_text",
                            "publish": True,
                        }
                    ],
                    "type": note["type"],
                }
            )

    record["notes"] = new_notes
    return True
python
Populate the notes property using the provided new_record. If the new_record field was populated, assume that we want to replace the notes. If there are valid changes to be made, they will be added to the new_notes list. An empty list is counted as a request to delete all notes. Returns a boolean indicating whether changes were made.
[ "Populate", "the", "notes", "property", "using", "the", "provided", "new_record", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L210-L249
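A sketch of the replace-all-notes contract described in the docstring. Both dicts are invented; _process_notes takes no self, so it can be called straight off the class (import path assumed as above).

from agentarchives.archivesspace.client import ArchivesSpaceClient  # assumed import path

record = {"notes": [{"type": "odd", "content": ["old note"]}]}        # server-side record
new_record = {"notes": [{"type": "odd", "content": "Updated note"}]}  # requested edit

changed = ArchivesSpaceClient._process_notes(record, new_record)
print(changed)                                       # True
print(record["notes"][0]["subnotes"][0]["content"])  # Updated note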
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient._escape_solr_query
def _escape_solr_query(query, field="title"):
    """
    Escapes special characters in Solr queries.
    Note that this omits * - this is intentionally permitted in user queries.

    The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters
    """
    # Different rules for "title" and "identifier" fields :/
    if field == "title":
        replacement = r"\\\\\1"
    else:
        replacement = r"\\\1"

    return re.sub(r'([\'" +\-!\(\)\{\}\[\]^"~?:\\/]|&&|\|\|)', replacement, query)
python
Escapes special characters in Solr queries. Note that this omits * - this is intentionally permitted in user queries. The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters
[ "Escapes", "special", "characters", "in", "Solr", "queries", ".", "Note", "that", "this", "omits", "*", "-", "this", "is", "intentionally", "permitted", "in", "user", "queries", ".", "The", "list", "of", "special", "characters", "is", "located", "at", "http", ":", "//", "lucene", ".", "apache", ".", "org", "/", "core", "/", "4_0_0", "/", "queryparser", "/", "org", "/", "apache", "/", "lucene", "/", "queryparser", "/", "classic", "/", "package", "-", "summary", ".", "html#Escaping_Special_Characters" ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L252-L264
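The escaping is easy to misread because the replacement templates themselves contain backslashes, so here is a quick check (import path assumed as above). Under the "title" rules every special character, including the space, receives two backslashes; under the "identifier" rules it receives one.

from agentarchives.archivesspace.client import ArchivesSpaceClient  # assumed import path

print(ArchivesSpaceClient._escape_solr_query('Maps (1900-1950)'))
# Maps\\ \\(1900\\-1950\\)
print(ArchivesSpaceClient._escape_solr_query('F-1', field='identifier'))
# F\-1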
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.resource_type
def resource_type(self, resource_id):
    """
    Given an ID, determines whether a given resource is a resource or a resource_component.

    :param resource_id string: The URI of the resource whose type to determine.
    :raises ArchivesSpaceError: if the resource_id does not appear to be either type.
    """
    match = re.search(
        r"repositories/\d+/(resources|archival_objects)/\d+", resource_id
    )
    if match and match.groups():
        type_ = match.groups()[0]
        return "resource" if type_ == "resources" else "resource_component"
    else:
        raise ArchivesSpaceError(
            "Unable to determine type of provided ID: {}".format(resource_id)
        )
python
Given an ID, determines whether a given resource is a resource or a resource_component. :param resource_id string: The URI of the resource whose type to determine. :raises ArchivesSpaceError: if the resource_id does not appear to be either type.
[ "Given", "an", "ID", "determines", "whether", "a", "given", "resource", "is", "a", "resource", "or", "a", "resource_component", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L266-L282
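resource_type only inspects the URI with a regular expression, so it can be sanity-checked without a server; self is unused, so an unbound call with None works (import path assumed as above).

from agentarchives.archivesspace.client import ArchivesSpaceClient  # assumed import path

print(ArchivesSpaceClient.resource_type(None, "/repositories/2/resources/1"))
# resource
print(ArchivesSpaceClient.resource_type(None, "/repositories/2/archival_objects/3"))
# resource_component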
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.edit_record
def edit_record(self, new_record):
    """
    Update a record in ArchivesSpace using the provided new_record.

    The format of new_record is identical to the format returned by
    get_resource_component_and_children and related methods; consult the
    documentation for that method in ArchivistsToolkitClient to see the
    format. This means it's possible, for example, to request a record,
    modify the returned dict, and pass that dict to this method to update
    the server.

    Currently supported fields are:
        * title
        * level
        * notes
        * start_date
        * end_date
        * date_expression

    :raises ValueError: if the 'id' field isn't specified, or no fields to edit were specified.
    """
    try:
        record_id = new_record["id"]
    except KeyError:
        raise ValueError("No record ID provided!")

    record = self.get_record(record_id)

    # TODO: add more fields?
    field_map = {"title": "title", "level": "levelOfDescription"}

    fields_updated = False
    for field, targetfield in field_map.items():
        try:
            record[targetfield] = new_record[field]
            fields_updated = True
        except KeyError:
            continue

    if self._process_notes(record, new_record):
        fields_updated = True

    # Create dates object if any of the date fields is populated
    if (
        "start_date" in new_record
        or "end_date" in new_record
        or "date_expression" in new_record
    ):
        date = {
            "jsonmodel_type": "date",
            "date_type": "inclusive",
            "label": "creation",
        }
        if "date_expression" in new_record:
            date["expression"] = new_record["date_expression"]
        if "start_date" in new_record:
            date["begin"] = new_record["start_date"]
        if "end_date" in new_record:
            date["end"] = new_record["end_date"]

        if len(record["dates"]) == 0:
            record["dates"] = [date]
        else:
            record["dates"][0] = date

        fields_updated = True

    if not fields_updated:
        raise ValueError("No fields to update specified!")

    self._post(record_id, data=json.dumps(record))
python
Update a record in ArchivesSpace using the provided new_record. The format of new_record is identical to the format returned by get_resource_component_and_children and related methods; consult the documentation for that method in ArchivistsToolkitClient to see the format. This means it's possible, for example, to request a record, modify the returned dict, and pass that dict to this method to update the server. Currently supported fields are: * title * level * notes * start_date * end_date * date_expression :raises ValueError: if the 'id' field isn't specified, or no fields to edit were specified.
[ "Update", "a", "record", "in", "ArchivesSpace", "using", "the", "provided", "new_record", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L287-L352
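A hedged usage sketch of the dict edit_record consumes. Nothing here runs offline: client stands for an already-connected ArchivesSpaceClient, and every field value is invented for illustration.

new_record = {
    "id": "/repositories/2/archival_objects/3",  # required; a missing id raises ValueError
    "title": "Corrected title",                  # copied to record["title"]
    "level": "file",                             # copied to record["levelOfDescription"]
    "notes": [{"type": "odd", "content": "A fresh note"}],  # replaces all existing notes
    "start_date": "1950-01-01",                  # the three date fields build one
    "end_date": "1959-12-31",                    # "inclusive" date record
}
client.edit_record(new_record)  # merges into the fetched record and POSTs it back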
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.get_levels_of_description
def get_levels_of_description(self):
    """Returns an array of all levels of description defined in this ArchivesSpace instance."""
    if not hasattr(self, "levels_of_description"):
        # TODO: * fetch human-formatted strings
        #       * is hardcoding this ID okay?
        self.levels_of_description = self._get("/config/enumerations/32").json()[
            "values"
        ]

    return self.levels_of_description
python
Returns an array of all levels of description defined in this ArchivesSpace instance.
[ "Returns", "an", "array", "of", "all", "levels", "of", "description", "defined", "in", "this", "ArchivesSpace", "instance", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L354-L364
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.get_resource_component_children
def get_resource_component_children(self, resource_component_id):
    """
    Given a resource component, fetches detailed metadata for it and all of its children.

    This is implemented using ArchivesSpaceClient.get_resource_component_and_children and uses its default options when fetching children.

    :param string resource_component_id: The URL of the resource component from which to fetch metadata.
    """
    resource_type = self.resource_type(resource_component_id)
    return self.get_resource_component_and_children(
        resource_component_id, resource_type
    )
python
Given a resource component, fetches detailed metadata for it and all of its children. This is implemented using ArchivesSpaceClient.get_resource_component_and_children and uses its default options when fetching children. :param string resource_component_id: The URL of the resource component from which to fetch metadata.
[ "Given", "a", "resource", "component", "fetches", "detailed", "metadata", "for", "it", "and", "all", "of", "its", "children", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L392-L403
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.get_resource_component_and_children
def get_resource_component_and_children(
    self,
    resource_id,
    resource_type="collection",
    level=1,
    sort_data={},
    recurse_max_level=False,
    sort_by=None,
    **kwargs
):
    """
    Fetch detailed metadata for the specified resource_id and all of its children.

    :param long resource_id: The resource for which to fetch metadata.
    :param str resource_type: no-op; not required or used in this implementation.
    :param int recurse_max_level: The maximum depth level to fetch when fetching children.
        Default is to fetch all of the resource's children, descending as deeply as necessary.
        Pass 1 to fetch no children.
    :param string search_pattern: If specified, limits fetched children to those whose titles or IDs match the provided query.
        See ArchivistsToolkitClient.find_collection_ids for documentation of the query format.

    :return: A dict containing detailed metadata about both the requested resource and its children.
        Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format.
    :rtype dict:
    """
    resource_type = self.resource_type(resource_id)
    if resource_type == "resource":
        return self._get_resources(
            resource_id, recurse_max_level=recurse_max_level, sort_by=sort_by
        )
    else:
        return self._get_components(
            resource_id, recurse_max_level=recurse_max_level, sort_by=sort_by
        )
python
Fetch detailed metadata for the specified resource_id and all of its children. :param long resource_id: The resource for which to fetch metadata. :param str resource_type: no-op; not required or used in this implementation. :param int recurse_max_level: The maximum depth level to fetch when fetching children. Default is to fetch all of the resource's children, descending as deeply as necessary. Pass 1 to fetch no children. :param string search_pattern: If specified, limits fetched children to those whose titles or IDs match the provided query. See ArchivistsToolkitClient.find_collection_ids for documentation of the query format. :return: A dict containing detailed metadata about both the requested resource and its children. Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format. :rtype dict:
[ "Fetch", "detailed", "metadata", "for", "the", "specified", "resource_id", "and", "all", "of", "its", "children", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L529-L562
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.find_parent_id_for_component
def find_parent_id_for_component(self, component_id):
    """
    Given the URL to a component, returns the parent component's URL.

    :param string component_id: The URL of the component.
    :return: A tuple containing:
        * The type of the parent record; valid values are ArchivesSpaceClient.RESOURCE and ArchivesSpaceClient.RESOURCE_COMPONENT.
        * The URL of the parent record.
        If the provided URL fragment references a resource, this method will simply return the same URL.
    :rtype tuple:
    """
    response = self.get_record(component_id)
    if "parent" in response:
        return (ArchivesSpaceClient.RESOURCE_COMPONENT, response["parent"]["ref"])
    # if this is the top archival object, return the resource instead
    elif "resource" in response:
        return (ArchivesSpaceClient.RESOURCE, response["resource"]["ref"])
    # resource was passed in, which has no higher-up record;
    # return the same ID
    else:
        return (ArchivesSpaceClient.RESOURCE, component_id)
python
Given the URL to a component, returns the parent component's URL. :param string component_id: The URL of the component. :return: A tuple containing: * The type of the parent record; valid values are ArchivesSpaceClient.RESOURCE and ArchivesSpaceClient.RESOURCE_COMPONENT. * The URL of the parent record. If the provided URL fragment references a resource, this method will simply return the same URL. :rtype tuple:
[ "Given", "the", "URL", "to", "a", "component", "returns", "the", "parent", "component", "s", "URL", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L575-L595
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.find_collection_ids
def find_collection_ids(self, search_pattern="", identifier="", fetched=0, page=1):
    """
    Fetches a list of resource URLs for every resource in the database.

    :param string search_pattern: A search pattern to use in looking up resources by title or resourceid.
        The search will match any title containing this string;
        for example, "text" will match "this title has this text in it".
        If omitted, then all resources will be fetched.
    :param string identifier: Only records containing this identifier will be returned.
        Substring matching will not be performed; however, wildcards are supported.
        For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc.

    :return: A list containing every matched resource's URL.
    :rtype list:
    """
    params = {"page": page, "q": "primary_type:resource"}

    if search_pattern != "":
        search_pattern = self._escape_solr_query(search_pattern, field="title")
        params["q"] = params["q"] + " AND title:{}".format(search_pattern)

    if identifier != "":
        identifier = self._escape_solr_query(identifier, field="identifier")
        params["q"] = params["q"] + " AND identifier:{}".format(identifier)

    response = self._get(self.repository + "/search", params=params)
    hits = response.json()

    results = [r["uri"] for r in hits["results"]]

    results_so_far = fetched + hits["this_page"]
    if hits["total_hits"] > results_so_far:
        results.extend(
            self.find_collection_ids(fetched=results_so_far, page=page + 1)
        )

    return results
python
Fetches a list of resource URLs for every resource in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: A list containing every matched resource's URL. :rtype list:
[ "Fetches", "a", "list", "of", "resource", "URLs", "for", "every", "resource", "in", "the", "database", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L597-L632
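A usage sketch (requires a live ArchivesSpace instance; client and the returned URIs are illustrative). Note that the recursive call for later pages passes only fetched and page, so as written the title and identifier filters appear to apply to the first page's query only; worth verifying against the upstream source before relying on it for large filtered result sets.

ids = client.find_collection_ids(identifier="F*")  # wildcards are allowed, per the docstring
# e.g. ['/repositories/2/resources/1', '/repositories/2/resources/7']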
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.find_collections
def find_collections(
    self,
    search_pattern="",
    identifier="",
    fetched=0,
    page=1,
    page_size=30,
    sort_by=None,
):
    """
    Fetches a list of all resource IDs for every resource in the database.

    :param string search_pattern: A search pattern to use in looking up resources by title or resourceid.
        The search will match any title or resourceid containing this string;
        for example, "text" will match "this title has this text in it".
        If omitted, then all resources will be fetched.
    :param string identifier: Restrict records to only those with this identifier.
        This refers to the human-assigned record identifier, not the automatically generated internal ID.
        This value can contain wildcards.

    :return: A list containing every matched resource's ID.
    :rtype: list
    """

    def format_record(record):
        dates = self._fetch_dates_from_record(record)
        date_expression = self._fetch_date_expression_from_record(record)

        identifier = (
            record["id_0"] if "id_0" in record else record.get("component_id", "")
        )

        has_children = (
            len(
                self._get(record["uri"] + "/tree", params={"page": 1}).json()[
                    "children"
                ]
            )
            > 0
        )

        return {
            "id": record["uri"],
            "type": "resource",
            "sortPosition": 1,
            "identifier": identifier,
            "title": record.get("title", ""),
            "dates": dates,
            "date_expression": date_expression,
            "levelOfDescription": record["level"],
            "children": [] if has_children else False,
            "has_children": has_children,
            "notes": self._format_notes(record),
        }

    params = {"page": page, "page_size": page_size, "q": "primary_type:resource"}

    if search_pattern != "":
        search_pattern = self._escape_solr_query(search_pattern, field="title")
        params["q"] = params["q"] + " AND title:{}".format(search_pattern)

    if identifier != "":
        identifier = self._escape_solr_query(identifier, field="identifier")
        params["q"] = params["q"] + " AND identifier:{}".format(identifier)

    if sort_by is not None:
        params["sort"] = "title_sort " + sort_by

    response = self._get(self.repository + "/search", params=params)
    hits = response.json()

    return [format_record(json.loads(r["json"])) for r in hits["results"]]
python
Fetches a list of all resource IDs for every resource in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title or resourceid containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Restrict records to only those with this identifier. This refers to the human-assigned record identifier, not the automatically generated internal ID. This value can contain wildcards. :return: A list containing every matched resource's ID. :rtype: list
[ "Fetches", "a", "list", "of", "all", "resource", "IDs", "for", "every", "resource", "in", "the", "database", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L647-L716
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.find_by_id
def find_by_id(self, object_type, field, value):
    """
    Find resource by a specific ID.

    Results are a dict in the format:
    {
        'id': <resource URI fragment>,
        'identifier': <resource identifier>,
        'title': <title of the resource>,
        'levelOfDescription': <level of description>,
    }

    :param str object_type: One of 'digital_object_components' or 'archival_objects'
    :param str field: Name of the field to search. One of 'component_id' or 'ref_id'.
    :param value: Value of the field to search for
    :return: List of dicts containing results.
    """

    def format_record(record):
        resolved = record["_resolved"]
        identifier = (
            resolved["ref_id"]
            if "ref_id" in resolved
            else resolved.get("component_id", "")
        )
        return {
            "id": record["ref"],
            "type": self.resource_type(record["ref"]),
            "identifier": identifier,
            "title": resolved.get("title", ""),
            "levelOfDescription": resolved.get("level", ""),
            "fullrecord": resolved,
        }

    if object_type not in ("digital_object_components", "archival_objects"):
        raise ValueError(
            "object_type must be 'digital_object_components' or 'archival_objects'"
        )
    if field not in ("ref_id", "component_id"):
        raise ValueError("field must be 'component_id' or 'ref_id'")

    params = {field + "[]": value, "resolve[]": object_type}
    url = self.repository + "/find_by_id/" + object_type
    response = self._get(url, params=params)
    hits = response.json()

    return [format_record(r) for r in hits[object_type]]
python
Find resource by a specific ID. Results are a dict in the format: { 'id': <resource URI fragment>, 'identifier': <resource identifier>, 'title': <title of the resource>, 'levelOfDescription': <level of description>, } :param str object_type: One of 'digital_object_components' or 'archival_objects' :param str field: Name of the field to search. One of 'component_id' or 'ref_id'. :param value: Value of the field to search for :return: List of dicts containing results.
[ "Find", "resource", "by", "a", "specific", "ID", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L718-L764
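A minimal usage sketch for the find_by_id record above. The import path, the client construction, and the ref_id value are illustrative assumptions (these records only show the method itself); the call matches the documented signature.

# Hypothetical client setup; the constructor arguments are placeholders,
# not values taken from these records.
from agentarchives.archivesspace import ArchivesSpaceClient

client = ArchivesSpaceClient("http://localhost", "admin", "admin", 8089, 2)

# Look up an archival object by an invented EAD ref_id and print the
# formatted hits produced by format_record.
for hit in client.find_by_id("archival_objects", "ref_id", "e61c96b8469a0f70c5bd7a40265f9312"):
    print(hit["id"], hit["title"], hit["levelOfDescription"])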
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.add_digital_object
def add_digital_object( self, parent_archival_object, identifier, title=None, uri=None, location_of_originals=None, object_type="text", xlink_show="embed", xlink_actuate="onLoad", restricted=False, use_statement="", use_conditions=None, access_conditions=None, size=None, format_name=None, format_version=None, inherit_dates=False, inherit_notes=False, ): """ Creates a new digital object. :param string parent_archival_object: The archival object to which the newly-created digital object will be parented. :param string identifier: A unique identifier for the digital object, in any format. :param string title: The title of the digital object. :param string uri: The URI to an instantiation of the digital object. :param string location_of_originals: If provided, will create an `originalsloc` (location of originals) note in the digital object using this text. :param string object_type: The type of the digital object. Defaults to "text". :param string xlink_show: Controls how the file will be displayed. For supported values, see: http://www.w3.org/TR/xlink/#link-behaviors :param string xlink_actuate: :param string use_statement: :param string use_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing use" note in the digital object. :param string access_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing access" note in the digital object. :param int size: Size in bytes of the digital object :param str format_name: Name of the digital object's format :param str format_version: Name of the digital object's format version :param bool inherit_dates: Inherit dates :param bool inherit_notes: Inherit parent notes """ parent_record = self.get_record(parent_archival_object) repository = parent_record["repository"]["ref"] language = parent_record.get("language", "") if not title: filename = os.path.basename(uri) if uri is not None else "Untitled" title = parent_record.get("display_string", filename) new_object = { "title": title, "digital_object_id": identifier, "digital_object_type": object_type, "language": language, "notes": [], "restrictions": restricted, "subjects": parent_record["subjects"], "linked_agents": parent_record["linked_agents"], } if inherit_dates: new_object["dates"] = parent_record["dates"] if location_of_originals is not None: new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": "originalsloc", "content": [location_of_originals], "publish": False, } ) if uri is not None: new_object["file_versions"] = [ { "file_uri": uri, "use_statement": use_statement, "xlink_show_attribute": xlink_show, "xlink_actuate_attribute": xlink_actuate, } ] note_digital_object_type = [ "summary", "bioghist", "accessrestrict", "userestrict", "custodhist", "dimensions", "edition", "extent", "altformavail", "originalsloc", "note", "acqinfo", "inscription", "langmaterial", "legalstatus", "physdesc", "prefercite", "processinfo", "relatedmaterial", ] if inherit_notes: for pnote in parent_record["notes"]: if pnote["type"] in note_digital_object_type: dnote = pnote["type"] else: dnote = "note" if "subnotes" in pnote: content = [] for subnote in pnote["subnotes"]: if "content" in subnote: content.append(subnote["content"]) else: LOGGER.info( "No content field in %s, skipping adding to child digital object.", subnote, ) else: content = pnote.get("content", "") new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": dnote, "label": pnote.get("label", ""), "content": content, "publish": pnote["publish"], } ) if use_conditions: new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": "userestrict", "content": [use_conditions], "publish": True, } ) if access_conditions: new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": "accessrestrict", "content": [access_conditions], "publish": True, } ) if restricted: new_object["file_versions"][0]["publish"] = False new_object["publish"] = False if size: new_object["file_versions"][0]["file_size_bytes"] = size if format_name: new_object["file_versions"][0]["file_format_name"] = format_name if format_version: new_object["file_versions"][0]["file_format_version"] = format_version new_object_uri = self._post( repository + "/digital_objects", data=json.dumps(new_object) ).json()["uri"] # Now we need to update the parent object with a link to this instance parent_record["instances"].append( { "instance_type": "digital_object", "digital_object": {"ref": new_object_uri}, } ) self._post(parent_archival_object, data=json.dumps(parent_record)) new_object["id"] = new_object_uri return new_object
python
def add_digital_object( self, parent_archival_object, identifier, title=None, uri=None, location_of_originals=None, object_type="text", xlink_show="embed", xlink_actuate="onLoad", restricted=False, use_statement="", use_conditions=None, access_conditions=None, size=None, format_name=None, format_version=None, inherit_dates=False, inherit_notes=False, ): """ Creates a new digital object. :param string parent_archival_object: The archival object to which the newly-created digital object will be parented. :param string identifier: A unique identifier for the digital object, in any format. :param string title: The title of the digital object. :param string uri: The URI to an instantiation of the digital object. :param string location_of_originals: If provided, will create an `originalsloc` (location of originals) note in the digital object using this text. :param string object_type: The type of the digital object. Defaults to "text". :param string xlink_show: Controls how the file will be displayed. For supported values, see: http://www.w3.org/TR/xlink/#link-behaviors :param string xlink_actuate: :param string use_statement: :param string use_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing use" note in the digital object. :param string access_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing access" note in the digital object. :param int size: Size in bytes of the digital object :param str format_name: Name of the digital object's format :param str format_version: Name of the digital object's format version :param bool inherit_dates: Inherit dates :param bool inherit_notes: Inherit parent notes """ parent_record = self.get_record(parent_archival_object) repository = parent_record["repository"]["ref"] language = parent_record.get("language", "") if not title: filename = os.path.basename(uri) if uri is not None else "Untitled" title = parent_record.get("display_string", filename) new_object = { "title": title, "digital_object_id": identifier, "digital_object_type": object_type, "language": language, "notes": [], "restrictions": restricted, "subjects": parent_record["subjects"], "linked_agents": parent_record["linked_agents"], } if inherit_dates: new_object["dates"] = parent_record["dates"] if location_of_originals is not None: new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": "originalsloc", "content": [location_of_originals], "publish": False, } ) if uri is not None: new_object["file_versions"] = [ { "file_uri": uri, "use_statement": use_statement, "xlink_show_attribute": xlink_show, "xlink_actuate_attribute": xlink_actuate, } ] note_digital_object_type = [ "summary", "bioghist", "accessrestrict", "userestrict", "custodhist", "dimensions", "edition", "extent", "altformavail", "originalsloc", "note", "acqinfo", "inscription", "langmaterial", "legalstatus", "physdesc", "prefercite", "processinfo", "relatedmaterial", ] if inherit_notes: for pnote in parent_record["notes"]: if pnote["type"] in note_digital_object_type: dnote = pnote["type"] else: dnote = "note" if "subnotes" in pnote: content = [] for subnote in pnote["subnotes"]: if "content" in subnote: content.append(subnote["content"]) else: LOGGER.info( "No content field in %s, skipping adding to child digital object.", subnote, ) else: content = pnote.get("content", "") new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": dnote, "label": pnote.get("label", ""), "content": content, "publish": pnote["publish"], } ) if use_conditions: new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": "userestrict", "content": [use_conditions], "publish": True, } ) if access_conditions: new_object["notes"].append( { "jsonmodel_type": "note_digital_object", "type": "accessrestrict", "content": [access_conditions], "publish": True, } ) if restricted: new_object["file_versions"][0]["publish"] = False new_object["publish"] = False if size: new_object["file_versions"][0]["file_size_bytes"] = size if format_name: new_object["file_versions"][0]["file_format_name"] = format_name if format_version: new_object["file_versions"][0]["file_format_version"] = format_version new_object_uri = self._post( repository + "/digital_objects", data=json.dumps(new_object) ).json()["uri"] # Now we need to update the parent object with a link to this instance parent_record["instances"].append( { "instance_type": "digital_object", "digital_object": {"ref": new_object_uri}, } ) self._post(parent_archival_object, data=json.dumps(parent_record)) new_object["id"] = new_object_uri return new_object
[ "def", "add_digital_object", "(", "self", ",", "parent_archival_object", ",", "identifier", ",", "title", "=", "None", ",", "uri", "=", "None", ",", "location_of_originals", "=", "None", ",", "object_type", "=", "\"text\"", ",", "xlink_show", "=", "\"embed\"", ",", "xlink_actuate", "=", "\"onLoad\"", ",", "restricted", "=", "False", ",", "use_statement", "=", "\"\"", ",", "use_conditions", "=", "None", ",", "access_conditions", "=", "None", ",", "size", "=", "None", ",", "format_name", "=", "None", ",", "format_version", "=", "None", ",", "inherit_dates", "=", "False", ",", "inherit_notes", "=", "False", ",", ")", ":", "parent_record", "=", "self", ".", "get_record", "(", "parent_archival_object", ")", "repository", "=", "parent_record", "[", "\"repository\"", "]", "[", "\"ref\"", "]", "language", "=", "parent_record", ".", "get", "(", "\"language\"", ",", "\"\"", ")", "if", "not", "title", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "uri", ")", "if", "uri", "is", "not", "None", "else", "\"Untitled\"", "title", "=", "parent_record", ".", "get", "(", "\"display_string\"", ",", "filename", ")", "new_object", "=", "{", "\"title\"", ":", "title", ",", "\"digital_object_id\"", ":", "identifier", ",", "\"digital_object_type\"", ":", "object_type", ",", "\"language\"", ":", "language", ",", "\"notes\"", ":", "[", "]", ",", "\"restrictions\"", ":", "restricted", ",", "\"subjects\"", ":", "parent_record", "[", "\"subjects\"", "]", ",", "\"linked_agents\"", ":", "parent_record", "[", "\"linked_agents\"", "]", ",", "}", "if", "inherit_dates", ":", "new_object", "[", "\"dates\"", "]", "=", "parent_record", "[", "\"dates\"", "]", "if", "location_of_originals", "is", "not", "None", ":", "new_object", "[", "\"notes\"", "]", ".", "append", "(", "{", "\"jsonmodel_type\"", ":", "\"note_digital_object\"", ",", "\"type\"", ":", "\"originalsloc\"", ",", "\"content\"", ":", "[", "location_of_originals", "]", ",", "\"publish\"", ":", "False", ",", "}", ")", "if", "uri", "is", "not", "None", ":", "new_object", "[", "\"file_versions\"", "]", "=", "[", "{", "\"file_uri\"", ":", "uri", ",", "\"use_statement\"", ":", "use_statement", ",", "\"xlink_show_attribute\"", ":", "xlink_show", ",", "\"xlink_actuate_attribute\"", ":", "xlink_actuate", ",", "}", "]", "note_digital_object_type", "=", "[", "\"summary\"", ",", "\"bioghist\"", ",", "\"accessrestrict\"", ",", "\"userestrict\"", ",", "\"custodhist\"", ",", "\"dimensions\"", ",", "\"edition\"", ",", "\"extent\"", ",", "\"altformavail\"", ",", "\"originalsloc\"", ",", "\"note\"", ",", "\"acqinfo\"", ",", "\"inscription\"", ",", "\"langmaterial\"", ",", "\"legalstatus\"", ",", "\"physdesc\"", ",", "\"prefercite\"", ",", "\"processinfo\"", ",", "\"relatedmaterial\"", ",", "]", "if", "inherit_notes", ":", "for", "pnote", "in", "parent_record", "[", "\"notes\"", "]", ":", "if", "pnote", "[", "\"type\"", "]", "in", "note_digital_object_type", ":", "dnote", "=", "pnote", "[", "\"type\"", "]", "else", ":", "dnote", "=", "\"note\"", "if", "\"subnotes\"", "in", "pnote", ":", "content", "=", "[", "]", "for", "subnote", "in", "pnote", "[", "\"subnotes\"", "]", ":", "if", "\"content\"", "in", "subnote", ":", "content", ".", "append", "(", "subnote", "[", "\"content\"", "]", ")", "else", ":", "LOGGER", ".", "info", "(", "\"No content field in %s, skipping adding to child digital object.\"", ",", "subnote", ",", ")", "else", ":", "content", "=", "pnote", ".", "get", "(", "\"content\"", ",", "\"\"", ")", "new_object", "[", "\"notes\"", "]", ".", "append", "(", "{", "\"jsonmodel_type\"", ":", "\"note_digital_object\"", ",", "\"type\"", ":", "dnote", ",", "\"label\"", ":", "pnote", ".", "get", "(", "\"label\"", ",", "\"\"", ")", ",", "\"content\"", ":", "content", ",", "\"publish\"", ":", "pnote", "[", "\"publish\"", "]", ",", "}", ")", "if", "use_conditions", ":", "new_object", "[", "\"notes\"", "]", ".", "append", "(", "{", "\"jsonmodel_type\"", ":", "\"note_digital_object\"", ",", "\"type\"", ":", "\"userestrict\"", ",", "\"content\"", ":", "[", "use_conditions", "]", ",", "\"publish\"", ":", "True", ",", "}", ")", "if", "access_conditions", ":", "new_object", "[", "\"notes\"", "]", ".", "append", "(", "{", "\"jsonmodel_type\"", ":", "\"note_digital_object\"", ",", "\"type\"", ":", "\"accessrestrict\"", ",", "\"content\"", ":", "[", "access_conditions", "]", ",", "\"publish\"", ":", "True", ",", "}", ")", "if", "restricted", ":", "new_object", "[", "\"file_versions\"", "]", "[", "0", "]", "[", "\"publish\"", "]", "=", "False", "new_object", "[", "\"publish\"", "]", "=", "False", "if", "size", ":", "new_object", "[", "\"file_versions\"", "]", "[", "0", "]", "[", "\"file_size_bytes\"", "]", "=", "size", "if", "format_name", ":", "new_object", "[", "\"file_versions\"", "]", "[", "0", "]", "[", "\"file_format_name\"", "]", "=", "format_name", "if", "format_version", ":", "new_object", "[", "\"file_versions\"", "]", "[", "0", "]", "[", "\"file_format_version\"", "]", "=", "format_version", "new_object_uri", "=", "self", ".", "_post", "(", "repository", "+", "\"/digital_objects\"", ",", "data", "=", "json", ".", "dumps", "(", "new_object", ")", ")", ".", "json", "(", ")", "[", "\"uri\"", "]", "# Now we need to update the parent object with a link to this instance", "parent_record", "[", "\"instances\"", "]", ".", "append", "(", "{", "\"instance_type\"", ":", "\"digital_object\"", ",", "\"digital_object\"", ":", "{", "\"ref\"", ":", "new_object_uri", "}", ",", "}", ")", "self", ".", "_post", "(", "parent_archival_object", ",", "data", "=", "json", ".", "dumps", "(", "parent_record", ")", ")", "new_object", "[", "\"id\"", "]", "=", "new_object_uri", "return", "new_object" ]
Creates a new digital object. :param string parent_archival_object: The archival object to which the newly-created digital object will be parented. :param string identifier: A unique identifier for the digital object, in any format. :param string title: The title of the digital object. :param string uri: The URI to an instantiation of the digital object. :param string location_of_originals: If provided, will create an `originalsloc` (location of originals) note in the digital object using this text. :param string object_type: The type of the digital object. Defaults to "text". :param string xlink_show: Controls how the file will be displayed. For supported values, see: http://www.w3.org/TR/xlink/#link-behaviors :param string xlink_actuate: :param string use_statement: :param string use_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing use" note in the digital object. :param string access_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing access" note in the digital object. :param int size: Size in bytes of the digital object :param str format_name: Name of the digital object's format :param str format_version: Name of the digital object's format version :param bool inherit_dates: Inherit dates :param bool inherit_notes: Inherit parent notes
[ "Creates", "a", "new", "digital", "object", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L785-L965
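A hedged sketch of calling add_digital_object from the record above; `client` is the hypothetical logged-in ArchivesSpaceClient from the earlier sketch, and the parent URI, identifier, and file details are placeholders.

# Attach a digital object to an existing archival object; only keyword
# arguments documented in the record above are used.
new_do = client.add_digital_object(
    "/repositories/2/archival_objects/752250",  # parent URI (placeholder)
    identifier="38c99e89-20a1-4831-ba57-813fdf02b919",
    uri="http://example.com/objects/1/datastreams/OBJ",
    size=2461440,
    format_name="JPEG",
    format_version="1.02",
)
print(new_do["id"])  # URI of the newly created digital object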
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
ArchivesSpaceClient.add_child
def add_child( self, parent, title="", level="", start_date="", end_date="", date_expression="", notes=[], ): """ Adds a new resource component parented within `parent`. :param str parent: The ID to a resource or a resource component. :param str title: A title for the record. :param str level: The level of description. :return: The ID of the newly-created record. """ parent_record = self.get_record(parent) record_type = self.resource_type(parent) repository = parent_record["repository"]["ref"] if record_type == "resource": resource = parent else: resource = parent_record["resource"]["ref"] new_object = { "title": title, "level": level, "jsonmodel_type": "archival_object", "resource": {"ref": resource}, } # Create dates object if any of the date fields is populated if date_expression or start_date or end_date: date = { "jsonmodel_type": "date", "date_type": "inclusive", "label": "creation", } if date_expression: date["expression"] = date_expression if start_date: date["begin"] = start_date if end_date: date["end"] = end_date new_object["dates"] = [date] new_object["notes"] = [] for note in notes: note_type = note.get("type", "odd") # If there is a note, but it's an empty string, skip this; # ArchivesSpace doesn't allow subnote content to be empty. content = note.get("content") if not content: continue new_note = { "jsonmodel_type": "note_multipart", "publish": True, "subnotes": [ {"content": content, "jsonmodel_type": "note_text", "publish": True} ], "type": note_type, } new_object["notes"].append(new_note) # "parent" always refers to an archival_object instance; if this is rooted # directly to a resource, leave it out. if record_type == "resource_component": new_object["parent"] = {"ref": parent} return self._post( repository + "/archival_objects", data=json.dumps(new_object) ).json()["uri"]
python
def add_child( self, parent, title="", level="", start_date="", end_date="", date_expression="", notes=[], ): """ Adds a new resource component parented within `parent`. :param str parent: The ID to a resource or a resource component. :param str title: A title for the record. :param str level: The level of description. :return: The ID of the newly-created record. """ parent_record = self.get_record(parent) record_type = self.resource_type(parent) repository = parent_record["repository"]["ref"] if record_type == "resource": resource = parent else: resource = parent_record["resource"]["ref"] new_object = { "title": title, "level": level, "jsonmodel_type": "archival_object", "resource": {"ref": resource}, } # Create dates object if any of the date fields is populated if date_expression or start_date or end_date: date = { "jsonmodel_type": "date", "date_type": "inclusive", "label": "creation", } if date_expression: date["expression"] = date_expression if start_date: date["begin"] = start_date if end_date: date["end"] = end_date new_object["dates"] = [date] new_object["notes"] = [] for note in notes: note_type = note.get("type", "odd") # If there is a note, but it's an empty string, skip this; # ArchivesSpace doesn't allow subnote content to be empty. content = note.get("content") if not content: continue new_note = { "jsonmodel_type": "note_multipart", "publish": True, "subnotes": [ {"content": content, "jsonmodel_type": "note_text", "publish": True} ], "type": note_type, } new_object["notes"].append(new_note) # "parent" always refers to an archival_object instance; if this is rooted # directly to a resource, leave it out. if record_type == "resource_component": new_object["parent"] = {"ref": parent} return self._post( repository + "/archival_objects", data=json.dumps(new_object) ).json()["uri"]
[ "def", "add_child", "(", "self", ",", "parent", ",", "title", "=", "\"\"", ",", "level", "=", "\"\"", ",", "start_date", "=", "\"\"", ",", "end_date", "=", "\"\"", ",", "date_expression", "=", "\"\"", ",", "notes", "=", "[", "]", ",", ")", ":", "parent_record", "=", "self", ".", "get_record", "(", "parent", ")", "record_type", "=", "self", ".", "resource_type", "(", "parent", ")", "repository", "=", "parent_record", "[", "\"repository\"", "]", "[", "\"ref\"", "]", "if", "record_type", "==", "\"resource\"", ":", "resource", "=", "parent", "else", ":", "resource", "=", "parent_record", "[", "\"resource\"", "]", "[", "\"ref\"", "]", "new_object", "=", "{", "\"title\"", ":", "title", ",", "\"level\"", ":", "level", ",", "\"jsonmodel_type\"", ":", "\"archival_object\"", ",", "\"resource\"", ":", "{", "\"ref\"", ":", "resource", "}", ",", "}", "# Create dates object if any of the date fields is populated", "if", "date_expression", "or", "start_date", "or", "end_date", ":", "date", "=", "{", "\"jsonmodel_type\"", ":", "\"date\"", ",", "\"date_type\"", ":", "\"inclusive\"", ",", "\"label\"", ":", "\"creation\"", ",", "}", "if", "date_expression", ":", "date", "[", "\"expression\"", "]", "=", "date_expression", "if", "start_date", ":", "date", "[", "\"begin\"", "]", "=", "start_date", "if", "end_date", ":", "date", "[", "\"end\"", "]", "=", "end_date", "new_object", "[", "\"dates\"", "]", "=", "[", "date", "]", "new_object", "[", "\"notes\"", "]", "=", "[", "]", "for", "note", "in", "notes", ":", "note_type", "=", "note", ".", "get", "(", "\"type\"", ",", "\"odd\"", ")", "# If there is a note, but it's an empty string, skip this;", "# ArchivesSpace doesn't allow subnote content to be empty.", "content", "=", "note", ".", "get", "(", "\"content\"", ")", "if", "not", "content", ":", "continue", "new_note", "=", "{", "\"jsonmodel_type\"", ":", "\"note_multipart\"", ",", "\"publish\"", ":", "True", ",", "\"subnotes\"", ":", "[", "{", "\"content\"", ":", "content", ",", "\"jsonmodel_type\"", ":", "\"note_text\"", ",", "\"publish\"", ":", "True", "}", "]", ",", "\"type\"", ":", "note_type", ",", "}", "new_object", "[", "\"notes\"", "]", ".", "append", "(", "new_note", ")", "# \"parent\" always refers to an archival_object instance; if this is rooted", "# directly to a resource, leave it out.", "if", "record_type", "==", "\"resource_component\"", ":", "new_object", "[", "\"parent\"", "]", "=", "{", "\"ref\"", ":", "parent", "}", "return", "self", ".", "_post", "(", "repository", "+", "\"/archival_objects\"", ",", "data", "=", "json", ".", "dumps", "(", "new_object", ")", ")", ".", "json", "(", ")", "[", "\"uri\"", "]" ]
Adds a new resource component parented within `parent`. :param str parent: The ID to a resource or a resource component. :param str title: A title for the record. :param str level: The level of description. :return: The ID of the newly-created record.
[ "Adds", "a", "new", "resource", "component", "parented", "within", "parent", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L995-L1071
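A short hypothetical call for the add_child record above, reusing the assumed `client`; the parent URI and field values are placeholders.

# Create a new series under a resource; a note with empty content would be
# skipped by the method, so a non-empty string is supplied.
child_uri = client.add_child(
    "/repositories/2/resources/1",  # parent resource (placeholder)
    title="Correspondence",
    level="series",
    start_date="1923",
    end_date="1930",
    notes=[{"type": "odd", "content": "Transferred from the registry in 1964."}],
)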
kevinconway/iface
iface/decorators.py
attribute
def attribute(func): """Wrap a function as an attribute.""" attr = abc.abstractmethod(func) attr.__iattribute__ = True attr = _property(attr) return attr
python
def attribute(func): """Wrap a function as an attribute.""" attr = abc.abstractmethod(func) attr.__iattribute__ = True attr = _property(attr) return attr
[ "def", "attribute", "(", "func", ")", ":", "attr", "=", "abc", ".", "abstractmethod", "(", "func", ")", "attr", ".", "__iattribute__", "=", "True", "attr", "=", "_property", "(", "attr", ")", "return", "attr" ]
Wrap a function as an attribute.
[ "Wrap", "a", "function", "as", "an", "attribute", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/decorators.py#L18-L23
kevinconway/iface
iface/decorators.py
property
def property(func): """Wrap a function as a property. This differs from attribute by identifying properties explicitly listed in the class definition rather than named attributes defined on instances of a class at init time. """ attr = abc.abstractmethod(func) attr.__iproperty__ = True attr = Property(attr) return attr
python
def property(func): """Wrap a function as a property. This differs from attribute by identifying properties explicitly listed in the class definition rather than named attributes defined on instances of a class at init time. """ attr = abc.abstractmethod(func) attr.__iproperty__ = True attr = Property(attr) return attr
[ "def", "property", "(", "func", ")", ":", "attr", "=", "abc", ".", "abstractmethod", "(", "func", ")", "attr", ".", "__iproperty__", "=", "True", "attr", "=", "Property", "(", "attr", ")", "return", "attr" ]
Wrap a function as a property. This differs from attribute by identifying properties explicitly listed in the class definition rather than named attributes defined on instances of a class at init time.
[ "Wrap", "a", "function", "as", "a", "property", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/decorators.py#L26-L36
kevinconway/iface
iface/decorators.py
classattribute
def classattribute(func): """Wrap a function as a class attribute. This differs from attribute by identifying attributes explicitly listed in a class definition rather than those only defined on instances of a class. """ attr = abc.abstractmethod(func) attr.__iclassattribute__ = True attr = _property(attr) return attr
python
def classattribute(func): """Wrap a function as a class attribute. This differs from attribute by identifying attributes explicitly listed in a class definition rather than those only defined on instances of a class. """ attr = abc.abstractmethod(func) attr.__iclassattribute__ = True attr = _property(attr) return attr
[ "def", "classattribute", "(", "func", ")", ":", "attr", "=", "abc", ".", "abstractmethod", "(", "func", ")", "attr", ".", "__iclassattribute__", "=", "True", "attr", "=", "_property", "(", "attr", ")", "return", "attr" ]
Wrap a function as a class attribute. This differs from attribute by identifying attributes explicitly listed in a class definition rather than those only defined on instances of a class.
[ "Wrap", "a", "function", "as", "a", "class", "attribute", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/decorators.py#L39-L49
kevinconway/iface
iface/decorators.py
method
def method(func): """Wrap a function as a method.""" attr = abc.abstractmethod(func) attr.__imethod__ = True return attr
python
def method(func): """Wrap a function as a method.""" attr = abc.abstractmethod(func) attr.__imethod__ = True return attr
[ "def", "method", "(", "func", ")", ":", "attr", "=", "abc", ".", "abstractmethod", "(", "func", ")", "attr", ".", "__imethod__", "=", "True", "return", "attr" ]
Wrap a function as a method.
[ "Wrap", "a", "function", "as", "a", "method", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/decorators.py#L52-L56
kevinconway/iface
iface/decorators.py
classmethod
def classmethod(func): """Wrap a function as a classmethod. This applies the classmethod decorator. """ attr = abc.abstractmethod(func) attr.__iclassmethod__ = True attr = _classmethod(attr) return attr
python
def classmethod(func): """Wrap a function as a classmethod. This applies the classmethod decorator. """ attr = abc.abstractmethod(func) attr.__iclassmethod__ = True attr = _classmethod(attr) return attr
[ "def", "classmethod", "(", "func", ")", ":", "attr", "=", "abc", ".", "abstractmethod", "(", "func", ")", "attr", ".", "__iclassmethod__", "=", "True", "attr", "=", "_classmethod", "(", "attr", ")", "return", "attr" ]
Wrap a function as a classmethod. This applies the classmethod decorator.
[ "Wrap", "a", "function", "as", "a", "classmethod", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/decorators.py#L59-L67
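Taken together, the five decorator records above (attribute, property, classattribute, method, classmethod) mark the abstract members of an interface definition. Below is a hedged sketch of combining them; the `ibc` module providing the Iface base is inferred from the checks code later in this file and is an assumption, not something these records show.

from iface import decorators, ibc  # ibc.Iface is inferred from checks.py below

class ILogger(ibc.Iface):

    @decorators.attribute
    def level(self):
        """Expected as a named attribute set on instances at init time."""

    @decorators.property
    def name(self):
        """Expected as a property in the implementing class body."""

    @decorators.classattribute
    def default_level(self):
        """Expected as an attribute in the implementing class body."""

    @decorators.method
    def log(self, message):
        """Expected as a plain method."""

    @decorators.classmethod
    def from_config(cls, config):
        """Expected as a classmethod."""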
guillermo-carrasco/bcbio-nextgen-monitor
bcbio_monitor/config/__init__.py
parse_config
def parse_config(config_file): """Parse a YAML configuration file""" try: with open(config_file, 'r') as f: return yaml.load(f) except IOError: print("Configuration file {} not found or not readable.".format(config_file)) raise
python
def parse_config(config_file): """Parse a YAML configuration file""" try: with open(config_file, 'r') as f: return yaml.load(f) except IOError: print("Configuration file {} not found or not readable.".format(config_file)) raise
[ "def", "parse_config", "(", "config_file", ")", ":", "try", ":", "with", "open", "(", "config_file", ",", "'r'", ")", "as", "f", ":", "return", "yaml", ".", "load", "(", "f", ")", "except", "IOError", ":", "print", "(", "\"Configuration file {} not found or not readable.\"", ".", "format", "(", "config_file", ")", ")", "raise" ]
Parse a YAML configuration file
[ "Parse", "a", "YAML", "configuration", "file" ]
train
https://github.com/guillermo-carrasco/bcbio-nextgen-monitor/blob/6d059154d774140e1fd03a0e3625f607cef06f5a/bcbio_monitor/config/__init__.py#L4-L11
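A minimal sketch of calling the function above; the file name is a placeholder. On failure the function prints a message and then re-raises the IOError.

# Hypothetical configuration path.
config = parse_config("monitor_config.yaml")
log_file = config.get("log_file")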
kevinconway/iface
iface/checks.py
_ensure_ifaces_tuple
def _ensure_ifaces_tuple(ifaces): """Convert to a tuple of interfaces and raise if not interfaces.""" try: ifaces = tuple(ifaces) except TypeError: ifaces = (ifaces,) for iface in ifaces: if not _issubclass(iface, ibc.Iface): raise TypeError('Can only compare against interfaces.') return ifaces
python
def _ensure_ifaces_tuple(ifaces): """Convert to a tuple of interfaces and raise if not interfaces.""" try: ifaces = tuple(ifaces) except TypeError: ifaces = (ifaces,) for iface in ifaces: if not _issubclass(iface, ibc.Iface): raise TypeError('Can only compare against interfaces.') return ifaces
[ "def", "_ensure_ifaces_tuple", "(", "ifaces", ")", ":", "try", ":", "ifaces", "=", "tuple", "(", "ifaces", ")", "except", "TypeError", ":", "ifaces", "=", "(", "ifaces", ",", ")", "for", "iface", "in", "ifaces", ":", "if", "not", "_issubclass", "(", "iface", ",", "ibc", ".", "Iface", ")", ":", "raise", "TypeError", "(", "'Can only compare against interfaces.'", ")", "return", "ifaces" ]
Convert to a tuple of interfaces and raise if not interfaces.
[ "Convert", "to", "a", "tuple", "of", "interfaces", "and", "raise", "if", "not", "interfaces", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/checks.py#L9-L25
kevinconway/iface
iface/checks.py
_check_for_definition
def _check_for_definition(iface, cls, tag, defines): """Check for a valid definition of a value. Args: iface (Iface): An Iface specification. cls (type): Some type to check for a definition. tag (str): The name of the tag attribute used to mark the abstract methods. defines (callable): A callable that accepts an attribute and returns True if the attribute is a valid definition. Returns: bool: Whether or not the definition is found. """ attributes = ( attr for attr in iface.__abstractmethods__ if hasattr(getattr(iface, attr), tag) ) for attribute in attributes: for node in cls.__mro__: if hasattr(node, attribute) and defines(getattr(node, attribute)): return True try: attribute return False except NameError: # Pass the test if the loop was never executed. This indicates there # were no iface elements defined in the search. return True
python
def _check_for_definition(iface, cls, tag, defines): """Check for a valid definition of a value. Args: iface (Iface): An Iface specification. cls (type): Some type to check for a definition. tag (str): The name of the tag attribute used to mark the abstract methods. defines (callable): A callable that accepts an attribute and returns True if the attribute is a valid definition. Returns: bool: Whether or not the definition is found. """ attributes = ( attr for attr in iface.__abstractmethods__ if hasattr(getattr(iface, attr), tag) ) for attribute in attributes: for node in cls.__mro__: if hasattr(node, attribute) and defines(getattr(node, attribute)): return True try: attribute return False except NameError: # Pass the test if the loop was never executed. This indicates there # were no iface elements defined in the search. return True
[ "def", "_check_for_definition", "(", "iface", ",", "cls", ",", "tag", ",", "defines", ")", ":", "attributes", "=", "(", "attr", "for", "attr", "in", "iface", ".", "__abstractmethods__", "if", "hasattr", "(", "getattr", "(", "iface", ",", "attr", ")", ",", "tag", ")", ")", "for", "attribute", "in", "attributes", ":", "for", "node", "in", "cls", ".", "__mro__", ":", "if", "hasattr", "(", "node", ",", "attribute", ")", "and", "defines", "(", "getattr", "(", "node", ",", "attribute", ")", ")", ":", "return", "True", "try", ":", "attribute", "return", "False", "except", "NameError", ":", "# Pass the test if the loop was never executed. This indicates there", "# were no iface elements defined in the search.", "return", "True" ]
Check for a valid definition of a value. Args: iface (Iface): An Iface specification. cls (type): Some type to check for a definition. tag (str): The name of the tag attribute used to mark the abstract methods. defines (callable): A callable that accepts an attribute and returns True if the attribute is a valid definition. Returns: bool: Whether or not the definition is found.
[ "Check", "for", "a", "valid", "definition", "of", "a", "value", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/checks.py#L28-L64
kevinconway/iface
iface/checks.py
issubclass
def issubclass(cls, ifaces): """Check if the given class is an implementation of the given iface.""" ifaces = _ensure_ifaces_tuple(ifaces) for iface in ifaces: return all(( _check_for_definition( iface, cls, '__iclassattribute__', _is_attribute, ), _check_for_definition( iface, cls, '__iproperty__', _is_property, ), _check_for_definition( iface, cls, '__imethod__', _is_method, ), _check_for_definition( iface, cls, '__iclassmethod__', _is_classmethod, ), ))
python
def issubclass(cls, ifaces): """Check if the given class is an implementation of the given iface.""" ifaces = _ensure_ifaces_tuple(ifaces) for iface in ifaces: return all(( _check_for_definition( iface, cls, '__iclassattribute__', _is_attribute, ), _check_for_definition( iface, cls, '__iproperty__', _is_property, ), _check_for_definition( iface, cls, '__imethod__', _is_method, ), _check_for_definition( iface, cls, '__iclassmethod__', _is_classmethod, ), ))
[ "def", "issubclass", "(", "cls", ",", "ifaces", ")", ":", "ifaces", "=", "_ensure_ifaces_tuple", "(", "ifaces", ")", "for", "iface", "in", "ifaces", ":", "return", "all", "(", "(", "_check_for_definition", "(", "iface", ",", "cls", ",", "'__iclassattribute__'", ",", "_is_attribute", ",", ")", ",", "_check_for_definition", "(", "iface", ",", "cls", ",", "'__iproperty__'", ",", "_is_property", ",", ")", ",", "_check_for_definition", "(", "iface", ",", "cls", ",", "'__imethod__'", ",", "_is_method", ",", ")", ",", "_check_for_definition", "(", "iface", ",", "cls", ",", "'__iclassmethod__'", ",", "_is_classmethod", ",", ")", ",", ")", ")" ]
Check if the given class is an implementation of the given iface.
[ "Check", "if", "the", "given", "class", "is", "an", "implementation", "of", "the", "given", "iface", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/checks.py#L91-L121
kevinconway/iface
iface/checks.py
isinstance
def isinstance(instance, ifaces): """Check if a given instance is an implementation of the interface.""" ifaces = _ensure_ifaces_tuple(ifaces) for iface in ifaces: attributes = ( attr for attr in iface.__abstractmethods__ if hasattr(getattr(iface, attr), '__iattribute__') ) for attribute in attributes: if not hasattr(instance, attribute): return False if not issubclass(type(instance), ifaces): return False return True
python
def isinstance(instance, ifaces): """Check if a given instance is an implementation of the interface.""" ifaces = _ensure_ifaces_tuple(ifaces) for iface in ifaces: attributes = ( attr for attr in iface.__abstractmethods__ if hasattr(getattr(iface, attr), '__iattribute__') ) for attribute in attributes: if not hasattr(instance, attribute): return False if not issubclass(type(instance), ifaces): return False return True
[ "def", "isinstance", "(", "instance", ",", "ifaces", ")", ":", "ifaces", "=", "_ensure_ifaces_tuple", "(", "ifaces", ")", "for", "iface", "in", "ifaces", ":", "attributes", "=", "(", "attr", "for", "attr", "in", "iface", ".", "__abstractmethods__", "if", "hasattr", "(", "getattr", "(", "iface", ",", "attr", ")", ",", "'__iattribute__'", ")", ")", "for", "attribute", "in", "attributes", ":", "if", "not", "hasattr", "(", "instance", ",", "attribute", ")", ":", "return", "False", "if", "not", "issubclass", "(", "type", "(", "instance", ")", ",", "ifaces", ")", ":", "return", "False", "return", "True" ]
Check if a given instance is an implementation of the interface.
[ "Check", "if", "a", "given", "instance", "is", "an", "implementation", "of", "the", "interface", "." ]
train
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/checks.py#L124-L144
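A hedged sketch tying the check functions above to the decorator records earlier; the import paths are assumed. Mallard never inherits from the interface but passes because it defines everything the interface demands, i.e. the checks are structural, duck-typed comparisons.

from iface import checks, decorators, ibc  # import layout assumed

class IDuck(ibc.Iface):

    @decorators.method
    def quack(self):
        """Required method."""

class Mallard(object):

    def quack(self):
        return "quack"

assert checks.issubclass(Mallard, IDuck)    # True: quack is defined on the class
assert checks.isinstance(Mallard(), IDuck)  # also verifies instance attributes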
mirca/vaneska
vaneska/photometry.py
PSFPhotometry.fit
def fit(self, pixel_flux, data_placeholder, var_list, session, feed_dict={}): """ Parameters ---------- pixel_flux : ndarray The TPF-like pixel flux time series. The first dimension must represent time, and the remaining two dimensions must represent the spatial dimensions. data_placeholder : tf.placeholder A placeholder which will be used to pass the n-th time stamp to `self.optimizer.minimize`. var_list : list The list of parameters (as tensors) to optimize for. session : instance of tf.Session feed_dict : dict Dictionary of additional arguments used to feed the loss function. """ opt_params = [] cadences = range(pixel_flux.shape[0]) for n in tqdm.tqdm(cadences): feed_dict[data_placeholder] = pixel_flux[n] self.optimizer.minimize(session=session, feed_dict=feed_dict) opt_params.append([session.run(var) for var in var_list]) return opt_params
python
def fit(self, pixel_flux, data_placeholder, var_list, session, feed_dict={}): """ Parameters ---------- pixel_flux : ndarray The TPF-like pixel flux time series. The first dimension must represent time, and the remaining two dimensions must represent the spatial dimensions. data_placeholder : tf.placeholder A placeholder which will be used to pass the n-th time stamp to `self.optimizer.minimize`. var_list : list The list of parameters (as tensors) to optimize for. session : instance of tf.Session feed_dict : dict Dictionary of additional arguments used to feed the loss function. """ opt_params = [] cadences = range(pixel_flux.shape[0]) for n in tqdm.tqdm(cadences): feed_dict[data_placeholder] = pixel_flux[n] self.optimizer.minimize(session=session, feed_dict=feed_dict) opt_params.append([session.run(var) for var in var_list]) return opt_params
[ "def", "fit", "(", "self", ",", "pixel_flux", ",", "data_placeholder", ",", "var_list", ",", "session", ",", "feed_dict", "=", "{", "}", ")", ":", "opt_params", "=", "[", "]", "cadences", "=", "range", "(", "pixel_flux", ".", "shape", "[", "0", "]", ")", "for", "n", "in", "tqdm", ".", "tqdm", "(", "cadences", ")", ":", "feed_dict", "[", "data_placeholder", "]", "=", "pixel_flux", "[", "n", "]", "self", ".", "optimizer", ".", "minimize", "(", "session", "=", "session", ",", "feed_dict", "=", "feed_dict", ")", "opt_params", ".", "append", "(", "[", "session", ".", "run", "(", "var", ")", "for", "var", "in", "var_list", "]", ")", "return", "opt_params" ]
Parameters ---------- pixel_flux : ndarray The TPF-like pixel flux time series. The first dimension must represent time, and the remaining two dimensions must represent the spatial dimensions. data_placeholder : tf.placeholder A placeholder which will be used to pass the n-th time stamp to `self.optimizer.minimize`. var_list : list The list of parameters (as tensors) to optimize for. session : instance of tf.Session feed_dict : dict Dictionary of additional arguments used to feed the loss function.
[ "Parameters", "----------", "pixel_flux", ":", "ndarray", "The", "TPF", "-", "like", "pixel", "flux", "time", "series", ".", "The", "first", "dimension", "must", "represent", "time", "and", "the", "remaining", "two", "dimensions", "must", "represent", "the", "spatial", "dimensions", ".", "data_placeholder", ":", "tf", ".", "placeholder", "A", "placeholder", "which", "will", "be", "used", "to", "pass", "the", "n", "-", "th", "time", "stamp", "to", "self", ".", "optimizer", ".", "minimize", ".", "var_list", ":", "list", "The", "list", "of", "parameters", "(", "as", "tensors", ")", "to", "optimize", "for", ".", "session", ":", "instance", "of", "tf", ".", "Session", "feed_dict", ":", "dict", "Dictionary", "of", "additional", "arguments", "used", "to", "feed", "the", "loss", "function", "." ]
train
https://github.com/mirca/vaneska/blob/9bbf0b16957ec765e5f30872c8d22470c66bfd83/vaneska/photometry.py#L21-L46
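A hedged sketch of driving PSFPhotometry.fit with TensorFlow 1.x-style placeholders. The PSFPhotometry constructor, the stand-in model, and the loss are assumptions; the record only shows that self.optimizer must expose a ScipyOptimizerInterface-style minimize(session=..., feed_dict=...).

import numpy as np
import tensorflow as tf  # TF 1.x-era API assumed

pixel_flux = np.random.rand(50, 8, 8)            # (time, rows, cols) stand-in data
data = tf.placeholder(tf.float64, shape=(8, 8))  # fed one cadence at a time by fit
flux = tf.Variable(1.0, dtype=tf.float64)
model = flux * tf.ones((8, 8), dtype=tf.float64) # trivial stand-in for a PSF model
loss = tf.reduce_sum(tf.square(data - model))

optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss, var_list=[flux])
phot = PSFPhotometry(optimizer)                  # constructor signature assumed

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    opt_params = phot.fit(pixel_flux, data, [flux], sess)  # one entry per cadence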
moonso/extract_vcf
extract_vcf/plugin.py
Plugin.get_entry
def get_entry(self, variant_line=None, variant_dict=None, raw_entry=None, vcf_header=None, csq_format=None, dict_key=None, individual_id=None): """Return the splitted entry from variant information Args: variant_line (str): A vcf formated variant line vcf_header (list): A list with the vcf header line csq_format (list): A list with the csq headers family_id (str): The family id that should be searched. If no id the first family found will be used Returns: entry (list): A list with the splitted entry """ if not raw_entry: raw_entry = self.get_raw_entry( variant_line=variant_line, variant_dict=variant_dict, vcf_header=vcf_header, individual_id=individual_id, dict_key=dict_key ) entry = [] if raw_entry: if self.field in ['CHROM', 'POS', 'REF', 'QUAL']: # We know these fields allways has one entry entry = [raw_entry] elif self.field in ['ID', 'FILTER']: # We know ID is allways splitted on ';' entry = raw_entry.split(';') elif self.field == 'ALT': # We know ALT is allways splitted on ',' entry = raw_entry.split(',') elif self.field == 'FORMAT': entry = raw_entry.split(':') elif self.field == 'INFO': # We are going to treat csq fields separately if self.info_key == 'CSQ': if not csq_format: raise IOError("If CSQ the csq format must be provided") if not self.csq_key: raise IOError("If CSQ a csq key must be provided") for i, head in enumerate(csq_format): if head == self.csq_key: # This is the csq entry we are looking for csq_column = i # CSQ entries are allways splitted on ',' for csq_entry in raw_entry.split(','): entry += split_strings(csq_entry.split('|')[csq_column], self.separators) else: if self.dict_entry: separators = self.separators[2:] else: separators = self.separators entry = split_strings(raw_entry, separators) elif self.field == 'sample_id': if not self.separators: entry = split_strings(raw_entry, '/') #If variant calls are phased we need to split on '|' if len(entry) == 1: entry = split_strings(raw_entry, '|') else: entry = split_strings(raw_entry, self.separators) return entry
python
def get_entry(self, variant_line=None, variant_dict=None, raw_entry=None, vcf_header=None, csq_format=None, dict_key=None, individual_id=None): """Return the splitted entry from variant information Args: variant_line (str): A vcf formated variant line vcf_header (list): A list with the vcf header line csq_format (list): A list with the csq headers family_id (str): The family id that should be searched. If no id the first family found will be used Returns: entry (list): A list with the splitted entry """ if not raw_entry: raw_entry = self.get_raw_entry( variant_line=variant_line, variant_dict=variant_dict, vcf_header=vcf_header, individual_id=individual_id, dict_key=dict_key ) entry = [] if raw_entry: if self.field in ['CHROM', 'POS', 'REF', 'QUAL']: # We know these fields allways has one entry entry = [raw_entry] elif self.field in ['ID', 'FILTER']: # We know ID is allways splitted on ';' entry = raw_entry.split(';') elif self.field == 'ALT': # We know ALT is allways splitted on ',' entry = raw_entry.split(',') elif self.field == 'FORMAT': entry = raw_entry.split(':') elif self.field == 'INFO': # We are going to treat csq fields separately if self.info_key == 'CSQ': if not csq_format: raise IOError("If CSQ the csq format must be provided") if not self.csq_key: raise IOError("If CSQ a csq key must be provided") for i, head in enumerate(csq_format): if head == self.csq_key: # This is the csq entry we are looking for csq_column = i # CSQ entries are allways splitted on ',' for csq_entry in raw_entry.split(','): entry += split_strings(csq_entry.split('|')[csq_column], self.separators) else: if self.dict_entry: separators = self.separators[2:] else: separators = self.separators entry = split_strings(raw_entry, separators) elif self.field == 'sample_id': if not self.separators: entry = split_strings(raw_entry, '/') #If variant calls are phased we need to split on '|' if len(entry) == 1: entry = split_strings(raw_entry, '|') else: entry = split_strings(raw_entry, self.separators) return entry
[ "def", "get_entry", "(", "self", ",", "variant_line", "=", "None", ",", "variant_dict", "=", "None", ",", "raw_entry", "=", "None", ",", "vcf_header", "=", "None", ",", "csq_format", "=", "None", ",", "dict_key", "=", "None", ",", "individual_id", "=", "None", ")", ":", "if", "not", "raw_entry", ":", "raw_entry", "=", "self", ".", "get_raw_entry", "(", "variant_line", "=", "variant_line", ",", "variant_dict", "=", "variant_dict", ",", "vcf_header", "=", "vcf_header", ",", "individual_id", "=", "individual_id", ",", "dict_key", "=", "dict_key", ")", "entry", "=", "[", "]", "if", "raw_entry", ":", "if", "self", ".", "field", "in", "[", "'CHROM'", ",", "'POS'", ",", "'REF'", ",", "'QUAL'", "]", ":", "# We know these fields allways has one entry", "entry", "=", "[", "raw_entry", "]", "elif", "self", ".", "field", "in", "[", "'ID'", ",", "'FILTER'", "]", ":", "# We know ID is allways splitted on ';'", "entry", "=", "raw_entry", ".", "split", "(", "';'", ")", "elif", "self", ".", "field", "==", "'ALT'", ":", "# We know ALT is allways splitted on ','", "entry", "=", "raw_entry", ".", "split", "(", "','", ")", "elif", "self", ".", "field", "==", "'FORMAT'", ":", "entry", "=", "raw_entry", ".", "split", "(", "':'", ")", "elif", "self", ".", "field", "==", "'INFO'", ":", "# We are going to treat csq fields separately", "if", "self", ".", "info_key", "==", "'CSQ'", ":", "if", "not", "csq_format", ":", "raise", "IOError", "(", "\"If CSQ the csq format must be provided\"", ")", "if", "not", "self", ".", "csq_key", ":", "raise", "IOError", "(", "\"If CSQ a csq key must be provided\"", ")", "for", "i", ",", "head", "in", "enumerate", "(", "csq_format", ")", ":", "if", "head", "==", "self", ".", "csq_key", ":", "# This is the csq entry we are looking for", "csq_column", "=", "i", "# CSQ entries are allways splitted on ','", "for", "csq_entry", "in", "raw_entry", ".", "split", "(", "','", ")", ":", "entry", "+=", "split_strings", "(", "csq_entry", ".", "split", "(", "'|'", ")", "[", "csq_column", "]", ",", "self", ".", "separators", ")", "else", ":", "if", "self", ".", "dict_entry", ":", "separators", "=", "self", ".", "separators", "[", "2", ":", "]", "else", ":", "separators", "=", "self", ".", "separators", "entry", "=", "split_strings", "(", "raw_entry", ",", "separators", ")", "elif", "self", ".", "field", "==", "'sample_id'", ":", "if", "not", "self", ".", "separators", ":", "entry", "=", "split_strings", "(", "raw_entry", ",", "'/'", ")", "#If variant calls are phased we need to split on '|'", "if", "len", "(", "entry", ")", "==", "1", ":", "entry", "=", "split_strings", "(", "raw_entry", ",", "'|'", ")", "else", ":", "entry", "=", "split_strings", "(", "raw_entry", ",", "self", ".", "separators", ")", "return", "entry" ]
Return the splitted entry from variant information Args: variant_line (str): A vcf formated variant line vcf_header (list): A list with the vcf header line csq_format (list): A list with the csq headers family_id (str): The family id that should be searched. If no id the first family found will be used Returns: entry (list): A list with the splitted entry
[ "Return", "the", "splitted", "entry", "from", "variant", "information", "Args", ":", "variant_line", "(", "str", ")", ":", "A", "vcf", "formated", "variant", "line", "vcf_header", "(", "list", ")", ":", "A", "list", "with", "the", "vcf", "header", "line", "csq_format", "(", "list", ")", ":", "A", "list", "with", "the", "csq", "headers", "family_id", "(", "str", ")", ":", "The", "family", "id", "that", "should", "be", "searched", ".", "If", "no", "id", "the", "first", "family", "found", "will", "be", "used", "Returns", ":", "entry", "(", "list", ")", ":", "A", "list", "with", "the", "splitted", "entry" ]
train
https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/plugin.py#L82-L154
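A hypothetical call against the record above; `plugin` is assumed to be a Plugin configured with field='INFO', info_key='CADD', and a single ',' separator, since the constructor is not shown in these records.

# A single tab-separated VCF line with a CADD annotation in INFO.
variant_line = "1\t879537\t.\tT\tC\t100\tPASS\tCADD=24.5;MQ=60\tGT:GQ\t0/1:60"
entry = plugin.get_entry(variant_line=variant_line)
# With the assumed configuration this takes the plain INFO branch and
# returns ['24.5'].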
moonso/extract_vcf
extract_vcf/plugin.py
Plugin.get_raw_entry
def get_raw_entry(self, variant_line=None, variant_dict=None, vcf_header=None, individual_id=None, dict_key=None): """Return the raw entry from the vcf field If no entry was found return None Args: variant_line (str): A vcf formated variant line vcf_header (list): A list with the vcf header line individual_id (str): The individual id to get gt call Returns: The raw entry found in variant line """ if variant_line: variant_line = variant_line.rstrip().split() entry = None if self.field == 'CHROM': if variant_line: entry = variant_line[0] elif variant_dict: entry = variant_dict['CHROM'] elif self.field == 'POS': if variant_line: entry = variant_line[1] elif variant_dict: entry = variant_dict['POS'] elif self.field == 'ID': if variant_line: entry = variant_line[2] elif variant_dict: entry = variant_dict['ID'] elif self.field == 'REF': if variant_line: entry = variant_line[3] elif variant_dict: entry = variant_dict['REF'] elif self.field == 'ALT': if variant_line: entry = variant_line[4] elif variant_dict: entry = variant_dict['ALT'] elif self.field == 'QUAL': if variant_line: entry = variant_line[5] elif variant_dict: entry = variant_dict['QUAL'] elif self.field == 'FILTER': if variant_line: entry = variant_line[6] elif variant_dict: entry = variant_dict['FILTER'] elif self.field == 'INFO': if variant_line: for info_annotation in variant_line[7].split(';'): splitted_annotation = info_annotation.split('=') if self.info_key == splitted_annotation[0]: if len(splitted_annotation) == 2: entry = splitted_annotation[1] elif variant_dict: entry = variant_dict.get('info_dict',{}).get(self.info_key) if self.dict_entry and entry: #First we split the "dictionaries" first_split = entry.split(self.separators[0]) for annotation in first_split: # Then we search for the dict key splitted_entry = annotation.split(self.separators[1]) key = splitted_entry[0] value = splitted_entry[1] if dict_key: if key == dict_key: entry = value #If no key we just return the last entry else: entry = value elif self.field == 'FORMAT': if variant_line: entry = variant_line[8] elif variant_dict: entry = variant_dict['FORMAT'] elif self.field == "sample_id": if not individual_id: raise IOError("If 'sample_id' a individual id must be provided") if not self.gt_key: raise IOError("If 'sample_id' a genotype key must be provided") if variant_line: if not vcf_header: raise IOError("If 'sample_id' the vcf header must be provided") format_info = variant_line[8] for i, head in enumerate(vcf_header): if head == individual_id: raw_gt_call = variant_line[i] elif variant_dict: format_info = variant_dict['FORMAT'] raw_gt_call = variant_dict[individual_id] entry_dict = dict(zip( format_info.split(':'), raw_gt_call.split(':') )) entry = entry_dict.get(self.gt_key, '.') return entry
python
def get_raw_entry(self, variant_line=None, variant_dict=None, vcf_header=None, individual_id=None, dict_key=None): """Return the raw entry from the vcf field If no entry was found return None Args: variant_line (str): A vcf formated variant line vcf_header (list): A list with the vcf header line individual_id (str): The individual id to get gt call Returns: The raw entry found in variant line """ if variant_line: variant_line = variant_line.rstrip().split() entry = None if self.field == 'CHROM': if variant_line: entry = variant_line[0] elif variant_dict: entry = variant_dict['CHROM'] elif self.field == 'POS': if variant_line: entry = variant_line[1] elif variant_dict: entry = variant_dict['POS'] elif self.field == 'ID': if variant_line: entry = variant_line[2] elif variant_dict: entry = variant_dict['ID'] elif self.field == 'REF': if variant_line: entry = variant_line[3] elif variant_dict: entry = variant_dict['REF'] elif self.field == 'ALT': if variant_line: entry = variant_line[4] elif variant_dict: entry = variant_dict['ALT'] elif self.field == 'QUAL': if variant_line: entry = variant_line[5] elif variant_dict: entry = variant_dict['QUAL'] elif self.field == 'FILTER': if variant_line: entry = variant_line[6] elif variant_dict: entry = variant_dict['FILTER'] elif self.field == 'INFO': if variant_line: for info_annotation in variant_line[7].split(';'): splitted_annotation = info_annotation.split('=') if self.info_key == splitted_annotation[0]: if len(splitted_annotation) == 2: entry = splitted_annotation[1] elif variant_dict: entry = variant_dict.get('info_dict',{}).get(self.info_key) if self.dict_entry and entry: #First we split the "dictionaries" first_split = entry.split(self.separators[0]) for annotation in first_split: # Then we search for the dict key splitted_entry = annotation.split(self.separators[1]) key = splitted_entry[0] value = splitted_entry[1] if dict_key: if key == dict_key: entry = value #If no key we just return the last entry else: entry = value elif self.field == 'FORMAT': if variant_line: entry = variant_line[8] elif variant_dict: entry = variant_dict['FORMAT'] elif self.field == "sample_id": if not individual_id: raise IOError("If 'sample_id' a individual id must be provided") if not self.gt_key: raise IOError("If 'sample_id' a genotype key must be provided") if variant_line: if not vcf_header: raise IOError("If 'sample_id' the vcf header must be provided") format_info = variant_line[8] for i, head in enumerate(vcf_header): if head == individual_id: raw_gt_call = variant_line[i] elif variant_dict: format_info = variant_dict['FORMAT'] raw_gt_call = variant_dict[individual_id] entry_dict = dict(zip( format_info.split(':'), raw_gt_call.split(':') )) entry = entry_dict.get(self.gt_key, '.') return entry
[ "def", "get_raw_entry", "(", "self", ",", "variant_line", "=", "None", ",", "variant_dict", "=", "None", ",", "vcf_header", "=", "None", ",", "individual_id", "=", "None", ",", "dict_key", "=", "None", ")", ":", "if", "variant_line", ":", "variant_line", "=", "variant_line", ".", "rstrip", "(", ")", ".", "split", "(", ")", "entry", "=", "None", "if", "self", ".", "field", "==", "'CHROM'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "0", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'CHROM'", "]", "elif", "self", ".", "field", "==", "'POS'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "1", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'POS'", "]", "elif", "self", ".", "field", "==", "'ID'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "2", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'ID'", "]", "elif", "self", ".", "field", "==", "'REF'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "3", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'REF'", "]", "elif", "self", ".", "field", "==", "'ALT'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "4", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'ALT'", "]", "elif", "self", ".", "field", "==", "'QUAL'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "5", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'QUAL'", "]", "elif", "self", ".", "field", "==", "'FILTER'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "6", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'FILTER'", "]", "elif", "self", ".", "field", "==", "'INFO'", ":", "if", "variant_line", ":", "for", "info_annotation", "in", "variant_line", "[", "7", "]", ".", "split", "(", "';'", ")", ":", "splitted_annotation", "=", "info_annotation", ".", "split", "(", "'='", ")", "if", "self", ".", "info_key", "==", "splitted_annotation", "[", "0", "]", ":", "if", "len", "(", "splitted_annotation", ")", "==", "2", ":", "entry", "=", "splitted_annotation", "[", "1", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", ".", "get", "(", "'info_dict'", ",", "{", "}", ")", ".", "get", "(", "self", ".", "info_key", ")", "if", "self", ".", "dict_entry", "and", "entry", ":", "#First we split the \"dictionaries\"", "first_split", "=", "entry", ".", "split", "(", "self", ".", "separators", "[", "0", "]", ")", "for", "annotation", "in", "first_split", ":", "# Then we search for the dict key", "splitted_entry", "=", "annotation", ".", "split", "(", "self", ".", "separators", "[", "1", "]", ")", "key", "=", "splitted_entry", "[", "0", "]", "value", "=", "splitted_entry", "[", "1", "]", "if", "dict_key", ":", "if", "key", "==", "dict_key", ":", "entry", "=", "value", "#If no key we just return the last entry", "else", ":", "entry", "=", "value", "elif", "self", ".", "field", "==", "'FORMAT'", ":", "if", "variant_line", ":", "entry", "=", "variant_line", "[", "8", "]", "elif", "variant_dict", ":", "entry", "=", "variant_dict", "[", "'FORMAT'", "]", "elif", "self", ".", "field", "==", "\"sample_id\"", ":", "if", "not", "individual_id", ":", "raise", "IOError", "(", "\"If 'sample_id' a individual id must be provided\"", ")", "if", "not", "self", ".", "gt_key", ":", "raise", "IOError", "(", "\"If 'sample_id' a genotype key must be provided\"", ")", "if", "variant_line", ":", "if", "not", "vcf_header", ":", "raise", "IOError", "(", "\"If 'sample_id' the vcf header must be provided\"", ")", "format_info", "=", "variant_line", "[", "8", "]", "for", "i", ",", "head", "in", "enumerate", "(", "vcf_header", ")", ":", "if", "head", "==", "individual_id", ":", "raw_gt_call", "=", "variant_line", "[", "i", "]", "elif", "variant_dict", ":", "format_info", "=", "variant_dict", "[", "'FORMAT'", "]", "raw_gt_call", "=", "variant_dict", "[", "individual_id", "]", "entry_dict", "=", "dict", "(", "zip", "(", "format_info", ".", "split", "(", "':'", ")", ",", "raw_gt_call", ".", "split", "(", "':'", ")", ")", ")", "entry", "=", "entry_dict", ".", "get", "(", "self", ".", "gt_key", ",", "'.'", ")", "return", "entry" ]
Return the raw entry from the vcf field If no entry was found return None Args: variant_line (str): A vcf formatted variant line vcf_header (list): A list with the vcf header line individual_id (str): The individual id to get gt call Returns: The raw entry found in variant line
[ "Return", "the", "raw", "entry", "from", "the", "vcf", "field", "If", "no", "entry", "was", "found", "return", "None", "Args", ":", "variant_line", "(", "str", ")", ":", "A", "vcf", "formatted", "variant", "line", "vcf_header", "(", "list", ")", ":", "A", "list", "with", "the", "vcf", "header", "line", "individual_id", "(", "str", ")", ":", "The", "individual", "id", "to", "get", "gt", "call", "Returns", ":", "The", "raw", "entry", "found", "in", "variant", "line" ]
train
https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/plugin.py#L156-L274
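A short usage sketch for get_raw_entry may help here, since the record only shows the method body. The Plugin constructor and its arguments are not part of this record, so the setup below is hypothetical; only the get_raw_entry call itself reflects the recorded code.

# Hypothetical setup: the Plugin constructor signature is assumed, not recorded.
from extract_vcf.plugin import Plugin  # import path inferred from the file layout

plugin = Plugin(name="af", field="INFO", data_type="float")  # assumed arguments
plugin.info_key = "1000GAF"  # mirrors the self.info_key lookup in the code above

vcf_line = "1\t879537\t.\tT\tC\t100\tPASS\t1000GAF=0.02;MQ=59"
# With self.dict_entry falsy, the INFO branch returns the raw value string:
print(plugin.get_raw_entry(variant_line=vcf_line))  # '0.02'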
moonso/extract_vcf
extract_vcf/plugin.py
Plugin.get_value
def get_value(self, variant_line=None, variant_dict=None, entry=None, raw_entry=None, vcf_header=None, csq_format=None, dict_key=None, individual_id=None): """ Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A splitted entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value """ value = None raw_entry = self.get_raw_entry( variant_line = variant_line, variant_dict = variant_dict, vcf_header=vcf_header, individual_id=individual_id, dict_key=dict_key ) # If data type is flag we only need to check if any entry exists if self.data_type == 'flag': if self.field == 'INFO': if variant_line: for info_entry in variant_line.split()[7].split(';'): if self.info_key == info_entry.split('=')[0]: value = True elif variant_dict: if self.info_key in variant_dict.get('info_dict',{}): value = True else: if raw_entry != '.': value = True # If we have a record rule we need to return the correct value elif raw_entry: # If there was no raw entry we will return None if self.record_rule: if self.data_type == 'string': if self.record_rule == 'max': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1), reverse=True ) if self.record_rule == 'min': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1) ) for string_rule in sorted_strings: if string_rule[0].lower() in raw_entry.lower(): value = string_rule[0] break else: typed_annotations = [] for value in self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id): if self.data_type == 'float': try: typed_annotations.append(float(value)) except ValueError: pass elif self.data_type == 'integer': try: typed_annotations.append(int(value)) except ValueError: pass if typed_annotations: if self.record_rule == 'max': value = max(typed_annotations) elif self.record_rule == 'min': value = min(typed_annotations) else: value = None # If no record rule is given we return the raw annotation # Here the data_type is not flag, and there is no record rule # We know that there exists a raw annotation else: # We will just return the first annotation found value = self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id)[0] if self.data_type == 'float': try: value = float(value) except ValueError: pass elif self.data_type == 'integer': try: value = int(value) except ValueError: pass return value
python
def get_value(self, variant_line=None, variant_dict=None, entry=None, raw_entry=None, vcf_header=None, csq_format=None, dict_key=None, individual_id=None): """ Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A splitted entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value """ value = None raw_entry = self.get_raw_entry( variant_line = variant_line, variant_dict = variant_dict, vcf_header=vcf_header, individual_id=individual_id, dict_key=dict_key ) # If data type is flag we only need to check if any entry exists if self.data_type == 'flag': if self.field == 'INFO': if variant_line: for info_entry in variant_line.split()[7].split(';'): if self.info_key == info_entry.split('=')[0]: value = True elif variant_dict: if self.info_key in variant_dict.get('info_dict',{}): value = True else: if raw_entry != '.': value = True # If we have a record rule we need to return the correct value elif raw_entry: # If there was no raw entry we will return None if self.record_rule: if self.data_type == 'string': if self.record_rule == 'max': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1), reverse=True ) if self.record_rule == 'min': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1) ) for string_rule in sorted_strings: if string_rule[0].lower() in raw_entry.lower(): value = string_rule[0] break else: typed_annotations = [] for value in self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id): if self.data_type == 'float': try: typed_annotations.append(float(value)) except ValueError: pass elif self.data_type == 'integer': try: typed_annotations.append(int(value)) except ValueError: pass if typed_annotations: if self.record_rule == 'max': value = max(typed_annotations) elif self.record_rule == 'min': value = min(typed_annotations) else: value = None # If no record rule is given we return the raw annotation # Here the data_type is not flag, and there is no record rule # We know that there exists a raw annotation else: # We will just return the first annotation found value = self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id)[0] if self.data_type == 'float': try: value = float(value) except ValueError: pass elif self.data_type == 'integer': try: value = int(value) except ValueError: pass return value
[ "def", "get_value", "(", "self", ",", "variant_line", "=", "None", ",", "variant_dict", "=", "None", ",", "entry", "=", "None", ",", "raw_entry", "=", "None", ",", "vcf_header", "=", "None", ",", "csq_format", "=", "None", ",", "dict_key", "=", "None", ",", "individual_id", "=", "None", ")", ":", "value", "=", "None", "raw_entry", "=", "self", ".", "get_raw_entry", "(", "variant_line", "=", "variant_line", ",", "variant_dict", "=", "variant_dict", ",", "vcf_header", "=", "vcf_header", ",", "individual_id", "=", "individual_id", ",", "dict_key", "=", "dict_key", ")", "# If data type is flag we only need to check if any entry exists", "if", "self", ".", "data_type", "==", "'flag'", ":", "if", "self", ".", "field", "==", "'INFO'", ":", "if", "variant_line", ":", "for", "info_entry", "in", "variant_line", ".", "split", "(", ")", "[", "7", "]", ".", "split", "(", "';'", ")", ":", "if", "self", ".", "info_key", "==", "info_entry", ".", "split", "(", "'='", ")", "[", "0", "]", ":", "value", "=", "True", "elif", "variant_dict", ":", "if", "self", ".", "info_key", "in", "variant_dict", ".", "get", "(", "'info_dict'", ",", "{", "}", ")", ":", "value", "=", "True", "else", ":", "if", "raw_entry", "!=", "'.'", ":", "value", "=", "True", "# If we have a record rule we need to return the correct value", "elif", "raw_entry", ":", "# If there was no raw entry we will return None", "if", "self", ".", "record_rule", ":", "if", "self", ".", "data_type", "==", "'string'", ":", "if", "self", ".", "record_rule", "==", "'max'", ":", "sorted_strings", "=", "sorted", "(", "self", ".", "string_rules", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "if", "self", ".", "record_rule", "==", "'min'", ":", "sorted_strings", "=", "sorted", "(", "self", ".", "string_rules", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "for", "string_rule", "in", "sorted_strings", ":", "if", "string_rule", "[", "0", "]", ".", "lower", "(", ")", "in", "raw_entry", ".", "lower", "(", ")", ":", "value", "=", "string_rule", "[", "0", "]", "break", "else", ":", "typed_annotations", "=", "[", "]", "for", "value", "in", "self", ".", "get_entry", "(", "raw_entry", "=", "raw_entry", ",", "vcf_header", "=", "vcf_header", ",", "csq_format", "=", "csq_format", ",", "dict_key", "=", "dict_key", ",", "individual_id", "=", "individual_id", ")", ":", "if", "self", ".", "data_type", "==", "'float'", ":", "try", ":", "typed_annotations", ".", "append", "(", "float", "(", "value", ")", ")", "except", "ValueError", ":", "pass", "elif", "self", ".", "data_type", "==", "'integer'", ":", "try", ":", "typed_annotations", ".", "append", "(", "int", "(", "value", ")", ")", "except", "ValueError", ":", "pass", "if", "typed_annotations", ":", "if", "self", ".", "record_rule", "==", "'max'", ":", "value", "=", "max", "(", "typed_annotations", ")", "elif", "self", ".", "record_rule", "==", "'min'", ":", "value", "=", "min", "(", "typed_annotations", ")", "else", ":", "value", "=", "None", "# If no record rule is given we return the raw annotation", "# Here the data_type is not flag, and there is no record rule", "# We know that there exists a raw annotation", "else", ":", "# We will just return the first annotation found", "value", "=", "self", ".", "get_entry", "(", "raw_entry", "=", "raw_entry", ",", "vcf_header", "=", "vcf_header", ",", "csq_format", "=", "csq_format", ",", "dict_key", "=", "dict_key", ",", "individual_id", "=", "individual_id", 
")", "[", "0", "]", "if", "self", ".", "data_type", "==", "'float'", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "pass", "elif", "self", ".", "data_type", "==", "'integer'", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "return", "value" ]
Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A split entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value
[ "Return", "the", "value", "as", "specified", "by", "plugin", "Get", "value", "will", "return", "one", "value", "or", "None", "if", "no", "correct", "value", "is", "found", ".", "Arguments", ":", "variant_line", "(", "str", ")", ":", "A", "vcf", "variant", "line", "variant_dict", "(", "dict", ")", ":", "A", "variant", "dictionary", "entry", "(", "list", ")", ":", "A", "split", "entry", "raw_entry", "(", "str", ")", ":", "The", "raw", "entry", "from", "the", "vcf", "file", "vcf_header", "(", "list", ")", ":", "The", "vcf", "header", "line", "with", "sample", "ids", "csq_format", "(", "list", ")", ":", "The", "CSQ", "format", "family_id", "(", "str", ")", ":", "The", "family", "id", "individual_id", "(", "str", ")", ":", "The", "individual", "id", "Returns", ":", "value", "(", "str", ")", ":", "A", "string", "that", "represents", "the", "correct", "value" ]
train
https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/plugin.py#L276-L401
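The flag branch of get_value is the easiest to see in action: it only tests whether the INFO key is present at all. The setup is again a hypothetical sketch, not the recorded constructor.

# Same assumed Plugin setup as in the previous sketch.
plugin = Plugin(name="in_dbsnp", field="INFO", data_type="flag")
plugin.info_key = "DB"

print(plugin.get_value(variant_line="1\t10\trs123\tA\tG\t50\tPASS\tDB;AF=0.5"))  # True: 'DB' present
print(plugin.get_value(variant_line="1\t10\t.\tA\tG\t50\tPASS\tAF=0.5"))         # None: no 'DB' key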
Aluriak/tergraw
tergraw/tergraw.py
create_layout
def create_layout(graph, graphviz_prog=DEFAULT_GRAPHVIZ_PROG): """Return {node: position} for given graph""" graphviz_layout = graphutils.graphviz_layout(graph, prog=graphviz_prog) # print('GRAPHIZ LAYOUT:', graphviz_layout) layout = {k: (int(x // 10), int(y // 10)) for k, (x, y) in graphviz_layout.items()} # apply an offset for layouts to get all position >= 0 max_x = max(layout.values(), key=lambda t: t[0])[0] min_x = min(layout.values(), key=lambda t: t[0])[0] max_y = max(layout.values(), key=lambda t: t[1])[1] min_y = min(layout.values(), key=lambda t: t[1])[1] offset_x = - min(0, min_x) offset_y = - min(0, min_y) return { node: (offset_x + x, offset_y + y) for node, (x, y) in layout.items() }
python
def create_layout(graph, graphviz_prog=DEFAULT_GRAPHVIZ_PROG): """Return {node: position} for given graph""" graphviz_layout = graphutils.graphviz_layout(graph, prog=graphviz_prog) # print('GRAPHIZ LAYOUT:', graphviz_layout) layout = {k: (int(x // 10), int(y // 10)) for k, (x, y) in graphviz_layout.items()} # apply an offset for layouts to get all position >= 0 max_x = max(layout.values(), key=lambda t: t[0])[0] min_x = min(layout.values(), key=lambda t: t[0])[0] max_y = max(layout.values(), key=lambda t: t[1])[1] min_y = min(layout.values(), key=lambda t: t[1])[1] offset_x = - min(0, min_x) offset_y = - min(0, min_y) return { node: (offset_x + x, offset_y + y) for node, (x, y) in layout.items() }
[ "def", "create_layout", "(", "graph", ",", "graphviz_prog", "=", "DEFAULT_GRAPHVIZ_PROG", ")", ":", "graphviz_layout", "=", "graphutils", ".", "graphviz_layout", "(", "graph", ",", "prog", "=", "graphviz_prog", ")", "# print('GRAPHIZ LAYOUT:', graphviz_layout)", "layout", "=", "{", "k", ":", "(", "int", "(", "x", "//", "10", ")", ",", "int", "(", "y", "//", "10", ")", ")", "for", "k", ",", "(", "x", ",", "y", ")", "in", "graphviz_layout", ".", "items", "(", ")", "}", "# apply an offset for layouts to get all position >= 0", "max_x", "=", "max", "(", "layout", ".", "values", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", "[", "0", "]", "min_x", "=", "min", "(", "layout", ".", "values", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", "[", "0", "]", "max_y", "=", "max", "(", "layout", ".", "values", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "1", "]", ")", "[", "1", "]", "min_y", "=", "min", "(", "layout", ".", "values", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "1", "]", ")", "[", "1", "]", "offset_x", "=", "-", "min", "(", "0", ",", "min_x", ")", "offset_y", "=", "-", "min", "(", "0", ",", "min_y", ")", "return", "{", "node", ":", "(", "offset_x", "+", "x", ",", "offset_y", "+", "y", ")", "for", "node", ",", "(", "x", ",", "y", ")", "in", "layout", ".", "items", "(", ")", "}" ]
Return {node: position} for given graph
[ "Return", "{", "node", ":", "position", "}", "for", "given", "graph" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/tergraw.py#L33-L49
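A minimal usage sketch for create_layout, assuming a Graphviz binary is available for the graphutils wrapper (a runtime requirement not visible in this record) and that the module is importable as shown:

import networkx as nx
from tergraw import tergraw  # import path assumed from the repository layout

graph = nx.Graph([("one", "two"), ("two", "three")])
layout = tergraw.create_layout(graph)  # {node: (x, y)}, Graphviz coords scaled down by 10
# The offset logic above guarantees non-negative coordinates:
assert all(x >= 0 and y >= 0 for x, y in layout.values())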
Aluriak/tergraw
tergraw/tergraw.py
pretty_view
def pretty_view(graph, oriented=False, construction=False, graphviz_prog=DEFAULT_GRAPHVIZ_PROG): """Yield strings, printable view of given graph""" layout = create_layout(graph, graphviz_prog=graphviz_prog) matrix_view = defaultdict(lambda: ' ') # Add the edge to the view # print('GRAPH EDGES:', tuple(graph.edges())) # print('LAYOUT:', layout) edges = ((layout[source], layout[target]) for source, target in graph.edges()) # print('EDGES:', tuple(edges)) for source, target in edges: previous_edge_char = None previous_position = source while source != target: for is_dir, transform, edge_char in DIRECTIONS: first_loop = previous_edge_char is None if is_dir(source, target): previous_source = source source = transform(source) if not first_loop: # first loop: no previous char char = CHARACTER[previous_edge_char, edge_char] if not isinstance(char, str): char = char.value matrix_view[previous_source] = char if construction: old = defaultdict(lambda: ' ', matrix_view) yield view.build(matrix_view) assert isinstance(matrix_view[previous_source], str) if source != target: previous_edge_char = edge_char previous_position = previous_source break # for loop ; don't test the remain directions if oriented: matrix_view[previous_position] = ORIENTATION[previous_edge_char, edge_char] if construction: yield view.build(matrix_view) # mark the place where nodes labels will be added # for node, coords in layout.items(): # matrix_view[coords] = node[0] # Add the node labels to the view # matrix_view = view.clean(matrix_view) for node, (x, y) in layout.items(): if len(node) == 1: matrix_view[x, y] = node continue row_min, row_max = (view.previous_unwrittable_on_row(matrix_view, (x, y)), view.next_unwrittable_on_row(matrix_view, (x, y))) col_min, col_max = (view.previous_unwrittable_on_col(matrix_view, (x, y)), view.next_unwrittable_on_col(matrix_view, (x, y))) # print('NODE ' + node + ':', # '→row: [{};{}]'.format(row_min, row_max).ljust(20), # '↓col: [{};{}]'.format(col_min, col_max)) print_coords = [itertools.count(x), itertools.cycle((y,))] if row_min is None: # write left to right, end at (x, y) if row_max is None or row_max > (x + len(node) / 2): # enough space at the right factor = 2 else: factor = 1 print_coords[0] = tuple( x - (len(node) // factor) + offset + 1 for offset in range(len(node)) ) # print('DEBUG 1:', y, len(node), print_coords[0]) elif row_max is None: # write left to right, beginning at (x, y) if row_min < (x - len(node) / 2): # enough space at the left factor = 1 else: factor = 0 print_coords[0] = tuple( x + offset - (len(node) // 2) * factor for offset in range(len(node)) ) # print('DEBUG 2:', print_coords[0]) elif (row_max - row_min) > len(node) + 1: # write left to right, if enough place print_coords[0] = tuple( x + offset for offset in range(len(node)) ) # print('DEBUG 3:', print_coords[0]) elif col_min is None: # write up to down, end at (x, y) if col_max is None or col_max > (x + len(node) / 2): # enough space at the right factor = 2 else: factor = 1 print_coords = (itertools.cycle((x,)), tuple( y - (len(node) // factor) + offset + 1 for offset in range(len(node)) )) # print('DEBUG 4:', y, len(node), print_coords[1]) elif col_max is None: # write up to down, beginning at (x, y) if col_min < (x - len(node) / 2): # enough space at the left factor = 1 else: factor = 0 print_coords = (itertools.cycle((x,)), tuple( y + offset - (len(node) // 2) * factor for offset in range(len(node)) )) # print('DEBUG 5:', print_coords[1]) elif (col_max - col_min) > len(node) + 1: # write up to down, if enough place print_coords = (itertools.cycle((x,)), tuple( y + offset for offset in range(len(node)) )) # print('DEBUG 6:', print_coords[1]) else: # not enough space if (row_max - row_min) > (col_max - col_min): # more space on Y axis node = node[:row_max - row_min] # cut the node print_coords = (itertools.cycle((x,)), tuple( x + offset for offset in range(len(node)) )) # print('DEBUG 7:', print_coords[1]) else: # more space on X axis node = node[:col_max - col_min] # cut the node print_coords[0] = tuple( x + offset for offset in range(len(node)) ) # print('DEBUG 8:', print_coords[0]) for letter, i, j in zip(node, *print_coords): matrix_view[i, j] = letter if construction: yield view.build(matrix_view) else: yield from view.build(matrix_view)
python
def pretty_view(graph, oriented=False, construction=False, graphviz_prog=DEFAULT_GRAPHVIZ_PROG): """Yield strings, printable view of given graph""" layout = create_layout(graph, graphviz_prog=graphviz_prog) matrix_view = defaultdict(lambda: ' ') # Add the edge to the view # print('GRAPH EDGES:', tuple(graph.edges())) # print('LAYOUT:', layout) edges = ((layout[source], layout[target]) for source, target in graph.edges()) # print('EDGES:', tuple(edges)) for source, target in edges: previous_edge_char = None previous_position = source while source != target: for is_dir, transform, edge_char in DIRECTIONS: first_loop = previous_edge_char is None if is_dir(source, target): previous_source = source source = transform(source) if not first_loop: # first loop: no previous char char = CHARACTER[previous_edge_char, edge_char] if not isinstance(char, str): char = char.value matrix_view[previous_source] = char if construction: old = defaultdict(lambda: ' ', matrix_view) yield view.build(matrix_view) assert isinstance(matrix_view[previous_source], str) if source != target: previous_edge_char = edge_char previous_position = previous_source break # for loop ; don't test the remain directions if oriented: matrix_view[previous_position] = ORIENTATION[previous_edge_char, edge_char] if construction: yield view.build(matrix_view) # mark the place where nodes labels will be added # for node, coords in layout.items(): # matrix_view[coords] = node[0] # Add the node labels to the view # matrix_view = view.clean(matrix_view) for node, (x, y) in layout.items(): if len(node) == 1: matrix_view[x, y] = node continue row_min, row_max = (view.previous_unwrittable_on_row(matrix_view, (x, y)), view.next_unwrittable_on_row(matrix_view, (x, y))) col_min, col_max = (view.previous_unwrittable_on_col(matrix_view, (x, y)), view.next_unwrittable_on_col(matrix_view, (x, y))) # print('NODE ' + node + ':', # '→row: [{};{}]'.format(row_min, row_max).ljust(20), # '↓col: [{};{}]'.format(col_min, col_max)) print_coords = [itertools.count(x), itertools.cycle((y,))] if row_min is None: # write left to right, end at (x, y) if row_max is None or row_max > (x + len(node) / 2): # enough space at the right factor = 2 else: factor = 1 print_coords[0] = tuple( x - (len(node) // factor) + offset + 1 for offset in range(len(node)) ) # print('DEBUG 1:', y, len(node), print_coords[0]) elif row_max is None: # write left to right, beginning at (x, y) if row_min < (x - len(node) / 2): # enough space at the left factor = 1 else: factor = 0 print_coords[0] = tuple( x + offset - (len(node) // 2) * factor for offset in range(len(node)) ) # print('DEBUG 2:', print_coords[0]) elif (row_max - row_min) > len(node) + 1: # write left to right, if enough place print_coords[0] = tuple( x + offset for offset in range(len(node)) ) # print('DEBUG 3:', print_coords[0]) elif col_min is None: # write up to down, end at (x, y) if col_max is None or col_max > (x + len(node) / 2): # enough space at the right factor = 2 else: factor = 1 print_coords = (itertools.cycle((x,)), tuple( y - (len(node) // factor) + offset + 1 for offset in range(len(node)) )) # print('DEBUG 4:', y, len(node), print_coords[1]) elif col_max is None: # write up to down, beginning at (x, y) if col_min < (x - len(node) / 2): # enough space at the left factor = 1 else: factor = 0 print_coords = (itertools.cycle((x,)), tuple( y + offset - (len(node) // 2) * factor for offset in range(len(node)) )) # print('DEBUG 5:', print_coords[1]) elif (col_max - col_min) > len(node) + 1: # write up to down, if enough place print_coords = (itertools.cycle((x,)), tuple( y + offset for offset in range(len(node)) )) # print('DEBUG 6:', print_coords[1]) else: # not enough space if (row_max - row_min) > (col_max - col_min): # more space on Y axis node = node[:row_max - row_min] # cut the node print_coords = (itertools.cycle((x,)), tuple( x + offset for offset in range(len(node)) )) # print('DEBUG 7:', print_coords[1]) else: # more space on X axis node = node[:col_max - col_min] # cut the node print_coords[0] = tuple( x + offset for offset in range(len(node)) ) # print('DEBUG 8:', print_coords[0]) for letter, i, j in zip(node, *print_coords): matrix_view[i, j] = letter if construction: yield view.build(matrix_view) else: yield from view.build(matrix_view)
[ "def", "pretty_view", "(", "graph", ",", "oriented", "=", "False", ",", "construction", "=", "False", ",", "graphviz_prog", "=", "DEFAULT_GRAPHVIZ_PROG", ")", ":", "layout", "=", "create_layout", "(", "graph", ",", "graphviz_prog", "=", "graphviz_prog", ")", "matrix_view", "=", "defaultdict", "(", "lambda", ":", "' '", ")", "# Add the edge to the view", "# print('GRAPH EDGES:', tuple(graph.edges()))", "# print('LAYOUT:', layout)", "edges", "=", "(", "(", "layout", "[", "source", "]", ",", "layout", "[", "target", "]", ")", "for", "source", ",", "target", "in", "graph", ".", "edges", "(", ")", ")", "# print('EDGES:', tuple(edges))", "for", "source", ",", "target", "in", "edges", ":", "previous_edge_char", "=", "None", "previous_position", "=", "source", "while", "source", "!=", "target", ":", "for", "is_dir", ",", "transform", ",", "edge_char", "in", "DIRECTIONS", ":", "first_loop", "=", "previous_edge_char", "is", "None", "if", "is_dir", "(", "source", ",", "target", ")", ":", "previous_source", "=", "source", "source", "=", "transform", "(", "source", ")", "if", "not", "first_loop", ":", "# first loop: no previous char", "char", "=", "CHARACTER", "[", "previous_edge_char", ",", "edge_char", "]", "if", "not", "isinstance", "(", "char", ",", "str", ")", ":", "char", "=", "char", ".", "value", "matrix_view", "[", "previous_source", "]", "=", "char", "if", "construction", ":", "old", "=", "defaultdict", "(", "lambda", ":", "' '", ",", "matrix_view", ")", "yield", "view", ".", "build", "(", "matrix_view", ")", "assert", "isinstance", "(", "matrix_view", "[", "previous_source", "]", ",", "str", ")", "if", "source", "!=", "target", ":", "previous_edge_char", "=", "edge_char", "previous_position", "=", "previous_source", "break", "# for loop ; don't test the remain directions", "if", "oriented", ":", "matrix_view", "[", "previous_position", "]", "=", "ORIENTATION", "[", "previous_edge_char", ",", "edge_char", "]", "if", "construction", ":", "yield", "view", ".", "build", "(", "matrix_view", ")", "# mark the place where nodes labels will be added", "# for node, coords in layout.items():", "# matrix_view[coords] = node[0]", "# Add the node labels to the view", "# matrix_view = view.clean(matrix_view)", "for", "node", ",", "(", "x", ",", "y", ")", "in", "layout", ".", "items", "(", ")", ":", "if", "len", "(", "node", ")", "==", "1", ":", "matrix_view", "[", "x", ",", "y", "]", "=", "node", "continue", "row_min", ",", "row_max", "=", "(", "view", ".", "previous_unwrittable_on_row", "(", "matrix_view", ",", "(", "x", ",", "y", ")", ")", ",", "view", ".", "next_unwrittable_on_row", "(", "matrix_view", ",", "(", "x", ",", "y", ")", ")", ")", "col_min", ",", "col_max", "=", "(", "view", ".", "previous_unwrittable_on_col", "(", "matrix_view", ",", "(", "x", ",", "y", ")", ")", ",", "view", ".", "next_unwrittable_on_col", "(", "matrix_view", ",", "(", "x", ",", "y", ")", ")", ")", "# print('NODE ' + node + ':',", "# '→row: [{};{}]'.format(row_min, row_max).ljust(20),", "# '↓col: [{};{}]'.format(col_min, col_max))", "print_coords", "=", "[", "itertools", ".", "count", "(", "x", ")", ",", "itertools", ".", "cycle", "(", "(", "y", ",", ")", ")", "]", "if", "row_min", "is", "None", ":", "# write left to right, end at (x, y)", "if", "row_max", "is", "None", "or", "row_max", ">", "(", "x", "+", "len", "(", "node", ")", "/", "2", ")", ":", "# enough space at the right", "factor", "=", "2", "else", ":", "factor", "=", "1", "print_coords", "[", "0", "]", "=", "tuple", "(", "x", "-", "(", "len", "(", "node", ")", 
"//", "factor", ")", "+", "offset", "+", "1", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", "# print('DEBUG 1:', y, len(node), print_coords[0])", "elif", "row_max", "is", "None", ":", "# write left to right, beginning at (x, y)", "if", "row_min", "<", "(", "x", "-", "len", "(", "node", ")", "/", "2", ")", ":", "# enough space at the left", "factor", "=", "1", "else", ":", "factor", "=", "0", "print_coords", "[", "0", "]", "=", "tuple", "(", "x", "+", "offset", "-", "(", "len", "(", "node", ")", "//", "2", ")", "*", "factor", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", "# print('DEBUG 2:', print_coords[0])", "elif", "(", "row_max", "-", "row_min", ")", ">", "len", "(", "node", ")", "+", "1", ":", "# write left to right, if enough place", "print_coords", "[", "0", "]", "=", "tuple", "(", "x", "+", "offset", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", "# print('DEBUG 3:', print_coords[0])", "elif", "col_min", "is", "None", ":", "# write up to down, end at (x, y)", "if", "col_max", "is", "None", "or", "col_max", ">", "(", "x", "+", "len", "(", "node", ")", "/", "2", ")", ":", "# enough space at the right", "factor", "=", "2", "else", ":", "factor", "=", "1", "print_coords", "=", "(", "itertools", ".", "cycle", "(", "(", "x", ",", ")", ")", ",", "tuple", "(", "y", "-", "(", "len", "(", "node", ")", "//", "factor", ")", "+", "offset", "+", "1", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", ")", "# print('DEBUG 4:', y, len(node), print_coords[1])", "elif", "col_max", "is", "None", ":", "# write up to down, beginning at (x, y)", "if", "col_min", "<", "(", "x", "-", "len", "(", "node", ")", "/", "2", ")", ":", "# enough space at the left", "factor", "=", "1", "else", ":", "factor", "=", "0", "print_coords", "=", "(", "itertools", ".", "cycle", "(", "(", "x", ",", ")", ")", ",", "tuple", "(", "y", "+", "offset", "-", "(", "len", "(", "node", ")", "//", "2", ")", "*", "factor", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", ")", "# print('DEBUG 5:', print_coords[1])", "elif", "(", "col_max", "-", "col_min", ")", ">", "len", "(", "node", ")", "+", "1", ":", "# write up to down, if enough place", "print_coords", "=", "(", "itertools", ".", "cycle", "(", "(", "x", ",", ")", ")", ",", "tuple", "(", "y", "+", "offset", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", ")", "# print('DEBUG 6:', print_coords[1])", "else", ":", "# not enough space", "if", "(", "row_max", "-", "row_min", ")", ">", "(", "col_max", "-", "col_min", ")", ":", "# more space on Y axis", "node", "=", "node", "[", ":", "row_max", "-", "row_min", "]", "# cut the node", "print_coords", "=", "(", "itertools", ".", "cycle", "(", "(", "x", ",", ")", ")", ",", "tuple", "(", "x", "+", "offset", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", ")", "# print('DEBUG 7:', print_coords[1])", "else", ":", "# more space on X axis", "node", "=", "node", "[", ":", "col_max", "-", "col_min", "]", "# cut the node", "print_coords", "[", "0", "]", "=", "tuple", "(", "x", "+", "offset", "for", "offset", "in", "range", "(", "len", "(", "node", ")", ")", ")", "# print('DEBUG 8:', print_coords[0])", "for", "letter", ",", "i", ",", "j", "in", "zip", "(", "node", ",", "*", "print_coords", ")", ":", "matrix_view", "[", "i", ",", "j", "]", "=", "letter", "if", "construction", ":", "yield", "view", ".", "build", "(", "matrix_view", ")", "else", ":", "yield", "from", "view", ".", "build", 
"(", "matrix_view", ")" ]
Yield strings, printable view of given graph
[ "Yield", "strings", "printable", "view", "of", "given", "graph" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/tergraw.py#L53-L193
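A companion sketch for pretty_view, continuing the create_layout example above with the same import assumption; by default the generator yields the finished drawing line by line, while construction=True instead yields a sequence of intermediate views:

for line in tergraw.pretty_view(graph, oriented=True):
    print(line)  # one row of the ASCII rendering per iteration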
jasonkeene/python-ubersmith
ubersmith/calls/__init__.py
_get_call_class
def _get_call_class(method): """Find the call class for method if it exists else create one.""" call_base, call_name = method.split('.', 1) # import the call class's module mod = __import__('ubersmith.calls.{0}'.format(call_base), fromlist=['']) # grab all the public members of the module gen = (getattr(mod, x) for x in dir(mod) if not x.startswith('_')) # filter them down to subclasses of BaseCall gen = (x for x in gen if type(x) is type and issubclass(x, BaseCall)) # return first one that matches our method for call_class in gen: if call_class.method == method: return call_class else: class GenericCall(BaseCall): method = '.'.join((call_base, call_name)) return GenericCall
python
def _get_call_class(method): """Find the call class for method if it exists else create one.""" call_base, call_name = method.split('.', 1) # import the call class's module mod = __import__('ubersmith.calls.{0}'.format(call_base), fromlist=['']) # grab all the public members of the module gen = (getattr(mod, x) for x in dir(mod) if not x.startswith('_')) # filter them down to subclasses of BaseCall gen = (x for x in gen if type(x) is type and issubclass(x, BaseCall)) # return first one that matches our method for call_class in gen: if call_class.method == method: return call_class else: class GenericCall(BaseCall): method = '.'.join((call_base, call_name)) return GenericCall
[ "def", "_get_call_class", "(", "method", ")", ":", "call_base", ",", "call_name", "=", "method", ".", "split", "(", "'.'", ",", "1", ")", "# import the call class's module", "mod", "=", "__import__", "(", "'ubersmith.calls.{0}'", ".", "format", "(", "call_base", ")", ",", "fromlist", "=", "[", "''", "]", ")", "# grab all the public members of the module", "gen", "=", "(", "getattr", "(", "mod", ",", "x", ")", "for", "x", "in", "dir", "(", "mod", ")", "if", "not", "x", ".", "startswith", "(", "'_'", ")", ")", "# filter them down to subclasses of BaseCall", "gen", "=", "(", "x", "for", "x", "in", "gen", "if", "type", "(", "x", ")", "is", "type", "and", "issubclass", "(", "x", ",", "BaseCall", ")", ")", "# return first one that matches our method", "for", "call_class", "in", "gen", ":", "if", "call_class", ".", "method", "==", "method", ":", "return", "call_class", "else", ":", "class", "GenericCall", "(", "BaseCall", ")", ":", "method", "=", "'.'", ".", "join", "(", "(", "call_base", ",", "call_name", ")", ")", "return", "GenericCall" ]
Find the call class for method if it exists else create one.
[ "Find", "the", "call", "class", "for", "method", "if", "it", "exists", "else", "create", "one", "." ]
train
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/calls/__init__.py#L85-L101
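The for/else at the end of _get_call_class is the subtle part: the else suite runs only when the loop finishes without returning a match, so GenericCall is built purely as a fallback. The same idiom in a self-contained sketch with invented names:

def find_handler(method, handlers):
    """Return the first handler whose .method matches, else fall back to None."""
    for handler in handlers:
        if handler.method == method:
            return handler
    else:
        # Reached only when the loop ran to completion with no match.
        return None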
jasonkeene/python-ubersmith
ubersmith/calls/__init__.py
BaseCall.render
def render(self): """Validate, process, clean and return the result of the call.""" if not self.validate(): raise ValidationError self.process_request() self.clean() return self.response
python
def render(self): """Validate, process, clean and return the result of the call.""" if not self.validate(): raise ValidationError self.process_request() self.clean() return self.response
[ "def", "render", "(", "self", ")", ":", "if", "not", "self", ".", "validate", "(", ")", ":", "raise", "ValidationError", "self", ".", "process_request", "(", ")", "self", ".", "clean", "(", ")", "return", "self", ".", "response" ]
Validate, process, clean and return the result of the call.
[ "Validate", "process", "clean", "and", "return", "the", "result", "of", "the", "call", "." ]
train
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/calls/__init__.py#L44-L52
jasonkeene/python-ubersmith
ubersmith/calls/__init__.py
BaseCall.validate
def validate(self): """Validate request data before sending it out. Return True/False.""" # check if required_fields aren't present for field in set(self.required_fields) - set(self.request_data): if not isinstance(field, string_types): # field was a collection, iterate over it and check by OR return bool(set(field) & set(self.request_data)) return False return True
python
def validate(self): """Validate request data before sending it out. Return True/False.""" # check if required_fields aren't present for field in set(self.required_fields) - set(self.request_data): if not isinstance(field, string_types): # field was a collection, iterate over it and check by OR return bool(set(field) & set(self.request_data)) return False return True
[ "def", "validate", "(", "self", ")", ":", "# check if required_fields aren't present", "for", "field", "in", "set", "(", "self", ".", "required_fields", ")", "-", "set", "(", "self", ".", "request_data", ")", ":", "if", "not", "isinstance", "(", "field", ",", "string_types", ")", ":", "# field was a collection, iterate over it and check by OR", "return", "bool", "(", "set", "(", "field", ")", "&", "set", "(", "self", ".", "request_data", ")", ")", "return", "False", "return", "True" ]
Validate request data before sending it out. Return True/False.
[ "Validate", "request", "data", "before", "sending", "it", "out", ".", "Return", "True", "/", "False", "." ]
train
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/calls/__init__.py#L54-L62
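The required_fields convention deserves a worked example: a plain string is mandatory, while a tuple means "at least one of these". Note also that the loop returns as soon as it reaches a missing collection, so any other missing fields left in the (unordered) set difference are not reported. The field names below are invented; the set logic is the method's own:

required_fields = ["client_id", ("user_login", "email")]
request_data = {"client_id": 42, "email": "a@b.c"}

missing = set(required_fields) - set(request_data)  # {("user_login", "email")}
# The tuple is an OR-group, satisfied here because 'email' is present:
print(bool(set(("user_login", "email")) & set(request_data)))  # True -> validates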
jasonkeene/python-ubersmith
ubersmith/calls/__init__.py
BaseCall.process_request
def process_request(self): """Process the call and set response_data.""" self.response = self.request_handler.process_request( self.method, self.request_data)
python
def process_request(self): """Process the call and set response_data.""" self.response = self.request_handler.process_request( self.method, self.request_data)
[ "def", "process_request", "(", "self", ")", ":", "self", ".", "response", "=", "self", ".", "request_handler", ".", "process_request", "(", "self", ".", "method", ",", "self", ".", "request_data", ")" ]
Process the call and set response_data.
[ "Process", "the", "call", "and", "set", "response_data", "." ]
train
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/calls/__init__.py#L64-L67
jasonkeene/python-ubersmith
ubersmith/calls/__init__.py
BaseCall.clean
def clean(self): """Clean response.""" if self.response.type == 'application/json': cleaned = copy.deepcopy(self.response.data) if self.cleaner is not None: cleaned = self.cleaner(cleaned) typed_response = { dict: DictResponse, int: IntResponse, }.get(type(cleaned), BaseResponse) self.response = typed_response.from_cleaned(self.response, cleaned) else: self.response = FileResponse(self.response.response)
python
def clean(self): """Clean response.""" if self.response.type == 'application/json': cleaned = copy.deepcopy(self.response.data) if self.cleaner is not None: cleaned = self.cleaner(cleaned) typed_response = { dict: DictResponse, int: IntResponse, }.get(type(cleaned), BaseResponse) self.response = typed_response.from_cleaned(self.response, cleaned) else: self.response = FileResponse(self.response.response)
[ "def", "clean", "(", "self", ")", ":", "if", "self", ".", "response", ".", "type", "==", "'application/json'", ":", "cleaned", "=", "copy", ".", "deepcopy", "(", "self", ".", "response", ".", "data", ")", "if", "self", ".", "cleaner", "is", "not", "None", ":", "cleaned", "=", "self", ".", "cleaner", "(", "cleaned", ")", "typed_response", "=", "{", "dict", ":", "DictResponse", ",", "int", ":", "IntResponse", ",", "}", ".", "get", "(", "type", "(", "cleaned", ")", ",", "BaseResponse", ")", "self", ".", "response", "=", "typed_response", ".", "from_cleaned", "(", "self", ".", "response", ",", "cleaned", ")", "else", ":", "self", ".", "response", "=", "FileResponse", "(", "self", ".", "response", ".", "response", ")" ]
Clean response.
[ "Clean", "response", "." ]
train
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/calls/__init__.py#L69-L82
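The dict-based dispatch on type(cleaned) can be read in isolation; a tiny sketch with placeholder classes (DictResponse and friends are imported elsewhere in the module). Because the lookup uses the exact type, subclasses of dict or int fall through to the default:

class BaseR: pass          # placeholders standing in for the real response types
class DictR(BaseR): pass
class IntR(BaseR): pass

def pick_response_type(cleaned):
    # Exact-type lookup with a default, mirroring clean() above.
    return {dict: DictR, int: IntR}.get(type(cleaned), BaseR)

print(pick_response_type({"a": 1}).__name__)  # DictR
print(pick_response_type("plain").__name__)   # BaseR (fallback)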
jasonkeene/python-ubersmith
ubersmith/compat.py
total_ordering
def total_ordering(cls): # pragma: no cover """Class decorator that fills in missing ordering methods""" convert = { '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], '__le__': [('__ge__', lambda self, other: not self <= other or self == other), ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)] } roots = set(dir(cls)) & set(convert) if not roots: raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ for opname, opfunc in convert[root]: if opname not in roots: opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls
python
def total_ordering(cls): # pragma: no cover """Class decorator that fills in missing ordering methods""" convert = { '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], '__le__': [('__ge__', lambda self, other: not self <= other or self == other), ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)] } roots = set(dir(cls)) & set(convert) if not roots: raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ for opname, opfunc in convert[root]: if opname not in roots: opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls
[ "def", "total_ordering", "(", "cls", ")", ":", "# pragma: no cover", "convert", "=", "{", "'__lt__'", ":", "[", "(", "'__gt__'", ",", "lambda", "self", ",", "other", ":", "not", "(", "self", "<", "other", "or", "self", "==", "other", ")", ")", ",", "(", "'__le__'", ",", "lambda", "self", ",", "other", ":", "self", "<", "other", "or", "self", "==", "other", ")", ",", "(", "'__ge__'", ",", "lambda", "self", ",", "other", ":", "not", "self", "<", "other", ")", "]", ",", "'__le__'", ":", "[", "(", "'__ge__'", ",", "lambda", "self", ",", "other", ":", "not", "self", "<=", "other", "or", "self", "==", "other", ")", ",", "(", "'__lt__'", ",", "lambda", "self", ",", "other", ":", "self", "<=", "other", "and", "not", "self", "==", "other", ")", ",", "(", "'__gt__'", ",", "lambda", "self", ",", "other", ":", "not", "self", "<=", "other", ")", "]", ",", "'__gt__'", ":", "[", "(", "'__lt__'", ",", "lambda", "self", ",", "other", ":", "not", "(", "self", ">", "other", "or", "self", "==", "other", ")", ")", ",", "(", "'__ge__'", ",", "lambda", "self", ",", "other", ":", "self", ">", "other", "or", "self", "==", "other", ")", ",", "(", "'__le__'", ",", "lambda", "self", ",", "other", ":", "not", "self", ">", "other", ")", "]", ",", "'__ge__'", ":", "[", "(", "'__le__'", ",", "lambda", "self", ",", "other", ":", "(", "not", "self", ">=", "other", ")", "or", "self", "==", "other", ")", ",", "(", "'__gt__'", ",", "lambda", "self", ",", "other", ":", "self", ">=", "other", "and", "not", "self", "==", "other", ")", ",", "(", "'__lt__'", ",", "lambda", "self", ",", "other", ":", "not", "self", ">=", "other", ")", "]", "}", "roots", "=", "set", "(", "dir", "(", "cls", ")", ")", "&", "set", "(", "convert", ")", "if", "not", "roots", ":", "raise", "ValueError", "(", "'must define at least one ordering operation: < > <= >='", ")", "root", "=", "max", "(", "roots", ")", "# prefer __lt__ to __le__ to __gt__ to __ge__", "for", "opname", ",", "opfunc", "in", "convert", "[", "root", "]", ":", "if", "opname", "not", "in", "roots", ":", "opfunc", ".", "__name__", "=", "opname", "opfunc", ".", "__doc__", "=", "getattr", "(", "int", ",", "opname", ")", ".", "__doc__", "setattr", "(", "cls", ",", "opname", ",", "opfunc", ")", "return", "cls" ]
Class decorator that fills in missing ordering methods
[ "Class", "decorator", "that", "fills", "in", "missing", "ordering", "methods" ]
train
https://github.com/jasonkeene/python-ubersmith/blob/0c594e2eb41066d1fe7860e3a6f04b14c14f6e6a/ubersmith/compat.py#L6-L31
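This backport predates functools.total_ordering and appears to target Python 2: on Python 3, dir(cls) picks up object's default comparison methods, so every name lands in roots and nothing is filled in. The sketch below therefore uses the stdlib decorator, which behaves the same way for this case:

from functools import total_ordering  # stdlib equivalent of the backport above

@total_ordering
class Version:  # invented example class
    def __init__(self, n):
        self.n = n
    def __eq__(self, other):
        return self.n == other.n
    def __lt__(self, other):  # the single "root" comparison we supply
        return self.n < other.n

# __le__, __gt__ and __ge__ are synthesized from __lt__ and __eq__:
print(Version(1) <= Version(2))  # True
print(Version(3) > Version(2))   # True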
guillermo-carrasco/bcbio-nextgen-monitor
bcbio_monitor/log/__init__.py
init_logger_file
def init_logger_file(log_file, log_level='INFO'): """ Append a FileHandler to the root logger. :param str log_file: Path to the log file :param str log_level: Logging level """ log_level = LOG_LEVELS[log_level] if log_level in LOG_LEVELS.keys() else logging.INFO ROOT_LOG.setLevel(log_level) file_handle = logging.FileHandler(log_file) file_handle.setLevel(log_level) file_handle.setFormatter(formatter) ROOT_LOG.addHandler(file_handle)
python
def init_logger_file(log_file, log_level='INFO'): """ Append a FileHandler to the root logger. :param str log_file: Path to the log file :param str log_level: Logging level """ log_level = LOG_LEVELS[log_level] if log_level in LOG_LEVELS.keys() else logging.INFO ROOT_LOG.setLevel(log_level) file_handle = logging.FileHandler(log_file) file_handle.setLevel(log_level) file_handle.setFormatter(formatter) ROOT_LOG.addHandler(file_handle)
[ "def", "init_logger_file", "(", "log_file", ",", "log_level", "=", "'INFO'", ")", ":", "log_level", "=", "LOG_LEVELS", "[", "log_level", "]", "if", "log_level", "in", "LOG_LEVELS", ".", "keys", "(", ")", "else", "logging", ".", "INFO", "ROOT_LOG", ".", "setLevel", "(", "log_level", ")", "file_handle", "=", "logging", ".", "FileHandler", "(", "log_file", ")", "file_handle", ".", "setLevel", "(", "log_level", ")", "file_handle", ".", "setFormatter", "(", "formatter", ")", "ROOT_LOG", ".", "addHandler", "(", "file_handle", ")" ]
Append a FileHandler to the root logger. :param str log_file: Path to the log file :param str log_level: Logging level
[ "Append", "a", "FileHandler", "to", "the", "root", "logger", ".", ":", "param", "str", "log_file", ":", "Path", "to", "the", "log", "file", ":", "param", "str", "log_level", ":", "Logging", "level" ]
train
https://github.com/guillermo-carrasco/bcbio-nextgen-monitor/blob/6d059154d774140e1fd03a0e3625f607cef06f5a/bcbio_monitor/log/__init__.py#L24-L36
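A usage sketch; LOG_LEVELS, ROOT_LOG and formatter are module-level names defined outside this record, so only the call signature below is taken from it:

import logging
from bcbio_monitor.log import init_logger_file  # path matches this record

init_logger_file("/tmp/bcbio_monitor.log", log_level="DEBUG")
logging.getLogger(__name__).debug("now also written to the file handler")
# An unrecognized level name silently falls back to INFO instead of raising:
init_logger_file("/tmp/bcbio_monitor_2.log", log_level="VERBOSE")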
blockstack-packages/blockstack-auth-python
blockchainauth/keys.py
load_signing_key
def load_signing_key(signing_key, crypto_backend=default_backend()): """ Optional: crypto backend object from the "cryptography" python library """ if not isinstance(crypto_backend, (Backend, MultiBackend)): raise ValueError('backend must be a valid Backend object') if isinstance(signing_key, EllipticCurvePrivateKey): return signing_key elif isinstance(signing_key, (str, unicode)): invalid_strings = [b'-----BEGIN PUBLIC KEY-----'] invalid_string_matches = [ string_value in signing_key for string_value in invalid_strings ] if any(invalid_string_matches): raise ValueError( 'Signing key must be a private key, not a public key.') try: return load_der_private_key( signing_key, password=None, backend=crypto_backend) except: try: return load_pem_private_key( signing_key, password=None, backend=crypto_backend) except Exception: raise ValueError( 'Signing key must be a valid private key PEM or DER.') else: raise ValueError('Signing key must be in string or unicode format.')
python
def load_signing_key(signing_key, crypto_backend=default_backend()): """ Optional: crypto backend object from the "cryptography" python library """ if not isinstance(crypto_backend, (Backend, MultiBackend)): raise ValueError('backend must be a valid Backend object') if isinstance(signing_key, EllipticCurvePrivateKey): return signing_key elif isinstance(signing_key, (str, unicode)): invalid_strings = [b'-----BEGIN PUBLIC KEY-----'] invalid_string_matches = [ string_value in signing_key for string_value in invalid_strings ] if any(invalid_string_matches): raise ValueError( 'Signing key must be a private key, not a public key.') try: return load_der_private_key( signing_key, password=None, backend=crypto_backend) except: try: return load_pem_private_key( signing_key, password=None, backend=crypto_backend) except Exception: raise ValueError( 'Signing key must be a valid private key PEM or DER.') else: raise ValueError('Signing key must be in string or unicode format.')
[ "def", "load_signing_key", "(", "signing_key", ",", "crypto_backend", "=", "default_backend", "(", ")", ")", ":", "if", "not", "isinstance", "(", "crypto_backend", ",", "(", "Backend", ",", "MultiBackend", ")", ")", ":", "raise", "ValueError", "(", "'backend must be a valid Backend object'", ")", "if", "isinstance", "(", "signing_key", ",", "EllipticCurvePrivateKey", ")", ":", "return", "signing_key", "elif", "isinstance", "(", "signing_key", ",", "(", "str", ",", "unicode", ")", ")", ":", "invalid_strings", "=", "[", "b'-----BEGIN PUBLIC KEY-----'", "]", "invalid_string_matches", "=", "[", "string_value", "in", "signing_key", "for", "string_value", "in", "invalid_strings", "]", "if", "any", "(", "invalid_string_matches", ")", ":", "raise", "ValueError", "(", "'Signing key must be a private key, not a public key.'", ")", "try", ":", "return", "load_der_private_key", "(", "signing_key", ",", "password", "=", "None", ",", "backend", "=", "crypto_backend", ")", "except", ":", "try", ":", "return", "load_pem_private_key", "(", "signing_key", ",", "password", "=", "None", ",", "backend", "=", "crypto_backend", ")", "except", "Exception", ":", "raise", "ValueError", "(", "'Signing key must be a valid private key PEM or DER.'", ")", "else", ":", "raise", "ValueError", "(", "'Signing key must be in string or unicode format.'", ")" ]
Optional: crypto backend object from the "cryptography" python library
[ "Optional", ":", "crypto", "backend", "object", "from", "the", "cryptography", "python", "library" ]
train
https://github.com/blockstack-packages/blockstack-auth-python/blob/24f1707fbb31d1dcd8c327d232027b15ffd66135/blockchainauth/keys.py#L21-L50
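A runnable sketch using the cryptography package directly. The str/unicode check marks this as Python 2-era code, where PEM bytes pass the isinstance test; the key generation below is standard cryptography API, not part of the record:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

private_key = ec.generate_private_key(ec.SECP256K1(), default_backend())
pem = private_key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
signing_key = load_signing_key(pem)  # accepts PEM/DER bytes or a key object
# Passing a public key PEM instead raises ValueError, per the guard above.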
blockstack-packages/blockstack-auth-python
blockchainauth/keys.py
load_verifying_key
def load_verifying_key(verifying_key, crypto_backend=default_backend()): """ Optional: crypto backend object from the "cryptography" python library """ if not isinstance(crypto_backend, (Backend, MultiBackend)): raise ValueError('backend must be a valid Backend object') if isinstance(verifying_key, EllipticCurvePublicKey): return verifying_key elif isinstance(verifying_key, (str, unicode)): try: return load_der_public_key( verifying_key, backend=crypto_backend) except: try: return load_pem_public_key( verifying_key, backend=crypto_backend) except Exception: raise ValueError('Invalid verifying key format') else: raise ValueError('Invalid verification key type')
python
def load_verifying_key(verifying_key, crypto_backend=default_backend()): """ Optional: crypto backend object from the "cryptography" python library """ if not isinstance(crypto_backend, (Backend, MultiBackend)): raise ValueError('backend must be a valid Backend object') if isinstance(verifying_key, EllipticCurvePublicKey): return verifying_key elif isinstance(verifying_key, (str, unicode)): try: return load_der_public_key( verifying_key, backend=crypto_backend) except: try: return load_pem_public_key( verifying_key, backend=crypto_backend) except Exception: raise ValueError('Invalid verifying key format') else: raise ValueError('Invalid verification key type')
[ "def", "load_verifying_key", "(", "verifying_key", ",", "crypto_backend", "=", "default_backend", "(", ")", ")", ":", "if", "not", "isinstance", "(", "crypto_backend", ",", "(", "Backend", ",", "MultiBackend", ")", ")", ":", "raise", "ValueError", "(", "'backend must be a valid Backend object'", ")", "if", "isinstance", "(", "verifying_key", ",", "EllipticCurvePublicKey", ")", ":", "return", "verifying_key", "elif", "isinstance", "(", "verifying_key", ",", "(", "str", ",", "unicode", ")", ")", ":", "try", ":", "return", "load_der_public_key", "(", "verifying_key", ",", "backend", "=", "crypto_backend", ")", "except", ":", "try", ":", "return", "load_pem_public_key", "(", "verifying_key", ",", "backend", "=", "crypto_backend", ")", "except", "Exception", ":", "raise", "ValueError", "(", "'Invalid verifying key format'", ")", "else", ":", "raise", "ValueError", "(", "'Invalid verification key type'", ")" ]
Optional: crypto backend object from the "cryptography" python library
[ "Optional", ":", "crypto", "backend", "object", "from", "the", "cryptography", "python", "library" ]
train
https://github.com/blockstack-packages/blockstack-auth-python/blob/24f1707fbb31d1dcd8c327d232027b15ffd66135/blockchainauth/keys.py#L53-L72
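And the mirror image for public keys, continuing the sketch above:

public_pem = private_key.public_key().public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
verifying_key = load_verifying_key(public_pem)  # DER bytes or key objects also work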
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient._format_notes
def _format_notes(self, record): """ Extracts notes from a record and reformats them in a simplified format. """ notes = [] if "notes" in record: for note in record["notes"]: self._append_note_dict_to_list(notes, "general", note) if "language_and_script_notes" in record: self._append_note_dict_to_list( notes, "language_and_script", record["language_and_script_notes"] ) if "publication_notes" in record: self._append_note_dict_to_list( notes, "publication_notes", record["publication_notes"] ) if "physical_characteristics_and_technical_requirements" in record: self._append_note_dict_to_list( notes, "physical_condition", record["physical_characteristics_and_technical_requirements"], ) return notes
python
def _format_notes(self, record): """ Extracts notes from a record and reformats them in a simplified format. """ notes = [] if "notes" in record: for note in record["notes"]: self._append_note_dict_to_list(notes, "general", note) if "language_and_script_notes" in record: self._append_note_dict_to_list( notes, "language_and_script", record["language_and_script_notes"] ) if "publication_notes" in record: self._append_note_dict_to_list( notes, "publication_notes", record["publication_notes"] ) if "physical_characteristics_and_technical_requirements" in record: self._append_note_dict_to_list( notes, "physical_condition", record["physical_characteristics_and_technical_requirements"], ) return notes
[ "def", "_format_notes", "(", "self", ",", "record", ")", ":", "notes", "=", "[", "]", "if", "\"notes\"", "in", "record", ":", "for", "note", "in", "record", "[", "\"notes\"", "]", ":", "self", ".", "_append_note_dict_to_list", "(", "notes", ",", "\"general\"", ",", "note", ")", "if", "\"language_and_script_notes\"", "in", "record", ":", "self", ".", "_append_note_dict_to_list", "(", "notes", ",", "\"language_and_script\"", ",", "record", "[", "\"language_and_script_notes\"", "]", ")", "if", "\"publication_notes\"", "in", "record", ":", "self", ".", "_append_note_dict_to_list", "(", "notes", ",", "\"publication_notes\"", ",", "record", "[", "\"publication_notes\"", "]", ")", "if", "\"physical_characteristics_and_technical_requirements\"", "in", "record", ":", "self", ".", "_append_note_dict_to_list", "(", "notes", ",", "\"physical_condition\"", ",", "record", "[", "\"physical_characteristics_and_technical_requirements\"", "]", ",", ")", "return", "notes" ]
Extracts notes from a record and reformats them in a simplified format.
[ "Extracts", "notes", "from", "a", "record", "and", "reformats", "them", "in", "a", "simplified", "format", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L114-L141
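The helper _append_note_dict_to_list is not included in this record, so the output shape below is an assumption (a list of {'type': ..., 'content': ...} dicts would be the natural reading); only the input keys are taken from the code:

record = {
    "notes": [{"content": "General note."}],
    "publication_notes": "Published in 1999.",
}
notes = client._format_notes(record)  # client: an AtomClient instance (setup not shown)
# Assumed result, depending on _append_note_dict_to_list (not shown):
# [{'type': 'general', ...}, {'type': 'publication_notes', ...}]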
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient._escape_lucene_query
def _escape_lucene_query(query, field=None): """ Escapes special characters in Solr queries. Note that this omits * - this is intentionally permitted in user queries. The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters """ replacement = r"\\\1" return re.sub(r'([\'" +\-!\(\)\{\}\[\]^"~?:\\/]|&&|\|\|)', replacement, query)
python
def _escape_lucene_query(query, field=None): """ Escapes special characters in Solr queries. Note that this omits * - this is intentionally permitted in user queries. The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters """ replacement = r"\\\1" return re.sub(r'([\'" +\-!\(\)\{\}\[\]^"~?:\\/]|&&|\|\|)', replacement, query)
[ "def", "_escape_lucene_query", "(", "query", ",", "field", "=", "None", ")", ":", "replacement", "=", "r\"\\\\\\1\"", "return", "re", ".", "sub", "(", "r'([\\'\" +\\-!\\(\\)\\{\\}\\[\\]^\"~?:\\\\/]|&&|\\|\\|)'", ",", "replacement", ",", "query", ")" ]
Escapes special characters in Solr queries. Note that this omits * - this is intentionally permitted in user queries. The list of special characters is located at http://lucene.apache.org/core/4_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters
[ "Escapes", "special", "characters", "in", "Solr", "queries", ".", "Note", "that", "this", "omits", "*", "-", "this", "is", "intentionally", "permitted", "in", "user", "queries", ".", "The", "list", "of", "special", "characters", "is", "located", "at", "http", ":", "//", "lucene", ".", "apache", ".", "org", "/", "core", "/", "4_0_0", "/", "queryparser", "/", "org", "/", "apache", "/", "lucene", "/", "queryparser", "/", "classic", "/", "package", "-", "summary", ".", "html#Escaping_Special_Characters" ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L151-L158
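Because the record above contains the complete function, it can be exercised standalone; the outputs follow directly from the regex (note the deliberately unescaped asterisk):

import re

def _escape_lucene_query(query, field=None):
    replacement = r"\\\1"
    return re.sub(r'([\'" +\-!\(\)\{\}\[\]^"~?:\\/]|&&|\|\|)', replacement, query)

print(_escape_lucene_query('F-1 && "draft"'))  # F\-1\ \&&\ \"draft\"
print(_escape_lucene_query("F*"))              # F* -- the wildcard passes through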
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.edit_record
def edit_record(self, new_record): """ Update a record in AtoM using the provided new_record. The format of new_record is identical to the format returned by get_resource_component_and_children and related methods; consult the documentation for that method in ArchivistsToolkitClient to see the format. This means it's possible, for example, to request a record, modify the returned dict, and pass that dict to this method to update the server. Currently supported fields are: * title * notes * start_date * end_date * date_expression :raises ValueError: if the 'slug' field isn't specified, or no fields to edit were specified. """ try: record_id = new_record["slug"] except KeyError: raise ValueError("No slug provided!") record = self.get_record(record_id) field_map = {"title": "title", "level": "levelOfDescription"} fields_updated = False for field, targetfield in field_map.items(): try: record[targetfield] = new_record[field] fields_updated = True except KeyError: continue # Optionally add notes if "notes" in new_record and new_record["notes"]: note = new_record["notes"][0] new_note = {"content": note["content"], "type": note["type"]} # This only supports editing a single note, and a single piece of content # within that note. # If the record already has at least one note, then replace the first note # within that record with this one. if "notes" not in record or record["notes"] == []: record["notes"] = [new_note] else: record["notes"][0] = new_note fields_updated = True else: # Remove existing notes if the record didn't have a valid note; # a note with an empty string as content should be counted as # a request to delete the note. record["notes"] = [] # Update date updated_date = {} # Only single dates are currently supported if "dates" in new_record and type(new_record["dates"]) is list: new_record["dates"] = new_record["dates"][0] # Map agentarchives date specification to AtoM specification date_mapping = { "start_date": "start_date", # 'begin': 'start_date', "end_date": "end_date", # 'end': 'end_date', "date_expression": "date", } for date_field in date_mapping: if date_field in new_record: updated_date[date_mapping[date_field]] = new_record[date_field] # Add updated date specification to record update if updated_date != {}: record["dates"] = [updated_date] fields_updated = True if not fields_updated: raise ValueError("No fields to update specified!") self._put( urljoin(self.base_url, "informationobjects/{}".format(record_id)), data=json.dumps(record), )
python
def edit_record(self, new_record): """ Update a record in AtoM using the provided new_record. The format of new_record is identical to the format returned by get_resource_component_and_children and related methods; consult the documentation for that method in ArchivistsToolkitClient to see the format. This means it's possible, for example, to request a record, modify the returned dict, and pass that dict to this method to update the server. Currently supported fields are: * title * notes * start_date * end_date * date_expression :raises ValueError: if the 'slug' field isn't specified, or no fields to edit were specified. """ try: record_id = new_record["slug"] except KeyError: raise ValueError("No slug provided!") record = self.get_record(record_id) field_map = {"title": "title", "level": "levelOfDescription"} fields_updated = False for field, targetfield in field_map.items(): try: record[targetfield] = new_record[field] fields_updated = True except KeyError: continue # Optionally add notes if "notes" in new_record and new_record["notes"]: note = new_record["notes"][0] new_note = {"content": note["content"], "type": note["type"]} # This only supports editing a single note, and a single piece of content # within that note. # If the record already has at least one note, then replace the first note # within that record with this one. if "notes" not in record or record["notes"] == []: record["notes"] = [new_note] else: record["notes"][0] = new_note fields_updated = True else: # Remove existing notes if the record didn't have a valid note; # a note with an empty string as content should be counted as # a request to delete the note. record["notes"] = [] # Update date updated_date = {} # Only single dates are currently supported if "dates" in new_record and type(new_record["dates"]) is list: new_record["dates"] = new_record["dates"][0] # Map agentarchives date specification to AtoM specification date_mapping = { "start_date": "start_date", # 'begin': 'start_date', "end_date": "end_date", # 'end': 'end_date', "date_expression": "date", } for date_field in date_mapping: if date_field in new_record: updated_date[date_mapping[date_field]] = new_record[date_field] # Add updated date specification to record update if updated_date != {}: record["dates"] = [updated_date] fields_updated = True if not fields_updated: raise ValueError("No fields to update specified!") self._put( urljoin(self.base_url, "informationobjects/{}".format(record_id)), data=json.dumps(record), )
[ "def", "edit_record", "(", "self", ",", "new_record", ")", ":", "try", ":", "record_id", "=", "new_record", "[", "\"slug\"", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"No slug provided!\"", ")", "record", "=", "self", ".", "get_record", "(", "record_id", ")", "field_map", "=", "{", "\"title\"", ":", "\"title\"", ",", "\"level\"", ":", "\"levelOfDescription\"", "}", "fields_updated", "=", "False", "for", "field", ",", "targetfield", "in", "field_map", ".", "items", "(", ")", ":", "try", ":", "record", "[", "targetfield", "]", "=", "new_record", "[", "field", "]", "fields_updated", "=", "True", "except", "KeyError", ":", "continue", "# Optionally add notes", "if", "\"notes\"", "in", "new_record", "and", "new_record", "[", "\"notes\"", "]", ":", "note", "=", "new_record", "[", "\"notes\"", "]", "[", "0", "]", "new_note", "=", "{", "\"content\"", ":", "note", "[", "\"content\"", "]", ",", "\"type\"", ":", "note", "[", "\"type\"", "]", "}", "# This only supports editing a single note, and a single piece of content", "# within that note.", "# If the record already has at least one note, then replace the first note", "# within that record with this one.", "if", "\"notes\"", "not", "in", "record", "or", "record", "[", "\"notes\"", "]", "==", "[", "]", ":", "record", "[", "\"notes\"", "]", "=", "[", "new_note", "]", "else", ":", "record", "[", "\"notes\"", "]", "[", "0", "]", "=", "new_note", "fields_updated", "=", "True", "else", ":", "# Remove existing notes if the record didn't have a valid note;", "# a note with an empty string as content should be counted as", "# a request to delete the note.", "record", "[", "\"notes\"", "]", "=", "[", "]", "# Update date", "updated_date", "=", "{", "}", "# Only single dates are currently supported", "if", "\"dates\"", "in", "new_record", "and", "type", "(", "new_record", "[", "\"dates\"", "]", ")", "is", "list", ":", "new_record", "[", "\"dates\"", "]", "=", "new_record", "[", "\"dates\"", "]", "[", "0", "]", "# Map agentarchives date specification to AtoM specification", "date_mapping", "=", "{", "\"start_date\"", ":", "\"start_date\"", ",", "# 'begin': 'start_date',", "\"end_date\"", ":", "\"end_date\"", ",", "# 'end': 'end_date',", "\"date_expression\"", ":", "\"date\"", ",", "}", "for", "date_field", "in", "date_mapping", ":", "if", "date_field", "in", "new_record", ":", "updated_date", "[", "date_mapping", "[", "date_field", "]", "]", "=", "new_record", "[", "date_field", "]", "# Add updated date specification to record update", "if", "updated_date", "!=", "{", "}", ":", "record", "[", "\"dates\"", "]", "=", "[", "updated_date", "]", "fields_updated", "=", "True", "if", "not", "fields_updated", ":", "raise", "ValueError", "(", "\"No fields to update specified!\"", ")", "self", ".", "_put", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects/{}\"", ".", "format", "(", "record_id", ")", ")", ",", "data", "=", "json", ".", "dumps", "(", "record", ")", ",", ")" ]
Update a record in AtoM using the provided new_record. The format of new_record is identical to the format returned by get_resource_component_and_children and related methods; consult the documentation for that method in ArchivistsToolkitClient to see the format. This means it's possible, for example, to request a record, modify the returned dict, and pass that dict to this method to update the server. Currently supported fields are: * title * notes * start_date * end_date * date_expression :raises ValueError: if the 'slug' field isn't specified, or no fields to edit were specified.
[ "Update", "a", "record", "in", "AtoM", "using", "the", "provided", "new_record", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L181-L264
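A hedged usage sketch: `client` stands for an already-authenticated AtomClient instance (construction is outside this record), and the slug and field values are illustrative:

new_record = {
    "slug": "example-fonds",   # required; edit_record raises ValueError without it
    "title": "Example fonds (revised)",
    "level": "Fonds",
    "notes": [{"type": "general", "content": "Title revised during description review."}],
    "start_date": "1900-01-01",
    "end_date": "1950-12-31",
    "date_expression": "1900-1950",
}
client.edit_record(new_record)  # merges into the fetched record and PUTs it back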
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.get_levels_of_description
def get_levels_of_description(self): """ Returns an array of all levels of description defined in this AtoM instance. """ if not hasattr(self, "levels_of_description"): self.levels_of_description = [ item["name"] for item in self._get(urljoin(self.base_url, "taxonomies/34")).json() ] return self.levels_of_description
python
def get_levels_of_description(self): """ Returns an array of all levels of description defined in this AtoM instance. """ if not hasattr(self, "levels_of_description"): self.levels_of_description = [ item["name"] for item in self._get(urljoin(self.base_url, "taxonomies/34")).json() ] return self.levels_of_description
[ "def", "get_levels_of_description", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"levels_of_description\"", ")", ":", "self", ".", "levels_of_description", "=", "[", "item", "[", "\"name\"", "]", "for", "item", "in", "self", ".", "_get", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"taxonomies/34\"", ")", ")", ".", "json", "(", ")", "]", "return", "self", ".", "levels_of_description" ]
Returns an array of all levels of description defined in this AtoM instance.
[ "Returns", "an", "array", "of", "all", "levels", "of", "description", "defined", "in", "this", "AtoM", "instance", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L266-L276
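Usage sketch (`client` is an assumed, already-configured AtomClient instance; the taxonomy terms shown are illustrative, since they depend on the AtoM installation):

levels = client.get_levels_of_description()   # first call GETs taxonomies/34
cached = client.get_levels_of_description()   # later calls reuse the memoized attribute
# e.g. levels == ['Fonds', 'Subfonds', 'Series', 'File', 'Item']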
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.collection_list
def collection_list(self, resource_id, resource_type="collection"): """ Fetches a list of slugs representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs for all children of the requested description. :rtype list: """ def fetch_children(children): results = [] for child in children: results.append(child["slug"]) if "children" in child: results.extend(fetch_children(child["children"])) return results response = self._get( urljoin(self.base_url, "informationobjects/tree/{}".format(resource_id)) ) tree = response.json() return fetch_children(tree["children"])
python
def collection_list(self, resource_id, resource_type="collection"): """ Fetches a list of slugs representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs for all children of the requested description. :rtype list: """ def fetch_children(children): results = [] for child in children: results.append(child["slug"]) if "children" in child: results.extend(fetch_children(child["children"])) return results response = self._get( urljoin(self.base_url, "informationobjects/tree/{}".format(resource_id)) ) tree = response.json() return fetch_children(tree["children"])
[ "def", "collection_list", "(", "self", ",", "resource_id", ",", "resource_type", "=", "\"collection\"", ")", ":", "def", "fetch_children", "(", "children", ")", ":", "results", "=", "[", "]", "for", "child", "in", "children", ":", "results", ".", "append", "(", "child", "[", "\"slug\"", "]", ")", "if", "\"children\"", "in", "child", ":", "results", ".", "extend", "(", "fetch_children", "(", "child", "[", "\"children\"", "]", ")", ")", "return", "results", "response", "=", "self", ".", "_get", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects/tree/{}\"", ".", "format", "(", "resource_id", ")", ")", ")", "tree", "=", "response", ".", "json", "(", ")", "return", "fetch_children", "(", "tree", "[", "\"children\"", "]", ")" ]
Fetches a list of slugs representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs for all children of the requested description. :rtype list:
[ "Fetches", "a", "list", "of", "slug", "representing", "descriptions", "within", "the", "specified", "parent", "description", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L278-L304
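The inner `fetch_children` helper is pure, so its depth-first flattening can be demonstrated on a made-up tree:

def fetch_children(children):
    results = []
    for child in children:
        results.append(child["slug"])
        if "children" in child:
            results.extend(fetch_children(child["children"]))
    return results

tree = {
    "slug": "example-fonds",
    "children": [
        {"slug": "series-1", "children": [{"slug": "file-1-1"}]},
        {"slug": "series-2"},
    ],
}
print(fetch_children(tree["children"]))  # ['series-1', 'file-1-1', 'series-2']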
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.get_resource_component_and_children
def get_resource_component_and_children( self, resource_id, resource_type="collection", level=1, sort_data={}, recurse_max_level=False, sort_by=None, **kwargs ): """ Fetch detailed metadata for the specified resource_id and all of its children. :param str resource_id: The slug for which to fetch description metadata. :param str resource_type: no-op; not required or used in this implementation. :param int recurse_max_level: The maximum depth level to fetch when fetching children. Default is to fetch all of the resource's children, descending as deeply as necessary. Pass 1 to fetch no children. :return: A dict containing detailed metadata about both the requested resource and its children. Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format. :rtype dict: """ return self._get_resources( resource_id, recurse_max_level=recurse_max_level, sort_by=sort_by )
python
def get_resource_component_and_children( self, resource_id, resource_type="collection", level=1, sort_data={}, recurse_max_level=False, sort_by=None, **kwargs ): """ Fetch detailed metadata for the specified resource_id and all of its children. :param str resource_id: The slug for which to fetch description metadata. :param str resource_type: no-op; not required or used in this implementation. :param int recurse_max_level: The maximum depth level to fetch when fetching children. Default is to fetch all of the resource's children, descending as deeply as necessary. Pass 1 to fetch no children. :return: A dict containing detailed metadata about both the requested resource and its children. Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format. :rtype dict: """ return self._get_resources( resource_id, recurse_max_level=recurse_max_level, sort_by=sort_by )
[ "def", "get_resource_component_and_children", "(", "self", ",", "resource_id", ",", "resource_type", "=", "\"collection\"", ",", "level", "=", "1", ",", "sort_data", "=", "{", "}", ",", "recurse_max_level", "=", "False", ",", "sort_by", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_resources", "(", "resource_id", ",", "recurse_max_level", "=", "recurse_max_level", ",", "sort_by", "=", "sort_by", ")" ]
Fetch detailed metadata for the specified resource_id and all of its children. :param str resource_id: The slug for which to fetch description metadata. :param str resource_type: no-op; not required or used in this implementation. :param int recurse_max_level: The maximum depth level to fetch when fetching children. Default is to fetch all of the resource's children, descending as deeply as necessary. Pass 1 to fetch no children. :return: A dict containing detailed metadata about both the requested resource and its children. Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format. :rtype dict:
[ "Fetch", "detailed", "metadata", "for", "the", "specified", "resource_id", "and", "all", "of", "its", "children", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L370-L395
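Usage sketch (`client` is an assumed AtomClient instance; the slug is illustrative):

# Fetch the description plus two levels of children; per the docstring,
# pass recurse_max_level=1 to fetch no children at all.
resource = client.get_resource_component_and_children("example-fonds", recurse_max_level=2)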
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.find_parent_id_for_component
def find_parent_id_for_component(self, slug): """ Given the slug of a description, returns the parent description's slug. :param string slug: The slug of a description. :return: The slug of the parent record. :rtype: string """ response = self.get_record(slug) if "parent" in response: return response["parent"] # resource was passed in, which has no higher-up record; # return the same ID else: return slug
python
def find_parent_id_for_component(self, slug): """ Given the slug of a description, returns the parent description's slug. :param string slug: The slug of a description. :return: The slug of the parent record. :rtype: string """ response = self.get_record(slug) if "parent" in response: return response["parent"] # resource was passed in, which has no higher-up record; # return the same ID else: return slug
[ "def", "find_parent_id_for_component", "(", "self", ",", "slug", ")", ":", "response", "=", "self", ".", "get_record", "(", "slug", ")", "if", "\"parent\"", "in", "response", ":", "return", "response", "[", "\"parent\"", "]", "# resource was passed in, which has no higher-up record;", "# return the same ID", "else", ":", "return", "slug" ]
Given the slug of a description, returns the parent description's slug. :param string slug: The slug of a description. :return: The slug of the parent record. :rtype: string
[ "Given", "the", "slug", "of", "a", "description", "returns", "the", "parent", "description", "s", "slug", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L437-L452
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.find_collection_ids
def find_collection_ids(self, search_pattern="", identifier="", fetched=0, page=1): """ Fetches a list of resource slugs for every top-level description in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: A list containing every matched resource's slug. :rtype list: """ response = self._collections_search_request(search_pattern, identifier, page) hits = response.json() results = [r["slug"] for r in hits["results"]] results_so_far = fetched + len(results) if hits["total"] > results_so_far: results.extend( self.find_collection_ids( search_pattern, identifier, fetched=results_so_far, page=page + 1 ) ) return results
python
def find_collection_ids(self, search_pattern="", identifier="", fetched=0, page=1): """ Fetches a list of resource slugs for every top-level description in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: A list containing every matched resource's slug. :rtype list: """ response = self._collections_search_request(search_pattern, identifier, page) hits = response.json() results = [r["slug"] for r in hits["results"]] results_so_far = fetched + len(results) if hits["total"] > results_so_far: results.extend( self.find_collection_ids( search_pattern, identifier, fetched=results_so_far, page=page + 1 ) ) return results
[ "def", "find_collection_ids", "(", "self", ",", "search_pattern", "=", "\"\"", ",", "identifier", "=", "\"\"", ",", "fetched", "=", "0", ",", "page", "=", "1", ")", ":", "response", "=", "self", ".", "_collections_search_request", "(", "search_pattern", ",", "identifier", ",", "page", ")", "hits", "=", "response", ".", "json", "(", ")", "results", "=", "[", "r", "[", "\"slug\"", "]", "for", "r", "in", "hits", "[", "\"results\"", "]", "]", "results_so_far", "=", "fetched", "+", "len", "(", "results", ")", "if", "hits", "[", "\"total\"", "]", ">", "results_so_far", ":", "results", ".", "extend", "(", "self", ".", "find_collection_ids", "(", "fetched", "=", "results_so_far", ",", "page", "=", "page", "+", "1", ")", ")", "return", "results" ]
Fetches a list of resource slugs for every top-level description in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: A list containing every matched resource's slug. :rtype list:
[ "Fetches", "a", "list", "of", "resource", "URLs", "for", "every", "top", "-", "level", "description", "in", "the", "database", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L454-L479
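For contrast, a hedged iterative rendering of the same pagination; it assumes the private `_collections_search_request` helper documented later in this file and threads the filters through every page:

def find_collection_ids_iterative(client, search_pattern="", identifier=""):
    results = []
    page = 1
    while True:
        hits = client._collections_search_request(search_pattern, identifier, page).json()
        results.extend(r["slug"] for r in hits["results"])
        if len(results) >= hits["total"]:
            return results
        page += 1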
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient._collections_search_request
def _collections_search_request( self, search_pattern="", identifier="", page=1, page_size=50, sort_by=None ): """ Performs a search request for top-level descriptions in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: The response for the search request. """ skip = (page - 1) * page_size params = { "limit": page_size, "skip": skip, "topLod": "1", "sf0": "_all", "sq0": "", } if search_pattern: params["sq0"] = '"' + self._escape_lucene_query(search_pattern) + '"' if identifier != "": params["sf1"] = "identifier" params["sq1"] = self._escape_lucene_query(identifier) if sort_by is not None: params["sort"] = "alphabetic" if sort_by == "desc": params["reverse"] = True return self._get(urljoin(self.base_url, "informationobjects"), params=params)
python
def _collections_search_request( self, search_pattern="", identifier="", page=1, page_size=50, sort_by=None ): """ Performs a search request for top-level descriptions in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: The response for the search request. """ skip = (page - 1) * page_size params = { "limit": page_size, "skip": skip, "topLod": "1", "sf0": "_all", "sq0": "", } if search_pattern: params["sq0"] = '"' + self._escape_lucene_query(search_pattern) + '"' if identifier != "": params["sf1"] = "identifier" params["sq1"] = self._escape_lucene_query(identifier) if sort_by is not None: params["sort"] = "alphabetic" if sort_by == "desc": params["reverse"] = True return self._get(urljoin(self.base_url, "informationobjects"), params=params)
[ "def", "_collections_search_request", "(", "self", ",", "search_pattern", "=", "\"\"", ",", "identifier", "=", "\"\"", ",", "page", "=", "1", ",", "page_size", "=", "50", ",", "sort_by", "=", "None", ")", ":", "skip", "=", "(", "page", "-", "1", ")", "*", "page_size", "params", "=", "{", "\"limit\"", ":", "page_size", ",", "\"skip\"", ":", "skip", ",", "\"topLod\"", ":", "\"1\"", ",", "\"sf0\"", ":", "\"_all\"", ",", "\"sq0\"", ":", "\"\"", ",", "}", "if", "search_pattern", ":", "params", "[", "\"sq0\"", "]", "=", "'\"'", "+", "self", ".", "_escape_lucene_query", "(", "search_pattern", ")", "+", "'\"'", "if", "identifier", "!=", "\"\"", ":", "params", "[", "\"sf1\"", "]", "=", "\"identifier\"", "params", "[", "\"sq1\"", "]", "=", "self", ".", "_escape_lucene_query", "(", "identifier", ")", "if", "sort_by", "is", "not", "None", ":", "params", "[", "\"sort\"", "]", "=", "\"alphabetic\"", "if", "sort_by", "==", "\"desc\"", ":", "params", "[", "\"reverse\"", "]", "=", "True", "return", "self", ".", "_get", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects\"", ")", ",", "params", "=", "params", ")" ]
Performs a search request for top-level descriptions in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: The response for the search request.
[ "Fetches", "a", "list", "of", "resource", "URLs", "for", "every", "top", "-", "level", "description", "in", "the", "database", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L481-L520
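A worked example of the query parameters this builds, derived from the code above (search pattern quoted and Lucene-escaped, identifier escaped but with the wildcard kept):

# _collections_search_request("Letters", identifier="F*", page=2) sends:
params = {
    "limit": 50,         # page_size default
    "skip": 50,          # (page - 1) * page_size
    "topLod": "1",
    "sf0": "_all",
    "sq0": '"Letters"',  # quoted, escaped search pattern
    "sf1": "identifier",
    "sq1": "F*",         # '*' is deliberately left unescaped
}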
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.find_collections
def find_collections( self, search_pattern="", identifier="", fetched=0, page=1, page_size=30, sort_by=None, ): """ Fetches a list of all resource IDs for every resource in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title or resourceid containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Restrict records to only those with this identifier. This refers to the human-assigned record identifier, not the automatically generated internal ID. This value can contain wildcards. :return: A list of dicts describing every matched resource. :rtype: list """ def format_record(record): # Get record details full_record = self.get_record(record["slug"]) dates = self._fetch_dates_from_record(record) date_expression = self._fetch_date_expression_from_record(record) # Determine whether descendants exist url = urljoin( self.base_url, "informationobjects/tree/{}".format(record["slug"]) ) tree = self._get(url).json() if "children" in tree: has_children = len(tree["children"]) > 0 else: has_children = False formatted = { "id": record["slug"], "type": "resource", "sortPosition": 1, "identifier": record.get("reference_code", ""), "title": record.get("title", ""), "dates": dates, "date_expression": date_expression, "children": [] if has_children else False, "has_children": has_children, "notes": full_record.get("notes", []), } if "level_of_description" in record: formatted["levelOfDescription"] = record["level_of_description"] return formatted response = self._collections_search_request( search_pattern, identifier, page, page_size, sort_by ) hits = response.json() return [format_record(r) for r in hits["results"]]
python
def find_collections( self, search_pattern="", identifier="", fetched=0, page=1, page_size=30, sort_by=None, ): """ Fetches a list of all resource IDs for every resource in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title or resourceid containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Restrict records to only those with this identifier. This refers to the human-assigned record identifier, not the automatically generated internal ID. This value can contain wildcards. :return: A list of dicts describing every matched resource. :rtype: list """ def format_record(record): # Get record details full_record = self.get_record(record["slug"]) dates = self._fetch_dates_from_record(record) date_expression = self._fetch_date_expression_from_record(record) # Determine whether descendants exist url = urljoin( self.base_url, "informationobjects/tree/{}".format(record["slug"]) ) tree = self._get(url).json() if "children" in tree: has_children = len(tree["children"]) > 0 else: has_children = False formatted = { "id": record["slug"], "type": "resource", "sortPosition": 1, "identifier": record.get("reference_code", ""), "title": record.get("title", ""), "dates": dates, "date_expression": date_expression, "children": [] if has_children else False, "has_children": has_children, "notes": full_record.get("notes", []), } if "level_of_description" in record: formatted["levelOfDescription"] = record["level_of_description"] return formatted response = self._collections_search_request( search_pattern, identifier, page, page_size, sort_by ) hits = response.json() return [format_record(r) for r in hits["results"]]
[ "def", "find_collections", "(", "self", ",", "search_pattern", "=", "\"\"", ",", "identifier", "=", "\"\"", ",", "fetched", "=", "0", ",", "page", "=", "1", ",", "page_size", "=", "30", ",", "sort_by", "=", "None", ",", ")", ":", "def", "format_record", "(", "record", ")", ":", "# Get record details", "full_record", "=", "self", ".", "get_record", "(", "record", "[", "\"slug\"", "]", ")", "dates", "=", "self", ".", "_fetch_dates_from_record", "(", "record", ")", "date_expression", "=", "self", ".", "_fetch_date_expression_from_record", "(", "record", ")", "# Determine whether descendents exist", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects/tree/{}\"", ".", "format", "(", "record", "[", "\"slug\"", "]", ")", ")", "tree", "=", "self", ".", "_get", "(", "url", ")", ".", "json", "(", ")", "if", "\"children\"", "in", "tree", ":", "has_children", "=", "len", "(", "self", ".", "_get", "(", "url", ")", ".", "json", "(", ")", "[", "\"children\"", "]", ")", ">", "0", "else", ":", "has_children", "=", "False", "formatted", "=", "{", "\"id\"", ":", "record", "[", "\"slug\"", "]", ",", "\"type\"", ":", "\"resource\"", ",", "\"sortPosition\"", ":", "1", ",", "\"identifier\"", ":", "record", ".", "get", "(", "\"reference_code\"", ",", "\"\"", ")", ",", "\"title\"", ":", "record", ".", "get", "(", "\"title\"", ",", "\"\"", ")", ",", "\"dates\"", ":", "dates", ",", "\"date_expression\"", ":", "date_expression", ",", "\"children\"", ":", "[", "]", "if", "has_children", "else", "False", ",", "\"has_children\"", ":", "has_children", ",", "\"notes\"", ":", "full_record", ".", "get", "(", "\"notes\"", ",", "[", "]", ")", ",", "}", "if", "\"level_of_description\"", "in", "record", ":", "formatted", "[", "\"levelOfDescription\"", "]", "=", "record", "[", "\"level_of_description\"", "]", "return", "formatted", "response", "=", "self", ".", "_collections_search_request", "(", "search_pattern", ",", "identifier", ",", "page", ",", "page_size", ",", "sort_by", ")", "hits", "=", "response", ".", "json", "(", ")", "return", "[", "format_record", "(", "r", ")", "for", "r", "in", "hits", "[", "\"results\"", "]", "]" ]
Fetches a list of all resource IDs for every resource in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title or resourceid containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Restrict records to only those with this identifier. This refers to the human-assigned record identifier, not the automatically generated internal ID. This value can contain wildcards. :return: A list of dicts describing every matched resource. :rtype: list
[ "Fetches", "a", "list", "of", "all", "resource", "IDs", "for", "every", "resource", "in", "the", "database", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L526-L588
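Illustrative shape of one formatted hit; `_fetch_dates_from_record` and `_fetch_date_expression_from_record` are not shown in this record, so the `dates` value below is a placeholder:

formatted = {
    "id": "example-fonds",      # the AtoM slug
    "type": "resource",
    "sortPosition": 1,
    "identifier": "F1",         # reference_code, or "" when absent
    "title": "Example fonds",
    "dates": "1900-1950",       # placeholder; produced by _fetch_dates_from_record
    "date_expression": "1900-1950",
    "children": [],             # [] when children exist, False otherwise
    "has_children": True,
    "notes": [],
    "levelOfDescription": "Fonds",  # only set when the hit includes it
}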
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.augment_resource_ids
def augment_resource_ids(self, resource_ids): """ Given a list of resource IDs, returns a list of dicts containing detailed information about the specified resources and their children. This function recurses to a maximum of two levels when fetching children from the specified resources. Consult the documentation of ArchivistsToolkitClient.get_resource_component_children for the format of the returned dicts. :param list resource_ids: A list of one or more resource IDs. :return: A list containing metadata dicts. :rtype list: """ resources_augmented = [] for id in resource_ids: # resource_data = self.get_resource_component_and_children(id, recurse_max_level=2) # resources_augmented.append(resource_data) resources_augmented.append( self.get_resource_component_and_children(id, recurse_max_level=2) ) return resources_augmented
python
def augment_resource_ids(self, resource_ids): """ Given a list of resource IDs, returns a list of dicts containing detailed information about the specified resources and their children. This function recurses to a maximum of two levels when fetching children from the specified resources. Consult the documentation of ArchivistsToolkitClient.get_resource_component_children for the format of the returned dicts. :param list resource_ids: A list of one or more resource IDs. :return: A list containing metadata dicts. :rtype list: """ resources_augmented = [] for id in resource_ids: # resource_data = self.get_resource_component_and_children(id, recurse_max_level=2) # resources_augmented.append(resource_data) resources_augmented.append( self.get_resource_component_and_children(id, recurse_max_level=2) ) return resources_augmented
[ "def", "augment_resource_ids", "(", "self", ",", "resource_ids", ")", ":", "resources_augmented", "=", "[", "]", "for", "id", "in", "resource_ids", ":", "# resource_data = self.get_resource_component_and_children(id, recurse_max_level=2)", "# resources_augmented.append(resource_data)", "resources_augmented", ".", "append", "(", "self", ".", "get_resource_component_and_children", "(", "id", ",", "recurse_max_level", "=", "2", ")", ")", "return", "resources_augmented" ]
Given a list of resource IDs, returns a list of dicts containing detailed information about the specified resources and their children. This function recurses to a maximum of two levels when fetching children from the specified resources. Consult the documentation of ArchivistsToolkitClient.get_resource_component_children for the format of the returned dicts. :param list resource_ids: A list of one or more resource IDs. :return: A list containing metadata dicts. :rtype list:
[ "Given", "a", "list", "of", "resource", "IDs", "returns", "a", "list", "of", "dicts", "containing", "detailed", "information", "about", "the", "specified", "resources", "and", "their", "children", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L594-L613
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.add_digital_object
def add_digital_object( self, information_object_slug, identifier=None, title=None, uri=None, location_of_originals=None, object_type=None, xlink_show="embed", xlink_actuate="onLoad", restricted=False, use_statement="", use_conditions=None, access_conditions=None, size=None, format_name=None, format_version=None, format_registry_key=None, format_registry_name=None, file_uuid=None, aip_uuid=None, inherit_dates=False, usage=None, ): """ Creates a new digital object. """ new_object = {"information_object_slug": information_object_slug} if title is not None: new_object["name"] = title if uri is not None: new_object["uri"] = uri if size is not None: new_object["byte_size"] = size if object_type is not None: new_object["media_type"] = object_type if usage is not None: new_object["usage"] = usage if file_uuid is not None: new_object["file_uuid"] = file_uuid if aip_uuid is not None: new_object["aip_uuid"] = aip_uuid if format_name is not None: new_object["format_name"] = format_name if format_version is not None: new_object["format_version"] = format_version if format_registry_key is not None: new_object["format_registry_key"] = format_registry_key if format_registry_name is not None: new_object["format_registry_name"] = format_registry_name new_object["slug"] = self._post( urljoin(self.base_url, "digitalobjects"), data=json.dumps(new_object), expected_response=201, ).json()["slug"] return new_object
python
def add_digital_object( self, information_object_slug, identifier=None, title=None, uri=None, location_of_originals=None, object_type=None, xlink_show="embed", xlink_actuate="onLoad", restricted=False, use_statement="", use_conditions=None, access_conditions=None, size=None, format_name=None, format_version=None, format_registry_key=None, format_registry_name=None, file_uuid=None, aip_uuid=None, inherit_dates=False, usage=None, ): """ Creates a new digital object. """ new_object = {"information_object_slug": information_object_slug} if title is not None: new_object["name"] = title if uri is not None: new_object["uri"] = uri if size is not None: new_object["byte_size"] = size if object_type is not None: new_object["media_type"] = object_type if usage is not None: new_object["usage"] = usage if file_uuid is not None: new_object["file_uuid"] = file_uuid if aip_uuid is not None: new_object["aip_uuid"] = aip_uuid if format_name is not None: new_object["format_name"] = format_name if format_version is not None: new_object["format_version"] = format_version if format_registry_key is not None: new_object["format_registry_key"] = format_registry_key if format_registry_name is not None: new_object["format_registry_name"] = format_registry_name new_object["slug"] = self._post( urljoin(self.base_url, "digitalobjects"), data=json.dumps(new_object), expected_response=201, ).json()["slug"] return new_object
[ "def", "add_digital_object", "(", "self", ",", "information_object_slug", ",", "identifier", "=", "None", ",", "title", "=", "None", ",", "uri", "=", "None", ",", "location_of_originals", "=", "None", ",", "object_type", "=", "None", ",", "xlink_show", "=", "\"embed\"", ",", "xlink_actuate", "=", "\"onLoad\"", ",", "restricted", "=", "False", ",", "use_statement", "=", "\"\"", ",", "use_conditions", "=", "None", ",", "access_conditions", "=", "None", ",", "size", "=", "None", ",", "format_name", "=", "None", ",", "format_version", "=", "None", ",", "format_registry_key", "=", "None", ",", "format_registry_name", "=", "None", ",", "file_uuid", "=", "None", ",", "aip_uuid", "=", "None", ",", "inherit_dates", "=", "False", ",", "usage", "=", "None", ",", ")", ":", "new_object", "=", "{", "\"information_object_slug\"", ":", "information_object_slug", "}", "if", "title", "is", "not", "None", ":", "new_object", "[", "\"name\"", "]", "=", "title", "if", "uri", "is", "not", "None", ":", "new_object", "[", "\"uri\"", "]", "=", "uri", "if", "size", "is", "not", "None", ":", "new_object", "[", "\"byte_size\"", "]", "=", "size", "if", "object_type", "is", "not", "None", ":", "new_object", "[", "\"media_type\"", "]", "=", "object_type", "if", "usage", "is", "not", "None", ":", "new_object", "[", "\"usage\"", "]", "=", "usage", "if", "file_uuid", "is", "not", "None", ":", "new_object", "[", "\"file_uuid\"", "]", "=", "file_uuid", "if", "aip_uuid", "is", "not", "None", ":", "new_object", "[", "\"aip_uuid\"", "]", "=", "aip_uuid", "if", "format_name", "is", "not", "None", ":", "new_object", "[", "\"format_name\"", "]", "=", "format_name", "if", "format_version", "is", "not", "None", ":", "new_object", "[", "\"format_version\"", "]", "=", "format_version", "if", "format_registry_key", "is", "not", "None", ":", "new_object", "[", "\"format_registry_key\"", "]", "=", "format_registry_key", "if", "format_registry_name", "is", "not", "None", ":", "new_object", "[", "\"format_registry_name\"", "]", "=", "format_registry_name", "new_object", "[", "\"slug\"", "]", "=", "self", ".", "_post", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"digitalobjects\"", ")", ",", "data", "=", "json", ".", "dumps", "(", "new_object", ")", ",", "expected_response", "=", "201", ",", ")", ".", "json", "(", ")", "[", "\"slug\"", "]", "return", "new_object" ]
Creates a new digital object.
[ "Creates", "a", "new", "digital", "object", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L615-L678
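Usage sketch (`client` is an assumed AtomClient instance; slugs, URI, and UUIDs are placeholders). Note that several accepted keyword arguments (identifier, location_of_originals, xlink_show, xlink_actuate, restricted, use_statement, use_conditions, access_conditions, inherit_dates) are never forwarded in the body shown above:

new_do = client.add_digital_object(
    "example-item",                        # slug of the parent description
    title="scan-001.tif",
    uri="https://example.org/scan-001.tif",
    size=2048,
    object_type="Image",
    usage="master",
    file_uuid="0f1e2d3c-...",              # placeholder UUID
    aip_uuid="a1b2c3d4-...",               # placeholder UUID
)
# new_do carries the POSTed fields plus the "slug" assigned by AtoM.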
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.add_child
def add_child( self, parent_slug=None, title="", level="", start_date=None, end_date=None, date_expression=None, notes=[], ): """ Adds a new resource component parented within `parent`. :param str parent_slug: The parent's slug. :param str title: A title for the record. :param str level: The level of description. :return: The ID of the newly-created record. """ new_object = {"title": title, "level_of_description": level} if parent_slug is not None: new_object["parent_slug"] = parent_slug # Optionally add date specification new_date = {} if start_date is not None: new_date["start_date"] = start_date if end_date is not None: new_date["end_date"] = end_date if date_expression is not None: new_date["date"] = date_expression if new_date != {}: new_object["dates"] = [new_date] # Optionally add notes new_object["notes"] = [] for note in notes: note_type = note.get("type", "General note") # If there is a note, but it's an empty string, skip this; content = note.get("content") if not content: continue new_note = {"content": content, "type": note_type} new_object["notes"].append(new_note) return self._post( urljoin(self.base_url, "informationobjects"), data=json.dumps(new_object), expected_response=201, ).json()["slug"]
python
def add_child( self, parent_slug=None, title="", level="", start_date=None, end_date=None, date_expression=None, notes=[], ): """ Adds a new resource component parented within `parent`. :param str parent_slug: The parent's slug. :param str title: A title for the record. :param str level: The level of description. :return: The ID of the newly-created record. """ new_object = {"title": title, "level_of_description": level} if parent_slug is not None: new_object["parent_slug"] = parent_slug # Optionally add date specification new_date = {} if start_date is not None: new_date["start_date"] = start_date if end_date is not None: new_date["end_date"] = end_date if date_expression is not None: new_date["date"] = date_expression if new_date != {}: new_object["dates"] = [new_date] # Optionally add notes new_object["notes"] = [] for note in notes: note_type = note.get("type", "General note") # If there is a note, but it's an empty string, skip this; content = note.get("content") if not content: continue new_note = {"content": content, "type": note_type} new_object["notes"].append(new_note) return self._post( urljoin(self.base_url, "informationobjects"), data=json.dumps(new_object), expected_response=201, ).json()["slug"]
[ "def", "add_child", "(", "self", ",", "parent_slug", "=", "None", ",", "title", "=", "\"\"", ",", "level", "=", "\"\"", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "date_expression", "=", "None", ",", "notes", "=", "[", "]", ",", ")", ":", "new_object", "=", "{", "\"title\"", ":", "title", ",", "\"level_of_description\"", ":", "level", "}", "if", "parent_slug", "is", "not", "None", ":", "new_object", "[", "\"parent_slug\"", "]", "=", "parent_slug", "# Optionally add date specification", "new_date", "=", "{", "}", "if", "start_date", "is", "not", "None", ":", "new_date", "[", "\"start_date\"", "]", "=", "start_date", "if", "end_date", "is", "not", "None", ":", "new_date", "[", "\"end_date\"", "]", "=", "end_date", "if", "date_expression", "is", "not", "None", ":", "new_date", "[", "\"date\"", "]", "=", "date_expression", "if", "new_date", "!=", "{", "}", ":", "new_object", "[", "\"dates\"", "]", "=", "[", "new_date", "]", "# Optionally add notes", "new_object", "[", "\"notes\"", "]", "=", "[", "]", "for", "note", "in", "notes", ":", "note_type", "=", "note", ".", "get", "(", "\"type\"", ",", "\"General note\"", ")", "# If there is a note, but it's an empty string, skip this;", "content", "=", "note", ".", "get", "(", "\"content\"", ")", "if", "not", "content", ":", "continue", "new_note", "=", "{", "\"content\"", ":", "content", ",", "\"type\"", ":", "note_type", "}", "new_object", "[", "\"notes\"", "]", ".", "append", "(", "new_note", ")", "return", "self", ".", "_post", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects\"", ")", ",", "data", "=", "json", ".", "dumps", "(", "new_object", ")", ",", "expected_response", "=", "201", ",", ")", ".", "json", "(", ")", "[", "\"slug\"", "]" ]
Adds a new resource component parented within `parent`. :param str parent_slug: The parent's slug. :param str title: A title for the record. :param str level: The level of description. :return: The ID of the newly-created record.
[ "Adds", "a", "new", "resource", "component", "parented", "within", "parent", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L692-L747
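Usage sketch (`client` is an assumed AtomClient instance; all values are illustrative). The return value is the slug of the new description:

slug = client.add_child(
    parent_slug="example-fonds",
    title="Correspondence",
    level="Series",
    start_date="1900-01-01",
    end_date="1910-12-31",
    date_expression="1900-1910",
    notes=[{"type": "General note", "content": "Arranged chronologically."}],
)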
artefactual-labs/agentarchives
agentarchives/atom/client.py
AtomClient.delete_record
def delete_record(self, record_id): """ Delete a record with record_id. """ self._delete( urljoin(self.base_url, "informationobjects/{}".format(record_id)), expected_response=204, ) return {"status": "Deleted"}
python
def delete_record(self, record_id): """ Delete a record with record_id. """ self._delete( urljoin(self.base_url, "informationobjects/{}".format(record_id)), expected_response=204, ) return {"status": "Deleted"}
[ "def", "delete_record", "(", "self", ",", "record_id", ")", ":", "self", ".", "_delete", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects/{}\"", ".", "format", "(", "record_id", ")", ")", ",", "expected_response", "=", "204", ",", ")", "return", "{", "\"status\"", ":", "\"Deleted\"", "}" ]
Delete a record with record_id.
[ "Delete", "a", "record", "with", "record_id", "." ]
train
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L749-L757
moonso/extract_vcf
extract_vcf/config_parser.py
ConfigParser.get_string_dict
def get_string_dict(self, plugin_info): """ Convert a section with information of priorities to a string dict. To avoid typos we make all letters lower case when comparing Arguments: plugin_info (dict): A dictionary with plugin information Return: string_dict (dict): A dictionary with strings as keys and integers that specify their priorities as values """ string_info = [] string_dict = {} for key in plugin_info: try: string_info.append(dict(plugin_info[key])) except ValueError: pass for raw_info in string_info: try: string = raw_info['string'] except KeyError: raise ValidateError("String information has to have a 'string'") try: priority = raw_info['priority'] except KeyError: raise ValidateError("String information has to have a 'priority'") try: priority = int(priority) except ValueError: raise ValidateError("'priority' has to be an integer") string_dict[string] = priority if len(string_dict) == 0: raise ValidateError("'string' entries must have string rules defined") return string_dict
python
def get_string_dict(self, plugin_info): """ Convert a section with information of priorities to a string dict. To avoid typos we make all letters lower case when comparing Arguments: plugin_info (dict): A dictionary with plugin information Return: string_dict (dict): A dictionary with strings as keys and integers that specify their priorities as values """ string_info = [] string_dict = {} for key in plugin_info: try: string_info.append(dict(plugin_info[key])) except ValueError: pass for raw_info in string_info: try: string = raw_info['string'] except KeyError: raise ValidateError("String information has to have a 'string'") try: priority = raw_info['priority'] except KeyError: raise ValidateError("String information has to have a 'priority'") try: priority = int(priority) except ValueError: raise ValidateError("'priority' has to be an integer") string_dict[string] = priority if len(string_dict) == 0: raise ValidateError("'string' entries must have string rules defined") return string_dict
[ "def", "get_string_dict", "(", "self", ",", "plugin_info", ")", ":", "string_info", "=", "[", "]", "string_dict", "=", "{", "}", "for", "key", "in", "plugin_info", ":", "try", ":", "string_info", ".", "append", "(", "dict", "(", "plugin_info", "[", "key", "]", ")", ")", "except", "ValueError", ":", "pass", "string_rules", "=", "{", "}", "for", "raw_info", "in", "string_info", ":", "try", ":", "string", "=", "raw_info", "[", "'string'", "]", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"String information has to have a 'string'\"", ")", "try", ":", "priority", "=", "raw_info", "[", "'priority'", "]", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"String information has to have a 'priority'\"", ")", "try", ":", "priority", "=", "int", "(", "priority", ")", "except", "ValueError", ":", "raise", "ValidateError", "(", "\"'priority' has to be an integer\"", ")", "string_dict", "[", "string", "]", "=", "priority", "if", "len", "(", "string_dict", ")", "==", "0", ":", "raise", "ValidateError", "(", "\"'string' entrys must have string rules defined\"", ")", "return", "string_dict" ]
Convert a section with information of priorities to a string dict. To avoid typos we make all letters lower case when comparing Arguments: plugin_info (dict): A dictionary with plugin information Return: string_dict (dict): A dictionary with strings as keys and integers that specify their priorities as values
[ "Convert", "a", "section", "with", "information", "of", "priorities", "to", "a", "string", "dict", ".", "To", "avoid", "typos", "we", "make", "all", "letters", "lower", "case", "when", "comparing", "Arguments", ":", "plugin_info", "(", "dict", ")", ":", "A", "dictionary", "with", "plugin", "information", "Return", ":", "string_dict", "(", "dict", ")", ":", "A", "dictionary", "with", "strings", "as", "keys", "and", "integer", "that", "specifies", "their", "priorities", "as", "values" ]
train
https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/config_parser.py#L128-L171
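A worked example, assuming the sub-sections of `plugin_info` behave like dicts (values that cannot be converted with dict() are skipped by the ValueError handler):

plugin_info = {
    "rule_1": {"string": "Pathogenic", "priority": "3"},
    "rule_2": {"string": "Benign", "priority": "1"},
}
# get_string_dict(plugin_info) -> {"Pathogenic": 3, "Benign": 1}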
moonso/extract_vcf
extract_vcf/config_parser.py
ConfigParser.version_check
def version_check(self): """ Check if the version entry is in the proper format """ try: version_info = self['Version'] except KeyError: raise ValidateError('Config file has to have a Version section') try: float(version_info['version']) except KeyError: raise ValidateError('Config file has to have a version section') except ValueError: raise ValidateError('Version has to be a float.') try: version_info['name'] except KeyError: raise ValidateError("Config file has to have a name") return
python
def version_check(self): """ Check if the version entry is in the proper format """ try: version_info = self['Version'] except KeyError: raise ValidateError('Config file has to have a Version section') try: float(version_info['version']) except KeyError: raise ValidateError('Config file has to have a version section') except ValueError: raise ValidateError('Version has to be a float.') try: version_info['name'] except KeyError: raise ValidateError("Config file has to have a name") return
[ "def", "version_check", "(", "self", ")", ":", "try", ":", "version_info", "=", "self", "[", "'Version'", "]", "except", "KeyError", ":", "raise", "ValidateError", "(", "'Config file has to have a Version section'", ")", "try", ":", "float", "(", "version_info", "[", "'version'", "]", ")", "except", "KeyError", ":", "raise", "ValidateError", "(", "'Config file has to have a version section'", ")", "except", "ValueError", ":", "raise", "ValidateError", "(", "'Version has to be a float.'", ")", "try", ":", "version_info", "[", "'name'", "]", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"Config file has to have a name\"", ")", "return" ]
Check if the version entry is in the proper format
[ "Check", "if", "the", "version", "entry", "is", "in", "the", "proper", "format" ]
train
https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/config_parser.py#L175-L194
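A minimal configuration that passes, sketched as plain data (the real object is a ConfigParser mapping):

config = {"Version": {"version": "0.1", "name": "example_config"}}
# float(config["Version"]["version"]) succeeds and "name" is present,
# so version_check() returns without raising ValidateError.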
moonso/extract_vcf
extract_vcf/config_parser.py
ConfigParser.check_plugin
def check_plugin(self, plugin): """ Check if the section is in the proper vcf format. Args: vcf_section (dict): The information from a vcf section Returns: True if it is in the proper format """ vcf_section = self[plugin] try: vcf_field = vcf_section['field'] if not vcf_field in self.vcf_columns: raise ValidateError( "field has to be in {0}\n" "Wrong field name in plugin: {1}".format( self.vcf_columns, plugin )) if vcf_field == 'INFO': try: info_key = vcf_section['info_key'] if info_key == 'CSQ': try: csq_key = vcf_section['csq_key'] except KeyError: raise ValidateError( "CSQ entrys has to refer to an csq field.\n" "Refer with keyword 'csq_key'\n" "csq_key is missing in section: {0}".format( plugin ) ) except KeyError: raise ValidateError( "INFO entrys has to refer to an INFO field.\n" "Refer with keyword 'info_key'\n" "info_key is missing in section: {0}".format( plugin ) ) except KeyError: raise ValidateError( "Vcf entrys have to refer to a field in the VCF with keyword" " 'field'.\nMissing keyword 'field' in plugin: {0}".format( plugin )) try: data_type = vcf_section['data_type'] if not data_type in self.data_types: raise ValidateError( "data_type has to be in {0}\n" "Wrong data_type in plugin: {1}".format( self.data_types, plugin) ) except KeyError: raise ValidateError( "Vcf entrys have to refer to a data type in the VCF with " "keyword 'data_type'.\n" "Missing data_type in plugin: {0}".format(plugin) ) separators = vcf_section.get('separators', None) if separators: if len(separators) == 1: self[plugin]['separators'] = list(separators) else: if data_type != 'flag': raise ValidateError( "If data_type != flag the separators have to be defined" "Missing separators in plugin: {0}".format(plugin) ) record_rule = vcf_section.get('record_rule', None) if record_rule: if not record_rule in ['min', 'max']: raise ValidateError( "Record rules have to be in {0}\n" "Wrong record_rule in plugin: {1}".format( ['min', 'max'], plugin) ) else: self.logger.info("Setting record rule to default: 'max'") return True
python
def check_plugin(self, plugin):
    """
    Check if the section is in the proper format vcf format.

    Args:
        vcf_section (dict): The information from a vcf section

    Returns:
        True is it is in the proper format
    """
    vcf_section = self[plugin]

    try:
        vcf_field = vcf_section['field']
        if not vcf_field in self.vcf_columns:
            raise ValidateError(
                "field has to be in {0}\n"
                "Wrong field name in plugin: {1}".format(
                    self.vcf_columns, plugin
                ))
        if vcf_field == 'INFO':
            try:
                info_key = vcf_section['info_key']
                if info_key == 'CSQ':
                    try:
                        csq_key = vcf_section['csq_key']
                    except KeyError:
                        raise ValidateError(
                            "CSQ entrys has to refer to an csq field.\n"
                            "Refer with keyword 'csq_key'\n"
                            "csq_key is missing in section: {0}".format(
                                plugin
                            )
                        )
            except KeyError:
                raise ValidateError(
                    "INFO entrys has to refer to an INFO field.\n"
                    "Refer with keyword 'info_key'\n"
                    "info_key is missing in section: {0}".format(
                        plugin
                    )
                )
    except KeyError:
        raise ValidateError(
            "Vcf entrys have to refer to a field in the VCF with keyword"
            " 'field'.\nMissing keyword 'field' in plugin: {0}".format(
                plugin
            ))

    try:
        data_type = vcf_section['data_type']
        if not data_type in self.data_types:
            raise ValidateError(
                "data_type has to be in {0}\n"
                "Wrong data_type in plugin: {1}".format(
                    self.data_types, plugin)
            )
    except KeyError:
        raise ValidateError(
            "Vcf entrys have to refer to a data type in the VCF with "
            "keyword 'data_type'.\n"
            "Missing data_type in plugin: {0}".format(plugin)
        )

    separators = vcf_section.get('separators', None)
    if separators:
        if len(separators) == 1:
            self[plugin]['separators'] = list(separators)
    else:
        if data_type != 'flag':
            raise ValidateError(
                "If data_type != flag the separators have to be defined"
                "Missing separators in plugin: {0}".format(plugin)
            )

    record_rule = vcf_section.get('record_rule', None)
    if record_rule:
        if not record_rule in ['min', 'max']:
            raise ValidateError(
                "Record rules have to be in {0}\n"
                "Wrong record_rule in plugin: {1}".format(
                    ['min', 'max'], plugin)
            )
    else:
        self.logger.info("Setting record rule to default: 'max'")

    return True
[ "def", "check_plugin", "(", "self", ",", "plugin", ")", ":", "vcf_section", "=", "self", "[", "plugin", "]", "try", ":", "vcf_field", "=", "vcf_section", "[", "'field'", "]", "if", "not", "vcf_field", "in", "self", ".", "vcf_columns", ":", "raise", "ValidateError", "(", "\"field has to be in {0}\\n\"", "\"Wrong field name in plugin: {1}\"", ".", "format", "(", "self", ".", "vcf_columns", ",", "plugin", ")", ")", "if", "vcf_field", "==", "'INFO'", ":", "try", ":", "info_key", "=", "vcf_section", "[", "'info_key'", "]", "if", "info_key", "==", "'CSQ'", ":", "try", ":", "csq_key", "=", "vcf_section", "[", "'csq_key'", "]", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"CSQ entrys has to refer to an csq field.\\n\"", "\"Refer with keyword 'csq_key'\\n\"", "\"csq_key is missing in section: {0}\"", ".", "format", "(", "plugin", ")", ")", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"INFO entrys has to refer to an INFO field.\\n\"", "\"Refer with keyword 'info_key'\\n\"", "\"info_key is missing in section: {0}\"", ".", "format", "(", "plugin", ")", ")", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"Vcf entrys have to refer to a field in the VCF with keyword\"", "\" 'field'.\\nMissing keyword 'field' in plugin: {0}\"", ".", "format", "(", "plugin", ")", ")", "try", ":", "data_type", "=", "vcf_section", "[", "'data_type'", "]", "if", "not", "data_type", "in", "self", ".", "data_types", ":", "raise", "ValidateError", "(", "\"data_type has to be in {0}\\n\"", "\"Wrong data_type in plugin: {1}\"", ".", "format", "(", "self", ".", "data_types", ",", "plugin", ")", ")", "except", "KeyError", ":", "raise", "ValidateError", "(", "\"Vcf entrys have to refer to a data type in the VCF with \"", "\"keyword 'data_type'.\\n\"", "\"Missing data_type in plugin: {0}\"", ".", "format", "(", "plugin", ")", ")", "separators", "=", "vcf_section", ".", "get", "(", "'separators'", ",", "None", ")", "if", "separators", ":", "if", "len", "(", "separators", ")", "==", "1", ":", "self", "[", "plugin", "]", "[", "'separators'", "]", "=", "list", "(", "separators", ")", "else", ":", "if", "data_type", "!=", "'flag'", ":", "raise", "ValidateError", "(", "\"If data_type != flag the separators have to be defined\"", "\"Missing separators in plugin: {0}\"", ".", "format", "(", "plugin", ")", ")", "record_rule", "=", "vcf_section", ".", "get", "(", "'record_rule'", ",", "None", ")", "if", "record_rule", ":", "if", "not", "record_rule", "in", "[", "'min'", ",", "'max'", "]", ":", "raise", "ValidateError", "(", "\"Record rules have to be in {0}\\n\"", "\"Wrong record_rule in plugin: {1}\"", ".", "format", "(", "[", "'min'", ",", "'max'", "]", ",", "plugin", ")", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "\"Setting record rule to default: 'max'\"", ")", "return", "True" ]
Check if the section is in the proper format vcf format.

Args:
    vcf_section (dict): The information from a vcf section

Returns:
    True is it is in the proper format
[ "Check", "if", "the", "section", "is", "in", "the", "proper", "format", "vcf", "format", "." ]
train
https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/config_parser.py#L196-L290
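A reading aid for the record above: based only on the keys check_plugin reads, a plugin section that would validate could look like the sketch below. All values are invented; 'field' must be one of self.vcf_columns and 'data_type' one of self.data_types, both defined elsewhere in ConfigParser.

    # Invented example of a plugin section that would satisfy check_plugin:
    plugin_section = {
        'field': 'INFO',        # must be a valid VCF column
        'info_key': 'CADD',     # required because field == 'INFO' (non-CSQ case)
        'data_type': 'float',   # must be a known data type
        'separators': ',',      # a single separator gets wrapped into a list
        'record_rule': 'max',   # optional; 'max' is the logged default
    }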
Aluriak/tergraw
tergraw/graphutils.py
dict_to_nx
def dict_to_nx(graph, oriented=False):
    """Return an nx.Graph equivalent of given {node: succs}"""
    nxg = nx.DiGraph() if oriented else nx.Graph()
    for node, succs in graph.items():
        for succ in succs:
            nxg.add_edge(node, succ)
    return nxg
python
def dict_to_nx(graph, oriented=False):
    """Return an nx.Graph equivalent of given {node: succs}"""
    nxg = nx.DiGraph() if oriented else nx.Graph()
    for node, succs in graph.items():
        for succ in succs:
            nxg.add_edge(node, succ)
    return nxg
[ "def", "dict_to_nx", "(", "graph", ",", "oriented", "=", "False", ")", ":", "nxg", "=", "nx", ".", "DiGraph", "(", ")", "if", "oriented", "else", "nx", ".", "Graph", "(", ")", "for", "node", ",", "succs", "in", "graph", ".", "items", "(", ")", ":", "for", "succ", "in", "succs", ":", "nxg", ".", "add_edge", "(", "node", ",", "succ", ")", "return", "nxg" ]
Return an nx.Graph equivalent of given {node: succs}
[ "Return", "an", "nx", ".", "Graph", "equivalent", "of", "given", "{", "node", ":", "succs", "}" ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/graphutils.py#L12-L18
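Usage sketch for the record above (assumes networkx is installed and dict_to_nx is imported from tergraw.graphutils):

    from tergraw.graphutils import dict_to_nx

    graph = {'a': {'b', 'c'}, 'b': {'c'}}
    nxg = dict_to_nx(graph, oriented=True)
    print(sorted(nxg.edges()))  # [('a', 'b'), ('a', 'c'), ('b', 'c')]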
Aluriak/tergraw
tergraw/graphutils.py
process_input_graph
def process_input_graph(func):
    """Decorator, ensuring first argument is a networkx graph object.
    If the first arg is a dict {node: succs}, a networkx graph equivalent
    to the dict will be send in place of it."""
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        input_graph = args[0]
        if isinstance(input_graph, nx.DiGraph):
            return func(*args, **kwargs)
        else:
            nx_graph = dict_to_nx(args[0], oriented=True)
            args = [nx_graph] + list(args[1:])
            return func(*args, **kwargs)
    return wrapped_func
python
def process_input_graph(func):
    """Decorator, ensuring first argument is a networkx graph object.
    If the first arg is a dict {node: succs}, a networkx graph equivalent
    to the dict will be send in place of it."""
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        input_graph = args[0]
        if isinstance(input_graph, nx.DiGraph):
            return func(*args, **kwargs)
        else:
            nx_graph = dict_to_nx(args[0], oriented=True)
            args = [nx_graph] + list(args[1:])
            return func(*args, **kwargs)
    return wrapped_func
[ "def", "process_input_graph", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "input_graph", "=", "args", "[", "0", "]", "if", "isinstance", "(", "input_graph", ",", "nx", ".", "DiGraph", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "nx_graph", "=", "dict_to_nx", "(", "args", "[", "0", "]", ",", "oriented", "=", "True", ")", "args", "=", "[", "nx_graph", "]", "+", "list", "(", "args", "[", "1", ":", "]", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_func" ]
Decorator, ensuring first argument is a networkx graph object. If the first arg is a dict {node: succs}, a networkx graph equivalent to the dict will be send in place of it.
[ "Decorator", "ensuring", "first", "argument", "is", "a", "networkx", "graph", "object", ".", "If", "the", "first", "arg", "is", "a", "dict", "{", "node", ":", "succs", "}", "a", "networkx", "graph", "equivalent", "to", "the", "dict", "will", "be", "send", "in", "place", "of", "it", "." ]
train
https://github.com/Aluriak/tergraw/blob/7f73cd286a77611e9c73f50b1e43be4f6643ac9f/tergraw/graphutils.py#L21-L34
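Usage sketch for the decorator above; count_edges is a made-up example function, not part of tergraw:

    from tergraw.graphutils import process_input_graph

    @process_input_graph
    def count_edges(nx_graph):
        # receives an nx.DiGraph even when the caller passes a plain dict
        return nx_graph.number_of_edges()

    print(count_edges({'a': {'b', 'c'}}))  # 2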
davidcarboni/Flask-B3
b3/__init__.py
values
def values():
    """Get the full current set of B3 values.
    :return: A dict containing the keys "X-B3-TraceId", "X-B3-ParentSpanId", "X-B3-SpanId", "X-B3-Sampled"
    and "X-B3-Flags" for the current span or subspan. NB some of the values are likely be None, but
    all keys will be present.
    """
    result = {}
    try:
        # Check if there's a sub-span in progress, otherwise use the main span:
        span = g.get("subspan") if "subspan" in g else g
        for header in b3_headers:
            result[header] = span.get(header)
    except RuntimeError:
        # We're probably working outside the Application Context at this point, likely on startup:
        # https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context
        # We return a dict of empty values so the expected keys are present.
        for header in b3_headers:
            result[header] = None
    return result
python
def values():
    """Get the full current set of B3 values.
    :return: A dict containing the keys "X-B3-TraceId", "X-B3-ParentSpanId", "X-B3-SpanId", "X-B3-Sampled"
    and "X-B3-Flags" for the current span or subspan. NB some of the values are likely be None, but
    all keys will be present.
    """
    result = {}
    try:
        # Check if there's a sub-span in progress, otherwise use the main span:
        span = g.get("subspan") if "subspan" in g else g
        for header in b3_headers:
            result[header] = span.get(header)
    except RuntimeError:
        # We're probably working outside the Application Context at this point, likely on startup:
        # https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context
        # We return a dict of empty values so the expected keys are present.
        for header in b3_headers:
            result[header] = None
    return result
[ "def", "values", "(", ")", ":", "result", "=", "{", "}", "try", ":", "# Check if there's a sub-span in progress, otherwise use the main span:", "span", "=", "g", ".", "get", "(", "\"subspan\"", ")", "if", "\"subspan\"", "in", "g", "else", "g", "for", "header", "in", "b3_headers", ":", "result", "[", "header", "]", "=", "span", ".", "get", "(", "header", ")", "except", "RuntimeError", ":", "# We're probably working outside the Application Context at this point, likely on startup:", "# https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context", "# We return a dict of empty values so the expected keys are present.", "for", "header", "in", "b3_headers", ":", "result", "[", "header", "]", "=", "None", "return", "result" ]
Get the full current set of B3 values.
:return: A dict containing the keys "X-B3-TraceId", "X-B3-ParentSpanId", "X-B3-SpanId",
    "X-B3-Sampled" and "X-B3-Flags" for the current span or subspan.
    NB some of the values are likely be None, but all keys will be present.
[ "Get", "the", "full", "current", "set", "of", "B3", "values", ".", ":", "return", ":", "A", "dict", "containing", "the", "keys", "X", "-", "B3", "-", "TraceId", "X", "-", "B3", "-", "ParentSpanId", "X", "-", "B3", "-", "SpanId", "X", "-", "B3", "-", "Sampled", "and", "X", "-", "B3", "-", "Flags", "for", "the", "current", "span", "or", "subspan", ".", "NB", "some", "of", "the", "values", "are", "likely", "be", "None", "but", "all", "keys", "will", "be", "present", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L20-L39
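A minimal wiring sketch for the record above, assuming the b3 module is used as its docstrings suggest (end_span appears later in the same module; the assumption here is that it is a valid Flask after_request handler):

    from flask import Flask
    import b3

    app = Flask(__name__)
    app.before_request(b3.start_span)  # start_span() works with no arguments
    app.after_request(b3.end_span)     # assumed to accept and return the response

    @app.route('/')
    def index():
        ids = b3.values()  # all five B3 keys present; unset ones are None
        return ids['X-B3-TraceId'] or 'no trace yet'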
davidcarboni/Flask-B3
b3/__init__.py
start_span
def start_span(request_headers=None):
    """Collects incoming B3 headers and sets up values for this request as needed.
    The collected/computed values are stored on the application context g using the defined http header names as keys.
    :param request_headers: Incoming request headers can be passed explicitly.
    If not passed, Flask request.headers will be used. This enables you to pass this function to Flask.before_request().
    """
    global debug
    try:
        headers = request_headers if request_headers else request.headers
    except RuntimeError:
        # We're probably working outside the Application Context at this point, likely on startup:
        # https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context
        # We return a dict of empty values so the expected keys are present.
        headers = {}

    trace_id = headers.get(b3_trace_id)
    parent_span_id = headers.get(b3_parent_span_id)
    span_id = headers.get(b3_span_id)
    sampled = headers.get(b3_sampled)
    flags = headers.get(b3_flags)
    root_span = not trace_id

    # Collect (or generate) a trace ID
    setattr(g, b3_trace_id, trace_id or _generate_identifier())

    # Parent span, if present
    setattr(g, b3_parent_span_id, parent_span_id)

    # Collect (or set) the span ID
    setattr(g, b3_span_id, span_id or g.get(b3_trace_id))

    # Collect the "sampled" flag, if present
    # We'll propagate the sampled value unchanged if it's set.
    # We're not currently recording traces to Zipkin, so if it's present, follow the standard and propagate it,
    # otherwise it's better to leave it out, rather than make it "0".
    # This allows downstream services to make a decision if they need to.
    setattr(g, b3_sampled, sampled)

    # Set or update the debug setting
    # We'll set it to "1" if debug=True, otherwise we'll propagate it if present.
    setattr(g, b3_flags, "1" if debug else flags)

    _info("Server receive. Starting span" if trace_id else "Root span")
    _log.debug("Resolved B3 values: {values}".format(values=values()))
python
def start_span(request_headers=None):
    """Collects incoming B3 headers and sets up values for this request as needed.
    The collected/computed values are stored on the application context g using the defined http header names as keys.
    :param request_headers: Incoming request headers can be passed explicitly.
    If not passed, Flask request.headers will be used. This enables you to pass this function to Flask.before_request().
    """
    global debug
    try:
        headers = request_headers if request_headers else request.headers
    except RuntimeError:
        # We're probably working outside the Application Context at this point, likely on startup:
        # https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context
        # We return a dict of empty values so the expected keys are present.
        headers = {}

    trace_id = headers.get(b3_trace_id)
    parent_span_id = headers.get(b3_parent_span_id)
    span_id = headers.get(b3_span_id)
    sampled = headers.get(b3_sampled)
    flags = headers.get(b3_flags)
    root_span = not trace_id

    # Collect (or generate) a trace ID
    setattr(g, b3_trace_id, trace_id or _generate_identifier())

    # Parent span, if present
    setattr(g, b3_parent_span_id, parent_span_id)

    # Collect (or set) the span ID
    setattr(g, b3_span_id, span_id or g.get(b3_trace_id))

    # Collect the "sampled" flag, if present
    # We'll propagate the sampled value unchanged if it's set.
    # We're not currently recording traces to Zipkin, so if it's present, follow the standard and propagate it,
    # otherwise it's better to leave it out, rather than make it "0".
    # This allows downstream services to make a decision if they need to.
    setattr(g, b3_sampled, sampled)

    # Set or update the debug setting
    # We'll set it to "1" if debug=True, otherwise we'll propagate it if present.
    setattr(g, b3_flags, "1" if debug else flags)

    _info("Server receive. Starting span" if trace_id else "Root span")
    _log.debug("Resolved B3 values: {values}".format(values=values()))
[ "def", "start_span", "(", "request_headers", "=", "None", ")", ":", "global", "debug", "try", ":", "headers", "=", "request_headers", "if", "request_headers", "else", "request", ".", "headers", "except", "RuntimeError", ":", "# We're probably working outside the Application Context at this point, likely on startup:", "# https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context", "# We return a dict of empty values so the expected keys are present.", "headers", "=", "{", "}", "trace_id", "=", "headers", ".", "get", "(", "b3_trace_id", ")", "parent_span_id", "=", "headers", ".", "get", "(", "b3_parent_span_id", ")", "span_id", "=", "headers", ".", "get", "(", "b3_span_id", ")", "sampled", "=", "headers", ".", "get", "(", "b3_sampled", ")", "flags", "=", "headers", ".", "get", "(", "b3_flags", ")", "root_span", "=", "not", "trace_id", "# Collect (or generate) a trace ID", "setattr", "(", "g", ",", "b3_trace_id", ",", "trace_id", "or", "_generate_identifier", "(", ")", ")", "# Parent span, if present", "setattr", "(", "g", ",", "b3_parent_span_id", ",", "parent_span_id", ")", "# Collect (or set) the span ID", "setattr", "(", "g", ",", "b3_span_id", ",", "span_id", "or", "g", ".", "get", "(", "b3_trace_id", ")", ")", "# Collect the \"sampled\" flag, if present", "# We'll propagate the sampled value unchanged if it's set.", "# We're not currently recording traces to Zipkin, so if it's present, follow the standard and propagate it,", "# otherwise it's better to leave it out, rather than make it \"0\".", "# This allows downstream services to make a decision if they need to.", "setattr", "(", "g", ",", "b3_sampled", ",", "sampled", ")", "# Set or update the debug setting", "# We'll set it to \"1\" if debug=True, otherwise we'll propagate it if present.", "setattr", "(", "g", ",", "b3_flags", ",", "\"1\"", "if", "debug", "else", "flags", ")", "_info", "(", "\"Server receive. Starting span\"", "if", "trace_id", "else", "\"Root span\"", ")", "_log", ".", "debug", "(", "\"Resolved B3 values: {values}\"", ".", "format", "(", "values", "=", "values", "(", ")", ")", ")" ]
Collects incoming B3 headers and sets up values for this request as needed.
The collected/computed values are stored on the application context g using the defined http header names as keys.
:param request_headers: Incoming request headers can be passed explicitly.
    If not passed, Flask request.headers will be used. This enables you to pass this function to Flask.before_request().
[ "Collects", "incoming", "B3", "headers", "and", "sets", "up", "values", "for", "this", "request", "as", "needed", ".", "The", "collected", "/", "computed", "values", "are", "stored", "on", "the", "application", "context", "g", "using", "the", "defined", "http", "header", "names", "as", "keys", ".", ":", "param", "request_headers", ":", "Incoming", "request", "headers", "can", "be", "passed", "explicitly", ".", "If", "not", "passed", "Flask", "request", ".", "headers", "will", "be", "used", ".", "This", "enables", "you", "to", "pass", "this", "function", "to", "Flask", ".", "before_request", "()", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L42-L85
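Because start_span accepts an explicit header dict, it can also be exercised outside a live request, e.g. in a test. A sketch, assuming app is a Flask app wired to this module; the hex identifier is an arbitrary example:

    with app.test_request_context('/'):
        b3.start_span({'X-B3-TraceId': '463ac35c9f6413ad'})
        assert b3.values()['X-B3-TraceId'] == '463ac35c9f6413ad'
        # no incoming span id, so the span id defaults to the trace id
        assert b3.values()['X-B3-SpanId'] == '463ac35c9f6413ad'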
davidcarboni/Flask-B3
b3/__init__.py
span
def span(route):
    """Optional decorator for Flask routes.

    If you don't want to trace all routes using `Flask.before_request()' and 'Flask.after_request()'
    you can use this decorator as an alternative way to handle incoming B3 headers:

        @app.route('/instrumented')
        @span
        def instrumented():
            ...
            ...
            ...

    NB @span needs to come after (not before) @app.route.
    """
    @wraps(route)
    def route_decorator(*args, **kwargs):
        start_span()
        try:
            return route(*args, **kwargs)
        finally:
            end_span()
    return route_decorator
python
def span(route):
    """Optional decorator for Flask routes.

    If you don't want to trace all routes using `Flask.before_request()' and 'Flask.after_request()'
    you can use this decorator as an alternative way to handle incoming B3 headers:

        @app.route('/instrumented')
        @span
        def instrumented():
            ...
            ...
            ...

    NB @span needs to come after (not before) @app.route.
    """
    @wraps(route)
    def route_decorator(*args, **kwargs):
        start_span()
        try:
            return route(*args, **kwargs)
        finally:
            end_span()
    return route_decorator
[ "def", "span", "(", "route", ")", ":", "@", "wraps", "(", "route", ")", "def", "route_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start_span", "(", ")", "try", ":", "return", "route", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "end_span", "(", ")", "return", "route_decorator" ]
Optional decorator for Flask routes.

If you don't want to trace all routes using `Flask.before_request()' and 'Flask.after_request()'
you can use this decorator as an alternative way to handle incoming B3 headers:

    @app.route('/instrumented')
    @span
    def instrumented():
        ...
        ...
        ...

NB @span needs to come after (not before) @app.route.
[ "Optional", "decorator", "for", "Flask", "routes", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L99-L123
davidcarboni/Flask-B3
b3/__init__.py
_start_subspan
def _start_subspan(headers=None):
    """ Sets up a new span to contact a downstream service.
    This is used when making a downstream service call. It returns a dict containing the required sub-span headers.
    Each downstream call you make is handled as a new span, so call this every time you need to contact another service.

    This temporarily updates what's returned by values() to match the sub-span, so it can can also be used
    when calling e.g. a database that doesn't support B3. You'll still be able to record the client side of an
    interaction, even if the downstream server doesn't use the propagated trace information.

    You'll need to call end_subspan when you're done. You can do this using the `SubSpan` class:

        with SubSpan([headers]) as headers_b3:
            ... log.debug("Client start: calling downstream service")
            ... requests.get(<downstream service>, headers=headers_b3)
        ... log.debug("Client receive: downstream service responded")

    For the specification, see: https://github.com/openzipkin/b3-propagation
    :param headers: The headers dict. Headers will be added to this as needed.
    :return: A dict containing header values for a downstream request.
    This can be passed directly to e.g. requests.get(...).
    """
    b3 = values()
    g.subspan = {

        # Propagate the trace ID
        b3_trace_id: b3[b3_trace_id],

        # Start a new span for the outgoing request
        b3_span_id: _generate_identifier(),

        # Set the current span as the parent span
        b3_parent_span_id: b3[b3_span_id],

        b3_sampled: b3[b3_sampled],
        b3_flags: b3[b3_flags],
    }

    # Set up headers
    # NB dict() ensures we don't alter the value passed in. Maybe that's too conservative?
    result = dict(headers or {})
    result.update({
        b3_trace_id: g.subspan[b3_trace_id],
        b3_span_id: g.subspan[b3_span_id],
        b3_parent_span_id: g.subspan[b3_parent_span_id],
    })

    # Propagate only if set:
    if g.subspan[b3_sampled]:
        result[b3_sampled] = g.subspan[b3_sampled]
    if g.subspan[b3_flags]:
        result[b3_flags] = g.subspan[b3_flags]

    _info("Client start. Starting sub-span")
    _log.debug("B3 values for sub-span: {b3_headers}".format(b3_headers=values()))
    _log.debug("All headers for downstream request: {b3_headers}".format(b3_headers=result))

    return result
python
def _start_subspan(headers=None):
    """ Sets up a new span to contact a downstream service.
    This is used when making a downstream service call. It returns a dict containing the required sub-span headers.
    Each downstream call you make is handled as a new span, so call this every time you need to contact another service.

    This temporarily updates what's returned by values() to match the sub-span, so it can can also be used
    when calling e.g. a database that doesn't support B3. You'll still be able to record the client side of an
    interaction, even if the downstream server doesn't use the propagated trace information.

    You'll need to call end_subspan when you're done. You can do this using the `SubSpan` class:

        with SubSpan([headers]) as headers_b3:
            ... log.debug("Client start: calling downstream service")
            ... requests.get(<downstream service>, headers=headers_b3)
        ... log.debug("Client receive: downstream service responded")

    For the specification, see: https://github.com/openzipkin/b3-propagation
    :param headers: The headers dict. Headers will be added to this as needed.
    :return: A dict containing header values for a downstream request.
    This can be passed directly to e.g. requests.get(...).
    """
    b3 = values()
    g.subspan = {

        # Propagate the trace ID
        b3_trace_id: b3[b3_trace_id],

        # Start a new span for the outgoing request
        b3_span_id: _generate_identifier(),

        # Set the current span as the parent span
        b3_parent_span_id: b3[b3_span_id],

        b3_sampled: b3[b3_sampled],
        b3_flags: b3[b3_flags],
    }

    # Set up headers
    # NB dict() ensures we don't alter the value passed in. Maybe that's too conservative?
    result = dict(headers or {})
    result.update({
        b3_trace_id: g.subspan[b3_trace_id],
        b3_span_id: g.subspan[b3_span_id],
        b3_parent_span_id: g.subspan[b3_parent_span_id],
    })

    # Propagate only if set:
    if g.subspan[b3_sampled]:
        result[b3_sampled] = g.subspan[b3_sampled]
    if g.subspan[b3_flags]:
        result[b3_flags] = g.subspan[b3_flags]

    _info("Client start. Starting sub-span")
    _log.debug("B3 values for sub-span: {b3_headers}".format(b3_headers=values()))
    _log.debug("All headers for downstream request: {b3_headers}".format(b3_headers=result))

    return result
[ "def", "_start_subspan", "(", "headers", "=", "None", ")", ":", "b3", "=", "values", "(", ")", "g", ".", "subspan", "=", "{", "# Propagate the trace ID", "b3_trace_id", ":", "b3", "[", "b3_trace_id", "]", ",", "# Start a new span for the outgoing request", "b3_span_id", ":", "_generate_identifier", "(", ")", ",", "# Set the current span as the parent span", "b3_parent_span_id", ":", "b3", "[", "b3_span_id", "]", ",", "b3_sampled", ":", "b3", "[", "b3_sampled", "]", ",", "b3_flags", ":", "b3", "[", "b3_flags", "]", ",", "}", "# Set up headers", "# NB dict() ensures we don't alter the value passed in. Maybe that's too conservative?", "result", "=", "dict", "(", "headers", "or", "{", "}", ")", "result", ".", "update", "(", "{", "b3_trace_id", ":", "g", ".", "subspan", "[", "b3_trace_id", "]", ",", "b3_span_id", ":", "g", ".", "subspan", "[", "b3_span_id", "]", ",", "b3_parent_span_id", ":", "g", ".", "subspan", "[", "b3_parent_span_id", "]", ",", "}", ")", "# Propagate only if set:", "if", "g", ".", "subspan", "[", "b3_sampled", "]", ":", "result", "[", "b3_sampled", "]", "=", "g", ".", "subspan", "[", "b3_sampled", "]", "if", "g", ".", "subspan", "[", "b3_flags", "]", ":", "result", "[", "b3_flags", "]", "=", "g", ".", "subspan", "[", "b3_flags", "]", "_info", "(", "\"Client start. Starting sub-span\"", ")", "_log", ".", "debug", "(", "\"B3 values for sub-span: {b3_headers}\"", ".", "format", "(", "b3_headers", "=", "values", "(", ")", ")", ")", "_log", ".", "debug", "(", "\"All headers for downstream request: {b3_headers}\"", ".", "format", "(", "b3_headers", "=", "result", ")", ")", "return", "result" ]
Sets up a new span to contact a downstream service.
This is used when making a downstream service call. It returns a dict containing the required sub-span headers.
Each downstream call you make is handled as a new span, so call this every time you need to contact another service.

This temporarily updates what's returned by values() to match the sub-span, so it can can also be used
when calling e.g. a database that doesn't support B3. You'll still be able to record the client side of an
interaction, even if the downstream server doesn't use the propagated trace information.

You'll need to call end_subspan when you're done. You can do this using the `SubSpan` class:

    with SubSpan([headers]) as headers_b3:
        ... log.debug("Client start: calling downstream service")
        ... requests.get(<downstream service>, headers=headers_b3)
    ... log.debug("Client receive: downstream service responded")

For the specification, see: https://github.com/openzipkin/b3-propagation
:param headers: The headers dict. Headers will be added to this as needed.
:return: A dict containing header values for a downstream request.
    This can be passed directly to e.g. requests.get(...).
[ "Sets", "up", "a", "new", "span", "to", "contact", "a", "downstream", "service", ".", "This", "is", "used", "when", "making", "a", "downstream", "service", "call", ".", "It", "returns", "a", "dict", "containing", "the", "required", "sub", "-", "span", "headers", ".", "Each", "downstream", "call", "you", "make", "is", "handled", "as", "a", "new", "span", "so", "call", "this", "every", "time", "you", "need", "to", "contact", "another", "service", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L153-L209
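Concretely, the sub-span headers are meant to be handed to an HTTP client. A sketch using the SubSpan context manager referenced in the docstring above (requests is assumed to be available; the URL is a placeholder):

    import requests

    with SubSpan() as headers_b3:
        # headers_b3 carries the trace id, a fresh span id, and the
        # current span id as X-B3-ParentSpanId
        requests.get('https://downstream.example/api', headers=headers_b3)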
davidcarboni/Flask-B3
b3/__init__.py
_generate_identifier
def _generate_identifier():
    """
    Generates a new, random identifier in B3 format.
    :return: A 64-bit random identifier, rendered as a hex String.
    """
    bit_length = 64
    byte_length = int(bit_length / 8)
    identifier = os.urandom(byte_length)
    return hexlify(identifier).decode('ascii')
python
def _generate_identifier():
    """
    Generates a new, random identifier in B3 format.
    :return: A 64-bit random identifier, rendered as a hex String.
    """
    bit_length = 64
    byte_length = int(bit_length / 8)
    identifier = os.urandom(byte_length)
    return hexlify(identifier).decode('ascii')
[ "def", "_generate_identifier", "(", ")", ":", "bit_length", "=", "64", "byte_length", "=", "int", "(", "bit_length", "/", "8", ")", "identifier", "=", "os", ".", "urandom", "(", "byte_length", ")", "return", "hexlify", "(", "identifier", ")", ".", "decode", "(", "'ascii'", ")" ]
Generates a new, random identifier in B3 format. :return: A 64-bit random identifier, rendered as a hex String.
[ "Generates", "a", "new", "random", "identifier", "in", "B3", "format", ".", ":", "return", ":", "A", "64", "-", "bit", "random", "identifier", "rendered", "as", "a", "hex", "String", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L227-L235
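For orientation: 64 bits of os.urandom hex-encoded is always 16 lowercase hex characters. A standalone equivalent of the record above:

    import os
    from binascii import hexlify

    identifier = hexlify(os.urandom(8)).decode('ascii')
    assert len(identifier) == 16
    int(identifier, 16)  # parses as hex, as B3 expects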
davidcarboni/Flask-B3
b3/__init__.py
_info
def _info(message):
    """Convenience function to log current span values.
    """
    span = values()
    _log.debug(message + ": {span} in trace {trace}. (Parent span: {parent}).".format(
        span=span.get(b3_span_id),
        trace=span.get(b3_trace_id),
        parent=span.get(b3_parent_span_id),
    ))
python
def _info(message):
    """Convenience function to log current span values.
    """
    span = values()
    _log.debug(message + ": {span} in trace {trace}. (Parent span: {parent}).".format(
        span=span.get(b3_span_id),
        trace=span.get(b3_trace_id),
        parent=span.get(b3_parent_span_id),
    ))
[ "def", "_info", "(", "message", ")", ":", "span", "=", "values", "(", ")", "_log", ".", "debug", "(", "message", "+", "\": {span} in trace {trace}. (Parent span: {parent}).\"", ".", "format", "(", "span", "=", "span", ".", "get", "(", "b3_span_id", ")", ",", "trace", "=", "span", ".", "get", "(", "b3_trace_id", ")", ",", "parent", "=", "span", ".", "get", "(", "b3_parent_span_id", ")", ",", ")", ")" ]
Convenience function to log current span values.
[ "Convenience", "function", "to", "log", "current", "span", "values", "." ]
train
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L238-L246
proycon/flat
flat/modes/editor/views.py
pub_view
def pub_view(request, docid, configuration):
    """The initial view, does not provide the document content yet"""
    if 'autodeclare' in settings.CONFIGURATIONS[configuration]:
        for annotationtype, set in settings.CONFIGURATIONS['configuration']['autodeclare']:
            try:
                r = flat.comm.query(request, "USE pub/" + docid + " DECLARE " + annotationtype + " OF " + set)
            except Exception as e:
                return fatalerror(request,e)
    return initdoc(request, 'pub',docid, 'editor', 'editor.html', configuration=configuration)
python
def pub_view(request, docid, configuration):
    """The initial view, does not provide the document content yet"""
    if 'autodeclare' in settings.CONFIGURATIONS[configuration]:
        for annotationtype, set in settings.CONFIGURATIONS['configuration']['autodeclare']:
            try:
                r = flat.comm.query(request, "USE pub/" + docid + " DECLARE " + annotationtype + " OF " + set)
            except Exception as e:
                return fatalerror(request,e)
    return initdoc(request, 'pub',docid, 'editor', 'editor.html', configuration=configuration)
[ "def", "pub_view", "(", "request", ",", "docid", ",", "configuration", ")", ":", "if", "'autodeclare'", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", ":", "for", "annotationtype", ",", "set", "in", "settings", ".", "CONFIGURATIONS", "[", "'configuration'", "]", "[", "'autodeclare'", "]", ":", "try", ":", "r", "=", "flat", ".", "comm", ".", "query", "(", "request", ",", "\"USE pub/\"", "+", "docid", "+", "\" DECLARE \"", "+", "annotationtype", "+", "\" OF \"", "+", "set", ")", "except", "Exception", "as", "e", ":", "return", "fatalerror", "(", "request", ",", "e", ")", "return", "initdoc", "(", "request", ",", "'pub'", ",", "docid", ",", "'editor'", ",", "'editor.html'", ",", "configuration", "=", "configuration", ")" ]
The initial view, does not provide the document content yet
[ "The", "initial", "view", "does", "not", "provide", "the", "document", "content", "yet" ]
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/modes/editor/views.py#L36-L45
proycon/flat
flat/comm.py
checkversion
def checkversion(version):
    """Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal"""
    try:
        for refversion, responseversion in zip([int(x) for x in REQUIREFOLIADOCSERVE.split('.')], [int(x) for x in version.split('.')]):
            if responseversion > refversion:
                return 1 #response is newer than library
            elif responseversion < refversion:
                return -1 #response is older than library
        return 0 #versions are equal
    except ValueError:
        raise ValueError("Unable to parse version, invalid syntax")
python
def checkversion(version):
    """Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal"""
    try:
        for refversion, responseversion in zip([int(x) for x in REQUIREFOLIADOCSERVE.split('.')], [int(x) for x in version.split('.')]):
            if responseversion > refversion:
                return 1 #response is newer than library
            elif responseversion < refversion:
                return -1 #response is older than library
        return 0 #versions are equal
    except ValueError:
        raise ValueError("Unable to parse version, invalid syntax")
[ "def", "checkversion", "(", "version", ")", ":", "try", ":", "for", "refversion", ",", "responseversion", "in", "zip", "(", "[", "int", "(", "x", ")", "for", "x", "in", "REQUIREFOLIADOCSERVE", ".", "split", "(", "'.'", ")", "]", ",", "[", "int", "(", "x", ")", "for", "x", "in", "version", ".", "split", "(", "'.'", ")", "]", ")", ":", "if", "responseversion", ">", "refversion", ":", "return", "1", "#response is newer than library", "elif", "responseversion", "<", "refversion", ":", "return", "-", "1", "#response is older than library", "return", "0", "#versions are equal", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unable to parse version, invalid syntax\"", ")" ]
Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal
[ "Checks", "foliadocserve", "version", "returns", "1", "if", "the", "document", "is", "newer", "than", "the", "library", "-", "1", "if", "it", "is", "older", "0", "if", "it", "is", "equal" ]
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/comm.py#L12-L22
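Behaviour sketch for the record above, assuming for illustration that REQUIREFOLIADOCSERVE is '0.7.2' (the real constant is defined elsewhere in flat.comm):

    checkversion('0.7.2')   # 0  -> versions are equal
    checkversion('0.8.0')   # 1  -> server response is newer than required
    checkversion('0.6.9')   # -1 -> server response is older
    checkversion('banana')  # raises ValueError: unable to parse version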
proycon/flat
flat/modes/metadata/views.py
view
def view(request, namespace, docid):
    """The initial view, does not provide the document content yet"""
    if flat.users.models.hasreadpermission(request.user.username, namespace, request):
        if 'autodeclare' in settings.CONFIGURATIONS[request.session['configuration']]:
            if flat.users.models.haswritepermission(request.user.username, namespace, request):
                for annotationtype, set in settings.CONFIGURATIONS[request.session['configuration']]['autodeclare']:
                    try:
                        r = flat.comm.query(request, "USE " + namespace + "/" + docid + " DECLARE " + annotationtype + " OF " + set)
                    except Exception as e:
                        return fatalerror(request,e)
        return initdoc(request, namespace,docid, 'metadata', 'metadata.html')
    else:
        return fatalerror(request,"Permission denied")
python
def view(request, namespace, docid):
    """The initial view, does not provide the document content yet"""
    if flat.users.models.hasreadpermission(request.user.username, namespace, request):
        if 'autodeclare' in settings.CONFIGURATIONS[request.session['configuration']]:
            if flat.users.models.haswritepermission(request.user.username, namespace, request):
                for annotationtype, set in settings.CONFIGURATIONS[request.session['configuration']]['autodeclare']:
                    try:
                        r = flat.comm.query(request, "USE " + namespace + "/" + docid + " DECLARE " + annotationtype + " OF " + set)
                    except Exception as e:
                        return fatalerror(request,e)
        return initdoc(request, namespace,docid, 'metadata', 'metadata.html')
    else:
        return fatalerror(request,"Permission denied")
[ "def", "view", "(", "request", ",", "namespace", ",", "docid", ")", ":", "if", "flat", ".", "users", ".", "models", ".", "hasreadpermission", "(", "request", ".", "user", ".", "username", ",", "namespace", ",", "request", ")", ":", "if", "'autodeclare'", "in", "settings", ".", "CONFIGURATIONS", "[", "request", ".", "session", "[", "'configuration'", "]", "]", ":", "if", "flat", ".", "users", ".", "models", ".", "haswritepermission", "(", "request", ".", "user", ".", "username", ",", "namespace", ",", "request", ")", ":", "for", "annotationtype", ",", "set", "in", "settings", ".", "CONFIGURATIONS", "[", "request", ".", "session", "[", "'configuration'", "]", "]", "[", "'autodeclare'", "]", ":", "try", ":", "r", "=", "flat", ".", "comm", ".", "query", "(", "request", ",", "\"USE \"", "+", "namespace", "+", "\"/\"", "+", "docid", "+", "\" DECLARE \"", "+", "annotationtype", "+", "\" OF \"", "+", "set", ")", "except", "Exception", "as", "e", ":", "return", "fatalerror", "(", "request", ",", "e", ")", "return", "initdoc", "(", "request", ",", "namespace", ",", "docid", ",", "'metadata'", ",", "'metadata.html'", ")", "else", ":", "return", "fatalerror", "(", "request", ",", "\"Permission denied\"", ")" ]
The initial view, does not provide the document content yet
[ "The", "initial", "view", "does", "not", "provide", "the", "document", "content", "yet" ]
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/modes/metadata/views.py#L17-L30
proycon/flat
flat/views.py
initdoc
def initdoc(request, namespace, docid, mode, template, context=None, configuration=None):
    """Initialise a document (not invoked directly)"""
    perspective = request.GET.get('perspective','document')
    if context is None: context = {}
    if 'configuration' in request.session:
        configuration = request.session['configuration']
    elif configuration is None:
        return fatalerror(request, "No configuration specified")
    if configuration not in settings.CONFIGURATIONS:
        return fatalerror(request, "Specified configuration does not exist")
    flatargs = {
        'setdefinitions': True,
        'declarations': True, #implies provenance as well
        'metadata': True,
        'toc': True,
        'slices': request.GET.get('slices',settings.CONFIGURATIONS[configuration].get('slices','p:25,s:100')), #overriden either by configuration or by user
        'customslicesize': 0, #disabled for initial probe
        'textclasses': True,
    }
    error = False
    try:
        doc = flat.comm.query(request, "USE " + namespace + "/" + docid + " PROBE", **flatargs) #retrieves only the meta information, not document content
        context.update(getcontext(request,namespace,docid, doc, mode, configuration))
    except Exception as e:
        context.update(docserveerror(e))
        error = True
    if not error:
        dorequiredeclaration = 'requiredeclaration' in settings.CONFIGURATIONS[configuration] and settings.CONFIGURATIONS[configuration]['requiredeclaration']
        if dorequiredeclaration:
            if not 'declarations' in doc:
                return fatalerror(request, "Refusing to load document, missing expected declarations, none declared")
            declarations = doc['declarations']
            for annotationtype, annotationset in settings.CONFIGURATIONS[configuration]['requiredeclaration']:
                found = False
                for d in declarations:
                    if annotationtype == d['annotationtype'] and (not annotationset or annotationset == d['set']):
                        found = True
                        break
                if not found:
                    if annotationset:
                        return fatalerror(request, "Refusing to load document, missing expected declaration for annotation type " + annotationtype + "/" + annotationset)
                    else:
                        return fatalerror(request, "Refusing to load document, missing expected declaration for annotation type " + annotationtype)
        dometadataindex = 'metadataindex' in settings.CONFIGURATIONS[configuration] and settings.CONFIGURATIONS[configuration]['metadataindex']
        if dometadataindex:
            metadata = json.loads(context['metadata'])
            for metakey in settings.CONFIGURATIONS[configuration]['metadataindex']:
                if metakey in metadata:
                    MetadataIndex.objects.update_or_create(namespace=namespace,docid=docid, key=metakey,defaults={'value':metadata[metakey]})
    response = render(request, template, context)
    if 'fatalerror' in context:
        response.status_code = 500
    return response
python
def initdoc(request, namespace, docid, mode, template, context=None, configuration=None):
    """Initialise a document (not invoked directly)"""
    perspective = request.GET.get('perspective','document')
    if context is None: context = {}
    if 'configuration' in request.session:
        configuration = request.session['configuration']
    elif configuration is None:
        return fatalerror(request, "No configuration specified")
    if configuration not in settings.CONFIGURATIONS:
        return fatalerror(request, "Specified configuration does not exist")
    flatargs = {
        'setdefinitions': True,
        'declarations': True, #implies provenance as well
        'metadata': True,
        'toc': True,
        'slices': request.GET.get('slices',settings.CONFIGURATIONS[configuration].get('slices','p:25,s:100')), #overriden either by configuration or by user
        'customslicesize': 0, #disabled for initial probe
        'textclasses': True,
    }
    error = False
    try:
        doc = flat.comm.query(request, "USE " + namespace + "/" + docid + " PROBE", **flatargs) #retrieves only the meta information, not document content
        context.update(getcontext(request,namespace,docid, doc, mode, configuration))
    except Exception as e:
        context.update(docserveerror(e))
        error = True
    if not error:
        dorequiredeclaration = 'requiredeclaration' in settings.CONFIGURATIONS[configuration] and settings.CONFIGURATIONS[configuration]['requiredeclaration']
        if dorequiredeclaration:
            if not 'declarations' in doc:
                return fatalerror(request, "Refusing to load document, missing expected declarations, none declared")
            declarations = doc['declarations']
            for annotationtype, annotationset in settings.CONFIGURATIONS[configuration]['requiredeclaration']:
                found = False
                for d in declarations:
                    if annotationtype == d['annotationtype'] and (not annotationset or annotationset == d['set']):
                        found = True
                        break
                if not found:
                    if annotationset:
                        return fatalerror(request, "Refusing to load document, missing expected declaration for annotation type " + annotationtype + "/" + annotationset)
                    else:
                        return fatalerror(request, "Refusing to load document, missing expected declaration for annotation type " + annotationtype)
        dometadataindex = 'metadataindex' in settings.CONFIGURATIONS[configuration] and settings.CONFIGURATIONS[configuration]['metadataindex']
        if dometadataindex:
            metadata = json.loads(context['metadata'])
            for metakey in settings.CONFIGURATIONS[configuration]['metadataindex']:
                if metakey in metadata:
                    MetadataIndex.objects.update_or_create(namespace=namespace,docid=docid, key=metakey,defaults={'value':metadata[metakey]})
    response = render(request, template, context)
    if 'fatalerror' in context:
        response.status_code = 500
    return response
[ "def", "initdoc", "(", "request", ",", "namespace", ",", "docid", ",", "mode", ",", "template", ",", "context", "=", "None", ",", "configuration", "=", "None", ")", ":", "perspective", "=", "request", ".", "GET", ".", "get", "(", "'perspective'", ",", "'document'", ")", "if", "context", "is", "None", ":", "context", "=", "{", "}", "if", "'configuration'", "in", "request", ".", "session", ":", "configuration", "=", "request", ".", "session", "[", "'configuration'", "]", "elif", "configuration", "is", "None", ":", "return", "fatalerror", "(", "request", ",", "\"No configuration specified\"", ")", "if", "configuration", "not", "in", "settings", ".", "CONFIGURATIONS", ":", "return", "fatalerror", "(", "request", ",", "\"Specified configuration does not exist\"", ")", "flatargs", "=", "{", "'setdefinitions'", ":", "True", ",", "'declarations'", ":", "True", ",", "#implies provenance as well", "'metadata'", ":", "True", ",", "'toc'", ":", "True", ",", "'slices'", ":", "request", ".", "GET", ".", "get", "(", "'slices'", ",", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", ".", "get", "(", "'slices'", ",", "'p:25,s:100'", ")", ")", ",", "#overriden either by configuration or by user", "'customslicesize'", ":", "0", ",", "#disabled for initial probe", "'textclasses'", ":", "True", ",", "}", "error", "=", "False", "try", ":", "doc", "=", "flat", ".", "comm", ".", "query", "(", "request", ",", "\"USE \"", "+", "namespace", "+", "\"/\"", "+", "docid", "+", "\" PROBE\"", ",", "*", "*", "flatargs", ")", "#retrieves only the meta information, not document content", "context", ".", "update", "(", "getcontext", "(", "request", ",", "namespace", ",", "docid", ",", "doc", ",", "mode", ",", "configuration", ")", ")", "except", "Exception", "as", "e", ":", "context", ".", "update", "(", "docserveerror", "(", "e", ")", ")", "error", "=", "True", "if", "not", "error", ":", "dorequiredeclaration", "=", "'requiredeclaration'", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "and", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'requiredeclaration'", "]", "if", "dorequiredeclaration", ":", "if", "not", "'declarations'", "in", "doc", ":", "return", "fatalerror", "(", "request", ",", "\"Refusing to load document, missing expected declarations, none declared\"", ")", "declarations", "=", "doc", "[", "'declarations'", "]", "for", "annotationtype", ",", "annotationset", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'requiredeclaration'", "]", ":", "found", "=", "False", "for", "d", "in", "declarations", ":", "if", "annotationtype", "==", "d", "[", "'annotationtype'", "]", "and", "(", "not", "annotationset", "or", "annotationset", "==", "d", "[", "'set'", "]", ")", ":", "found", "=", "True", "break", "if", "not", "found", ":", "if", "annotationset", ":", "return", "fatalerror", "(", "request", ",", "\"Refusing to load document, missing expected declaration for annotation type \"", "+", "annotationtype", "+", "\"/\"", "+", "annotationset", ")", "else", ":", "return", "fatalerror", "(", "request", ",", "\"Refusing to load document, missing expected declaration for annotation type \"", "+", "annotationtype", ")", "dometadataindex", "=", "'metadataindex'", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "and", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'metadataindex'", "]", "if", "dometadataindex", ":", "metadata", "=", "json", ".", "loads", "(", "context", "[", "'metadata'", "]", ")", "for", 
"metakey", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'metadataindex'", "]", ":", "if", "metakey", "in", "metadata", ":", "MetadataIndex", ".", "objects", ".", "update_or_create", "(", "namespace", "=", "namespace", ",", "docid", "=", "docid", ",", "key", "=", "metakey", ",", "defaults", "=", "{", "'value'", ":", "metadata", "[", "metakey", "]", "}", ")", "response", "=", "render", "(", "request", ",", "template", ",", "context", ")", "if", "'fatalerror'", "in", "context", ":", "response", ".", "status_code", "=", "500", "return", "response" ]
Initialise a document (not invoked directly)
[ "Initialise", "a", "document", "(", "not", "invoked", "directly", ")" ]
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/views.py#L123-L177
proycon/flat
flat/views.py
query_helper
def query_helper(request,namespace, docid, configuration=None):
    """Does the actual query, called by query() or pub_query(), not directly"""
    flatargs = {
        'customslicesize': request.POST.get('customslicesize',settings.CONFIGURATIONS[configuration].get('customslicesize','50')), #for pagination of search results
    }

    #stupid compatibility stuff
    if sys.version < '3':
        if hasattr(request, 'body'):
            data = json.loads(unicode(request.body,'utf-8')) #pylint: disable=undefined-variable
        else: #older django
            data = json.loads(unicode(request.raw_post_data,'utf-8')) #pylint: disable=undefined-variable
    else:
        if hasattr(request, 'body'):
            data = json.loads(str(request.body,'utf-8'))
        else: #older django
            data = json.loads(str(request.raw_post_data,'utf-8'))

    if not data['queries']:
        return HttpResponseForbidden("No queries to run")

    for query in data['queries']:
        #get document selector and check it doesn't violate the namespace
        docselector, query = getdocumentselector(query)
        if not docselector:
            return HttpResponseForbidden("Query does not start with a valid document selector (USE keyword)!")
        elif docselector[0] != namespace:
            return HttpResponseForbidden("Query would affect a different namespace than your current one, forbidden!")

        if query != "GET" and query[:4] != "CQL " and query[:4] != "META":
            #parse query on this end to catch syntax errors prior to sending, should be fast enough anyway
            #first resolve variables to dummies (real ones will be handled server-side) as it won't be valid FQL otherwise
            query = query.replace("$FOLIADOCSERVE_PROCESSOR", "PROCESSOR name \"foliadocserve\"")
            query = query.replace("$FLAT_PROCESSOR", "PROCESSOR name \"FLAT\" version \"" + VERSION + "\" host \"" + request.get_host() + "\" src \"" + request.build_absolute_uri("/") + "\"") #also another instance in comm.py
            try:
                query = fql.Query(query)
            except fql.SyntaxError as e:
                return HttpResponseForbidden("FQL Syntax Error: " + str(e))
            needwritepermission = query.declarations or query.action and query.action.action != "SELECT"
        else:
            needwritepermission = False

        if configuration != "pub":
            if needwritepermission and not flat.users.models.haswritepermission(request.user.username, namespace, request):
                return HttpResponseForbidden("Permission denied, no write access")

    query = "\n".join(data['queries']) #throw all queries on a big pile to transmit
    try:
        d = flat.comm.query(request, query,**flatargs)
    except Exception as e:
        if sys.version < '3':
            errmsg = docserveerror(e)['fatalerror_text']
            return HttpResponseForbidden("FoLiA Document Server error: ".encode('utf-8') + errmsg.encode('utf-8'))
        else:
            return HttpResponseForbidden("FoLiA Document Server error: " + docserveerror(e)['fatalerror_text'])
    return HttpResponse(json.dumps(d).encode('utf-8'), content_type='application/json')
python
def query_helper(request,namespace, docid, configuration=None):
    """Does the actual query, called by query() or pub_query(), not directly"""
    flatargs = {
        'customslicesize': request.POST.get('customslicesize',settings.CONFIGURATIONS[configuration].get('customslicesize','50')), #for pagination of search results
    }

    #stupid compatibility stuff
    if sys.version < '3':
        if hasattr(request, 'body'):
            data = json.loads(unicode(request.body,'utf-8')) #pylint: disable=undefined-variable
        else: #older django
            data = json.loads(unicode(request.raw_post_data,'utf-8')) #pylint: disable=undefined-variable
    else:
        if hasattr(request, 'body'):
            data = json.loads(str(request.body,'utf-8'))
        else: #older django
            data = json.loads(str(request.raw_post_data,'utf-8'))

    if not data['queries']:
        return HttpResponseForbidden("No queries to run")

    for query in data['queries']:
        #get document selector and check it doesn't violate the namespace
        docselector, query = getdocumentselector(query)
        if not docselector:
            return HttpResponseForbidden("Query does not start with a valid document selector (USE keyword)!")
        elif docselector[0] != namespace:
            return HttpResponseForbidden("Query would affect a different namespace than your current one, forbidden!")

        if query != "GET" and query[:4] != "CQL " and query[:4] != "META":
            #parse query on this end to catch syntax errors prior to sending, should be fast enough anyway
            #first resolve variables to dummies (real ones will be handled server-side) as it won't be valid FQL otherwise
            query = query.replace("$FOLIADOCSERVE_PROCESSOR", "PROCESSOR name \"foliadocserve\"")
            query = query.replace("$FLAT_PROCESSOR", "PROCESSOR name \"FLAT\" version \"" + VERSION + "\" host \"" + request.get_host() + "\" src \"" + request.build_absolute_uri("/") + "\"") #also another instance in comm.py
            try:
                query = fql.Query(query)
            except fql.SyntaxError as e:
                return HttpResponseForbidden("FQL Syntax Error: " + str(e))
            needwritepermission = query.declarations or query.action and query.action.action != "SELECT"
        else:
            needwritepermission = False

        if configuration != "pub":
            if needwritepermission and not flat.users.models.haswritepermission(request.user.username, namespace, request):
                return HttpResponseForbidden("Permission denied, no write access")

    query = "\n".join(data['queries']) #throw all queries on a big pile to transmit
    try:
        d = flat.comm.query(request, query,**flatargs)
    except Exception as e:
        if sys.version < '3':
            errmsg = docserveerror(e)['fatalerror_text']
            return HttpResponseForbidden("FoLiA Document Server error: ".encode('utf-8') + errmsg.encode('utf-8'))
        else:
            return HttpResponseForbidden("FoLiA Document Server error: " + docserveerror(e)['fatalerror_text'])
    return HttpResponse(json.dumps(d).encode('utf-8'), content_type='application/json')
[ "def", "query_helper", "(", "request", ",", "namespace", ",", "docid", ",", "configuration", "=", "None", ")", ":", "flatargs", "=", "{", "'customslicesize'", ":", "request", ".", "POST", ".", "get", "(", "'customslicesize'", ",", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", ".", "get", "(", "'customslicesize'", ",", "'50'", ")", ")", ",", "#for pagination of search results", "}", "#stupid compatibility stuff", "if", "sys", ".", "version", "<", "'3'", ":", "if", "hasattr", "(", "request", ",", "'body'", ")", ":", "data", "=", "json", ".", "loads", "(", "unicode", "(", "request", ".", "body", ",", "'utf-8'", ")", ")", "#pylint: disable=undefined-variable", "else", ":", "#older django", "data", "=", "json", ".", "loads", "(", "unicode", "(", "request", ".", "raw_post_data", ",", "'utf-8'", ")", ")", "#pylint: disable=undefined-variable", "else", ":", "if", "hasattr", "(", "request", ",", "'body'", ")", ":", "data", "=", "json", ".", "loads", "(", "str", "(", "request", ".", "body", ",", "'utf-8'", ")", ")", "else", ":", "#older django", "data", "=", "json", ".", "loads", "(", "str", "(", "request", ".", "raw_post_data", ",", "'utf-8'", ")", ")", "if", "not", "data", "[", "'queries'", "]", ":", "return", "HttpResponseForbidden", "(", "\"No queries to run\"", ")", "for", "query", "in", "data", "[", "'queries'", "]", ":", "#get document selector and check it doesn't violate the namespace", "docselector", ",", "query", "=", "getdocumentselector", "(", "query", ")", "if", "not", "docselector", ":", "return", "HttpResponseForbidden", "(", "\"Query does not start with a valid document selector (USE keyword)!\"", ")", "elif", "docselector", "[", "0", "]", "!=", "namespace", ":", "return", "HttpResponseForbidden", "(", "\"Query would affect a different namespace than your current one, forbidden!\"", ")", "if", "query", "!=", "\"GET\"", "and", "query", "[", ":", "4", "]", "!=", "\"CQL \"", "and", "query", "[", ":", "4", "]", "!=", "\"META\"", ":", "#parse query on this end to catch syntax errors prior to sending, should be fast enough anyway", "#first resolve variables to dummies (real ones will be handled server-side) as it won't be valid FQL otherwise", "query", "=", "query", ".", "replace", "(", "\"$FOLIADOCSERVE_PROCESSOR\"", ",", "\"PROCESSOR name \\\"foliadocserve\\\"\"", ")", "query", "=", "query", ".", "replace", "(", "\"$FLAT_PROCESSOR\"", ",", "\"PROCESSOR name \\\"FLAT\\\" version \\\"\"", "+", "VERSION", "+", "\"\\\" host \\\"\"", "+", "request", ".", "get_host", "(", ")", "+", "\"\\\" src \\\"\"", "+", "request", ".", "build_absolute_uri", "(", "\"/\"", ")", "+", "\"\\\"\"", ")", "#also another instance in comm.py", "try", ":", "query", "=", "fql", ".", "Query", "(", "query", ")", "except", "fql", ".", "SyntaxError", "as", "e", ":", "return", "HttpResponseForbidden", "(", "\"FQL Syntax Error: \"", "+", "str", "(", "e", ")", ")", "needwritepermission", "=", "query", ".", "declarations", "or", "query", ".", "action", "and", "query", ".", "action", ".", "action", "!=", "\"SELECT\"", "else", ":", "needwritepermission", "=", "False", "if", "configuration", "!=", "\"pub\"", ":", "if", "needwritepermission", "and", "not", "flat", ".", "users", ".", "models", ".", "haswritepermission", "(", "request", ".", "user", ".", "username", ",", "namespace", ",", "request", ")", ":", "return", "HttpResponseForbidden", "(", "\"Permission denied, no write access\"", ")", "query", "=", "\"\\n\"", ".", "join", "(", "data", "[", "'queries'", "]", ")", "#throw all queries on a big pile to 
transmit", "try", ":", "d", "=", "flat", ".", "comm", ".", "query", "(", "request", ",", "query", ",", "*", "*", "flatargs", ")", "except", "Exception", "as", "e", ":", "if", "sys", ".", "version", "<", "'3'", ":", "errmsg", "=", "docserveerror", "(", "e", ")", "[", "'fatalerror_text'", "]", "return", "HttpResponseForbidden", "(", "\"FoLiA Document Server error: \"", ".", "encode", "(", "'utf-8'", ")", "+", "errmsg", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "return", "HttpResponseForbidden", "(", "\"FoLiA Document Server error: \"", "+", "docserveerror", "(", "e", ")", "[", "'fatalerror_text'", "]", ")", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "d", ")", ".", "encode", "(", "'utf-8'", ")", ",", "content_type", "=", "'application/json'", ")" ]
Does the actual query, called by query() or pub_query(), not directly
[ "Does", "the", "actual", "query", "called", "by", "query", "()", "or", "pub_query", "()", "not", "directly" ]
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/views.py#L179-L233
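A note on the record above: query_helper reads a JSON POST body of the form {"queries": [...]}, and every query must open with a USE document selector matching the namespace in the URL, otherwise the view answers with HttpResponseForbidden. A minimal sketch of a conforming payload; the namespace, document id and query text are illustrative, not taken from the record:

import json

# Hypothetical client-side payload for query_helper. The "USE" selector
# is required, and "mynamespace" must equal the namespace in the URL.
payload = {
    "queries": [
        'USE mynamespace/mydoc SELECT w WHERE text = "hello"',
    ]
}
body = json.dumps(payload).encode("utf-8")  # sent as the raw request body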
proycon/flat
flat/modes/viewer/views.py
pub_poll
def pub_poll(request, docid):
    """The initial viewer, does not provide the document content yet"""
    try:
        r = flat.comm.get(request, '/poll/pub/' + docid + '/', False)
    except URLError:
        return HttpResponseForbidden("Unable to connect to the document server [viewer/poll]")
    return HttpResponse(r, content_type='application/json')
python
def pub_poll(request, docid):
    """The initial viewer, does not provide the document content yet"""
    try:
        r = flat.comm.get(request, '/poll/pub/' + docid + '/', False)
    except URLError:
        return HttpResponseForbidden("Unable to connect to the document server [viewer/poll]")
    return HttpResponse(r, content_type='application/json')
[ "def", "pub_poll", "(", "request", ",", "docid", ")", ":", "try", ":", "r", "=", "flat", ".", "comm", ".", "get", "(", "request", ",", "'/poll/pub/'", "+", "docid", "+", "'/'", ",", "False", ")", "except", "URLError", ":", "return", "HttpResponseForbidden", "(", "\"Unable to connect to the document server [viewer/poll]\"", ")", "return", "HttpResponse", "(", "r", ",", "content_type", "=", "'application/json'", ")" ]
The initial viewer, does not provide the document content yet
[ "The", "initial", "viewer", "does", "not", "provide", "the", "document", "content", "yet" ]
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/modes/viewer/views.py#L37-L43
bluedynamics/cone.ugm
src/cone/ugm/browser/autoincrement.py
AutoIncrementForm.prepare
def prepare(_next, self):
    """Hook after prepare and set 'id' disabled.
    """
    _next(self)
    if not self.autoincrement_support:
        return
    id_field = self.form['id']
    del id_field.attrs['required']
    id_field.attrs['disabled'] = 'disabled'
    id_field.getter = _('auto_incremented', default='auto incremented')
python
def prepare(_next, self):
    """Hook after prepare and set 'id' disabled.
    """
    _next(self)
    if not self.autoincrement_support:
        return
    id_field = self.form['id']
    del id_field.attrs['required']
    id_field.attrs['disabled'] = 'disabled'
    id_field.getter = _('auto_incremented', default='auto incremented')
[ "def", "prepare", "(", "_next", ",", "self", ")", ":", "_next", "(", "self", ")", "if", "not", "self", ".", "autoincrement_support", ":", "return", "id_field", "=", "self", ".", "form", "[", "'id'", "]", "del", "id_field", ".", "attrs", "[", "'required'", "]", "id_field", ".", "attrs", "[", "'disabled'", "]", "=", "'disabled'", "id_field", ".", "getter", "=", "_", "(", "'auto_incremented'", ",", "default", "=", "'auto incremented'", ")" ]
Hook after prepare and set 'id' disabled.
[ "Hook", "after", "prepare", "and", "set", "id", "disabled", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/autoincrement.py#L52-L61
bluedynamics/cone.ugm
src/cone/ugm/browser/actions.py
delete_user_action
def delete_user_action(model, request):
    """Delete user from database.
    """
    try:
        users = model.parent.backend
        uid = model.model.name
        del users[uid]
        users()
        model.parent.invalidate()
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'delete_user_from_database',
            default="Deleted user '${uid}' from database.",
            mapping={'uid': uid}
        ))
        return {
            'success': True,
            'message': message
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
python
def delete_user_action(model, request):
    """Delete user from database.
    """
    try:
        users = model.parent.backend
        uid = model.model.name
        del users[uid]
        users()
        model.parent.invalidate()
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'delete_user_from_database',
            default="Deleted user '${uid}' from database.",
            mapping={'uid': uid}
        ))
        return {
            'success': True,
            'message': message
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
[ "def", "delete_user_action", "(", "model", ",", "request", ")", ":", "try", ":", "users", "=", "model", ".", "parent", ".", "backend", "uid", "=", "model", ".", "model", ".", "name", "del", "users", "[", "uid", "]", "users", "(", ")", "model", ".", "parent", ".", "invalidate", "(", ")", "localizer", "=", "get_localizer", "(", "request", ")", "message", "=", "localizer", ".", "translate", "(", "_", "(", "'delete_user_from_database'", ",", "default", "=", "\"Deleted user '${uid}' from database.\"", ",", "mapping", "=", "{", "'uid'", ":", "uid", "}", ")", ")", "return", "{", "'success'", ":", "True", ",", "'message'", ":", "message", "}", "except", "Exception", "as", "e", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "str", "(", "e", ")", "}" ]
Delete user from database.
[ "Delete", "user", "from", "database", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/actions.py#L67-L90
bluedynamics/cone.ugm
src/cone/ugm/browser/actions.py
user_add_to_group_action
def user_add_to_group_action(model, request):
    """Add user to group.
    """
    group_id = request.params.get('id')
    if not group_id:
        group_ids = request.params.getall('id[]')
    else:
        group_ids = [group_id]
    try:
        user = model.model
        validate_add_users_to_groups(model, [user.id], group_ids)
        groups = user.root.groups
        for group_id in group_ids:
            groups[group_id].add(user.name)
        groups()
        model.parent.invalidate(user.name)
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'added_user_to_group',
            default="Added user '${uid}' to group '${gid}'.",
            mapping={
                'uid': user.id,
                'gid': ', '.join(group_ids)
            }
        ))
        return {
            'success': True,
            'message': message
        }
    except ManageMembershipError as e:
        if e.reason is not LM_TARGET_GID_NOT_ALLOWED:
            raise Exception(u"Unknown ManageMembershipError reason.")
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'lm_add_target_gid_not_allowed',
            default=(
                "Failed adding user '${uid}' to group '${gid}'. "
                "Manage membership denied for target group."
            ),
            mapping={
                'uid': user.id,
                'gid': e.data
            }
        ))
        return {
            'success': False,
            'message': message
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
python
def user_add_to_group_action(model, request):
    """Add user to group.
    """
    group_id = request.params.get('id')
    if not group_id:
        group_ids = request.params.getall('id[]')
    else:
        group_ids = [group_id]
    try:
        user = model.model
        validate_add_users_to_groups(model, [user.id], group_ids)
        groups = user.root.groups
        for group_id in group_ids:
            groups[group_id].add(user.name)
        groups()
        model.parent.invalidate(user.name)
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'added_user_to_group',
            default="Added user '${uid}' to group '${gid}'.",
            mapping={
                'uid': user.id,
                'gid': ', '.join(group_ids)
            }
        ))
        return {
            'success': True,
            'message': message
        }
    except ManageMembershipError as e:
        if e.reason is not LM_TARGET_GID_NOT_ALLOWED:
            raise Exception(u"Unknown ManageMembershipError reason.")
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'lm_add_target_gid_not_allowed',
            default=(
                "Failed adding user '${uid}' to group '${gid}'. "
                "Manage membership denied for target group."
            ),
            mapping={
                'uid': user.id,
                'gid': e.data
            }
        ))
        return {
            'success': False,
            'message': message
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
[ "def", "user_add_to_group_action", "(", "model", ",", "request", ")", ":", "group_id", "=", "request", ".", "params", ".", "get", "(", "'id'", ")", "if", "not", "group_id", ":", "group_ids", "=", "request", ".", "params", ".", "getall", "(", "'id[]'", ")", "else", ":", "group_ids", "=", "[", "group_id", "]", "try", ":", "user", "=", "model", ".", "model", "validate_add_users_to_groups", "(", "model", ",", "[", "user", ".", "id", "]", ",", "group_ids", ")", "groups", "=", "user", ".", "root", ".", "groups", "for", "group_id", "in", "group_ids", ":", "groups", "[", "group_id", "]", ".", "add", "(", "user", ".", "name", ")", "groups", "(", ")", "model", ".", "parent", ".", "invalidate", "(", "user", ".", "name", ")", "localizer", "=", "get_localizer", "(", "request", ")", "message", "=", "localizer", ".", "translate", "(", "_", "(", "'added_user_to_group'", ",", "default", "=", "\"Added user '${uid}' to group '${gid}'.\"", ",", "mapping", "=", "{", "'uid'", ":", "user", ".", "id", ",", "'gid'", ":", "', '", ".", "join", "(", "group_ids", ")", "}", ")", ")", "return", "{", "'success'", ":", "True", ",", "'message'", ":", "message", "}", "except", "ManageMembershipError", "as", "e", ":", "if", "e", ".", "reason", "is", "not", "LM_TARGET_GID_NOT_ALLOWED", ":", "raise", "Exception", "(", "u\"Unknown ManageMembershipError reason.\"", ")", "localizer", "=", "get_localizer", "(", "request", ")", "message", "=", "localizer", ".", "translate", "(", "_", "(", "'lm_add_target_gid_not_allowed'", ",", "default", "=", "(", "\"Failed adding user '${uid}' to group '${gid}'. \"", "\"Manage membership denied for target group.\"", ")", ",", "mapping", "=", "{", "'uid'", ":", "user", ".", "id", ",", "'gid'", ":", "e", ".", "data", "}", ")", ")", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "message", "}", "except", "Exception", "as", "e", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "str", "(", "e", ")", "}" ]
Add user to group.
[ "Add", "user", "to", "group", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/actions.py#L99-L151
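The *_action helpers in this group of records share one request convention worth spelling out: a single target is passed as an id parameter, several targets as repeated id[] parameters, which the server side reads via request.params.getall('id[]'). A small standard-library sketch of how a client could encode both forms; the group names are made up:

from urllib.parse import urlencode

# One target group: id=editors
single = urlencode({'id': 'editors'})

# Several target groups: repeated id[] keys, matching getall('id[]')
multi = urlencode([('id[]', 'editors'), ('id[]', 'reviewers')])

print(single)  # id=editors
print(multi)   # id%5B%5D=editors&id%5B%5D=reviewers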
bluedynamics/cone.ugm
src/cone/ugm/browser/actions.py
delete_group_action
def delete_group_action(model, request):
    """Delete group from database.
    """
    try:
        groups = model.parent.backend
        uid = model.model.name
        del groups[uid]
        groups()
        model.parent.invalidate()
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
    localizer = get_localizer(request)
    message = localizer.translate(_(
        'deleted_group',
        default='Deleted group from database'
    ))
    return {
        'success': True,
        'message': message
    }
python
def delete_group_action(model, request):
    """Delete group from database.
    """
    try:
        groups = model.parent.backend
        uid = model.model.name
        del groups[uid]
        groups()
        model.parent.invalidate()
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
    localizer = get_localizer(request)
    message = localizer.translate(_(
        'deleted_group',
        default='Deleted group from database'
    ))
    return {
        'success': True,
        'message': message
    }
[ "def", "delete_group_action", "(", "model", ",", "request", ")", ":", "try", ":", "groups", "=", "model", ".", "parent", ".", "backend", "uid", "=", "model", ".", "model", ".", "name", "del", "groups", "[", "uid", "]", "groups", "(", ")", "model", ".", "parent", ".", "invalidate", "(", ")", "except", "Exception", "as", "e", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "str", "(", "e", ")", "}", "localizer", "=", "get_localizer", "(", "request", ")", "message", "=", "localizer", ".", "translate", "(", "_", "(", "'deleted_group'", ",", "default", "=", "'Deleted group from database'", ")", ")", "return", "{", "'success'", ":", "True", ",", "'message'", ":", "message", "}" ]
Delete group from database.
[ "Delete", "group", "from", "database", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/actions.py#L237-L259
bluedynamics/cone.ugm
src/cone/ugm/browser/actions.py
group_add_user_action
def group_add_user_action(model, request):
    """Add user to group.
    """
    user_id = request.params.get('id')
    if not user_id:
        user_ids = request.params.getall('id[]')
    else:
        user_ids = [user_id]
    try:
        group = model.model
        validate_add_users_to_groups(model, user_ids, [group.id])
        for user_id in user_ids:
            group.add(user_id)
        group()
        model.parent.invalidate(group.name)
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'added_user_to_group',
            default="Added user '${uid}' to group '${gid}'.",
            mapping={
                'uid': ', '.join(user_ids),
                'gid': group.id
            }
        ))
        return {
            'success': True,
            'message': message
        }
    except ManageMembershipError as e:
        if e.reason is not LM_TARGET_UID_NOT_ALLOWED:
            raise Exception(u"Unknown ManageMembershipError reason.")
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'lm_add_target_uid_not_allowed',
            default=(
                "Failed adding user '${uid}' to group '${gid}'. "
                "Manage membership denied for user."
            ),
            mapping={
                'uid': e.data,
                'gid': group.id
            }
        ))
        return {
            'success': False,
            'message': message
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
python
def group_add_user_action(model, request):
    """Add user to group.
    """
    user_id = request.params.get('id')
    if not user_id:
        user_ids = request.params.getall('id[]')
    else:
        user_ids = [user_id]
    try:
        group = model.model
        validate_add_users_to_groups(model, user_ids, [group.id])
        for user_id in user_ids:
            group.add(user_id)
        group()
        model.parent.invalidate(group.name)
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'added_user_to_group',
            default="Added user '${uid}' to group '${gid}'.",
            mapping={
                'uid': ', '.join(user_ids),
                'gid': group.id
            }
        ))
        return {
            'success': True,
            'message': message
        }
    except ManageMembershipError as e:
        if e.reason is not LM_TARGET_UID_NOT_ALLOWED:
            raise Exception(u"Unknown ManageMembershipError reason.")
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'lm_add_target_uid_not_allowed',
            default=(
                "Failed adding user '${uid}' to group '${gid}'. "
                "Manage membership denied for user."
            ),
            mapping={
                'uid': e.data,
                'gid': group.id
            }
        ))
        return {
            'success': False,
            'message': message
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e)
        }
[ "def", "group_add_user_action", "(", "model", ",", "request", ")", ":", "user_id", "=", "request", ".", "params", ".", "get", "(", "'id'", ")", "if", "not", "user_id", ":", "user_ids", "=", "request", ".", "params", ".", "getall", "(", "'id[]'", ")", "else", ":", "user_ids", "=", "[", "user_id", "]", "try", ":", "group", "=", "model", ".", "model", "validate_add_users_to_groups", "(", "model", ",", "user_ids", ",", "[", "group", ".", "id", "]", ")", "for", "user_id", "in", "user_ids", ":", "group", ".", "add", "(", "user_id", ")", "group", "(", ")", "model", ".", "parent", ".", "invalidate", "(", "group", ".", "name", ")", "localizer", "=", "get_localizer", "(", "request", ")", "message", "=", "localizer", ".", "translate", "(", "_", "(", "'added_user_to_group'", ",", "default", "=", "\"Added user '${uid}' to group '${gid}'.\"", ",", "mapping", "=", "{", "'uid'", ":", "', '", ".", "join", "(", "user_ids", ")", ",", "'gid'", ":", "group", ".", "id", "}", ")", ")", "return", "{", "'success'", ":", "True", ",", "'message'", ":", "message", "}", "except", "ManageMembershipError", "as", "e", ":", "if", "e", ".", "reason", "is", "not", "LM_TARGET_UID_NOT_ALLOWED", ":", "raise", "Exception", "(", "u\"Unknown ManageMembershipError reason.\"", ")", "localizer", "=", "get_localizer", "(", "request", ")", "message", "=", "localizer", ".", "translate", "(", "_", "(", "'lm_add_target_uid_not_allowed'", ",", "default", "=", "(", "\"Failed adding user '${uid}' to group '${gid}'. \"", "\"Manage membership denied for user.\"", ")", ",", "mapping", "=", "{", "'uid'", ":", "e", ".", "data", ",", "'gid'", ":", "group", ".", "id", "}", ")", ")", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "message", "}", "except", "Exception", "as", "e", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "str", "(", "e", ")", "}" ]
Add user to group.
[ "Add", "user", "to", "group", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/actions.py#L268-L319
bluedynamics/cone.ugm
src/cone/ugm/browser/roles.py
PrincipalRolesForm.prepare
def prepare(_next, self):
    """Hook after prepare and set 'principal_roles' as selection to
    ``self.form``.
    """
    _next(self)
    if not self.roles_support:
        return
    if not self.request.has_permission('manage', self.model.parent):
        # XXX: yafowil selection display renderer
        return
    value = []
    if self.action_resource == 'edit':
        value = self.model.model.roles
    roles_widget = factory(
        'field:label:select',
        name='principal_roles',
        value=value,
        props={
            'label': _('roles', default='Roles'),
            'multivalued': True,
            'vocabulary': self.roles_vocab,
            'format': 'single',
            'listing_tag': 'ul',
            'listing_label_position': 'after',
        })
    save_widget = self.form['save']
    self.form.insertbefore(roles_widget, save_widget)
python
def prepare(_next, self):
    """Hook after prepare and set 'principal_roles' as selection to
    ``self.form``.
    """
    _next(self)
    if not self.roles_support:
        return
    if not self.request.has_permission('manage', self.model.parent):
        # XXX: yafowil selection display renderer
        return
    value = []
    if self.action_resource == 'edit':
        value = self.model.model.roles
    roles_widget = factory(
        'field:label:select',
        name='principal_roles',
        value=value,
        props={
            'label': _('roles', default='Roles'),
            'multivalued': True,
            'vocabulary': self.roles_vocab,
            'format': 'single',
            'listing_tag': 'ul',
            'listing_label_position': 'after',
        })
    save_widget = self.form['save']
    self.form.insertbefore(roles_widget, save_widget)
[ "def", "prepare", "(", "_next", ",", "self", ")", ":", "_next", "(", "self", ")", "if", "not", "self", ".", "roles_support", ":", "return", "if", "not", "self", ".", "request", ".", "has_permission", "(", "'manage'", ",", "self", ".", "model", ".", "parent", ")", ":", "# XXX: yafowil selection display renderer", "return", "value", "=", "[", "]", "if", "self", ".", "action_resource", "==", "'edit'", ":", "value", "=", "self", ".", "model", ".", "model", ".", "roles", "roles_widget", "=", "factory", "(", "'field:label:select'", ",", "name", "=", "'principal_roles'", ",", "value", "=", "value", ",", "props", "=", "{", "'label'", ":", "_", "(", "'roles'", ",", "default", "=", "'Roles'", ")", ",", "'multivalued'", ":", "True", ",", "'vocabulary'", ":", "self", ".", "roles_vocab", ",", "'format'", ":", "'single'", ",", "'listing_tag'", ":", "'ul'", ",", "'listing_label_position'", ":", "'after'", ",", "}", ")", "save_widget", "=", "self", ".", "form", "[", "'save'", "]", "self", ".", "form", ".", "insertbefore", "(", "roles_widget", ",", "save_widget", ")" ]
Hook after prepare and set 'principal_roles' as selection to ``self.form``.
[ "Hook", "after", "prepare", "and", "set", "principal_roles", "as", "selection", "to", "self", ".", "form", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/roles.py#L26-L52
bluedynamics/cone.ugm
src/cone/ugm/__init__.py
initialize_ugm
def initialize_ugm(config, global_config, local_config):
    """Initialize UGM.
    """
    # custom UGM styles
    cfg.merged.css.protected.append((static_resources, 'styles.css'))
    # custom UGM javascript
    cfg.merged.js.protected.append((static_resources, 'ugm.js'))
    # UGM settings
    register_config('ugm_general', GeneralSettings)
    register_config('ugm_server', ServerSettings)
    register_config('ugm_users', UsersSettings)
    register_config('ugm_groups', GroupsSettings)
    register_config('ugm_roles', RolesSettings)
    register_config('ugm_localmanager', LocalManagerSettings)
    # Users container
    register_entry('users', users_factory)
    # Groups container
    register_entry('groups', groups_factory)
    # register default acl's
    # XXX: define permissions referring users, user, groups respective group only
    acl_registry.register(ugm_user_acl, User, 'user')
    acl_registry.register(ugm_default_acl, Users, 'users')
    acl_registry.register(ugm_default_acl, Group, 'group')
    acl_registry.register(ugm_default_acl, Groups, 'groups')
    # localmanager config file location
    lm_config = local_config.get('ugm.localmanager_config', '')
    os.environ['LOCAL_MANAGER_CFG_FILE'] = lm_config
    # add translation
    config.add_translation_dirs('cone.ugm:locale/')
    # static resources
    config.add_view(static_resources, name='cone.ugm.static')
    # scan browser package
    config.scan('cone.ugm.browser')
python
def initialize_ugm(config, global_config, local_config):
    """Initialize UGM.
    """
    # custom UGM styles
    cfg.merged.css.protected.append((static_resources, 'styles.css'))
    # custom UGM javascript
    cfg.merged.js.protected.append((static_resources, 'ugm.js'))
    # UGM settings
    register_config('ugm_general', GeneralSettings)
    register_config('ugm_server', ServerSettings)
    register_config('ugm_users', UsersSettings)
    register_config('ugm_groups', GroupsSettings)
    register_config('ugm_roles', RolesSettings)
    register_config('ugm_localmanager', LocalManagerSettings)
    # Users container
    register_entry('users', users_factory)
    # Groups container
    register_entry('groups', groups_factory)
    # register default acl's
    # XXX: define permissions referring users, user, groups respective group only
    acl_registry.register(ugm_user_acl, User, 'user')
    acl_registry.register(ugm_default_acl, Users, 'users')
    acl_registry.register(ugm_default_acl, Group, 'group')
    acl_registry.register(ugm_default_acl, Groups, 'groups')
    # localmanager config file location
    lm_config = local_config.get('ugm.localmanager_config', '')
    os.environ['LOCAL_MANAGER_CFG_FILE'] = lm_config
    # add translation
    config.add_translation_dirs('cone.ugm:locale/')
    # static resources
    config.add_view(static_resources, name='cone.ugm.static')
    # scan browser package
    config.scan('cone.ugm.browser')
[ "def", "initialize_ugm", "(", "config", ",", "global_config", ",", "local_config", ")", ":", "# custom UGM styles", "cfg", ".", "merged", ".", "css", ".", "protected", ".", "append", "(", "(", "static_resources", ",", "'styles.css'", ")", ")", "# custom UGM javascript", "cfg", ".", "merged", ".", "js", ".", "protected", ".", "append", "(", "(", "static_resources", ",", "'ugm.js'", ")", ")", "# UGM settings", "register_config", "(", "'ugm_general'", ",", "GeneralSettings", ")", "register_config", "(", "'ugm_server'", ",", "ServerSettings", ")", "register_config", "(", "'ugm_users'", ",", "UsersSettings", ")", "register_config", "(", "'ugm_groups'", ",", "GroupsSettings", ")", "register_config", "(", "'ugm_roles'", ",", "RolesSettings", ")", "register_config", "(", "'ugm_localmanager'", ",", "LocalManagerSettings", ")", "# Users container", "register_entry", "(", "'users'", ",", "users_factory", ")", "# Groups container", "register_entry", "(", "'groups'", ",", "groups_factory", ")", "# register default acl's", "# XXX: define permissions referring users, user, groups respective group only", "acl_registry", ".", "register", "(", "ugm_user_acl", ",", "User", ",", "'user'", ")", "acl_registry", ".", "register", "(", "ugm_default_acl", ",", "Users", ",", "'users'", ")", "acl_registry", ".", "register", "(", "ugm_default_acl", ",", "Group", ",", "'group'", ")", "acl_registry", ".", "register", "(", "ugm_default_acl", ",", "Groups", ",", "'groups'", ")", "# localmanager config file location", "lm_config", "=", "local_config", ".", "get", "(", "'ugm.localmanager_config'", ",", "''", ")", "os", ".", "environ", "[", "'LOCAL_MANAGER_CFG_FILE'", "]", "=", "lm_config", "# add translation", "config", ".", "add_translation_dirs", "(", "'cone.ugm:locale/'", ")", "# static resources", "config", ".", "add_view", "(", "static_resources", ",", "name", "=", "'cone.ugm.static'", ")", "# scan browser package", "config", ".", "scan", "(", "'cone.ugm.browser'", ")" ]
Initialize UGM.
[ "Initialize", "UGM", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/__init__.py#L62-L103
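initialize_ugm hands the 'ugm.localmanager_config' value from the deployment's local configuration to the LOCAL_MANAGER_CFG_FILE environment variable. A tiny sketch of that hand-off in isolation; the dictionary and file path below stand in for what the deployment machinery would pass and are not taken from the record:

import os

# Stand-in for the local_config mapping a deployment ini section would
# provide, e.g. via a line such as: ugm.localmanager_config = <path>
local_config = {'ugm.localmanager_config': '/path/to/localmanager.xml'}

lm_config = local_config.get('ugm.localmanager_config', '')
os.environ['LOCAL_MANAGER_CFG_FILE'] = lm_config  # mirrors initialize_ugm
assert os.environ['LOCAL_MANAGER_CFG_FILE'] == '/path/to/localmanager.xml'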
bluedynamics/cone.ugm
src/cone/ugm/browser/expires.py
expiration_extractor
def expiration_extractor(widget, data):
    """Extract expiration information.

    - If active flag not set, account is disabled (value 0).
    - If active flag set and value is UNSET, account never expires.
    - If active flag set and datetime chosen, account expires at given datetime.
    - Timestamp in seconds since epoch is returned.
    """
    active = int(data.request.get('%s.active' % widget.name, '0'))
    if not active:
        return 0
    expires = data.extracted
    if expires:
        return time.mktime(expires.utctimetuple())
    return UNSET
python
def expiration_extractor(widget, data):
    """Extract expiration information.

    - If active flag not set, account is disabled (value 0).
    - If active flag set and value is UNSET, account never expires.
    - If active flag set and datetime chosen, account expires at given datetime.
    - Timestamp in seconds since epoch is returned.
    """
    active = int(data.request.get('%s.active' % widget.name, '0'))
    if not active:
        return 0
    expires = data.extracted
    if expires:
        return time.mktime(expires.utctimetuple())
    return UNSET
[ "def", "expiration_extractor", "(", "widget", ",", "data", ")", ":", "active", "=", "int", "(", "data", ".", "request", ".", "get", "(", "'%s.active'", "%", "widget", ".", "name", ",", "'0'", ")", ")", "if", "not", "active", ":", "return", "0", "expires", "=", "data", ".", "extracted", "if", "expires", ":", "return", "time", ".", "mktime", "(", "expires", ".", "utctimetuple", "(", ")", ")", "return", "UNSET" ]
Extract expiration information.

- If active flag not set, account is disabled (value 0).
- If active flag set and value is UNSET, account never expires.
- If active flag set and datetime chosen, account expires at given datetime.
- Timestamp in seconds since epoch is returned.
[ "Extract", "expiration", "information", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/expires.py#L23-L38
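The docstring above names three outcomes: 0 when the active flag is off, UNSET when active but no datetime was extracted, and otherwise an epoch timestamp. The timestamp branch in isolation, with an arbitrary example date:

import time
from datetime import datetime

# Same conversion expiration_extractor applies to a chosen datetime.
# Note: time.mktime interprets the tuple in local time, so the exact
# number depends on the machine's timezone.
expires = datetime(2030, 1, 1)
stamp = time.mktime(expires.utctimetuple())
print(int(stamp))  # roughly 1893456000, timezone dependent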
bluedynamics/cone.ugm
src/cone/ugm/browser/expires.py
ExpirationForm.prepare
def prepare(_next, self):
    """Hook after prepare and set expiration widget to ``self.form``.
    """
    _next(self)
    cfg = ugm_general(self.model)
    if cfg.attrs['users_account_expiration'] != 'True':
        return
    mode = 'edit'
    if not self.request.has_permission(
            'manage_expiration', self.model.parent):
        mode = 'display'
    if self.action_resource == 'edit':
        attr = cfg.attrs['users_expires_attr']
        unit = int(cfg.attrs['users_expires_unit'])
        value = int(self.model.attrs.get(attr, 0))
        # if format days, convert to seconds
        if unit == 0:
            value *= 86400
    else:
        value = UNSET
    expires_widget = factory(
        'field:label:expiration',
        name='active',
        value=value,
        props={
            'label': _('active', default='Active')
        },
        mode=mode
    )
    save_widget = self.form['save']
    self.form.insertbefore(expires_widget, save_widget)
python
def prepare(_next, self):
    """Hook after prepare and set expiration widget to ``self.form``.
    """
    _next(self)
    cfg = ugm_general(self.model)
    if cfg.attrs['users_account_expiration'] != 'True':
        return
    mode = 'edit'
    if not self.request.has_permission(
            'manage_expiration', self.model.parent):
        mode = 'display'
    if self.action_resource == 'edit':
        attr = cfg.attrs['users_expires_attr']
        unit = int(cfg.attrs['users_expires_unit'])
        value = int(self.model.attrs.get(attr, 0))
        # if format days, convert to seconds
        if unit == 0:
            value *= 86400
    else:
        value = UNSET
    expires_widget = factory(
        'field:label:expiration',
        name='active',
        value=value,
        props={
            'label': _('active', default='Active')
        },
        mode=mode
    )
    save_widget = self.form['save']
    self.form.insertbefore(expires_widget, save_widget)
[ "def", "prepare", "(", "_next", ",", "self", ")", ":", "_next", "(", "self", ")", "cfg", "=", "ugm_general", "(", "self", ".", "model", ")", "if", "cfg", ".", "attrs", "[", "'users_account_expiration'", "]", "!=", "'True'", ":", "return", "mode", "=", "'edit'", "if", "not", "self", ".", "request", ".", "has_permission", "(", "'manage_expiration'", ",", "self", ".", "model", ".", "parent", ")", ":", "mode", "=", "'display'", "if", "self", ".", "action_resource", "==", "'edit'", ":", "attr", "=", "cfg", ".", "attrs", "[", "'users_expires_attr'", "]", "unit", "=", "int", "(", "cfg", ".", "attrs", "[", "'users_expires_unit'", "]", ")", "value", "=", "int", "(", "self", ".", "model", ".", "attrs", ".", "get", "(", "attr", ",", "0", ")", ")", "# if format days, convert to seconds", "if", "unit", "==", "0", ":", "value", "*=", "86400", "else", ":", "value", "=", "UNSET", "expires_widget", "=", "factory", "(", "'field:label:expiration'", ",", "name", "=", "'active'", ",", "value", "=", "value", ",", "props", "=", "{", "'label'", ":", "_", "(", "'active'", ",", "default", "=", "'Active'", ")", "}", ",", "mode", "=", "mode", ")", "save_widget", "=", "self", ".", "form", "[", "'save'", "]", "self", ".", "form", ".", "insertbefore", "(", "expires_widget", ",", "save_widget", ")" ]
Hook after prepare and set expiration widget to ``self.form``.
[ "Hook", "after", "prepare", "and", "set", "expiration", "widget", "to", "self", ".", "form", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/expires.py#L127-L158
bluedynamics/cone.ugm
src/cone/ugm/browser/portrait.py
portrait_image
def portrait_image(model, request):
    """XXX: needs polishing. Return configured default portrait if not
    set on user.
    """
    response = Response()
    cfg = ugm_general(model)
    response.body = model.attrs[cfg.attrs['users_portrait_attr']]
    response.headers['Content-Type'] = 'image/jpeg'
    response.headers['Cache-Control'] = 'max-age=0'
    return response
python
def portrait_image(model, request):
    """XXX: needs polishing. Return configured default portrait if not
    set on user.
    """
    response = Response()
    cfg = ugm_general(model)
    response.body = model.attrs[cfg.attrs['users_portrait_attr']]
    response.headers['Content-Type'] = 'image/jpeg'
    response.headers['Cache-Control'] = 'max-age=0'
    return response
[ "def", "portrait_image", "(", "model", ",", "request", ")", ":", "response", "=", "Response", "(", ")", "cfg", "=", "ugm_general", "(", "model", ")", "response", ".", "body", "=", "model", ".", "attrs", "[", "cfg", ".", "attrs", "[", "'users_portrait_attr'", "]", "]", "response", ".", "headers", "[", "'Content-Type'", "]", "=", "'image/jpeg'", "response", ".", "headers", "[", "'Cache-Control'", "]", "=", "'max-age=0'", "return", "response" ]
XXX: needs polishing. Return configured default portrait if not set on user.
[ "XXX", ":", "needs", "polishing", ".", "Return", "configured", "default", "portrait", "if", "not", "set", "on", "user", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/portrait.py#L22-L31
bluedynamics/cone.ugm
src/cone/ugm/browser/portrait.py
PortraitForm.prepare
def prepare(_next, self):
    """Hook after prepare and set 'portrait' as image widget to
    ``self.form``.
    """
    _next(self)
    if not self.portrait_support:
        return
    model = self.model
    request = self.request
    if request.has_permission('edit_user', model.parent):
        mode = 'edit'
    else:
        mode = 'display'
    cfg = ugm_general(model)
    image_attr = cfg.attrs['users_portrait_attr']
    image_accept = cfg.attrs['users_portrait_accept']
    image_width = int(cfg.attrs['users_portrait_width'])
    image_height = int(cfg.attrs['users_portrait_height'])
    image_data = model.attrs.get(image_attr)
    if image_data:
        image_value = {
            'file': BytesIO(image_data),
            'mimetype': 'image/jpeg',
        }
        image_url = make_url(request, node=model, resource='portrait_image')
    else:
        image_value = UNSET
        resource = 'cone.ugm.static/images/default_portrait.jpg'
        image_url = make_url(request, node=model.root, resource=resource)
    portrait_widget = factory(
        'field:label:error:image',
        name='portrait',
        value=image_value,
        props={
            'label': _('portrait', default='Portrait'),
            'src': image_url,
            'alt': _('portrait', default='Portrait'),
            'accept': image_accept,
            'minsize': (image_width, image_height),
            'crop': {
                'size': (image_width, image_height),
                'fitting': True,
            }
        },
        mode=mode)
    save_widget = self.form['save']
    self.form.insertbefore(portrait_widget, save_widget)
python
def prepare(_next, self):
    """Hook after prepare and set 'portrait' as image widget to
    ``self.form``.
    """
    _next(self)
    if not self.portrait_support:
        return
    model = self.model
    request = self.request
    if request.has_permission('edit_user', model.parent):
        mode = 'edit'
    else:
        mode = 'display'
    cfg = ugm_general(model)
    image_attr = cfg.attrs['users_portrait_attr']
    image_accept = cfg.attrs['users_portrait_accept']
    image_width = int(cfg.attrs['users_portrait_width'])
    image_height = int(cfg.attrs['users_portrait_height'])
    image_data = model.attrs.get(image_attr)
    if image_data:
        image_value = {
            'file': BytesIO(image_data),
            'mimetype': 'image/jpeg',
        }
        image_url = make_url(request, node=model, resource='portrait_image')
    else:
        image_value = UNSET
        resource = 'cone.ugm.static/images/default_portrait.jpg'
        image_url = make_url(request, node=model.root, resource=resource)
    portrait_widget = factory(
        'field:label:error:image',
        name='portrait',
        value=image_value,
        props={
            'label': _('portrait', default='Portrait'),
            'src': image_url,
            'alt': _('portrait', default='Portrait'),
            'accept': image_accept,
            'minsize': (image_width, image_height),
            'crop': {
                'size': (image_width, image_height),
                'fitting': True,
            }
        },
        mode=mode)
    save_widget = self.form['save']
    self.form.insertbefore(portrait_widget, save_widget)
[ "def", "prepare", "(", "_next", ",", "self", ")", ":", "_next", "(", "self", ")", "if", "not", "self", ".", "portrait_support", ":", "return", "model", "=", "self", ".", "model", "request", "=", "self", ".", "request", "if", "request", ".", "has_permission", "(", "'edit_user'", ",", "model", ".", "parent", ")", ":", "mode", "=", "'edit'", "else", ":", "mode", "=", "'display'", "cfg", "=", "ugm_general", "(", "model", ")", "image_attr", "=", "cfg", ".", "attrs", "[", "'users_portrait_attr'", "]", "image_accept", "=", "cfg", ".", "attrs", "[", "'users_portrait_accept'", "]", "image_width", "=", "int", "(", "cfg", ".", "attrs", "[", "'users_portrait_width'", "]", ")", "image_height", "=", "int", "(", "cfg", ".", "attrs", "[", "'users_portrait_height'", "]", ")", "image_data", "=", "model", ".", "attrs", ".", "get", "(", "image_attr", ")", "if", "image_data", ":", "image_value", "=", "{", "'file'", ":", "BytesIO", "(", "image_data", ")", ",", "'mimetype'", ":", "'image/jpeg'", ",", "}", "image_url", "=", "make_url", "(", "request", ",", "node", "=", "model", ",", "resource", "=", "'portrait_image'", ")", "else", ":", "image_value", "=", "UNSET", "resource", "=", "'cone.ugm.static/images/default_portrait.jpg'", "image_url", "=", "make_url", "(", "request", ",", "node", "=", "model", ".", "root", ",", "resource", "=", "resource", ")", "portrait_widget", "=", "factory", "(", "'field:label:error:image'", ",", "name", "=", "'portrait'", ",", "value", "=", "image_value", ",", "props", "=", "{", "'label'", ":", "_", "(", "'portrait'", ",", "default", "=", "'Portrait'", ")", ",", "'src'", ":", "image_url", ",", "'alt'", ":", "_", "(", "'portrait'", ",", "default", "=", "'Portrait'", ")", ",", "'accept'", ":", "image_accept", ",", "'minsize'", ":", "(", "image_width", ",", "image_height", ")", ",", "'crop'", ":", "{", "'size'", ":", "(", "image_width", ",", "image_height", ")", ",", "'fitting'", ":", "True", ",", "}", "}", ",", "mode", "=", "mode", ")", "save_widget", "=", "self", ".", "form", "[", "'save'", "]", "self", ".", "form", ".", "insertbefore", "(", "portrait_widget", ",", "save_widget", ")" ]
Hook after prepare and set 'portrait' as image widget to ``self.form``.
[ "Hook", "after", "prepare", "and", "set", "portrait", "as", "image", "widget", "to", "self", ".", "form", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/portrait.py#L45-L92
bluedynamics/cone.ugm
src/cone/ugm/model/localmanager.py
LocalManager.local_manager_consider_for_user
def local_manager_consider_for_user(self):
    """Flag whether local manager ACL should be considered for current
    authenticated user.
    """
    if not self.local_management_enabled:
        return False
    request = get_current_request()
    if authenticated_userid(request) == security.ADMIN_USER:
        return False
    roles = security.authenticated_user(request).roles
    if 'admin' in roles or 'manager' in roles:
        return False
    return True
python
def local_manager_consider_for_user(self):
    """Flag whether local manager ACL should be considered for current
    authenticated user.
    """
    if not self.local_management_enabled:
        return False
    request = get_current_request()
    if authenticated_userid(request) == security.ADMIN_USER:
        return False
    roles = security.authenticated_user(request).roles
    if 'admin' in roles or 'manager' in roles:
        return False
    return True
[ "def", "local_manager_consider_for_user", "(", "self", ")", ":", "if", "not", "self", ".", "local_management_enabled", ":", "return", "False", "request", "=", "get_current_request", "(", ")", "if", "authenticated_userid", "(", "request", ")", "==", "security", ".", "ADMIN_USER", ":", "return", "False", "roles", "=", "security", ".", "authenticated_user", "(", "request", ")", ".", "roles", "if", "'admin'", "in", "roles", "or", "'manager'", "in", "roles", ":", "return", "False", "return", "True" ]
Flag whether local manager ACL should be considered for current authenticated user.
[ "Flag", "whether", "local", "manager", "ACL", "should", "be", "considered", "for", "current", "authenticated", "user", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L73-L85
bluedynamics/cone.ugm
src/cone/ugm/model/localmanager.py
LocalManager.local_manager_gid
def local_manager_gid(self):
    """Group id of local manager group of current authenticated member.

    Currently a user can be assigned only to one local manager group. If
    more than one local manager group is configured, an error is raised.
    """
    config = self.root['settings']['ugm_localmanager'].attrs
    user = security.authenticated_user(get_current_request())
    if not user:
        return None
    gids = user.group_ids
    adm_gids = list()
    for gid in gids:
        rule = config.get(gid)
        if rule:
            adm_gids.append(gid)
    if len(adm_gids) == 0:
        return None
    if len(adm_gids) > 1:
        msg = (u"Authenticated member defined in local manager "
               u"groups %s but only one management group allowed for "
               u"each user. Please contact System Administrator in "
               u"order to fix this problem.")
        exc = msg % ', '.join(["'%s'" % gid for gid in adm_gids])
        raise Exception(exc)
    return adm_gids[0]
python
def local_manager_gid(self):
    """Group id of local manager group of current authenticated member.

    Currently a user can be assigned only to one local manager group. If
    more than one local manager group is configured, an error is raised.
    """
    config = self.root['settings']['ugm_localmanager'].attrs
    user = security.authenticated_user(get_current_request())
    if not user:
        return None
    gids = user.group_ids
    adm_gids = list()
    for gid in gids:
        rule = config.get(gid)
        if rule:
            adm_gids.append(gid)
    if len(adm_gids) == 0:
        return None
    if len(adm_gids) > 1:
        msg = (u"Authenticated member defined in local manager "
               u"groups %s but only one management group allowed for "
               u"each user. Please contact System Administrator in "
               u"order to fix this problem.")
        exc = msg % ', '.join(["'%s'" % gid for gid in adm_gids])
        raise Exception(exc)
    return adm_gids[0]
[ "def", "local_manager_gid", "(", "self", ")", ":", "config", "=", "self", ".", "root", "[", "'settings'", "]", "[", "'ugm_localmanager'", "]", ".", "attrs", "user", "=", "security", ".", "authenticated_user", "(", "get_current_request", "(", ")", ")", "if", "not", "user", ":", "return", "None", "gids", "=", "user", ".", "group_ids", "adm_gids", "=", "list", "(", ")", "for", "gid", "in", "gids", ":", "rule", "=", "config", ".", "get", "(", "gid", ")", "if", "rule", ":", "adm_gids", ".", "append", "(", "gid", ")", "if", "len", "(", "adm_gids", ")", "==", "0", ":", "return", "None", "if", "len", "(", "adm_gids", ")", ">", "1", ":", "msg", "=", "(", "u\"Authenticated member defined in local manager \"", "u\"groups %s but only one management group allowed for \"", "u\"each user. Please contact System Administrator in \"", "u\"order to fix this problem.\"", ")", "exc", "=", "msg", "%", "', '", ".", "join", "(", "[", "\"'%s'\"", "%", "gid", "for", "gid", "in", "adm_gids", "]", ")", "raise", "Exception", "(", "exc", ")", "return", "adm_gids", "[", "0", "]" ]
Group id of local manager group of current authenticated member. Currently a user can be assigned only to one local manager group. If more than one local manager group is configured, an error is raised.
[ "Group", "id", "of", "local", "manager", "group", "of", "current", "authenticated", "member", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L89-L114
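The loop in local_manager_gid amounts to intersecting the user's group ids with the configured local manager rules, and anything but zero or one match is rejected. A stand-alone rephrasing with made-up data; config and group_ids below are illustrative stand-ins for the real settings attrs and user.group_ids:

# One configured local manager group, user is a member of it plus an
# unrelated group; exactly one match means that group id is returned.
config = {'admins_a': {'target': ['group_x'], 'default': []}}
group_ids = ['admins_a', 'group_x']

adm_gids = [gid for gid in group_ids if config.get(gid)]
assert adm_gids == ['admins_a']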
bluedynamics/cone.ugm
src/cone/ugm/model/localmanager.py
LocalManager.local_manager_rule
def local_manager_rule(self):
    """Return rule for local manager.
    """
    adm_gid = self.local_manager_gid
    if not adm_gid:
        return None
    config = self.root['settings']['ugm_localmanager'].attrs
    return config[adm_gid]
python
def local_manager_rule(self):
    """Return rule for local manager.
    """
    adm_gid = self.local_manager_gid
    if not adm_gid:
        return None
    config = self.root['settings']['ugm_localmanager'].attrs
    return config[adm_gid]
[ "def", "local_manager_rule", "(", "self", ")", ":", "adm_gid", "=", "self", ".", "local_manager_gid", "if", "not", "adm_gid", ":", "return", "None", "config", "=", "self", ".", "root", "[", "'settings'", "]", "[", "'ugm_localmanager'", "]", ".", "attrs", "return", "config", "[", "adm_gid", "]" ]
Return rule for local manager.
[ "Return", "rule", "for", "local", "manager", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L118-L125
bluedynamics/cone.ugm
src/cone/ugm/model/localmanager.py
LocalManager.local_manager_target_uids
def local_manager_target_uids(self):
    """Target uid's for local manager.
    """
    groups = self.root['groups'].backend
    managed_uids = set()
    for gid in self.local_manager_target_gids:
        group = groups.get(gid)
        if group:
            managed_uids.update(group.member_ids)
    return list(managed_uids)
python
def local_manager_target_uids(self):
    """Target uid's for local manager.
    """
    groups = self.root['groups'].backend
    managed_uids = set()
    for gid in self.local_manager_target_gids:
        group = groups.get(gid)
        if group:
            managed_uids.update(group.member_ids)
    return list(managed_uids)
[ "def", "local_manager_target_uids", "(", "self", ")", ":", "groups", "=", "self", ".", "root", "[", "'groups'", "]", ".", "backend", "managed_uids", "=", "set", "(", ")", "for", "gid", "in", "self", ".", "local_manager_target_gids", ":", "group", "=", "groups", ".", "get", "(", "gid", ")", "if", "group", ":", "managed_uids", ".", "update", "(", "group", ".", "member_ids", ")", "return", "list", "(", "managed_uids", ")" ]
Target uid's for local manager.
[ "Target", "uid", "s", "for", "local", "manager", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L149-L158
bluedynamics/cone.ugm
src/cone/ugm/model/localmanager.py
LocalManager.local_manager_is_default
def local_manager_is_default(self, adm_gid, gid):
    """Check whether gid is default group for local manager group.
    """
    config = self.root['settings']['ugm_localmanager'].attrs
    rule = config[adm_gid]
    if gid not in rule['target']:
        raise Exception(u"group '%s' not managed by '%s'" % (gid, adm_gid))
    return gid in rule['default']
python
def local_manager_is_default(self, adm_gid, gid):
    """Check whether gid is default group for local manager group.
    """
    config = self.root['settings']['ugm_localmanager'].attrs
    rule = config[adm_gid]
    if gid not in rule['target']:
        raise Exception(u"group '%s' not managed by '%s'" % (gid, adm_gid))
    return gid in rule['default']
[ "def", "local_manager_is_default", "(", "self", ",", "adm_gid", ",", "gid", ")", ":", "config", "=", "self", ".", "root", "[", "'settings'", "]", "[", "'ugm_localmanager'", "]", ".", "attrs", "rule", "=", "config", "[", "adm_gid", "]", "if", "gid", "not", "in", "rule", "[", "'target'", "]", ":", "raise", "Exception", "(", "u\"group '%s' not managed by '%s'\"", "%", "(", "gid", ",", "adm_gid", ")", ")", "return", "gid", "in", "rule", "[", "'default'", "]" ]
Check whether gid is default group for local manager group.
[ "Check", "whether", "gid", "is", "default", "group", "for", "local", "manager", "group", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/model/localmanager.py#L161-L168
bluedynamics/cone.ugm
src/cone/ugm/browser/user.py
UserForm.form_field_definitions
def form_field_definitions(self):
    """Hook optional_login extractor if necessary for form defaults.
    """
    schema = copy.deepcopy(form_field_definitions.user)
    uid, login = self._get_auth_attrs()
    if uid != login:
        field = schema.get(login, schema['default'])
        if field['chain'].find('*optional_login') == -1:
            field['chain'] = '%s:%s' % (
                '*optional_login', field['chain'])
            if not field.get('custom'):
                field['custom'] = dict()
            field['custom']['optional_login'] = \
                (['context.optional_login'], [], [], [], [])
        schema[login] = field
    return schema
python
def form_field_definitions(self):
    """Hook optional_login extractor if necessary for form defaults.
    """
    schema = copy.deepcopy(form_field_definitions.user)
    uid, login = self._get_auth_attrs()
    if uid != login:
        field = schema.get(login, schema['default'])
        if field['chain'].find('*optional_login') == -1:
            field['chain'] = '%s:%s' % (
                '*optional_login', field['chain'])
            if not field.get('custom'):
                field['custom'] = dict()
            field['custom']['optional_login'] = \
                (['context.optional_login'], [], [], [], [])
        schema[login] = field
    return schema
[ "def", "form_field_definitions", "(", "self", ")", ":", "schema", "=", "copy", ".", "deepcopy", "(", "form_field_definitions", ".", "user", ")", "uid", ",", "login", "=", "self", ".", "_get_auth_attrs", "(", ")", "if", "uid", "!=", "login", ":", "field", "=", "schema", ".", "get", "(", "login", ",", "schema", "[", "'default'", "]", ")", "if", "field", "[", "'chain'", "]", ".", "find", "(", "'*optional_login'", ")", "==", "-", "1", ":", "field", "[", "'chain'", "]", "=", "'%s:%s'", "%", "(", "'*optional_login'", ",", "field", "[", "'chain'", "]", ")", "if", "not", "field", ".", "get", "(", "'custom'", ")", ":", "field", "[", "'custom'", "]", "=", "dict", "(", ")", "field", "[", "'custom'", "]", "[", "'optional_login'", "]", "=", "(", "[", "'context.optional_login'", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "schema", "[", "login", "]", "=", "field", "return", "schema" ]
Hook optional_login extractor if necessary for form defaults.
[ "Hook", "optional_login", "extractor", "if", "necessary", "for", "form", "defaults", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/user.py#L183-L198
bluedynamics/cone.ugm
src/cone/ugm/browser/remote.py
remote_add_user
def remote_add_user(model, request):
    """Add user via remote service.

    Returns a JSON response containing success state and a message
    indicating what happened::

        {
            success: true, // respective false
            message: 'message'
        }

    Expected request parameters:

    id
        New user id.

    password
        User password to be set initially (optional).

    roles
        Comma separated role names the user initially has.

    groups
        Comma separated group names the user should initially be member of.

    attr.*
        User attributes to be set. I.e. ``attr.mail`` would set the mail
        attribute for the newly created user. All request parameters
        prefixed with ``attr`` get checked against user attribute attrmap
        from settings.

    Restrictions

    - All values, whether single or multi valued, are passed as string or
      list of strings to the create function.
    """
    params = request.params
    uid = params.get('id')
    if not uid:
        return {
            'success': False,
            'message': u"No user ID given.",
        }
    users = model.backend
    if uid in users:
        return {
            'success': False,
            'message': u"User with given ID already exists.",
        }
    password = params.get('password')
    add_roles = params.get('roles', '')
    add_roles = [val.strip() for val in add_roles.split(',') if val]
    add_groups = params.get('groups', '')
    add_groups = [val.strip() for val in add_groups.split(',') if val]
    attrs = dict()
    for key, val in params.items():
        if not key.startswith('attr.'):
            continue
        key = key[key.find('.') + 1:]
        attrs[key] = val
    settings = ugm_users(model)
    attrmap = settings.attrs.users_form_attrmap
    exposed = settings.attrs.users_exposed_attributes
    if not exposed:
        exposed = list()
    valid_attrs = attrmap.keys() + exposed
    checked_attrs = dict()
    for key in valid_attrs:
        val = attrs.get(key)
        if not val:
            continue
        checked_attrs[key] = val
    try:
        user = users.create(uid, **checked_attrs)
        message = u""
        from cone.app.security import DEFAULT_ROLES
        available_roles = [role[0] for role in DEFAULT_ROLES]
        for role in add_roles:
            if role not in available_roles:
                message += u"Role '%s' given but inexistent. " % role
                continue
            user.add_role(role)
        groups = users.parent.groups
        for group in add_groups:
            if group not in groups:
                message += u"Group '%s' given but inexistent. " % group
                continue
            groups[group].add(uid)
        users.parent()
        if password is not None:
            users.passwd(uid, None, password)
        message += u"Created user with ID '%s'." % uid
        return {
            'success': True,
            'message': message,
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e),
        }
    finally:
        model.invalidate()
python
def remote_add_user(model, request):
    """Add user via remote service.

    Returns a JSON response containing success state and a message
    indicating what happened::

        {
            success: true, // respective false
            message: 'message'
        }

    Expected request parameters:

    id
        New user id.

    password
        User password to be set initially (optional).

    roles
        Comma separated role names the user initially has.

    groups
        Comma separated group names the user should initially be member of.

    attr.*
        User attributes to be set. I.e. ``attr.mail`` would set the mail
        attribute for the newly created user. All request parameters
        prefixed with ``attr`` get checked against user attribute attrmap
        from settings.

    Restrictions

    - All values, whether single or multi valued, are passed as string or
      list of strings to the create function.
    """
    params = request.params
    uid = params.get('id')
    if not uid:
        return {
            'success': False,
            'message': u"No user ID given.",
        }
    users = model.backend
    if uid in users:
        return {
            'success': False,
            'message': u"User with given ID already exists.",
        }
    password = params.get('password')
    add_roles = params.get('roles', '')
    add_roles = [val.strip() for val in add_roles.split(',') if val]
    add_groups = params.get('groups', '')
    add_groups = [val.strip() for val in add_groups.split(',') if val]
    attrs = dict()
    for key, val in params.items():
        if not key.startswith('attr.'):
            continue
        key = key[key.find('.') + 1:]
        attrs[key] = val
    settings = ugm_users(model)
    attrmap = settings.attrs.users_form_attrmap
    exposed = settings.attrs.users_exposed_attributes
    if not exposed:
        exposed = list()
    valid_attrs = attrmap.keys() + exposed
    checked_attrs = dict()
    for key in valid_attrs:
        val = attrs.get(key)
        if not val:
            continue
        checked_attrs[key] = val
    try:
        user = users.create(uid, **checked_attrs)
        message = u""
        from cone.app.security import DEFAULT_ROLES
        available_roles = [role[0] for role in DEFAULT_ROLES]
        for role in add_roles:
            if role not in available_roles:
                message += u"Role '%s' given but inexistent. " % role
                continue
            user.add_role(role)
        groups = users.parent.groups
        for group in add_groups:
            if group not in groups:
                message += u"Group '%s' given but inexistent. " % group
                continue
            groups[group].add(uid)
        users.parent()
        if password is not None:
            users.passwd(uid, None, password)
        message += u"Created user with ID '%s'." % uid
        return {
            'success': True,
            'message': message,
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e),
        }
    finally:
        model.invalidate()
[ "def", "remote_add_user", "(", "model", ",", "request", ")", ":", "params", "=", "request", ".", "params", "uid", "=", "params", ".", "get", "(", "'id'", ")", "if", "not", "uid", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "u\"No user ID given.\"", ",", "}", "users", "=", "model", ".", "backend", "if", "uid", "in", "users", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "u\"User with given ID already exists.\"", ",", "}", "password", "=", "params", ".", "get", "(", "'password'", ")", "add_roles", "=", "params", ".", "get", "(", "'roles'", ",", "''", ")", "add_roles", "=", "[", "val", ".", "strip", "(", ")", "for", "val", "in", "add_roles", ".", "split", "(", "','", ")", "if", "val", "]", "add_groups", "=", "params", ".", "get", "(", "'groups'", ",", "''", ")", "add_groups", "=", "[", "val", ".", "strip", "(", ")", "for", "val", "in", "add_groups", ".", "split", "(", "','", ")", "if", "val", "]", "attrs", "=", "dict", "(", ")", "for", "key", ",", "val", "in", "params", ".", "items", "(", ")", ":", "if", "not", "key", ".", "startswith", "(", "'attr.'", ")", ":", "continue", "key", "=", "key", "[", "key", ".", "find", "(", "'.'", ")", "+", "1", ":", "]", "attrs", "[", "key", "]", "=", "val", "settings", "=", "ugm_users", "(", "model", ")", "attrmap", "=", "settings", ".", "attrs", ".", "users_form_attrmap", "exposed", "=", "settings", ".", "attrs", ".", "users_exposed_attributes", "if", "not", "exposed", ":", "exposed", "=", "list", "(", ")", "valid_attrs", "=", "attrmap", ".", "keys", "(", ")", "+", "exposed", "checked_attrs", "=", "dict", "(", ")", "for", "key", "in", "valid_attrs", ":", "val", "=", "attrs", ".", "get", "(", "key", ")", "if", "not", "val", ":", "continue", "checked_attrs", "[", "key", "]", "=", "val", "try", ":", "user", "=", "users", ".", "create", "(", "uid", ",", "*", "*", "checked_attrs", ")", "message", "=", "u\"\"", "from", "cone", ".", "app", ".", "security", "import", "DEFAULT_ROLES", "available_roles", "=", "[", "role", "[", "0", "]", "for", "role", "in", "DEFAULT_ROLES", "]", "for", "role", "in", "add_roles", ":", "if", "role", "not", "in", "available_roles", ":", "message", "+=", "u\"Role '%s' given but inexistent. \"", "%", "role", "continue", "user", ".", "add_role", "(", "role", ")", "groups", "=", "users", ".", "parent", ".", "groups", "for", "group", "in", "add_groups", ":", "if", "group", "not", "in", "groups", ":", "message", "+=", "u\"Group '%s' given but inexistent. \"", "%", "group", "continue", "groups", "[", "group", "]", ".", "add", "(", "uid", ")", "users", ".", "parent", "(", ")", "if", "password", "is", "not", "None", ":", "users", ".", "passwd", "(", "uid", ",", "None", ",", "password", ")", "message", "+=", "u\"Created user with ID '%s'.\"", "%", "uid", "return", "{", "'success'", ":", "True", ",", "'message'", ":", "message", ",", "}", "except", "Exception", "as", "e", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "str", "(", "e", ")", ",", "}", "finally", ":", "model", ".", "invalidate", "(", ")" ]
Add user via remote service.

Returns a JSON response containing success state and a message
indicating what happened::

    {
        success: true, // respective false
        message: 'message'
    }

Expected request parameters:

id
    New user id.

password
    User password to be set initially (optional).

roles
    Comma separated role names the user initially has.

groups
    Comma separated group names the user should initially be member of.

attr.*
    User attributes to be set. I.e. ``attr.mail`` would set the mail
    attribute for the newly created user. All request parameters prefixed
    with ``attr`` get checked against user attribute attrmap from settings.

Restrictions

- All values, whether single or multi valued, are passed as string or
  list of strings to the create function.
[ "Add", "user", "via", "remote", "service", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/remote.py#L12-L124
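Since the docstring above documents a full request protocol, a hedged client sketch may help; the host, path and every parameter value are invented for illustration, only the parameter names and the attr. prefix convention come from the record:

import requests  # third-party HTTP client, any client would do

# Hypothetical call against the remote_add_user view.
resp = requests.get('https://example.org/users/add_user', params={
    'id': 'jdoe',
    'password': 'secret',
    'roles': 'editor,viewer',         # comma separated
    'groups': 'staff',                # comma separated
    'attr.mail': 'jdoe@example.org',  # checked against the users attrmap
})
print(resp.json())  # e.g. {'success': True, 'message': "Created user with ID 'jdoe'."}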
bluedynamics/cone.ugm
src/cone/ugm/browser/remote.py
remote_delete_user
def remote_delete_user(model, request):
    """Remove user via remote service.

    Returns a JSON response containing success state and a message
    indicating what happened::

        {
            success: true, // respective false
            message: 'message'
        }

    Expected request parameters:

    id
        Id of user to delete.
    """
    params = request.params
    uid = params.get('id')
    if not uid:
        return {
            'success': False,
            'message': u"No user ID given.",
        }
    users = model.backend
    if uid not in users:
        return {
            'success': False,
            'message': u"User with given ID not exists.",
        }
    try:
        del users[uid]
        users.parent()
        message = u"Deleted user with ID '%s'." % uid
        return {
            'success': True,
            'message': message,
        }
    except Exception as e:
        return {
            'success': False,
            'message': str(e),
        }
    finally:
        model.invalidate()
python
def remote_delete_user(model, request): """Remove user via remote service. Returns a JSON response containing success state and a message indicating what happened:: { success: true, // respectively false message: 'message' } Expected request parameters: id Id of user to delete. """ params = request.params uid = params.get('id') if not uid: return { 'success': False, 'message': u"No user ID given.", } users = model.backend if uid not in users: return { 'success': False, 'message': u"User with given ID does not exist.", } try: del users[uid] users.parent() message = u"Deleted user with ID '%s'." % uid return { 'success': True, 'message': message, } except Exception as e: return { 'success': False, 'message': str(e), } finally: model.invalidate()
[ "def", "remote_delete_user", "(", "model", ",", "request", ")", ":", "params", "=", "request", ".", "params", "uid", "=", "params", ".", "get", "(", "'id'", ")", "if", "not", "uid", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "u\"No user ID given.\"", ",", "}", "users", "=", "model", ".", "backend", "if", "uid", "not", "in", "users", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "u\"User with given ID not exists.\"", ",", "}", "try", ":", "del", "users", "[", "uid", "]", "users", ".", "parent", "(", ")", "message", "=", "u\"Deleted user with ID '%s'.\"", "%", "uid", "return", "{", "'success'", ":", "True", ",", "'message'", ":", "message", ",", "}", "except", "Exception", "as", "e", ":", "return", "{", "'success'", ":", "False", ",", "'message'", ":", "str", "(", "e", ")", ",", "}", "finally", ":", "model", ".", "invalidate", "(", ")" ]
Remove user via remote service. Returns a JSON response containing success state and a message indicating what happened:: { success: true, // respectively false message: 'message' } Expected request parameters: id Id of user to delete.
[ "Remove", "user", "via", "remote", "service", "." ]
train
https://github.com/bluedynamics/cone.ugm/blob/3c197075f3f6e94781289311c5637bb9c8e5597c/src/cone/ugm/browser/remote.py#L133-L180
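Companion sketch for the delete service; as before the route name is an assumption, and only the `id` parameter and the response contract are taken from the view itself.

```python
import requests

def delete_user(base_url, uid):
    """Delete a user via the remote service; raise on failure."""
    response = requests.get(base_url + '/remote_delete_user',
                            params={'id': uid})
    payload = response.json()
    if not payload['success']:
        raise RuntimeError(payload['message'])
    return payload['message']

print(delete_user('https://portal.example.org', 'jdoe'))
```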
ninuxorg/nodeshot
nodeshot/interop/sync/admin.py
LayerExternalInline.get_formset
def get_formset(self, request, obj=None, **kwargs): """ Load Synchronizer schema to display specific fields in admin """ if obj is not None: try: # this is enough to load the new schema obj.external except LayerExternal.DoesNotExist: pass return super(LayerExternalInline, self).get_formset(request, obj=None, **kwargs)
python
def get_formset(self, request, obj=None, **kwargs): """ Load Synchronizer schema to display specific fields in admin """ if obj is not None: try: # this is enough to load the new schema obj.external except LayerExternal.DoesNotExist: pass return super(LayerExternalInline, self).get_formset(request, obj=None, **kwargs)
[ "def", "get_formset", "(", "self", ",", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "obj", "is", "not", "None", ":", "try", ":", "# this is enough to load the new schema", "obj", ".", "external", "except", "LayerExternal", ".", "DoesNotExist", ":", "pass", "return", "super", "(", "LayerExternalInline", ",", "self", ")", ".", "get_formset", "(", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")" ]
Load Synchronizer schema to display specific fields in admin
[ "Load", "Synchronizer", "schema", "to", "display", "specific", "fields", "in", "admin" ]
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/sync/admin.py#L19-L29
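For context, a sketch of how such an inline is typically wired into a `ModelAdmin`; the `Layer` import path and the registration below are illustrative, not nodeshot's actual admin module.

```python
from django.contrib import admin
from nodeshot.core.layers.models import Layer  # hypothetical import path

class LayerAdmin(admin.ModelAdmin):
    # get_formset() on the inline (above) runs each time the Layer change
    # form is rendered, so the synchronizer schema is reloaded per object
    inlines = [LayerExternalInline]

admin.site.register(Layer, LayerAdmin)
```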
ninuxorg/nodeshot
nodeshot/core/websockets/server.py
public_broadcaster
def public_broadcaster(): """ Thread which runs in parallel and constantly checks for new messages in the public pipe and broadcasts them publicly to all connected clients. """ while __websocket_server_running__: pipein = open(PUBLIC_PIPE, 'r') line = pipein.readline().replace('\n', '').replace('\r', '') if line != '': WebSocketHandler.broadcast(line) print(line) remaining_lines = pipein.read() pipein.close() pipeout = open(PUBLIC_PIPE, 'w') pipeout.write(remaining_lines) pipeout.close() else: pipein.close() time.sleep(0.05)
python
def public_broadcaster(): """ Thread which runs in parallel and constantly checks for new messages in the public pipe and broadcasts them publicly to all connected clients. """ while __websocket_server_running__: pipein = open(PUBLIC_PIPE, 'r') line = pipein.readline().replace('\n', '').replace('\r', '') if line != '': WebSocketHandler.broadcast(line) print(line) remaining_lines = pipein.read() pipein.close() pipeout = open(PUBLIC_PIPE, 'w') pipeout.write(remaining_lines) pipeout.close() else: pipein.close() time.sleep(0.05)
[ "def", "public_broadcaster", "(", ")", ":", "while", "__websocket_server_running__", ":", "pipein", "=", "open", "(", "PUBLIC_PIPE", ",", "'r'", ")", "line", "=", "pipein", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "if", "line", "!=", "''", ":", "WebSocketHandler", ".", "broadcast", "(", "line", ")", "print", "line", "remaining_lines", "=", "pipein", ".", "read", "(", ")", "pipein", ".", "close", "(", ")", "pipeout", "=", "open", "(", "PUBLIC_PIPE", ",", "'w'", ")", "pipeout", ".", "write", "(", "remaining_lines", ")", "pipeout", ".", "close", "(", ")", "else", ":", "pipein", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.05", ")" ]
Thread which runs in parallel and constantly checks for new messages in the public pipe and broadcasts them publicly to all connected clients.
[ "Thread", "which", "runs", "in", "parallel", "and", "constantly", "checks", "for", "new", "messages", "in", "the", "public", "pipe", "and", "broadcasts", "them", "publicly", "to", "all", "connected", "clients", "." ]
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/websockets/server.py#L17-L37
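How a producer can hand a message to this thread: append one line to the pipe file it polls. The path below stands in for the configured `PUBLIC_PIPE`, and the sketch ignores the producer/consumer race inherent in the read-then-rewrite scheme the broadcaster uses.

```python
PUBLIC_PIPE = '/tmp/nodeshot.public.pipe'  # placeholder for the real setting

def publish(message):
    # one message per line; the broadcaster consumes the first line and
    # writes any unread remainder back into the file
    with open(PUBLIC_PIPE, 'a') as pipeout:
        pipeout.write(message + '\n')

publish('{"type": "node_created", "slug": "fusolab"}')
```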
ninuxorg/nodeshot
nodeshot/core/websockets/server.py
private_messenger
def private_messenger(): """ Thread which runs in parallel and constantly checks for new messages in the private pipe and sends them to the specific client. If client is not connected the message is discarded. """ while __websocket_server_running__: pipein = open(PRIVATE_PIPE, 'r') line = pipein.readline().replace('\n', '').replace('\r', '') if line != '': message = json.loads(line) WebSocketHandler.send_private_message(user_id=message['user_id'], message=message) print(line) remaining_lines = pipein.read() pipein.close() pipeout = open(PRIVATE_PIPE, 'w') pipeout.write(remaining_lines) pipeout.close() else: pipein.close() time.sleep(0.05)
python
def private_messenger(): """ Thread which runs in parallel and constantly checks for new messages in the private pipe and sends them to the specific client. If client is not connected the message is discarded. """ while __websocket_server_running__: pipein = open(PRIVATE_PIPE, 'r') line = pipein.readline().replace('\n', '').replace('\r', '') if line != '': message = json.loads(line) WebSocketHandler.send_private_message(user_id=message['user_id'], message=message) print(line) remaining_lines = pipein.read() pipein.close() pipeout = open(PRIVATE_PIPE, 'w') pipeout.write(remaining_lines) pipeout.close() else: pipein.close() time.sleep(0.05)
[ "def", "private_messenger", "(", ")", ":", "while", "__websocket_server_running__", ":", "pipein", "=", "open", "(", "PRIVATE_PIPE", ",", "'r'", ")", "line", "=", "pipein", ".", "readline", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", "if", "line", "!=", "''", ":", "message", "=", "json", ".", "loads", "(", "line", ")", "WebSocketHandler", ".", "send_private_message", "(", "user_id", "=", "message", "[", "'user_id'", "]", ",", "message", "=", "message", ")", "print", "line", "remaining_lines", "=", "pipein", ".", "read", "(", ")", "pipein", ".", "close", "(", ")", "pipeout", "=", "open", "(", "PRIVATE_PIPE", ",", "'w'", ")", "pipeout", ".", "write", "(", "remaining_lines", ")", "pipeout", ".", "close", "(", ")", "else", ":", "pipein", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.05", ")" ]
Thread which runs in parallel and constantly checks for new messages in the private pipe and sends them to the specific client. If client is not connected the message is discarded.
[ "Thread", "which", "runs", "in", "parallel", "and", "constantly", "checks", "for", "new", "messages", "in", "the", "private", "pipe", "and", "sends", "them", "to", "the", "specific", "client", ".", "If", "client", "is", "not", "connected", "the", "message", "is", "discarded", "." ]
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/websockets/server.py#L43-L66
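The private pipe carries one JSON object per line, and the `user_id` key is what the thread above routes on (see `json.loads` and `message['user_id']` in the code). A producer sketch, with a placeholder path for the configured `PRIVATE_PIPE`:

```python
import json

PRIVATE_PIPE = '/tmp/nodeshot.private.pipe'  # placeholder for the real setting

def send_to_user(user_id, payload):
    # merge the routing key into the payload and emit a single JSON line
    message = dict(payload, user_id=user_id)
    with open(PRIVATE_PIPE, 'a') as pipeout:
        pipeout.write(json.dumps(message) + '\n')

send_to_user(42, {'type': 'notification', 'text': 'your node was approved'})
```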
ninuxorg/nodeshot
nodeshot/core/metrics/models.py
Metric.write
def write(self, values, timestamp=None, database=None, async_=True): """ write metric point """ func = write_async if async_ else write return func(name=self.name, values=values, tags=self.tags, timestamp=timestamp, database=database)
python
def write(self, values, timestamp=None, database=None, async_=True): """ write metric point """ func = write_async if async_ else write return func(name=self.name, values=values, tags=self.tags, timestamp=timestamp, database=database)
[ "def", "write", "(", "self", ",", "values", ",", "timestamp", "=", "None", ",", "database", "=", "None", ",", "async", "=", "True", ")", ":", "func", "=", "write_async", "if", "async", "else", "write", "return", "func", "(", "name", "=", "self", ".", "name", ",", "values", "=", "values", ",", "tags", "=", "self", ".", "tags", ",", "timestamp", "=", "timestamp", ",", "database", "=", "database", ")" ]
write metric point
[ "write", "metric", "point" ]
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/metrics/models.py#L41-L48
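Usage sketch for `Metric.write`, assuming a `Metric` row already exists; the metric name, field values, and database name are illustrative. `async_` matches the parameter as renamed above (the original `async` stopped being a valid identifier in Python 3.7).

```python
from nodeshot.core.metrics.models import Metric

metric = Metric.objects.get(name='node_count')

metric.write({'value': 128})                         # queued via write_async
metric.write({'value': 128}, async_=False)           # direct, blocking write
metric.write({'value': 128}, database='archive',     # write to another database
             async_=False)
```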
ninuxorg/nodeshot
nodeshot/networking/hardware/models/antenna.py
Antenna.save
def save(self, *args, **kwargs): """ 1. set polarization according to AntennaModel (self.model.polarization) when creating a new antenna 2. inherit latitude and longitude from node """ if not self.pk and self.model.polarization: self.polarization = self.model.polarization super(Antenna, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ 1. set polarization according to AntennaModel (self.model.polarization) when creating a new antenna 2. inherit latitude and longitude from node """ if not self.pk and self.model.polarization: self.polarization = self.model.polarization super(Antenna, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "pk", "and", "self", ".", "model", ".", "polarization", ":", "self", ".", "polarization", "=", "self", ".", "model", ".", "polarization", "super", "(", "Antenna", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
1. set polarization according to AntennaModel (self.model.polarization) when creating a new antenna 2. inherit latitude and longitude from node
[ "1", ".", "set", "polarization", "according", "to", "AntennaModel", "(", "self", ".", "model", ".", "polarization", ")", "when", "creating", "a", "new", "antenna", "2", ".", "inherit", "latitude", "and", "longitude", "from", "node" ]
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/hardware/models/antenna.py#L32-L39
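Finally, an illustration of the polarization default: a fresh `Antenna` saved without an explicit polarization inherits its `AntennaModel`'s value. The lookup values and the `device_id` field below are illustrative, not fixtures from the project.

```python
from nodeshot.networking.hardware.models import Antenna, AntennaModel

antenna_model = AntennaModel.objects.get(name='Sector 120')
antenna = Antenna(device_id=1, model=antenna_model)  # polarization left unset
antenna.save()  # pk is empty, so save() copies model.polarization
assert antenna.polarization == antenna_model.polarization
```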