Dataset schema (one record per function):

repository_name: string, lengths 7 to 54
func_path_in_repository: string, lengths 4 to 175
func_name: string, lengths 1 to 129
whole_func_string: string, lengths 91 to 50.9k
language: string, 1 distinct value
func_code_string: string, lengths 91 to 50.9k
func_code_tokens: sequence
func_documentation_string: string, lengths 1 to 31.6k
func_documentation_tokens: sequence
split_name: string, 1 distinct value
func_code_url: string, lengths 89 to 268
score: float64, range 0 to 0.09
rgs1/zk_shell
zk_shell/xclient.py
XClient.sessions_info
def sessions_info(self, hosts): """Returns ClientInfo per session. :param hosts: comma separated lists of members of the ZK ensemble. :returns: A dictionary of (session_id, ClientInfo). """ info_by_id = {} for server_endpoint, dump in self.dump_by_server(hosts).items(): server_ip, server_port = server_endpoint for line in dump.split("\n"): mat = self.IP_PORT_REGEX.match(line) if mat is None: continue ip, port, sid = mat.groups() info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port) return info_by_id
python
def sessions_info(self, hosts): """Returns ClientInfo per session. :param hosts: comma separated lists of members of the ZK ensemble. :returns: A dictionary of (session_id, ClientInfo). """ info_by_id = {} for server_endpoint, dump in self.dump_by_server(hosts).items(): server_ip, server_port = server_endpoint for line in dump.split("\n"): mat = self.IP_PORT_REGEX.match(line) if mat is None: continue ip, port, sid = mat.groups() info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port) return info_by_id
[ "def", "sessions_info", "(", "self", ",", "hosts", ")", ":", "info_by_id", "=", "{", "}", "for", "server_endpoint", ",", "dump", "in", "self", ".", "dump_by_server", "(", "hosts", ")", ".", "items", "(", ")", ":", "server_ip", ",", "server_port", "=", "server_endpoint", "for", "line", "in", "dump", ".", "split", "(", "\"\\n\"", ")", ":", "mat", "=", "self", ".", "IP_PORT_REGEX", ".", "match", "(", "line", ")", "if", "mat", "is", "None", ":", "continue", "ip", ",", "port", ",", "sid", "=", "mat", ".", "groups", "(", ")", "info_by_id", "[", "sid", "]", "=", "ClientInfo", "(", "sid", ",", "ip", ",", "port", ",", "server_ip", ",", "server_port", ")", "return", "info_by_id" ]
Returns ClientInfo per session. :param hosts: comma separated lists of members of the ZK ensemble. :returns: A dictionary of (session_id, ClientInfo).
[ "Returns", "ClientInfo", "per", "session", "." ]
train
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/xclient.py#L516-L534
0.004373
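A standalone sketch of the parsing loop in the sessions_info record above. The dump line format and the regex here are assumptions for illustration only; the real IP_PORT_REGEX, ClientInfo class and dump_by_server method live in zk_shell and are not shown in this record.

import re
from collections import namedtuple

# Hypothetical dump line format: "/<client_ip>:<port> sid=<session_id>"
ClientInfo = namedtuple("ClientInfo", "sid ip port server_ip server_port")
IP_PORT_REGEX = re.compile(r"/(?P<ip>[\d.]+):(?P<port>\d+) sid=(?P<sid>0x[0-9a-f]+)")

# stand-in for self.dump_by_server(hosts): endpoint tuple -> raw dump text
dump_by_server = {
    ("10.0.0.5", 2181): "/10.0.0.9:51234 sid=0x16f0a\n/10.0.0.10:51240 sid=0x16f0b",
}

info_by_id = {}
for (server_ip, server_port), dump in dump_by_server.items():
    for line in dump.split("\n"):
        mat = IP_PORT_REGEX.match(line)
        if mat is None:
            continue
        ip, port, sid = mat.groups()
        info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)

print(info_by_id["0x16f0a"])  # ClientInfo(sid='0x16f0a', ip='10.0.0.9', port='51234', ...)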
xflr6/concepts
concepts/definitions.py
Definition.rename_object
def rename_object(self, old, new): """Replace the name of an object by a new one.""" self._objects.replace(old, new) pairs = self._pairs pairs |= {(new, p) for p in self._properties if (old, p) in pairs and not pairs.remove((old, p))}
python
def rename_object(self, old, new): """Replace the name of an object by a new one.""" self._objects.replace(old, new) pairs = self._pairs pairs |= {(new, p) for p in self._properties if (old, p) in pairs and not pairs.remove((old, p))}
[ "def", "rename_object", "(", "self", ",", "old", ",", "new", ")", ":", "self", ".", "_objects", ".", "replace", "(", "old", ",", "new", ")", "pairs", "=", "self", ".", "_pairs", "pairs", "|=", "{", "(", "new", ",", "p", ")", "for", "p", "in", "self", ".", "_properties", "if", "(", "old", ",", "p", ")", "in", "pairs", "and", "not", "pairs", ".", "remove", "(", "(", "old", ",", "p", ")", ")", "}" ]
Replace the name of an object by a new one.
[ "Replace", "the", "name", "of", "an", "object", "by", "a", "new", "one", "." ]
train
https://github.com/xflr6/concepts/blob/2801b27b05fa02cccee7d549451810ffcbf5c942/concepts/definitions.py#L317-L322
0.010791
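The rename_object record above relies on a side effect inside a set comprehension: set.remove() returns None, so "not pairs.remove(...)" deletes the old pair and lets the renamed pair through in one pass. A plain-set sketch of that trick, independent of the concepts package:

objects = {"o1", "o2"}
properties = {"p1", "p2"}
pairs = {("o1", "p1"), ("o2", "p2")}

old, new = "o1", "obj1"
objects = (objects - {old}) | {new}        # stand-in for self._objects.replace(old, new)
pairs |= {(new, p) for p in properties
          if (old, p) in pairs and not pairs.remove((old, p))}

print(pairs)   # {('obj1', 'p1'), ('o2', 'p2')}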
pettarin/ipapy
ipapy/data/__init__.py
convert_unicode_field
def convert_unicode_field(string): """ Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input) Unicode field :rtype: list of Unicode strings """ values = [] for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]: values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)])) return values
python
def convert_unicode_field(string): """ Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input) Unicode field :rtype: list of Unicode strings """ values = [] for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]: values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)])) return values
[ "def", "convert_unicode_field", "(", "string", ")", ":", "values", "=", "[", "]", "for", "codepoint", "in", "[", "s", "for", "s", "in", "string", ".", "split", "(", "DATA_FILE_CODEPOINT_SEPARATOR", ")", "if", "(", "s", "!=", "DATA_FILE_VALUE_NOT_AVAILABLE", ")", "and", "(", "len", "(", "s", ")", ">", "0", ")", "]", ":", "values", ".", "append", "(", "u\"\"", ".", "join", "(", "[", "hex_to_unichr", "(", "c", ")", "for", "c", "in", "codepoint", ".", "split", "(", "DATA_FILE_CODEPOINT_JOINER", ")", "]", ")", ")", "return", "values" ]
Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input) Unicode field :rtype: list of Unicode strings
[ "Convert", "a", "Unicode", "field", "into", "the", "corresponding", "list", "of", "Unicode", "strings", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/data/__init__.py#L55-L69
0.004594
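A self-contained sketch of the parsing described in the convert_unicode_field record above. The separator (space), joiner (underscore) and not-available marker ("N/A") are assumptions mirroring the docstring; the real constants are defined elsewhere in ipapy, and hex_to_unichr is replaced by a plain chr() conversion.

def hex_to_unichr(hex_string):
    # accept both "0061" and "U+0061" style codepoints
    if hex_string.lower().startswith("u+"):
        hex_string = hex_string[2:]
    return chr(int(hex_string, 16))

def convert_unicode_field(string, separator=" ", joiner="_", not_available="N/A"):
    values = []
    for codepoint in [s for s in string.split(separator)
                      if (s != not_available) and (len(s) > 0)]:
        values.append("".join(hex_to_unichr(c) for c in codepoint.split(joiner)))
    return values

print(convert_unicode_field("0061 U+0062 0063_0301"))  # ['a', 'b', 'c' + combining acute]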
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
_token_set
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... - treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise)
python
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... - treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise)
[ "def", "_token_set", "(", "s1", ",", "s2", ",", "partial", "=", "True", ",", "force_ascii", "=", "True", ",", "full_process", "=", "True", ")", ":", "if", "not", "full_process", "and", "s1", "==", "s2", ":", "return", "100", "p1", "=", "utils", ".", "full_process", "(", "s1", ",", "force_ascii", "=", "force_ascii", ")", "if", "full_process", "else", "s1", "p2", "=", "utils", ".", "full_process", "(", "s2", ",", "force_ascii", "=", "force_ascii", ")", "if", "full_process", "else", "s2", "if", "not", "utils", ".", "validate_string", "(", "p1", ")", ":", "return", "0", "if", "not", "utils", ".", "validate_string", "(", "p2", ")", ":", "return", "0", "# pull tokens", "tokens1", "=", "set", "(", "p1", ".", "split", "(", ")", ")", "tokens2", "=", "set", "(", "p2", ".", "split", "(", ")", ")", "intersection", "=", "tokens1", ".", "intersection", "(", "tokens2", ")", "diff1to2", "=", "tokens1", ".", "difference", "(", "tokens2", ")", "diff2to1", "=", "tokens2", ".", "difference", "(", "tokens1", ")", "sorted_sect", "=", "\" \"", ".", "join", "(", "sorted", "(", "intersection", ")", ")", "sorted_1to2", "=", "\" \"", ".", "join", "(", "sorted", "(", "diff1to2", ")", ")", "sorted_2to1", "=", "\" \"", ".", "join", "(", "sorted", "(", "diff2to1", ")", ")", "combined_1to2", "=", "sorted_sect", "+", "\" \"", "+", "sorted_1to2", "combined_2to1", "=", "sorted_sect", "+", "\" \"", "+", "sorted_2to1", "# strip", "sorted_sect", "=", "sorted_sect", ".", "strip", "(", ")", "combined_1to2", "=", "combined_1to2", ".", "strip", "(", ")", "combined_2to1", "=", "combined_2to1", ".", "strip", "(", ")", "if", "partial", ":", "ratio_func", "=", "partial_ratio", "else", ":", "ratio_func", "=", "ratio", "pairwise", "=", "[", "ratio_func", "(", "sorted_sect", ",", "combined_1to2", ")", ",", "ratio_func", "(", "sorted_sect", ",", "combined_2to1", ")", ",", "ratio_func", "(", "combined_1to2", ",", "combined_2to1", ")", "]", "return", "max", "(", "pairwise", ")" ]
Find all alphanumeric tokens in each string... - treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches
[ "Find", "all", "alphanumeric", "tokens", "in", "each", "string", "...", "-", "treat", "them", "as", "a", "set", "-", "construct", "two", "strings", "of", "the", "form", ":", "<sorted_intersection", ">", "<sorted_remainder", ">", "-", "take", "ratios", "of", "those", "two", "strings", "-", "controls", "for", "unordered", "partial", "matches" ]
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L116-L165
0.001923
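_token_set in the record above is the internal worker behind the public token-set scorers. A short usage sketch through the public API, assuming the fuzzywuzzy package is installed:

from fuzzywuzzy import fuzz

a = "mets vs braves"
b = "the new york mets vs the atlanta braves"

# token_set_ratio scores on the shared token set, so the extra words in b
# do not drag the score down the way a plain ratio does
print(fuzz.ratio(a, b))
print(fuzz.token_set_ratio(a, b))  # 100 here: every token of a appears in b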
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_subscriptions
def get_subscriptions(self, limit=100, offset=0, params={}): """ Get all subscriptions """ url = self.SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in params.items(): if key is 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_subscriptions(self, limit=100, offset=0, params={}): """ Get all subscriptions """ url = self.SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in params.items(): if key is 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_subscriptions", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "params", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SUBSCRIPTIONS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "is", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all subscriptions
[ "Get", "all", "subscriptions" ]
train
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L695-L710
0.004132
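A standalone sketch of how the query string in the get_subscriptions record above is assembled. The endpoint path is a placeholder rather than the SDK constant, and the key comparison uses == where the original uses the identity check "is", which only happens to work for short interned string literals.

SUBSCRIPTIONS_URL = "/v3/subscriptions.json"   # placeholder path

def build_subscriptions_url(limit=100, offset=0, params=None):
    params = params or {}
    url = SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset)
    for key, value in params.items():
        if key == 'ids':                  # join id lists into a comma-separated value
            value = ",".join(value)
        url += '&%s=%s' % (key, value)
    return url

print(build_subscriptions_url(limit=10, params={'ids': ['a1', 'b2']}))
# /v3/subscriptions.json?limit=10&offset=0&ids=a1,b2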
ten10solutions/Geist
geist/match_position_finder_helpers.py
find_potential_match_regions
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666): """To prevent prohibitively slow calculation of normalisation coefficient at each point in image find potential match points, and normalise these only these. This function uses the definitions of the matching functions to calculate the expected match value and finds positions in the transformed array matching these- normalisation will then eliminate false positives """ if method == 'correlation': match_value = np.sum(template**2) # this will be the value of the match in the elif method == 'squared difference': match_value = 0 elif method == 'correlation coefficient': temp_minus_mean = template - np.mean(template) match_value = np.sum(temp_minus_mean**2) else: raise ValueError('Matching method not implemented') condition = ((np.round(transformed_array, decimals=3)>=match_value*raw_tolerance) & (np.round(transformed_array, decimals=3)<=match_value*(1./raw_tolerance))) return np.transpose(condition.nonzero())
python
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666): """To prevent prohibitively slow calculation of normalisation coefficient at each point in image find potential match points, and normalise these only these. This function uses the definitions of the matching functions to calculate the expected match value and finds positions in the transformed array matching these- normalisation will then eliminate false positives """ if method == 'correlation': match_value = np.sum(template**2) # this will be the value of the match in the elif method == 'squared difference': match_value = 0 elif method == 'correlation coefficient': temp_minus_mean = template - np.mean(template) match_value = np.sum(temp_minus_mean**2) else: raise ValueError('Matching method not implemented') condition = ((np.round(transformed_array, decimals=3)>=match_value*raw_tolerance) & (np.round(transformed_array, decimals=3)<=match_value*(1./raw_tolerance))) return np.transpose(condition.nonzero())
[ "def", "find_potential_match_regions", "(", "template", ",", "transformed_array", ",", "method", "=", "'correlation'", ",", "raw_tolerance", "=", "0.666", ")", ":", "if", "method", "==", "'correlation'", ":", "match_value", "=", "np", ".", "sum", "(", "template", "**", "2", ")", "# this will be the value of the match in the", "elif", "method", "==", "'squared difference'", ":", "match_value", "=", "0", "elif", "method", "==", "'correlation coefficient'", ":", "temp_minus_mean", "=", "template", "-", "np", ".", "mean", "(", "template", ")", "match_value", "=", "np", ".", "sum", "(", "temp_minus_mean", "**", "2", ")", "else", ":", "raise", "ValueError", "(", "'Matching method not implemented'", ")", "condition", "=", "(", "(", "np", ".", "round", "(", "transformed_array", ",", "decimals", "=", "3", ")", ">=", "match_value", "*", "raw_tolerance", ")", "&", "(", "np", ".", "round", "(", "transformed_array", ",", "decimals", "=", "3", ")", "<=", "match_value", "*", "(", "1.", "/", "raw_tolerance", ")", ")", ")", "return", "np", ".", "transpose", "(", "condition", ".", "nonzero", "(", ")", ")" ]
To prevent prohibitively slow calculation of normalisation coefficient at each point in image find potential match points, and normalise these only these. This function uses the definitions of the matching functions to calculate the expected match value and finds positions in the transformed array matching these- normalisation will then eliminate false positives
[ "To", "prevent", "prohibitively", "slow", "calculation", "of", "normalisation", "coefficient", "at", "each", "point", "in", "image", "find", "potential", "match", "points", "and", "normalise", "these", "only", "these", ".", "This", "function", "uses", "the", "definitions", "of", "the", "matching", "functions", "to", "calculate", "the", "expected", "match", "value", "and", "finds", "positions", "in", "the", "transformed", "array", "matching", "these", "-", "normalisation", "will", "then", "eliminate", "false", "positives" ]
train
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/match_position_finder_helpers.py#L4-L21
0.010563
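A minimal numpy sketch of the thresholding idea in the find_potential_match_regions record above: keep only positions whose correlation score falls within raw_tolerance of the perfect-match value sum(template**2). The template and the pretend correlation array are invented for illustration.

import numpy as np

template = np.array([[1.0, 2.0], [3.0, 4.0]])
match_value = np.sum(template ** 2)       # 30.0: value of a perfect correlation match
transformed = np.array([[29.9, 5.0],      # pretend correlation scores per position
                        [12.0, 30.0]])

raw_tolerance = 0.666
condition = ((np.round(transformed, decimals=3) >= match_value * raw_tolerance) &
             (np.round(transformed, decimals=3) <= match_value * (1. / raw_tolerance)))
print(np.transpose(condition.nonzero()))  # [[0 0] [1 1]]: candidate (row, col) positions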
peterbrittain/asciimatics
asciimatics/effects.py
_Trail._maybe_reseed
def _maybe_reseed(self, normal): """ Randomly create a new column once this one is finished. """ self._y += self._rate self._life -= 1 if self._life <= 0: self._clear = not self._clear if normal else True self._rate = randint(1, 2) if self._clear: self._y = 0 self._life = self._screen.height // self._rate else: self._y = randint(0, self._screen.height // 2) - \ self._screen.height // 4 self._life = \ randint(1, self._screen.height - self._y) // self._rate
python
def _maybe_reseed(self, normal): """ Randomly create a new column once this one is finished. """ self._y += self._rate self._life -= 1 if self._life <= 0: self._clear = not self._clear if normal else True self._rate = randint(1, 2) if self._clear: self._y = 0 self._life = self._screen.height // self._rate else: self._y = randint(0, self._screen.height // 2) - \ self._screen.height // 4 self._life = \ randint(1, self._screen.height - self._y) // self._rate
[ "def", "_maybe_reseed", "(", "self", ",", "normal", ")", ":", "self", ".", "_y", "+=", "self", ".", "_rate", "self", ".", "_life", "-=", "1", "if", "self", ".", "_life", "<=", "0", ":", "self", ".", "_clear", "=", "not", "self", ".", "_clear", "if", "normal", "else", "True", "self", ".", "_rate", "=", "randint", "(", "1", ",", "2", ")", "if", "self", ".", "_clear", ":", "self", ".", "_y", "=", "0", "self", ".", "_life", "=", "self", ".", "_screen", ".", "height", "//", "self", ".", "_rate", "else", ":", "self", ".", "_y", "=", "randint", "(", "0", ",", "self", ".", "_screen", ".", "height", "//", "2", ")", "-", "self", ".", "_screen", ".", "height", "//", "4", "self", ".", "_life", "=", "randint", "(", "1", ",", "self", ".", "_screen", ".", "height", "-", "self", ".", "_y", ")", "//", "self", ".", "_rate" ]
Randomly create a new column once this one is finished.
[ "Randomly", "create", "a", "new", "column", "once", "this", "one", "is", "finished", "." ]
train
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/effects.py#L510-L526
0.003035
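A toy, screen-free version of the reseed logic in the _Trail._maybe_reseed record above; a plain dict stands in for the effect's private attributes and screen_height replaces self._screen.height.

from random import randint

def maybe_reseed(state, screen_height, normal=True):
    state["y"] += state["rate"]
    state["life"] -= 1
    if state["life"] <= 0:
        state["clear"] = not state["clear"] if normal else True
        state["rate"] = randint(1, 2)
        if state["clear"]:
            state["y"] = 0
            state["life"] = screen_height // state["rate"]
        else:
            state["y"] = randint(0, screen_height // 2) - screen_height // 4
            state["life"] = randint(1, screen_height - state["y"]) // state["rate"]
    return state

trail = {"y": 0, "rate": 1, "life": 0, "clear": False}
for _ in range(3):
    print(maybe_reseed(trail, screen_height=24))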
jonhadfield/creds
lib/creds/users.py
Users.describe_users
def describe_users(self, users_filter=None): """Return a list of users matching a filter (if provided).""" user_list = Users(oktypes=User) for user in self._user_list: if users_filter and (users_filter.get('name') == user.name or users_filter.get('uid') == user.uid): user_list.append(user) return user_list
python
def describe_users(self, users_filter=None): """Return a list of users matching a filter (if provided).""" user_list = Users(oktypes=User) for user in self._user_list: if users_filter and (users_filter.get('name') == user.name or users_filter.get('uid') == user.uid): user_list.append(user) return user_list
[ "def", "describe_users", "(", "self", ",", "users_filter", "=", "None", ")", ":", "user_list", "=", "Users", "(", "oktypes", "=", "User", ")", "for", "user", "in", "self", ".", "_user_list", ":", "if", "users_filter", "and", "(", "users_filter", ".", "get", "(", "'name'", ")", "==", "user", ".", "name", "or", "users_filter", ".", "get", "(", "'uid'", ")", "==", "user", ".", "uid", ")", ":", "user_list", ".", "append", "(", "user", ")", "return", "user_list" ]
Return a list of users matching a filter (if provided).
[ "Return", "a", "list", "of", "users", "matching", "a", "filter", "(", "if", "provided", ")", "." ]
train
https://github.com/jonhadfield/creds/blob/b2053b43516cf742c6e4c2b79713bc625592f47c/lib/creds/users.py#L138-L144
0.008174
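A plain-Python sketch of the filter in the describe_users record above, with a namedtuple standing in for the package's User type and an ordinary list for the Users container.

from collections import namedtuple

User = namedtuple("User", "name uid")      # stand-in for the package's User type
user_list = [User("alice", 1001), User("bob", 1002)]

def describe_users(users, users_filter=None):
    matched = []                           # the real code returns a Users container
    for user in users:
        if users_filter and (users_filter.get('name') == user.name or
                             users_filter.get('uid') == user.uid):
            matched.append(user)
    return matched

print(describe_users(user_list, {'name': 'alice'}))  # [User(name='alice', uid=1001)]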
dmwm/DBS
Server/Python/src/dbs/web/DBSWriterModel.py
insertBulkBlock
def insertBulkBlock(self): """ API to insert a bulk block :param blockDump: Output of the block dump command :type blockDump: dict """ try: body = request.body.read() indata = cjson.decode(body) if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])): dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time", self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.") indata = validateJSONInputNoCopy("blockBulk", indata) self.dbsBlockInsert.putBlock(indata) except cjson.DecodeError as dc: dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc)) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except HTTPError as he: raise he except Exception as ex: #illegal variable name/number if str(ex).find("ORA-01036") != -1: dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex)) else: sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
python
def insertBulkBlock(self): """ API to insert a bulk block :param blockDump: Output of the block dump command :type blockDump: dict """ try: body = request.body.read() indata = cjson.decode(body) if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])): dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time", self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.") indata = validateJSONInputNoCopy("blockBulk", indata) self.dbsBlockInsert.putBlock(indata) except cjson.DecodeError as dc: dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc)) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except HTTPError as he: raise he except Exception as ex: #illegal variable name/number if str(ex).find("ORA-01036") != -1: dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex)) else: sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "insertBulkBlock", "(", "self", ")", ":", "try", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "indata", "=", "cjson", ".", "decode", "(", "body", ")", "if", "(", "indata", ".", "get", "(", "\"file_parent_list\"", ",", "[", "]", ")", "and", "indata", ".", "get", "(", "\"dataset_parent_list\"", ",", "[", "]", ")", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"insertBulkBlock: dataset and file parentages cannot be in the input at the same time\"", ",", "self", ".", "logger", ".", "exception", ",", "\"insertBulkBlock: datset and file parentages cannot be in the input at the same time.\"", ")", "indata", "=", "validateJSONInputNoCopy", "(", "\"blockBulk\"", ",", "indata", ")", "self", ".", "dbsBlockInsert", ".", "putBlock", "(", "indata", ")", "except", "cjson", ".", "DecodeError", "as", "dc", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Wrong format/data from insert BulkBlock input\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "dc", ")", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "message", ")", "except", "HTTPError", "as", "he", ":", "raise", "he", "except", "Exception", "as", "ex", ":", "#illegal variable name/number", "if", "str", "(", "ex", ")", ".", "find", "(", "\"ORA-01036\"", ")", "!=", "-", "1", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"illegal variable name/number from input\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "ex", ")", ")", "else", ":", "sError", "=", "\"DBSWriterModel/insertBulkBlock. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
API to insert a bulk block :param blockDump: Output of the block dump command :type blockDump: dict
[ "API", "to", "insert", "a", "bulk", "block" ]
train
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSWriterModel.py#L254-L283
0.008818
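A minimal sketch of the mutual-exclusion check at the top of the insertBulkBlock record above, using the standard json module instead of cjson; the payload keys come from the shown code, the block contents are invented.

import json

def validate_block_dump(body):
    indata = json.loads(body)
    if indata.get("file_parent_list", []) and indata.get("dataset_parent_list", []):
        raise ValueError("dataset and file parentages cannot be in the input at the same time")
    return indata

payload = json.dumps({"block": {"block_name": "/a/b/c#1"},   # invented block dump
                      "file_parent_list": [{"logical_file_name": "/store/x.root"}]})
print(validate_block_dump(payload)["file_parent_list"])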
Adarnof/adarnauth-esi
esi/managers.py
TokenManager.create_from_code
def create_from_code(self, code, user=None): """ Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token` """ # perform code exchange logger.debug("Creating new token from code {0}".format(code[:-5])) oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL) token = oauth.fetch_token(app_settings.ESI_TOKEN_URL, client_secret=app_settings.ESI_SSO_CLIENT_SECRET, code=code) r = oauth.request('get', app_settings.ESI_TOKEN_VERIFY_URL) r.raise_for_status() token_data = r.json() logger.debug(token_data) # translate returned data to a model model = self.create( character_id=token_data['CharacterID'], character_name=token_data['CharacterName'], character_owner_hash=token_data['CharacterOwnerHash'], access_token=token['access_token'], refresh_token=token['refresh_token'], token_type=token_data['TokenType'], user=user, ) # parse scopes if 'Scopes' in token_data: from esi.models import Scope for s in token_data['Scopes'].split(): try: scope = Scope.objects.get(name=s) model.scopes.add(scope) except Scope.DoesNotExist: # This scope isn't included in a data migration. Create a placeholder until it updates. try: help_text = s.split('.')[1].replace('_', ' ').capitalize() except IndexError: # Unusual scope name, missing periods. help_text = s.replace('_', ' ').capitalize() scope = Scope.objects.create(name=s, help_text=help_text) model.scopes.add(scope) logger.debug("Added {0} scopes to new token.".format(model.scopes.all().count())) if not app_settings.ESI_ALWAYS_CREATE_TOKEN: # see if we already have a token for this character and scope combination # if so, we don't need a new one queryset = self.get_queryset().equivalent_to(model) if queryset.exists(): logger.debug( "Identified {0} tokens equivalent to new token. Updating access and refresh tokens.".format( queryset.count())) queryset.update( access_token=model.access_token, refresh_token=model.refresh_token, created=model.created, ) if queryset.filter(user=model.user).exists(): logger.debug("Equivalent token with same user exists. Deleting new token.") model.delete() model = queryset.filter(user=model.user)[0] # pick one at random logger.debug("Successfully created {0} for user {1}".format(repr(model), user)) return model
python
def create_from_code(self, code, user=None): """ Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token` """ # perform code exchange logger.debug("Creating new token from code {0}".format(code[:-5])) oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL) token = oauth.fetch_token(app_settings.ESI_TOKEN_URL, client_secret=app_settings.ESI_SSO_CLIENT_SECRET, code=code) r = oauth.request('get', app_settings.ESI_TOKEN_VERIFY_URL) r.raise_for_status() token_data = r.json() logger.debug(token_data) # translate returned data to a model model = self.create( character_id=token_data['CharacterID'], character_name=token_data['CharacterName'], character_owner_hash=token_data['CharacterOwnerHash'], access_token=token['access_token'], refresh_token=token['refresh_token'], token_type=token_data['TokenType'], user=user, ) # parse scopes if 'Scopes' in token_data: from esi.models import Scope for s in token_data['Scopes'].split(): try: scope = Scope.objects.get(name=s) model.scopes.add(scope) except Scope.DoesNotExist: # This scope isn't included in a data migration. Create a placeholder until it updates. try: help_text = s.split('.')[1].replace('_', ' ').capitalize() except IndexError: # Unusual scope name, missing periods. help_text = s.replace('_', ' ').capitalize() scope = Scope.objects.create(name=s, help_text=help_text) model.scopes.add(scope) logger.debug("Added {0} scopes to new token.".format(model.scopes.all().count())) if not app_settings.ESI_ALWAYS_CREATE_TOKEN: # see if we already have a token for this character and scope combination # if so, we don't need a new one queryset = self.get_queryset().equivalent_to(model) if queryset.exists(): logger.debug( "Identified {0} tokens equivalent to new token. Updating access and refresh tokens.".format( queryset.count())) queryset.update( access_token=model.access_token, refresh_token=model.refresh_token, created=model.created, ) if queryset.filter(user=model.user).exists(): logger.debug("Equivalent token with same user exists. Deleting new token.") model.delete() model = queryset.filter(user=model.user)[0] # pick one at random logger.debug("Successfully created {0} for user {1}".format(repr(model), user)) return model
[ "def", "create_from_code", "(", "self", ",", "code", ",", "user", "=", "None", ")", ":", "# perform code exchange", "logger", ".", "debug", "(", "\"Creating new token from code {0}\"", ".", "format", "(", "code", "[", ":", "-", "5", "]", ")", ")", "oauth", "=", "OAuth2Session", "(", "app_settings", ".", "ESI_SSO_CLIENT_ID", ",", "redirect_uri", "=", "app_settings", ".", "ESI_SSO_CALLBACK_URL", ")", "token", "=", "oauth", ".", "fetch_token", "(", "app_settings", ".", "ESI_TOKEN_URL", ",", "client_secret", "=", "app_settings", ".", "ESI_SSO_CLIENT_SECRET", ",", "code", "=", "code", ")", "r", "=", "oauth", ".", "request", "(", "'get'", ",", "app_settings", ".", "ESI_TOKEN_VERIFY_URL", ")", "r", ".", "raise_for_status", "(", ")", "token_data", "=", "r", ".", "json", "(", ")", "logger", ".", "debug", "(", "token_data", ")", "# translate returned data to a model", "model", "=", "self", ".", "create", "(", "character_id", "=", "token_data", "[", "'CharacterID'", "]", ",", "character_name", "=", "token_data", "[", "'CharacterName'", "]", ",", "character_owner_hash", "=", "token_data", "[", "'CharacterOwnerHash'", "]", ",", "access_token", "=", "token", "[", "'access_token'", "]", ",", "refresh_token", "=", "token", "[", "'refresh_token'", "]", ",", "token_type", "=", "token_data", "[", "'TokenType'", "]", ",", "user", "=", "user", ",", ")", "# parse scopes", "if", "'Scopes'", "in", "token_data", ":", "from", "esi", ".", "models", "import", "Scope", "for", "s", "in", "token_data", "[", "'Scopes'", "]", ".", "split", "(", ")", ":", "try", ":", "scope", "=", "Scope", ".", "objects", ".", "get", "(", "name", "=", "s", ")", "model", ".", "scopes", ".", "add", "(", "scope", ")", "except", "Scope", ".", "DoesNotExist", ":", "# This scope isn't included in a data migration. Create a placeholder until it updates.", "try", ":", "help_text", "=", "s", ".", "split", "(", "'.'", ")", "[", "1", "]", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "capitalize", "(", ")", "except", "IndexError", ":", "# Unusual scope name, missing periods.", "help_text", "=", "s", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "capitalize", "(", ")", "scope", "=", "Scope", ".", "objects", ".", "create", "(", "name", "=", "s", ",", "help_text", "=", "help_text", ")", "model", ".", "scopes", ".", "add", "(", "scope", ")", "logger", ".", "debug", "(", "\"Added {0} scopes to new token.\"", ".", "format", "(", "model", ".", "scopes", ".", "all", "(", ")", ".", "count", "(", ")", ")", ")", "if", "not", "app_settings", ".", "ESI_ALWAYS_CREATE_TOKEN", ":", "# see if we already have a token for this character and scope combination", "# if so, we don't need a new one", "queryset", "=", "self", ".", "get_queryset", "(", ")", ".", "equivalent_to", "(", "model", ")", "if", "queryset", ".", "exists", "(", ")", ":", "logger", ".", "debug", "(", "\"Identified {0} tokens equivalent to new token. Updating access and refresh tokens.\"", ".", "format", "(", "queryset", ".", "count", "(", ")", ")", ")", "queryset", ".", "update", "(", "access_token", "=", "model", ".", "access_token", ",", "refresh_token", "=", "model", ".", "refresh_token", ",", "created", "=", "model", ".", "created", ",", ")", "if", "queryset", ".", "filter", "(", "user", "=", "model", ".", "user", ")", ".", "exists", "(", ")", ":", "logger", ".", "debug", "(", "\"Equivalent token with same user exists. 
Deleting new token.\"", ")", "model", ".", "delete", "(", ")", "model", "=", "queryset", ".", "filter", "(", "user", "=", "model", ".", "user", ")", "[", "0", "]", "# pick one at random", "logger", ".", "debug", "(", "\"Successfully created {0} for user {1}\"", ".", "format", "(", "repr", "(", "model", ")", ",", "user", ")", ")", "return", "model" ]
Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token`
[ "Perform", "OAuth", "code", "exchange", "to", "retrieve", "a", "token", ".", ":", "param", "code", ":", "OAuth", "grant", "code", ".", ":", "param", "user", ":", "User", "who", "will", "own", "token", ".", ":", "return", ":", ":", "class", ":", "esi", ".", "models", ".", "Token" ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L123-L189
0.003758
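One detail of the create_from_code record above that is easy to miss: when a scope name is not yet in the database, a placeholder help text is derived from the name itself. A standalone version of that rule (the example scope strings are illustrations only):

def scope_help_text(scope_name):
    try:
        return scope_name.split('.')[1].replace('_', ' ').capitalize()
    except IndexError:
        # unusual scope name without periods
        return scope_name.replace('_', ' ').capitalize()

print(scope_help_text("esi-wallet.read_character_wallet.v1"))  # Read character wallet
print(scope_help_text("publicData"))                           # Publicdata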
kylejusticemagnuson/pyti
pyti/vertical_horizontal_filter.py
vertical_horizontal_filter
def vertical_horizontal_filter(data, period): """ Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) """ catch_errors.check_for_period_error(data, period) vhf = [abs(np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1])) / sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))] vhf = fill_for_noncomputable_vals(data, vhf) return vhf
python
def vertical_horizontal_filter(data, period): """ Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) """ catch_errors.check_for_period_error(data, period) vhf = [abs(np.max(data[idx+1-period:idx+1]) - np.min(data[idx+1-period:idx+1])) / sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))] vhf = fill_for_noncomputable_vals(data, vhf) return vhf
[ "def", "vertical_horizontal_filter", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "vhf", "=", "[", "abs", "(", "np", ".", "max", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", "-", "np", ".", "min", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "/", "sum", "(", "[", "abs", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "[", "i", "]", "-", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", "[", "i", "-", "1", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "data", "[", "idx", "+", "1", "-", "period", ":", "idx", "+", "1", "]", ")", ")", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "vhf", "=", "fill_for_noncomputable_vals", "(", "data", ",", "vhf", ")", "return", "vhf" ]
Vertical Horizontal Filter. Formula: ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))
[ "Vertical", "Horizontal", "Filter", "." ]
train
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/vertical_horizontal_filter.py#L8-L22
0.007421
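A worked single-window example of the formula quoted in the vertical_horizontal_filter record above. It follows the stated formula ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) directly rather than reproducing the list-comprehension indexing of the original; the prices are invented.

import numpy as np

prices = np.array([10.0, 11.0, 10.5, 11.5, 12.0, 11.0])
period = 5

window = prices[-period:]                 # last `period` prices
numerator = abs(np.max(window) - np.min(window))
denominator = sum(abs(window[i] - window[i - 1]) for i in range(1, len(window)))
print(numerator / denominator)            # 0.5 for this window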
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/reftrackitemdata.py
reftrack_version_data
def reftrack_version_data(rt, role): """Return the data for the version that is loaded by the reftrack :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the version :rtype: depending on role :raises: None """ tfi = rt.get_taskfileinfo() if not tfi: return return filesysitemdata.taskfileinfo_version_data(tfi, role)
python
def reftrack_version_data(rt, role): """Return the data for the version that is loaded by the reftrack :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the version :rtype: depending on role :raises: None """ tfi = rt.get_taskfileinfo() if not tfi: return return filesysitemdata.taskfileinfo_version_data(tfi, role)
[ "def", "reftrack_version_data", "(", "rt", ",", "role", ")", ":", "tfi", "=", "rt", ".", "get_taskfileinfo", "(", ")", "if", "not", "tfi", ":", "return", "return", "filesysitemdata", ".", "taskfileinfo_version_data", "(", "tfi", ",", "role", ")" ]
Return the data for the version that is loaded by the reftrack :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the version :rtype: depending on role :raises: None
[ "Return", "the", "data", "for", "the", "version", "that", "is", "loaded", "by", "the", "reftrack" ]
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/reftrackitemdata.py#L132-L146
0.001916
podio/podio-py
pypodio2/areas.py
View.update_last_view
def update_last_view(self, app_id, attributes): """ Updates the last view for the active user :param app_id: the app id :param attributes: the body of the request in dictionary format """ if not isinstance(attributes, dict): raise TypeError('Must be of type dict') attribute_data = json.dumps(attributes) return self.transport.PUT(url='/view/app/{}/last'.format(app_id), body=attribute_data, type='application/json')
python
def update_last_view(self, app_id, attributes): """ Updates the last view for the active user :param app_id: the app id :param attributes: the body of the request in dictionary format """ if not isinstance(attributes, dict): raise TypeError('Must be of type dict') attribute_data = json.dumps(attributes) return self.transport.PUT(url='/view/app/{}/last'.format(app_id), body=attribute_data, type='application/json')
[ "def", "update_last_view", "(", "self", ",", "app_id", ",", "attributes", ")", ":", "if", "not", "isinstance", "(", "attributes", ",", "dict", ")", ":", "raise", "TypeError", "(", "'Must be of type dict'", ")", "attribute_data", "=", "json", ".", "dumps", "(", "attributes", ")", "return", "self", ".", "transport", ".", "PUT", "(", "url", "=", "'/view/app/{}/last'", ".", "format", "(", "app_id", ")", ",", "body", "=", "attribute_data", ",", "type", "=", "'application/json'", ")" ]
Updates the last view for the active user :param app_id: the app id :param attributes: the body of the request in dictionary format
[ "Updates", "the", "last", "view", "for", "the", "active", "user" ]
train
https://github.com/podio/podio-py/blob/5ce956034a06c98b0ef18fcd940b36da0908ad6c/pypodio2/areas.py#L589-L600
0.003795
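A standalone sketch of the same call made with the requests library instead of the SDK's transport object (assumes requests is installed). The base URL and the OAuth2 header format are assumptions about the Podio API, and the token is a placeholder.

import json
import requests

def update_last_view(app_id, attributes, token="PLACEHOLDER_OAUTH_TOKEN"):
    if not isinstance(attributes, dict):
        raise TypeError('Must be of type dict')
    return requests.put(
        'https://api.podio.com/view/app/{}/last'.format(app_id),  # assumed base URL
        data=json.dumps(attributes),
        headers={'Authorization': 'OAuth2 ' + token,              # assumed header format
                 'Content-Type': 'application/json'},
    )

# response = update_last_view(12345, {"view_id": 678})   # needs a real token and app id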
PyThaiNLP/pythainlp
pythainlp/soundex/udom83.py
udom83
def udom83(text: str) -> str: """ Udom83 - It's a Thai soundex rule. :param str text: Thai word :return: Udom83 soundex """ if not text or not isinstance(text, str): return "" text = _RE_1.sub("ัน\\1", text) text = _RE_2.sub("ั\\1", text) text = _RE_3.sub("ัน\\1", text) text = _RE_4.sub("ัน", text) text = _RE_5.sub("\\1", text) text = _RE_6.sub("\\1ย", text) text = _RE_7.sub("ม\\1", text) text = _RE_8.sub("ม", text) text = _RE_9.sub("ม", text) text = _RE_10.sub("", text) text = _RE_11.sub("", text) if not text: return "" sd = text[0].translate(_TRANS1) sd += text[1:].translate(_TRANS2) return (sd + "000000")[:7]
python
def udom83(text: str) -> str: """ Udom83 - It's a Thai soundex rule. :param str text: Thai word :return: Udom83 soundex """ if not text or not isinstance(text, str): return "" text = _RE_1.sub("ัน\\1", text) text = _RE_2.sub("ั\\1", text) text = _RE_3.sub("ัน\\1", text) text = _RE_4.sub("ัน", text) text = _RE_5.sub("\\1", text) text = _RE_6.sub("\\1ย", text) text = _RE_7.sub("ม\\1", text) text = _RE_8.sub("ม", text) text = _RE_9.sub("ม", text) text = _RE_10.sub("", text) text = _RE_11.sub("", text) if not text: return "" sd = text[0].translate(_TRANS1) sd += text[1:].translate(_TRANS2) return (sd + "000000")[:7]
[ "def", "udom83", "(", "text", ":", "str", ")", "->", "str", ":", "if", "not", "text", "or", "not", "isinstance", "(", "text", ",", "str", ")", ":", "return", "\"\"", "text", "=", "_RE_1", ".", "sub", "(", "\"ัน\\\\1\", te", "x", ")", "", "text", "=", "_RE_2", ".", "sub", "(", "\"ั\\\\1\", ", "t", "xt)", "", "text", "=", "_RE_3", ".", "sub", "(", "\"ัน\\\\1\", te", "x", ")", "", "text", "=", "_RE_4", ".", "sub", "(", "\"ัน\", te", "x", ")", "", "text", "=", "_RE_5", ".", "sub", "(", "\"\\\\1\"", ",", "text", ")", "text", "=", "_RE_6", ".", "sub", "(", "\"\\\\1ย\", ", "t", "xt)", "", "text", "=", "_RE_7", ".", "sub", "(", "\"ม\\\\1\", ", "t", "xt)", "", "text", "=", "_RE_8", ".", "sub", "(", "\"ม\", ", "t", "xt)", "", "text", "=", "_RE_9", ".", "sub", "(", "\"ม\", ", "t", "xt)", "", "text", "=", "_RE_10", ".", "sub", "(", "\"\"", ",", "text", ")", "text", "=", "_RE_11", ".", "sub", "(", "\"\"", ",", "text", ")", "if", "not", "text", ":", "return", "\"\"", "sd", "=", "text", "[", "0", "]", ".", "translate", "(", "_TRANS1", ")", "sd", "+=", "text", "[", "1", ":", "]", ".", "translate", "(", "_TRANS2", ")", "return", "(", "sd", "+", "\"000000\"", ")", "[", ":", "7", "]" ]
Udom83 - It's a Thai soundex rule. :param str text: Thai word :return: Udom83 soundex
[ "Udom83", "-", "It", "s", "a", "Thai", "soundex", "rule", "." ]
train
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/soundex/udom83.py#L32-L61
0.001379
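Usage sketch for the udom83 record above, assuming the pythainlp package is installed; the exact 7-character codes depend on the library's transcription tables, so no specific outputs are claimed here.

from pythainlp.soundex import udom83

for word in ("ทดสอบ", "รถ", "รด"):
    print(word, udom83(word))   # each code is padded or truncated to 7 characters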
SINGROUP/SOAPLite
soaplite/core.py
get_soap_locals
def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0): """Get the RBF basis SOAP output for the given positions in a finite system. Args: obj(ase.Atoms): Atomic structure for which the SOAP output is calculated. Hpos: Positions at which to calculate SOAP alp: Alphas bet: Betas rCut: Radial cutoff. nMax: Maximum number of radial basis functions Lmax: Maximum spherical harmonics degree crossOver: all_atomtypes: Can be used to specify the atomic elements for which to calculate the output. If given the output is calculated only for the given species and is ordered by atomic number. eta: The gaussian smearing width. Returns: np.ndarray: SOAP output for the given positions. """ rCutHard = rCut + 5 assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax) assert Lmax >= 0, "l cannot be negative.Lmax={}".format(Lmax) assert rCutHard < 17.0001, "hard radius cuttof cannot be larger than 17 Angs. rCut={}".format(rCutHard) assert rCutHard > 1.999, "hard redius cuttof cannot be lower than 1 Ang. rCut={}".format(rCutHard) assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax) assert nMax <= 13, "number of basis functions cannot exceed 12. nMax={}".format(nMax) assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta) # get clusgeo internal format for c-code Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes) Hpos = np.array(Hpos) py_Hsize = Hpos.shape[0] # flatten arrays Hpos = Hpos.flatten() alp = alp.flatten() bet = bet.flatten() # convert int to c_int lMax = c_int(Lmax) Hsize = c_int(py_Hsize) Ntypes = c_int(py_Ntypes) totalAN = c_int(totalAN) rCutHard = c_double(rCutHard) Nsize = c_int(nMax) c_eta = c_double(eta) #convert int array to c_int array typeNs = (c_int * len(typeNs))(*typeNs) # convert to c_double arrays # alphas alphas = (c_double * len(alp))(*alp.tolist()) # betas betas = (c_double * len(bet))(*bet.tolist()) #Apos axyz = (c_double * len(Apos))(*Apos.tolist()) #Hpos hxyz = (c_double * len(Hpos))(*Hpos.tolist()) ### START SOAP### #path_to_so = os.path.dirname(os.path.abspath(__file__)) _PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__)) _SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS if py_Ntypes == 1 or (not crossOver): substring = "lib/libsoapPySig." libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None)) libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double] libsoap.soap.restype = POINTER (c_double) c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))() libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta) else: substring = "lib/libsoapGTO." 
libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None)) libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double] libsoapGTO.soap.restype = POINTER (c_double) c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))() libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta) # return c; if crossOver: crosTypes = int((py_Ntypes*(py_Ntypes+1))/2) shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes) else: shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes) a = np.ctypeslib.as_array(c) a = a.reshape(shape) return a
python
def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0): """Get the RBF basis SOAP output for the given positions in a finite system. Args: obj(ase.Atoms): Atomic structure for which the SOAP output is calculated. Hpos: Positions at which to calculate SOAP alp: Alphas bet: Betas rCut: Radial cutoff. nMax: Maximum number of radial basis functions Lmax: Maximum spherical harmonics degree crossOver: all_atomtypes: Can be used to specify the atomic elements for which to calculate the output. If given the output is calculated only for the given species and is ordered by atomic number. eta: The gaussian smearing width. Returns: np.ndarray: SOAP output for the given positions. """ rCutHard = rCut + 5 assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax) assert Lmax >= 0, "l cannot be negative.Lmax={}".format(Lmax) assert rCutHard < 17.0001, "hard radius cuttof cannot be larger than 17 Angs. rCut={}".format(rCutHard) assert rCutHard > 1.999, "hard redius cuttof cannot be lower than 1 Ang. rCut={}".format(rCutHard) assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax) assert nMax <= 13, "number of basis functions cannot exceed 12. nMax={}".format(nMax) assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta) # get clusgeo internal format for c-code Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes) Hpos = np.array(Hpos) py_Hsize = Hpos.shape[0] # flatten arrays Hpos = Hpos.flatten() alp = alp.flatten() bet = bet.flatten() # convert int to c_int lMax = c_int(Lmax) Hsize = c_int(py_Hsize) Ntypes = c_int(py_Ntypes) totalAN = c_int(totalAN) rCutHard = c_double(rCutHard) Nsize = c_int(nMax) c_eta = c_double(eta) #convert int array to c_int array typeNs = (c_int * len(typeNs))(*typeNs) # convert to c_double arrays # alphas alphas = (c_double * len(alp))(*alp.tolist()) # betas betas = (c_double * len(bet))(*bet.tolist()) #Apos axyz = (c_double * len(Apos))(*Apos.tolist()) #Hpos hxyz = (c_double * len(Hpos))(*Hpos.tolist()) ### START SOAP### #path_to_so = os.path.dirname(os.path.abspath(__file__)) _PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__)) _SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS if py_Ntypes == 1 or (not crossOver): substring = "lib/libsoapPySig." libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None)) libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double] libsoap.soap.restype = POINTER (c_double) c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))() libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta) else: substring = "lib/libsoapGTO." 
libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None)) libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double] libsoapGTO.soap.restype = POINTER (c_double) c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))() libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta) # return c; if crossOver: crosTypes = int((py_Ntypes*(py_Ntypes+1))/2) shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes) else: shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes) a = np.ctypeslib.as_array(c) a = a.reshape(shape) return a
[ "def", "get_soap_locals", "(", "obj", ",", "Hpos", ",", "alp", ",", "bet", ",", "rCut", "=", "5.0", ",", "nMax", "=", "5", ",", "Lmax", "=", "5", ",", "crossOver", "=", "True", ",", "all_atomtypes", "=", "None", ",", "eta", "=", "1.0", ")", ":", "rCutHard", "=", "rCut", "+", "5", "assert", "Lmax", "<=", "9", ",", "\"l cannot exceed 9. Lmax={}\"", ".", "format", "(", "Lmax", ")", "assert", "Lmax", ">=", "0", ",", "\"l cannot be negative.Lmax={}\"", ".", "format", "(", "Lmax", ")", "assert", "rCutHard", "<", "17.0001", ",", "\"hard radius cuttof cannot be larger than 17 Angs. rCut={}\"", ".", "format", "(", "rCutHard", ")", "assert", "rCutHard", ">", "1.999", ",", "\"hard redius cuttof cannot be lower than 1 Ang. rCut={}\"", ".", "format", "(", "rCutHard", ")", "assert", "nMax", ">=", "2", ",", "\"number of basis functions cannot be lower than 2. nMax={}\"", ".", "format", "(", "nMax", ")", "assert", "nMax", "<=", "13", ",", "\"number of basis functions cannot exceed 12. nMax={}\"", ".", "format", "(", "nMax", ")", "assert", "eta", ">=", "0.0001", ",", "\"Eta cannot be zero or negative. nMax={}\"", ".", "format", "(", "eta", ")", "# get clusgeo internal format for c-code", "Apos", ",", "typeNs", ",", "py_Ntypes", ",", "atomtype_lst", ",", "totalAN", "=", "_format_ase2clusgeo", "(", "obj", ",", "all_atomtypes", ")", "Hpos", "=", "np", ".", "array", "(", "Hpos", ")", "py_Hsize", "=", "Hpos", ".", "shape", "[", "0", "]", "# flatten arrays", "Hpos", "=", "Hpos", ".", "flatten", "(", ")", "alp", "=", "alp", ".", "flatten", "(", ")", "bet", "=", "bet", ".", "flatten", "(", ")", "# convert int to c_int", "lMax", "=", "c_int", "(", "Lmax", ")", "Hsize", "=", "c_int", "(", "py_Hsize", ")", "Ntypes", "=", "c_int", "(", "py_Ntypes", ")", "totalAN", "=", "c_int", "(", "totalAN", ")", "rCutHard", "=", "c_double", "(", "rCutHard", ")", "Nsize", "=", "c_int", "(", "nMax", ")", "c_eta", "=", "c_double", "(", "eta", ")", "#convert int array to c_int array", "typeNs", "=", "(", "c_int", "*", "len", "(", "typeNs", ")", ")", "(", "*", "typeNs", ")", "# convert to c_double arrays", "# alphas", "alphas", "=", "(", "c_double", "*", "len", "(", "alp", ")", ")", "(", "*", "alp", ".", "tolist", "(", ")", ")", "# betas", "betas", "=", "(", "c_double", "*", "len", "(", "bet", ")", ")", "(", "*", "bet", ".", "tolist", "(", ")", ")", "#Apos", "axyz", "=", "(", "c_double", "*", "len", "(", "Apos", ")", ")", "(", "*", "Apos", ".", "tolist", "(", ")", ")", "#Hpos", "hxyz", "=", "(", "c_double", "*", "len", "(", "Hpos", ")", ")", "(", "*", "Hpos", ".", "tolist", "(", ")", ")", "### START SOAP###", "#path_to_so = os.path.dirname(os.path.abspath(__file__))", "_PATH_TO_SOAPLITE_SO", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "_SOAPLITE_SOFILES", "=", "glob", ".", "glob", "(", "\"\"", ".", "join", "(", "[", "_PATH_TO_SOAPLITE_SO", ",", "\"/../lib/libsoap*.*so\"", "]", ")", ")", "## NOT SURE ABOUT THIS", "if", "py_Ntypes", "==", "1", "or", "(", "not", "crossOver", ")", ":", "substring", "=", "\"lib/libsoapPySig.\"", "libsoap", "=", "CDLL", "(", "next", "(", "(", "s", "for", "s", "in", "_SOAPLITE_SOFILES", "if", "substring", "in", "s", ")", ",", "None", ")", ")", "libsoap", ".", "soap", ".", "argtypes", "=", "[", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_int", ")", ",", "c_double", ",", "c_int", ",", 
"c_int", ",", "c_int", ",", "c_int", ",", "c_int", ",", "c_double", "]", "libsoap", ".", "soap", ".", "restype", "=", "POINTER", "(", "c_double", ")", "c", "=", "(", "c_double", "*", "(", "int", "(", "(", "nMax", "*", "(", "nMax", "+", "1", ")", ")", "/", "2", ")", "*", "(", "Lmax", "+", "1", ")", "*", "py_Ntypes", "*", "py_Hsize", ")", ")", "(", ")", "libsoap", ".", "soap", "(", "c", ",", "axyz", ",", "hxyz", ",", "alphas", ",", "betas", ",", "typeNs", ",", "rCutHard", ",", "totalAN", ",", "Ntypes", ",", "Nsize", ",", "lMax", ",", "Hsize", ",", "c_eta", ")", "else", ":", "substring", "=", "\"lib/libsoapGTO.\"", "libsoapGTO", "=", "CDLL", "(", "next", "(", "(", "s", "for", "s", "in", "_SOAPLITE_SOFILES", "if", "substring", "in", "s", ")", ",", "None", ")", ")", "libsoapGTO", ".", "soap", ".", "argtypes", "=", "[", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_double", ")", ",", "POINTER", "(", "c_int", ")", ",", "c_double", ",", "c_int", ",", "c_int", ",", "c_int", ",", "c_int", ",", "c_int", ",", "c_double", "]", "libsoapGTO", ".", "soap", ".", "restype", "=", "POINTER", "(", "c_double", ")", "c", "=", "(", "c_double", "*", "(", "int", "(", "(", "nMax", "*", "(", "nMax", "+", "1", ")", ")", "/", "2", ")", "*", "(", "Lmax", "+", "1", ")", "*", "int", "(", "(", "py_Ntypes", "*", "(", "py_Ntypes", "+", "1", ")", ")", "/", "2", ")", "*", "py_Hsize", ")", ")", "(", ")", "libsoapGTO", ".", "soap", "(", "c", ",", "axyz", ",", "hxyz", ",", "alphas", ",", "betas", ",", "typeNs", ",", "rCutHard", ",", "totalAN", ",", "Ntypes", ",", "Nsize", ",", "lMax", ",", "Hsize", ",", "c_eta", ")", "# return c;", "if", "crossOver", ":", "crosTypes", "=", "int", "(", "(", "py_Ntypes", "*", "(", "py_Ntypes", "+", "1", ")", ")", "/", "2", ")", "shape", "=", "(", "py_Hsize", ",", "int", "(", "(", "nMax", "*", "(", "nMax", "+", "1", ")", ")", "/", "2", ")", "*", "(", "Lmax", "+", "1", ")", "*", "crosTypes", ")", "else", ":", "shape", "=", "(", "py_Hsize", ",", "int", "(", "(", "nMax", "*", "(", "nMax", "+", "1", ")", ")", "/", "2", ")", "*", "(", "Lmax", "+", "1", ")", "*", "py_Ntypes", ")", "a", "=", "np", ".", "ctypeslib", ".", "as_array", "(", "c", ")", "a", "=", "a", ".", "reshape", "(", "shape", ")", "return", "a" ]
Get the RBF basis SOAP output for the given positions in a finite system. Args: obj(ase.Atoms): Atomic structure for which the SOAP output is calculated. Hpos: Positions at which to calculate SOAP alp: Alphas bet: Betas rCut: Radial cutoff. nMax: Maximum number of radial basis functions Lmax: Maximum spherical harmonics degree crossOver: all_atomtypes: Can be used to specify the atomic elements for which to calculate the output. If given the output is calculated only for the given species and is ordered by atomic number. eta: The gaussian smearing width. Returns: np.ndarray: SOAP output for the given positions.
[ "Get", "the", "RBF", "basis", "SOAP", "output", "for", "the", "given", "positions", "in", "a", "finite", "system", "." ]
train
https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/soaplite/core.py#L80-L169
0.015329
autokey/autokey
lib/autokey/scripting.py
Engine.run_script_from_macro
def run_script_from_macro(self, args): """ Used internally by AutoKey for phrase macros """ self.__macroArgs = args["args"].split(',') try: self.run_script(args["name"]) except Exception as e: self.set_return_value("{ERROR: %s}" % str(e))
python
def run_script_from_macro(self, args): """ Used internally by AutoKey for phrase macros """ self.__macroArgs = args["args"].split(',') try: self.run_script(args["name"]) except Exception as e: self.set_return_value("{ERROR: %s}" % str(e))
[ "def", "run_script_from_macro", "(", "self", ",", "args", ")", ":", "self", ".", "__macroArgs", "=", "args", "[", "\"args\"", "]", ".", "split", "(", "','", ")", "try", ":", "self", ".", "run_script", "(", "args", "[", "\"name\"", "]", ")", "except", "Exception", "as", "e", ":", "self", ".", "set_return_value", "(", "\"{ERROR: %s}\"", "%", "str", "(", "e", ")", ")" ]
Used internally by AutoKey for phrase macros
[ "Used", "internally", "by", "AutoKey", "for", "phrase", "macros" ]
train
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L1256-L1265
0.009404
rootpy/rootpy
rootpy/io/pickler.py
dump
def dump(obj, root_file, proto=0, key=None): """Dump an object into a ROOT TFile. `root_file` may be an open ROOT file or directory, or a string path to an existing ROOT file. """ if isinstance(root_file, string_types): root_file = root_open(root_file, 'recreate') own_file = True else: own_file = False ret = Pickler(root_file, proto).dump(obj, key) if own_file: root_file.Close() return ret
python
def dump(obj, root_file, proto=0, key=None): """Dump an object into a ROOT TFile. `root_file` may be an open ROOT file or directory, or a string path to an existing ROOT file. """ if isinstance(root_file, string_types): root_file = root_open(root_file, 'recreate') own_file = True else: own_file = False ret = Pickler(root_file, proto).dump(obj, key) if own_file: root_file.Close() return ret
[ "def", "dump", "(", "obj", ",", "root_file", ",", "proto", "=", "0", ",", "key", "=", "None", ")", ":", "if", "isinstance", "(", "root_file", ",", "string_types", ")", ":", "root_file", "=", "root_open", "(", "root_file", ",", "'recreate'", ")", "own_file", "=", "True", "else", ":", "own_file", "=", "False", "ret", "=", "Pickler", "(", "root_file", ",", "proto", ")", ".", "dump", "(", "obj", ",", "key", ")", "if", "own_file", ":", "root_file", ".", "Close", "(", ")", "return", "ret" ]
Dump an object into a ROOT TFile. `root_file` may be an open ROOT file or directory, or a string path to an existing ROOT file.
[ "Dump", "an", "object", "into", "a", "ROOT", "TFile", "." ]
train
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/io/pickler.py#L344-L358
0.002169
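A minimal usage sketch for the `dump` helper above. The import path (`rootpy.io.pickler`) follows the file location given in this record, and the filename and payload are placeholders; ROOT and rootpy must be installed for this to run.

from rootpy.io.pickler import dump   # import path assumed from the record's file location

# Passing a string path makes dump() open the file itself ('recreate') and close it afterwards.
payload = {"cuts": [0.5, 1.0, 2.5], "label": "loose"}
dump(payload, "objects.root", key="selection")   # 'key' names the stored object in the file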
geronimp/graftM
graftm/sequence_search_results.py
DiamondSearchResult.import_from_daa_file
def import_from_daa_file(daa_filename): '''Generate new results object from the output of diamond blastx/p''' # blast m8 format is # 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore res = DiamondSearchResult() res.fields = [ SequenceSearchResult.QUERY_ID_FIELD, SequenceSearchResult.HIT_ID_FIELD, SequenceSearchResult.PERCENT_ID_FIELD, SequenceSearchResult.ALIGNMENT_LENGTH_FIELD, SequenceSearchResult.MISMATCH_FIELD, #skip SequenceSearchResult.QUERY_FROM_FIELD, SequenceSearchResult.QUERY_TO_FIELD, SequenceSearchResult.HIT_FROM_FIELD, SequenceSearchResult.HIT_TO_FIELD, SequenceSearchResult.EVALUE_FIELD, SequenceSearchResult.ALIGNMENT_BIT_SCORE, # extras SequenceSearchResult.ALIGNMENT_DIRECTION, SequenceSearchResult.HMM_NAME_FIELD ] cmd = "diamond view -a '%s'" % daa_filename logging.debug("Running cmd: %s" % cmd) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) stdout, stderr = process.communicate() reader = csv.reader(stdout.decode('ascii').splitlines(), delimiter='\t') if process.returncode != 0: raise Exception("Problem running diamond view with cmd: '%s'," "stderr was %s" % (cmd, stderr)) for row in reader: # 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore # 0 1 2 3 4 5 6 7 8 9 10 11 query_start = int(row[6]) query_end = int(row[7]) res.results.append([row[0], row[1], row[2], row[3], row[4], query_start, query_end, int(row[8]), int(row[9]), row[10], row[11], query_start < query_end, os.path.basename(daa_filename) ]) return res
python
def import_from_daa_file(daa_filename): '''Generate new results object from the output of diamond blastx/p''' # blast m8 format is # 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore res = DiamondSearchResult() res.fields = [ SequenceSearchResult.QUERY_ID_FIELD, SequenceSearchResult.HIT_ID_FIELD, SequenceSearchResult.PERCENT_ID_FIELD, SequenceSearchResult.ALIGNMENT_LENGTH_FIELD, SequenceSearchResult.MISMATCH_FIELD, #skip SequenceSearchResult.QUERY_FROM_FIELD, SequenceSearchResult.QUERY_TO_FIELD, SequenceSearchResult.HIT_FROM_FIELD, SequenceSearchResult.HIT_TO_FIELD, SequenceSearchResult.EVALUE_FIELD, SequenceSearchResult.ALIGNMENT_BIT_SCORE, # extras SequenceSearchResult.ALIGNMENT_DIRECTION, SequenceSearchResult.HMM_NAME_FIELD ] cmd = "diamond view -a '%s'" % daa_filename logging.debug("Running cmd: %s" % cmd) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) stdout, stderr = process.communicate() reader = csv.reader(stdout.decode('ascii').splitlines(), delimiter='\t') if process.returncode != 0: raise Exception("Problem running diamond view with cmd: '%s'," "stderr was %s" % (cmd, stderr)) for row in reader: # 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore # 0 1 2 3 4 5 6 7 8 9 10 11 query_start = int(row[6]) query_end = int(row[7]) res.results.append([row[0], row[1], row[2], row[3], row[4], query_start, query_end, int(row[8]), int(row[9]), row[10], row[11], query_start < query_end, os.path.basename(daa_filename) ]) return res
[ "def", "import_from_daa_file", "(", "daa_filename", ")", ":", "# blast m8 format is", "# 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore", "res", "=", "DiamondSearchResult", "(", ")", "res", ".", "fields", "=", "[", "SequenceSearchResult", ".", "QUERY_ID_FIELD", ",", "SequenceSearchResult", ".", "HIT_ID_FIELD", ",", "SequenceSearchResult", ".", "PERCENT_ID_FIELD", ",", "SequenceSearchResult", ".", "ALIGNMENT_LENGTH_FIELD", ",", "SequenceSearchResult", ".", "MISMATCH_FIELD", ",", "#skip", "SequenceSearchResult", ".", "QUERY_FROM_FIELD", ",", "SequenceSearchResult", ".", "QUERY_TO_FIELD", ",", "SequenceSearchResult", ".", "HIT_FROM_FIELD", ",", "SequenceSearchResult", ".", "HIT_TO_FIELD", ",", "SequenceSearchResult", ".", "EVALUE_FIELD", ",", "SequenceSearchResult", ".", "ALIGNMENT_BIT_SCORE", ",", "# extras", "SequenceSearchResult", ".", "ALIGNMENT_DIRECTION", ",", "SequenceSearchResult", ".", "HMM_NAME_FIELD", "]", "cmd", "=", "\"diamond view -a '%s'\"", "%", "daa_filename", "logging", ".", "debug", "(", "\"Running cmd: %s\"", "%", "cmd", ")", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "stdout", ",", "stderr", "=", "process", ".", "communicate", "(", ")", "reader", "=", "csv", ".", "reader", "(", "stdout", ".", "decode", "(", "'ascii'", ")", ".", "splitlines", "(", ")", ",", "delimiter", "=", "'\\t'", ")", "if", "process", ".", "returncode", "!=", "0", ":", "raise", "Exception", "(", "\"Problem running diamond view with cmd: '%s',\"", "\"stderr was %s\"", "%", "(", "cmd", ",", "stderr", ")", ")", "for", "row", "in", "reader", ":", "# 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore", "# 0 1 2 3 4 5 6 7 8 9 10 11", "query_start", "=", "int", "(", "row", "[", "6", "]", ")", "query_end", "=", "int", "(", "row", "[", "7", "]", ")", "res", ".", "results", ".", "append", "(", "[", "row", "[", "0", "]", ",", "row", "[", "1", "]", ",", "row", "[", "2", "]", ",", "row", "[", "3", "]", ",", "row", "[", "4", "]", ",", "query_start", ",", "query_end", ",", "int", "(", "row", "[", "8", "]", ")", ",", "int", "(", "row", "[", "9", "]", ")", ",", "row", "[", "10", "]", ",", "row", "[", "11", "]", ",", "query_start", "<", "query_end", ",", "os", ".", "path", ".", "basename", "(", "daa_filename", ")", "]", ")", "return", "res" ]
Generate new results object from the output of diamond blastx/p
[ "Generate", "new", "results", "object", "from", "the", "output", "of", "diamond", "blastx", "/", "p" ]
train
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_search_results.py#L57-L111
0.009016
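An illustrative call to `import_from_daa_file` above. The import path mirrors the record's file location, the `.daa` file name is a placeholder, and the `diamond` binary must be on PATH because the method shells out to `diamond view`.

from graftm.sequence_search_results import DiamondSearchResult   # path assumed from the record

# 'hits.daa' is a placeholder DIAMOND alignment archive from blastx/blastp.
result = DiamondSearchResult.import_from_daa_file("hits.daa")
for row in result.results:
    query_id, hit_id = row[0], row[1]   # column order follows result.fields
    print(query_id, hit_id)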
libtcod/python-tcod
fonts/X11/bdf/bdf2png.py
Glyph.parseBits
def parseBits(self, hexcode, width): """enumerate over bits in a line of data""" bitarray = [] for byte in hexcode[::-1]: bits = int(byte, 16) for x in range(4): bitarray.append(bool((2 ** x) & bits)) bitarray = bitarray[::-1] return enumerate(bitarray[:width])
python
def parseBits(self, hexcode, width): """enumerate over bits in a line of data""" bitarray = [] for byte in hexcode[::-1]: bits = int(byte, 16) for x in range(4): bitarray.append(bool((2 ** x) & bits)) bitarray = bitarray[::-1] return enumerate(bitarray[:width])
[ "def", "parseBits", "(", "self", ",", "hexcode", ",", "width", ")", ":", "bitarray", "=", "[", "]", "for", "byte", "in", "hexcode", "[", ":", ":", "-", "1", "]", ":", "bits", "=", "int", "(", "byte", ",", "16", ")", "for", "x", "in", "range", "(", "4", ")", ":", "bitarray", ".", "append", "(", "bool", "(", "(", "2", "**", "x", ")", "&", "bits", ")", ")", "bitarray", "=", "bitarray", "[", ":", ":", "-", "1", "]", "return", "enumerate", "(", "bitarray", "[", ":", "width", "]", ")" ]
enumerate over bits in a line of data
[ "enumerate", "over", "bits", "in", "a", "line", "of", "data" ]
train
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/fonts/X11/bdf/bdf2png.py#L89-L97
0.005865
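`Glyph.parseBits` lives inside a font build script rather than an installable module, so below is a standalone sketch of the same bit-unpacking idea (one hex-encoded BDF bitmap row to MSB-first booleans), written independently for illustration.

def parse_bits(hexcode, width):
    """Unpack one hex-encoded bitmap row into `width` (index, bit) pairs, MSB first."""
    bitarray = []
    for nibble in hexcode[::-1]:            # walk the hex digits right-to-left
        value = int(nibble, 16)
        for x in range(4):                  # emit this nibble's 4 bits, LSB first
            bitarray.append(bool((2 ** x) & value))
    bitarray = bitarray[::-1]               # reverse so the row reads MSB first
    return list(enumerate(bitarray[:width]))

print(parse_bits("A8", 5))   # 0xA8 = 10101000 -> [(0, True), (1, False), (2, True), (3, False), (4, True)]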
peterwittek/somoclu
src/Python/somoclu/train.py
Somoclu.view_activation_map
def view_activation_map(self, data_vector=None, data_index=None, activation_map=None, figsize=None, colormap=cm.Spectral_r, colorbar=False, bestmatches=False, bestmatchcolors=None, labels=None, zoom=None, filename=None): """Plot the activation map of a given data instance or a new data vector :param data_vector: Optional parameter for a new vector :type data_vector: numpy.array :param data_index: Optional parameter for the index of the data instance :type data_index: int. :param activation_map: Optional parameter to pass the an activation map :type activation_map: numpy.array :param figsize: Optional parameter to specify the size of the figure. :type figsize: (int, int) :param colormap: Optional parameter to specify the color map to be used. :type colormap: matplotlib.colors.Colormap :param colorbar: Optional parameter to include a colormap as legend. :type colorbar: bool. :param bestmatches: Optional parameter to plot best matching units. :type bestmatches: bool. :param bestmatchcolors: Optional parameter to specify the color of each best matching unit. :type bestmatchcolors: list of int. :param labels: Optional parameter to specify the label of each point. :type labels: list of str. :param zoom: Optional parameter to zoom into a region on the map. The first two coordinates of the tuple are the row limits, the second tuple contains the column limits. :type zoom: ((int, int), (int, int)) :param filename: If specified, the plot will not be shown but saved to this file. :type filename: str. """ if data_vector is None and data_index is None: raise Exception("Either specify a vector to see its activation " "or give an index of the training data instances") if data_vector is not None and data_index is not None: raise Exception("You cannot specify both a data vector and the " "index of a training data instance") if data_vector is not None and activation_map is not None: raise Exception("You cannot pass a previously computated" "activation map with a data vector") if data_vector is not None: try: d1, _ = data_vector.shape w = data_vector.copy() except ValueError: d1, _ = data_vector.shape w = data_vector.reshape(1, d1) if w.shape[1] == 1: w = w.T matrix = cdist(self.codebook.reshape((self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])), w, 'euclidean').T matrix.shape = (self.codebook.shape[0], self.codebook.shape[1]) else: if activation_map is None and self.activation_map is None: self.get_surface_state() if activation_map is None: activation_map = self.activation_map matrix = activation_map[data_index].reshape((self.codebook.shape[0], self.codebook.shape[1])) return self._view_matrix(matrix, figsize, colormap, colorbar, bestmatches, bestmatchcolors, labels, zoom, filename)
python
def view_activation_map(self, data_vector=None, data_index=None, activation_map=None, figsize=None, colormap=cm.Spectral_r, colorbar=False, bestmatches=False, bestmatchcolors=None, labels=None, zoom=None, filename=None): """Plot the activation map of a given data instance or a new data vector :param data_vector: Optional parameter for a new vector :type data_vector: numpy.array :param data_index: Optional parameter for the index of the data instance :type data_index: int. :param activation_map: Optional parameter to pass the an activation map :type activation_map: numpy.array :param figsize: Optional parameter to specify the size of the figure. :type figsize: (int, int) :param colormap: Optional parameter to specify the color map to be used. :type colormap: matplotlib.colors.Colormap :param colorbar: Optional parameter to include a colormap as legend. :type colorbar: bool. :param bestmatches: Optional parameter to plot best matching units. :type bestmatches: bool. :param bestmatchcolors: Optional parameter to specify the color of each best matching unit. :type bestmatchcolors: list of int. :param labels: Optional parameter to specify the label of each point. :type labels: list of str. :param zoom: Optional parameter to zoom into a region on the map. The first two coordinates of the tuple are the row limits, the second tuple contains the column limits. :type zoom: ((int, int), (int, int)) :param filename: If specified, the plot will not be shown but saved to this file. :type filename: str. """ if data_vector is None and data_index is None: raise Exception("Either specify a vector to see its activation " "or give an index of the training data instances") if data_vector is not None and data_index is not None: raise Exception("You cannot specify both a data vector and the " "index of a training data instance") if data_vector is not None and activation_map is not None: raise Exception("You cannot pass a previously computated" "activation map with a data vector") if data_vector is not None: try: d1, _ = data_vector.shape w = data_vector.copy() except ValueError: d1, _ = data_vector.shape w = data_vector.reshape(1, d1) if w.shape[1] == 1: w = w.T matrix = cdist(self.codebook.reshape((self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])), w, 'euclidean').T matrix.shape = (self.codebook.shape[0], self.codebook.shape[1]) else: if activation_map is None and self.activation_map is None: self.get_surface_state() if activation_map is None: activation_map = self.activation_map matrix = activation_map[data_index].reshape((self.codebook.shape[0], self.codebook.shape[1])) return self._view_matrix(matrix, figsize, colormap, colorbar, bestmatches, bestmatchcolors, labels, zoom, filename)
[ "def", "view_activation_map", "(", "self", ",", "data_vector", "=", "None", ",", "data_index", "=", "None", ",", "activation_map", "=", "None", ",", "figsize", "=", "None", ",", "colormap", "=", "cm", ".", "Spectral_r", ",", "colorbar", "=", "False", ",", "bestmatches", "=", "False", ",", "bestmatchcolors", "=", "None", ",", "labels", "=", "None", ",", "zoom", "=", "None", ",", "filename", "=", "None", ")", ":", "if", "data_vector", "is", "None", "and", "data_index", "is", "None", ":", "raise", "Exception", "(", "\"Either specify a vector to see its activation \"", "\"or give an index of the training data instances\"", ")", "if", "data_vector", "is", "not", "None", "and", "data_index", "is", "not", "None", ":", "raise", "Exception", "(", "\"You cannot specify both a data vector and the \"", "\"index of a training data instance\"", ")", "if", "data_vector", "is", "not", "None", "and", "activation_map", "is", "not", "None", ":", "raise", "Exception", "(", "\"You cannot pass a previously computated\"", "\"activation map with a data vector\"", ")", "if", "data_vector", "is", "not", "None", ":", "try", ":", "d1", ",", "_", "=", "data_vector", ".", "shape", "w", "=", "data_vector", ".", "copy", "(", ")", "except", "ValueError", ":", "d1", ",", "_", "=", "data_vector", ".", "shape", "w", "=", "data_vector", ".", "reshape", "(", "1", ",", "d1", ")", "if", "w", ".", "shape", "[", "1", "]", "==", "1", ":", "w", "=", "w", ".", "T", "matrix", "=", "cdist", "(", "self", ".", "codebook", ".", "reshape", "(", "(", "self", ".", "codebook", ".", "shape", "[", "0", "]", "*", "self", ".", "codebook", ".", "shape", "[", "1", "]", ",", "self", ".", "codebook", ".", "shape", "[", "2", "]", ")", ")", ",", "w", ",", "'euclidean'", ")", ".", "T", "matrix", ".", "shape", "=", "(", "self", ".", "codebook", ".", "shape", "[", "0", "]", ",", "self", ".", "codebook", ".", "shape", "[", "1", "]", ")", "else", ":", "if", "activation_map", "is", "None", "and", "self", ".", "activation_map", "is", "None", ":", "self", ".", "get_surface_state", "(", ")", "if", "activation_map", "is", "None", ":", "activation_map", "=", "self", ".", "activation_map", "matrix", "=", "activation_map", "[", "data_index", "]", ".", "reshape", "(", "(", "self", ".", "codebook", ".", "shape", "[", "0", "]", ",", "self", ".", "codebook", ".", "shape", "[", "1", "]", ")", ")", "return", "self", ".", "_view_matrix", "(", "matrix", ",", "figsize", ",", "colormap", ",", "colorbar", ",", "bestmatches", ",", "bestmatchcolors", ",", "labels", ",", "zoom", ",", "filename", ")" ]
Plot the activation map of a given data instance or a new data vector :param data_vector: Optional parameter for a new vector :type data_vector: numpy.array :param data_index: Optional parameter for the index of the data instance :type data_index: int. :param activation_map: Optional parameter to pass the an activation map :type activation_map: numpy.array :param figsize: Optional parameter to specify the size of the figure. :type figsize: (int, int) :param colormap: Optional parameter to specify the color map to be used. :type colormap: matplotlib.colors.Colormap :param colorbar: Optional parameter to include a colormap as legend. :type colorbar: bool. :param bestmatches: Optional parameter to plot best matching units. :type bestmatches: bool. :param bestmatchcolors: Optional parameter to specify the color of each best matching unit. :type bestmatchcolors: list of int. :param labels: Optional parameter to specify the label of each point. :type labels: list of str. :param zoom: Optional parameter to zoom into a region on the map. The first two coordinates of the tuple are the row limits, the second tuple contains the column limits. :type zoom: ((int, int), (int, int)) :param filename: If specified, the plot will not be shown but saved to this file. :type filename: str.
[ "Plot", "the", "activation", "map", "of", "a", "given", "data", "instance", "or", "a", "new", "data", "vector" ]
train
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L329-L397
0.002369
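A hedged end-to-end sketch for `view_activation_map` above. It assumes the usual somoclu workflow (construct `Somoclu`, call `train`, then plot); the map size and random data are arbitrary.

import numpy as np
import somoclu

data = np.random.rand(100, 5).astype(np.float32)   # 100 toy samples, 5 features
som = somoclu.Somoclu(n_columns=20, n_rows=15)     # map size chosen arbitrarily
som.train(data)

# Activation map of the first training sample; if no activation map has been
# computed yet, the method calls get_surface_state() itself before plotting.
som.view_activation_map(data_index=0, colorbar=True)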
saltstack/salt
salt/netapi/rest_tornado/event_processor.py
SaltInfo.publish
def publish(self, key, data): ''' Publishes the data to the event stream. ''' publish_data = {key: data} pub = salt.utils.json.dumps(publish_data) + str('\n\n') # future lint: disable=blacklisted-function self.handler.write_message(pub)
python
def publish(self, key, data): ''' Publishes the data to the event stream. ''' publish_data = {key: data} pub = salt.utils.json.dumps(publish_data) + str('\n\n') # future lint: disable=blacklisted-function self.handler.write_message(pub)
[ "def", "publish", "(", "self", ",", "key", ",", "data", ")", ":", "publish_data", "=", "{", "key", ":", "data", "}", "pub", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "publish_data", ")", "+", "str", "(", "'\\n\\n'", ")", "# future lint: disable=blacklisted-function", "self", ".", "handler", ".", "write_message", "(", "pub", ")" ]
Publishes the data to the event stream.
[ "Publishes", "the", "data", "to", "the", "event", "stream", "." ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/event_processor.py#L52-L58
0.010526
openstack/networking-arista
networking_arista/l3Plugin/l3_arista.py
AristaL3ServicePlugin.add_router_interface
def add_router_interface(self, context, router_id, interface_info): """Add a subnet of a network to an existing router.""" new_router = super(AristaL3ServicePlugin, self).add_router_interface( context, router_id, interface_info) core = directory.get_plugin() # Get network info for the subnet that is being added to the router. # Check if the interface information is by port-id or subnet-id add_by_port, add_by_sub = self._validate_interface_info(interface_info) if add_by_sub: subnet = core.get_subnet(context, interface_info['subnet_id']) elif add_by_port: port = core.get_port(context, interface_info['port_id']) subnet_id = port['fixed_ips'][0]['subnet_id'] subnet = core.get_subnet(context, subnet_id) network_id = subnet['network_id'] # To create SVI's in Arista HW, the segmentation Id is required # for this network. ml2_db = NetworkContext(self, context, {'id': network_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] # Package all the info needed for Hw programming router = self.get_router(context, router_id) router_info = copy.deepcopy(new_router) router_info['seg_id'] = seg_id router_info['name'] = router['name'] router_info['cidr'] = subnet['cidr'] router_info['gip'] = subnet['gateway_ip'] router_info['ip_version'] = subnet['ip_version'] try: self.driver.add_router_interface(context, router_info) return new_router except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error Adding subnet %(subnet)s to " "router %(router_id)s on Arista HW"), {'subnet': subnet, 'router_id': router_id}) super(AristaL3ServicePlugin, self).remove_router_interface( context, router_id, interface_info)
python
def add_router_interface(self, context, router_id, interface_info): """Add a subnet of a network to an existing router.""" new_router = super(AristaL3ServicePlugin, self).add_router_interface( context, router_id, interface_info) core = directory.get_plugin() # Get network info for the subnet that is being added to the router. # Check if the interface information is by port-id or subnet-id add_by_port, add_by_sub = self._validate_interface_info(interface_info) if add_by_sub: subnet = core.get_subnet(context, interface_info['subnet_id']) elif add_by_port: port = core.get_port(context, interface_info['port_id']) subnet_id = port['fixed_ips'][0]['subnet_id'] subnet = core.get_subnet(context, subnet_id) network_id = subnet['network_id'] # To create SVI's in Arista HW, the segmentation Id is required # for this network. ml2_db = NetworkContext(self, context, {'id': network_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] # Package all the info needed for Hw programming router = self.get_router(context, router_id) router_info = copy.deepcopy(new_router) router_info['seg_id'] = seg_id router_info['name'] = router['name'] router_info['cidr'] = subnet['cidr'] router_info['gip'] = subnet['gateway_ip'] router_info['ip_version'] = subnet['ip_version'] try: self.driver.add_router_interface(context, router_info) return new_router except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error Adding subnet %(subnet)s to " "router %(router_id)s on Arista HW"), {'subnet': subnet, 'router_id': router_id}) super(AristaL3ServicePlugin, self).remove_router_interface( context, router_id, interface_info)
[ "def", "add_router_interface", "(", "self", ",", "context", ",", "router_id", ",", "interface_info", ")", ":", "new_router", "=", "super", "(", "AristaL3ServicePlugin", ",", "self", ")", ".", "add_router_interface", "(", "context", ",", "router_id", ",", "interface_info", ")", "core", "=", "directory", ".", "get_plugin", "(", ")", "# Get network info for the subnet that is being added to the router.", "# Check if the interface information is by port-id or subnet-id", "add_by_port", ",", "add_by_sub", "=", "self", ".", "_validate_interface_info", "(", "interface_info", ")", "if", "add_by_sub", ":", "subnet", "=", "core", ".", "get_subnet", "(", "context", ",", "interface_info", "[", "'subnet_id'", "]", ")", "elif", "add_by_port", ":", "port", "=", "core", ".", "get_port", "(", "context", ",", "interface_info", "[", "'port_id'", "]", ")", "subnet_id", "=", "port", "[", "'fixed_ips'", "]", "[", "0", "]", "[", "'subnet_id'", "]", "subnet", "=", "core", ".", "get_subnet", "(", "context", ",", "subnet_id", ")", "network_id", "=", "subnet", "[", "'network_id'", "]", "# To create SVI's in Arista HW, the segmentation Id is required", "# for this network.", "ml2_db", "=", "NetworkContext", "(", "self", ",", "context", ",", "{", "'id'", ":", "network_id", "}", ")", "seg_id", "=", "ml2_db", ".", "network_segments", "[", "0", "]", "[", "'segmentation_id'", "]", "# Package all the info needed for Hw programming", "router", "=", "self", ".", "get_router", "(", "context", ",", "router_id", ")", "router_info", "=", "copy", ".", "deepcopy", "(", "new_router", ")", "router_info", "[", "'seg_id'", "]", "=", "seg_id", "router_info", "[", "'name'", "]", "=", "router", "[", "'name'", "]", "router_info", "[", "'cidr'", "]", "=", "subnet", "[", "'cidr'", "]", "router_info", "[", "'gip'", "]", "=", "subnet", "[", "'gateway_ip'", "]", "router_info", "[", "'ip_version'", "]", "=", "subnet", "[", "'ip_version'", "]", "try", ":", "self", ".", "driver", ".", "add_router_interface", "(", "context", ",", "router_info", ")", "return", "new_router", "except", "Exception", ":", "with", "excutils", ".", "save_and_reraise_exception", "(", ")", ":", "LOG", ".", "error", "(", "_LE", "(", "\"Error Adding subnet %(subnet)s to \"", "\"router %(router_id)s on Arista HW\"", ")", ",", "{", "'subnet'", ":", "subnet", ",", "'router_id'", ":", "router_id", "}", ")", "super", "(", "AristaL3ServicePlugin", ",", "self", ")", ".", "remove_router_interface", "(", "context", ",", "router_id", ",", "interface_info", ")" ]
Add a subnet of a network to an existing router.
[ "Add", "a", "subnet", "of", "a", "network", "to", "an", "existing", "router", "." ]
train
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/l3Plugin/l3_arista.py#L287-L331
0.000965
b3j0f/utils
b3j0f/utils/property.py
put_property
def put_property(elt, key, value, ttl=None, ctx=None): """Put properties in elt. :param elt: properties elt to put. Not None methods. :param number ttl: If not None, property time to leave. :param ctx: elt ctx from where put properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :param dict properties: properties to put in elt. elt and ttl are exclude. :return: Timer if ttl is not None. :rtype: Timer """ return put_properties(elt=elt, properties={key: value}, ttl=ttl, ctx=ctx)
python
def put_property(elt, key, value, ttl=None, ctx=None): """Put properties in elt. :param elt: properties elt to put. Not None methods. :param number ttl: If not None, property time to leave. :param ctx: elt ctx from where put properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :param dict properties: properties to put in elt. elt and ttl are exclude. :return: Timer if ttl is not None. :rtype: Timer """ return put_properties(elt=elt, properties={key: value}, ttl=ttl, ctx=ctx)
[ "def", "put_property", "(", "elt", ",", "key", ",", "value", ",", "ttl", "=", "None", ",", "ctx", "=", "None", ")", ":", "return", "put_properties", "(", "elt", "=", "elt", ",", "properties", "=", "{", "key", ":", "value", "}", ",", "ttl", "=", "ttl", ",", "ctx", "=", "ctx", ")" ]
Put properties in elt. :param elt: properties elt to put. Not None methods. :param number ttl: If not None, property time to leave. :param ctx: elt ctx from where put properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :param dict properties: properties to put in elt. elt and ttl are exclude. :return: Timer if ttl is not None. :rtype: Timer
[ "Put", "properties", "in", "elt", "." ]
train
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/property.py#L513-L526
0.001597
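A small sketch of `put_property` above; the target function and property name are made up, and the import path follows the record's file location.

from b3j0f.utils.property import put_property   # path assumed from the record

def handler():
    pass

# Attach a property to the function; because ttl is given, a Timer is returned
# and the property is removed again after five seconds.
timer = put_property(handler, 'route', '/status', ttl=5)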
gem/oq-engine
openquake/risklib/riskinput.py
CompositeRiskModel.get_dmg_csq
def get_dmg_csq(self, assets_by_site, gmf): """ :returns: an array of shape (A, L, 1, D + 1) with the number of buildings in each damage state for each asset and loss type """ A = sum(len(assets) for assets in assets_by_site) L = len(self.loss_types) D = len(self.damage_states) out = numpy.zeros((A, L, 1, D + 1), F32) for assets, gmv in zip(assets_by_site, gmf): group = group_array(assets, 'taxonomy') for taxonomy, assets in group.items(): for l, loss_type in enumerate(self.loss_types): fracs = self[taxonomy](loss_type, assets, [gmv]) for asset, frac in zip(assets, fracs): dmg = asset['number'] * frac[0, :D] csq = asset['value-' + loss_type] * frac[0, D] out[asset['ordinal'], l, 0, :D] = dmg out[asset['ordinal'], l, 0, D] = csq return out
python
def get_dmg_csq(self, assets_by_site, gmf): """ :returns: an array of shape (A, L, 1, D + 1) with the number of buildings in each damage state for each asset and loss type """ A = sum(len(assets) for assets in assets_by_site) L = len(self.loss_types) D = len(self.damage_states) out = numpy.zeros((A, L, 1, D + 1), F32) for assets, gmv in zip(assets_by_site, gmf): group = group_array(assets, 'taxonomy') for taxonomy, assets in group.items(): for l, loss_type in enumerate(self.loss_types): fracs = self[taxonomy](loss_type, assets, [gmv]) for asset, frac in zip(assets, fracs): dmg = asset['number'] * frac[0, :D] csq = asset['value-' + loss_type] * frac[0, D] out[asset['ordinal'], l, 0, :D] = dmg out[asset['ordinal'], l, 0, D] = csq return out
[ "def", "get_dmg_csq", "(", "self", ",", "assets_by_site", ",", "gmf", ")", ":", "A", "=", "sum", "(", "len", "(", "assets", ")", "for", "assets", "in", "assets_by_site", ")", "L", "=", "len", "(", "self", ".", "loss_types", ")", "D", "=", "len", "(", "self", ".", "damage_states", ")", "out", "=", "numpy", ".", "zeros", "(", "(", "A", ",", "L", ",", "1", ",", "D", "+", "1", ")", ",", "F32", ")", "for", "assets", ",", "gmv", "in", "zip", "(", "assets_by_site", ",", "gmf", ")", ":", "group", "=", "group_array", "(", "assets", ",", "'taxonomy'", ")", "for", "taxonomy", ",", "assets", "in", "group", ".", "items", "(", ")", ":", "for", "l", ",", "loss_type", "in", "enumerate", "(", "self", ".", "loss_types", ")", ":", "fracs", "=", "self", "[", "taxonomy", "]", "(", "loss_type", ",", "assets", ",", "[", "gmv", "]", ")", "for", "asset", ",", "frac", "in", "zip", "(", "assets", ",", "fracs", ")", ":", "dmg", "=", "asset", "[", "'number'", "]", "*", "frac", "[", "0", ",", ":", "D", "]", "csq", "=", "asset", "[", "'value-'", "+", "loss_type", "]", "*", "frac", "[", "0", ",", "D", "]", "out", "[", "asset", "[", "'ordinal'", "]", ",", "l", ",", "0", ",", ":", "D", "]", "=", "dmg", "out", "[", "asset", "[", "'ordinal'", "]", ",", "l", ",", "0", ",", "D", "]", "=", "csq", "return", "out" ]
:returns: an array of shape (A, L, 1, D + 1) with the number of buildings in each damage state for each asset and loss type
[ ":", "returns", ":", "an", "array", "of", "shape", "(", "A", "L", "1", "D", "+", "1", ")", "with", "the", "number", "of", "buildings", "in", "each", "damage", "state", "for", "each", "asset", "and", "loss", "type" ]
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/riskinput.py#L325-L345
0.002941
hfaran/piazza-api
piazza_api/network.py
Network.resolve_post
def resolve_post(self, post): """Mark post as resolved :type post: dict|str|int :param post: Either the post dict returned by another API method, or the `cid` field of that post. :returns: True if it is successful. False otherwise """ try: cid = post["id"] except KeyError: cid = post params = { "cid": cid, "resolved": "true" } return self._rpc.content_mark_resolved(params)
python
def resolve_post(self, post): """Mark post as resolved :type post: dict|str|int :param post: Either the post dict returned by another API method, or the `cid` field of that post. :returns: True if it is successful. False otherwise """ try: cid = post["id"] except KeyError: cid = post params = { "cid": cid, "resolved": "true" } return self._rpc.content_mark_resolved(params)
[ "def", "resolve_post", "(", "self", ",", "post", ")", ":", "try", ":", "cid", "=", "post", "[", "\"id\"", "]", "except", "KeyError", ":", "cid", "=", "post", "params", "=", "{", "\"cid\"", ":", "cid", ",", "\"resolved\"", ":", "\"true\"", "}", "return", "self", ".", "_rpc", ".", "content_mark_resolved", "(", "params", ")" ]
Mark post as resolved :type post: dict|str|int :param post: Either the post dict returned by another API method, or the `cid` field of that post. :returns: True if it is successful. False otherwise
[ "Mark", "post", "as", "resolved" ]
train
https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/network.py#L270-L288
0.003854
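An illustrative call to `resolve_post` above, assuming the usual piazza-api entry point (a `Piazza` object, `user_login`, then `network(<course id>)`); the credentials and ids are placeholders.

from piazza_api import Piazza

p = Piazza()
p.user_login(email="user@example.com", password="...")   # placeholder credentials
course = p.network("course_id_here")                      # placeholder network id

# Either the post dict from another API call or just its cid is accepted.
post = course.get_post(42)
course.resolve_post(post)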
sentinel-hub/sentinelhub-py
sentinelhub/io_utils.py
get_jp2_bit_depth
def get_jp2_bit_depth(stream): """Reads bit encoding depth of jpeg2000 file in binary stream format :param stream: binary stream format :type stream: Binary I/O (e.g. io.BytesIO, io.BufferedReader, ...) :return: bit depth :rtype: int """ stream.seek(0) while True: read_buffer = stream.read(8) if len(read_buffer) < 8: raise ValueError('Image Header Box not found in Jpeg2000 file') _, box_id = struct.unpack('>I4s', read_buffer) if box_id == b'ihdr': read_buffer = stream.read(14) params = struct.unpack('>IIHBBBB', read_buffer) return (params[3] & 0x7f) + 1
python
def get_jp2_bit_depth(stream): """Reads bit encoding depth of jpeg2000 file in binary stream format :param stream: binary stream format :type stream: Binary I/O (e.g. io.BytesIO, io.BufferedReader, ...) :return: bit depth :rtype: int """ stream.seek(0) while True: read_buffer = stream.read(8) if len(read_buffer) < 8: raise ValueError('Image Header Box not found in Jpeg2000 file') _, box_id = struct.unpack('>I4s', read_buffer) if box_id == b'ihdr': read_buffer = stream.read(14) params = struct.unpack('>IIHBBBB', read_buffer) return (params[3] & 0x7f) + 1
[ "def", "get_jp2_bit_depth", "(", "stream", ")", ":", "stream", ".", "seek", "(", "0", ")", "while", "True", ":", "read_buffer", "=", "stream", ".", "read", "(", "8", ")", "if", "len", "(", "read_buffer", ")", "<", "8", ":", "raise", "ValueError", "(", "'Image Header Box not found in Jpeg2000 file'", ")", "_", ",", "box_id", "=", "struct", ".", "unpack", "(", "'>I4s'", ",", "read_buffer", ")", "if", "box_id", "==", "b'ihdr'", ":", "read_buffer", "=", "stream", ".", "read", "(", "14", ")", "params", "=", "struct", ".", "unpack", "(", "'>IIHBBBB'", ",", "read_buffer", ")", "return", "(", "params", "[", "3", "]", "&", "0x7f", ")", "+", "1" ]
Reads bit encoding depth of jpeg2000 file in binary stream format :param stream: binary stream format :type stream: Binary I/O (e.g. io.BytesIO, io.BufferedReader, ...) :return: bit depth :rtype: int
[ "Reads", "bit", "encoding", "depth", "of", "jpeg2000", "file", "in", "binary", "stream", "format" ]
train
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/io_utils.py#L322-L341
0.001484
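A minimal sketch for `get_jp2_bit_depth` above. The import path follows the record's file location, the file name is a placeholder, and an opened binary file is passed directly since the docstring accepts any binary stream.

from sentinelhub.io_utils import get_jp2_bit_depth   # import path assumed from the record

with open("B04.jp2", "rb") as jp2_file:               # placeholder JPEG2000 file
    depth = get_jp2_bit_depth(jp2_file)
print(depth)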
Dentosal/python-sc2
sc2/unit.py
Unit.research
def research(self, upgrade, *args, **kwargs): """ Requires UpgradeId to be passed instead of AbilityId """ return self(self._game_data.upgrades[upgrade.value].research_ability.id, *args, **kwargs)
python
def research(self, upgrade, *args, **kwargs): """ Requires UpgradeId to be passed instead of AbilityId """ return self(self._game_data.upgrades[upgrade.value].research_ability.id, *args, **kwargs)
[ "def", "research", "(", "self", ",", "upgrade", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", "(", "self", ".", "_game_data", ".", "upgrades", "[", "upgrade", ".", "value", "]", ".", "research_ability", ".", "id", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Requires UpgradeId to be passed instead of AbilityId
[ "Requires", "UpgradeId", "to", "be", "passed", "instead", "of", "AbilityId" ]
train
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/unit.py#L575-L577
0.014151
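A hedged in-bot sketch for `Unit.research` above, assuming the standard python-sc2 `BotAI` step loop; the structure and upgrade choices are purely illustrative.

import sc2
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId

class UpgradeBot(sc2.BotAI):
    async def on_step(self, iteration):
        # research() is called with an UpgradeId (not an AbilityId), exactly as
        # the method above expects; it resolves the research ability internally.
        for forge in self.units(UnitTypeId.FORGE).ready.noqueue:
            if self.can_afford(UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1):
                await self.do(forge.research(UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1))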
jobovy/galpy
galpy/potential/Potential.py
turn_physical_off
def turn_physical_off(Pot): """ NAME: turn_physical_off PURPOSE: turn off automatic returning of outputs in physical units INPUT: (none) OUTPUT: (none) HISTORY: 2016-01-30 - Written - Bovy (UofT) """ if isinstance(Pot,list): for pot in Pot: turn_physical_off(pot) else: Pot.turn_physical_off() return None
python
def turn_physical_off(Pot): """ NAME: turn_physical_off PURPOSE: turn off automatic returning of outputs in physical units INPUT: (none) OUTPUT: (none) HISTORY: 2016-01-30 - Written - Bovy (UofT) """ if isinstance(Pot,list): for pot in Pot: turn_physical_off(pot) else: Pot.turn_physical_off() return None
[ "def", "turn_physical_off", "(", "Pot", ")", ":", "if", "isinstance", "(", "Pot", ",", "list", ")", ":", "for", "pot", "in", "Pot", ":", "turn_physical_off", "(", "pot", ")", "else", ":", "Pot", ".", "turn_physical_off", "(", ")", "return", "None" ]
NAME: turn_physical_off PURPOSE: turn off automatic returning of outputs in physical units INPUT: (none) OUTPUT: (none) HISTORY: 2016-01-30 - Written - Bovy (UofT)
[ "NAME", ":", "turn_physical_off" ]
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L2872-L2900
0.007109
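A short sketch for `turn_physical_off` above; `MWPotential2014` is the list of Potential instances bundled with galpy, so the call also exercises the recursive list branch.

from galpy.potential import MWPotential2014, turn_physical_off

# MWPotential2014 is a plain list of Potential objects, so the function recurses
# over it and switches every component to returning internal (natural) units.
turn_physical_off(MWPotential2014)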
RiotGames/cloud-inquisitor
plugins/public/cinq-collector-aws/cinq_collector_aws/region.py
AWSRegionCollector.update_snapshots
def update_snapshots(self): """Update list of EBS Snapshots for the account / region Returns: `None` """ self.log.debug('Updating EBSSnapshots for {}/{}'.format(self.account.account_name, self.region)) ec2 = self.session.resource('ec2', region_name=self.region) try: existing_snapshots = EBSSnapshot.get_all(self.account, self.region) snapshots = {x.id: x for x in ec2.snapshots.filter(OwnerIds=[self.account.account_number])} for data in list(snapshots.values()): if data.id in existing_snapshots: snapshot = existing_snapshots[data.id] if snapshot.update(data): self.log.debug('Change detected for EBSSnapshot {}/{}/{}'.format( self.account.account_name, self.region, snapshot.resource.resource_id )) else: properties = { 'create_time': data.start_time, 'encrypted': data.encrypted, 'kms_key_id': data.kms_key_id, 'state': data.state, 'state_message': data.state_message, 'volume_id': data.volume_id, 'volume_size': data.volume_size, } tags = {t['Key']: t['Value'] for t in data.tags or {}} snapshot = EBSSnapshot.create( data.id, account_id=self.account.account_id, location=self.region, properties=properties, tags=tags ) self.log.debug('Added new EBSSnapshot {}/{}/{}'.format( self.account.account_name, self.region, snapshot.resource.resource_id )) db.session.commit() vk = set(list(snapshots.keys())) evk = set(list(existing_snapshots.keys())) try: for snapshotID in evk - vk: db.session.delete(existing_snapshots[snapshotID].resource) self.log.debug('Deleted EBSSnapshot {}/{}/{}'.format( self.account.account_name, self.region, snapshotID )) db.session.commit() except: self.log.exception('Failed removing deleted snapshots') db.session.rollback() finally: del ec2
python
def update_snapshots(self): """Update list of EBS Snapshots for the account / region Returns: `None` """ self.log.debug('Updating EBSSnapshots for {}/{}'.format(self.account.account_name, self.region)) ec2 = self.session.resource('ec2', region_name=self.region) try: existing_snapshots = EBSSnapshot.get_all(self.account, self.region) snapshots = {x.id: x for x in ec2.snapshots.filter(OwnerIds=[self.account.account_number])} for data in list(snapshots.values()): if data.id in existing_snapshots: snapshot = existing_snapshots[data.id] if snapshot.update(data): self.log.debug('Change detected for EBSSnapshot {}/{}/{}'.format( self.account.account_name, self.region, snapshot.resource.resource_id )) else: properties = { 'create_time': data.start_time, 'encrypted': data.encrypted, 'kms_key_id': data.kms_key_id, 'state': data.state, 'state_message': data.state_message, 'volume_id': data.volume_id, 'volume_size': data.volume_size, } tags = {t['Key']: t['Value'] for t in data.tags or {}} snapshot = EBSSnapshot.create( data.id, account_id=self.account.account_id, location=self.region, properties=properties, tags=tags ) self.log.debug('Added new EBSSnapshot {}/{}/{}'.format( self.account.account_name, self.region, snapshot.resource.resource_id )) db.session.commit() vk = set(list(snapshots.keys())) evk = set(list(existing_snapshots.keys())) try: for snapshotID in evk - vk: db.session.delete(existing_snapshots[snapshotID].resource) self.log.debug('Deleted EBSSnapshot {}/{}/{}'.format( self.account.account_name, self.region, snapshotID )) db.session.commit() except: self.log.exception('Failed removing deleted snapshots') db.session.rollback() finally: del ec2
[ "def", "update_snapshots", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'Updating EBSSnapshots for {}/{}'", ".", "format", "(", "self", ".", "account", ".", "account_name", ",", "self", ".", "region", ")", ")", "ec2", "=", "self", ".", "session", ".", "resource", "(", "'ec2'", ",", "region_name", "=", "self", ".", "region", ")", "try", ":", "existing_snapshots", "=", "EBSSnapshot", ".", "get_all", "(", "self", ".", "account", ",", "self", ".", "region", ")", "snapshots", "=", "{", "x", ".", "id", ":", "x", "for", "x", "in", "ec2", ".", "snapshots", ".", "filter", "(", "OwnerIds", "=", "[", "self", ".", "account", ".", "account_number", "]", ")", "}", "for", "data", "in", "list", "(", "snapshots", ".", "values", "(", ")", ")", ":", "if", "data", ".", "id", "in", "existing_snapshots", ":", "snapshot", "=", "existing_snapshots", "[", "data", ".", "id", "]", "if", "snapshot", ".", "update", "(", "data", ")", ":", "self", ".", "log", ".", "debug", "(", "'Change detected for EBSSnapshot {}/{}/{}'", ".", "format", "(", "self", ".", "account", ".", "account_name", ",", "self", ".", "region", ",", "snapshot", ".", "resource", ".", "resource_id", ")", ")", "else", ":", "properties", "=", "{", "'create_time'", ":", "data", ".", "start_time", ",", "'encrypted'", ":", "data", ".", "encrypted", ",", "'kms_key_id'", ":", "data", ".", "kms_key_id", ",", "'state'", ":", "data", ".", "state", ",", "'state_message'", ":", "data", ".", "state_message", ",", "'volume_id'", ":", "data", ".", "volume_id", ",", "'volume_size'", ":", "data", ".", "volume_size", ",", "}", "tags", "=", "{", "t", "[", "'Key'", "]", ":", "t", "[", "'Value'", "]", "for", "t", "in", "data", ".", "tags", "or", "{", "}", "}", "snapshot", "=", "EBSSnapshot", ".", "create", "(", "data", ".", "id", ",", "account_id", "=", "self", ".", "account", ".", "account_id", ",", "location", "=", "self", ".", "region", ",", "properties", "=", "properties", ",", "tags", "=", "tags", ")", "self", ".", "log", ".", "debug", "(", "'Added new EBSSnapshot {}/{}/{}'", ".", "format", "(", "self", ".", "account", ".", "account_name", ",", "self", ".", "region", ",", "snapshot", ".", "resource", ".", "resource_id", ")", ")", "db", ".", "session", ".", "commit", "(", ")", "vk", "=", "set", "(", "list", "(", "snapshots", ".", "keys", "(", ")", ")", ")", "evk", "=", "set", "(", "list", "(", "existing_snapshots", ".", "keys", "(", ")", ")", ")", "try", ":", "for", "snapshotID", "in", "evk", "-", "vk", ":", "db", ".", "session", ".", "delete", "(", "existing_snapshots", "[", "snapshotID", "]", ".", "resource", ")", "self", ".", "log", ".", "debug", "(", "'Deleted EBSSnapshot {}/{}/{}'", ".", "format", "(", "self", ".", "account", ".", "account_name", ",", "self", ".", "region", ",", "snapshotID", ")", ")", "db", ".", "session", ".", "commit", "(", ")", "except", ":", "self", ".", "log", ".", "exception", "(", "'Failed removing deleted snapshots'", ")", "db", ".", "session", ".", "rollback", "(", ")", "finally", ":", "del", "ec2" ]
Update list of EBS Snapshots for the account / region Returns: `None`
[ "Update", "list", "of", "EBS", "Snapshots", "for", "the", "account", "/", "region" ]
train
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-collector-aws/cinq_collector_aws/region.py#L307-L374
0.002175
tsileo/globster
globster.py
Globster.is_pattern_valid
def is_pattern_valid(pattern): """Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern """ result = True translator = Globster.pattern_info[Globster.identify(pattern)]["translator"] tpattern = '(%s)' % translator(pattern) try: re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE) re_obj.search("") # force compile except Exception as e: result = False return result
python
def is_pattern_valid(pattern): """Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern """ result = True translator = Globster.pattern_info[Globster.identify(pattern)]["translator"] tpattern = '(%s)' % translator(pattern) try: re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE) re_obj.search("") # force compile except Exception as e: result = False return result
[ "def", "is_pattern_valid", "(", "pattern", ")", ":", "result", "=", "True", "translator", "=", "Globster", ".", "pattern_info", "[", "Globster", ".", "identify", "(", "pattern", ")", "]", "[", "\"translator\"", "]", "tpattern", "=", "'(%s)'", "%", "translator", "(", "pattern", ")", "try", ":", "re_obj", "=", "lazy_regex", ".", "lazy_compile", "(", "tpattern", ",", "re", ".", "UNICODE", ")", "re_obj", ".", "search", "(", "\"\"", ")", "# force compile", "except", "Exception", "as", "e", ":", "result", "=", "False", "return", "result" ]
Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern
[ "Returns", "True", "if", "pattern", "is", "valid", "." ]
train
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/globster.py#L292-L307
0.006734
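An illustrative check with `is_pattern_valid` above. The signature takes only the pattern, so it is called on the class here; the `RE:` prefix for raw regex patterns follows the bzr-style convention this library derives from and is an assumption.

from globster import Globster

print(Globster.is_pattern_valid('*.py'))          # True: the translated regex compiles
# Assuming the bzr-style 'RE:' prefix marks a raw regular expression, an
# unbalanced group makes compilation fail and the check returns False.
print(Globster.is_pattern_valid('RE:(unclosed'))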
tomislater/RandomWords
random_words/random_words.py
Random.load_nicknames
def load_nicknames(self, file): """ Load dict from file for random nicknames. :param str file: filename """ with open(os.path.join(main_dir, file + '.dat'), 'r') as f: self.nicknames = json.load(f)
python
def load_nicknames(self, file): """ Load dict from file for random nicknames. :param str file: filename """ with open(os.path.join(main_dir, file + '.dat'), 'r') as f: self.nicknames = json.load(f)
[ "def", "load_nicknames", "(", "self", ",", "file", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "main_dir", ",", "file", "+", "'.dat'", ")", ",", "'r'", ")", "as", "f", ":", "self", ".", "nicknames", "=", "json", ".", "load", "(", "f", ")" ]
Load dict from file for random nicknames. :param str file: filename
[ "Load", "dict", "from", "file", "for", "random", "nicknames", "." ]
train
https://github.com/tomislater/RandomWords/blob/601aa48732d3c389f4c17ba0ed98ffe0e4821d78/random_words/random_words.py#L48-L55
0.008
tensorflow/tensor2tensor
tensor2tensor/utils/optimize.py
optimize
def optimize(loss, learning_rate, hparams, use_tpu=False, variables=None): """Minimize loss.""" loss = weight_decay_and_noise(loss, hparams, learning_rate) loss = tf.identity(loss, name="total_loss") if variables is None: variables = tf.trainable_variables() # Print trainable variables. log_variable_sizes(variables, verbose=hparams.summarize_vars) # Print non-trainable variables. non_trainable_variables = list( set(tf.global_variables()) - set(variables)) log_variable_sizes(non_trainable_variables, tag="Non-trainable variables", verbose=hparams.summarize_vars) if hparams.summarize_vars: summarize_variables(variables) # Summarize non-trainable variables as well summarize_variables(non_trainable_variables, tag="Non-trainable variables") diet_vars = [ v for v in tf.global_variables() if v.dtype == dtypes.float16_ref ] log_variable_sizes( diet_vars, "Diet Variables", verbose=hparams.summarize_vars) opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) if use_tpu: opt = tf.contrib.tpu.CrossShardOptimizer(opt) opt_summaries = [] if common_layers.should_generate_summaries(): tf.summary.scalar("learning_rate", learning_rate) opt_summaries.append("loss") if hparams.summarize_grads: tf.logging.info("Summarizing gradients") opt_summaries.extend( ["gradients", "gradient_norm", "global_gradient_norm"]) if hparams.clip_grad_norm: tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) if hparams.grad_noise_scale: tf.logging.info("Adding noise to gradients, noise scale: %0.5f", hparams.grad_noise_scale) train_op = tf.contrib.layers.optimize_loss( name="training", loss=loss, global_step=tf.train.get_or_create_global_step(), learning_rate=learning_rate, clip_gradients=hparams.clip_grad_norm or None, gradient_noise_scale=hparams.grad_noise_scale or None, optimizer=opt, summaries=opt_summaries, colocate_gradients_with_ops=True, variables=variables) return train_op
python
def optimize(loss, learning_rate, hparams, use_tpu=False, variables=None): """Minimize loss.""" loss = weight_decay_and_noise(loss, hparams, learning_rate) loss = tf.identity(loss, name="total_loss") if variables is None: variables = tf.trainable_variables() # Print trainable variables. log_variable_sizes(variables, verbose=hparams.summarize_vars) # Print non-trainable variables. non_trainable_variables = list( set(tf.global_variables()) - set(variables)) log_variable_sizes(non_trainable_variables, tag="Non-trainable variables", verbose=hparams.summarize_vars) if hparams.summarize_vars: summarize_variables(variables) # Summarize non-trainable variables as well summarize_variables(non_trainable_variables, tag="Non-trainable variables") diet_vars = [ v for v in tf.global_variables() if v.dtype == dtypes.float16_ref ] log_variable_sizes( diet_vars, "Diet Variables", verbose=hparams.summarize_vars) opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) if use_tpu: opt = tf.contrib.tpu.CrossShardOptimizer(opt) opt_summaries = [] if common_layers.should_generate_summaries(): tf.summary.scalar("learning_rate", learning_rate) opt_summaries.append("loss") if hparams.summarize_grads: tf.logging.info("Summarizing gradients") opt_summaries.extend( ["gradients", "gradient_norm", "global_gradient_norm"]) if hparams.clip_grad_norm: tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) if hparams.grad_noise_scale: tf.logging.info("Adding noise to gradients, noise scale: %0.5f", hparams.grad_noise_scale) train_op = tf.contrib.layers.optimize_loss( name="training", loss=loss, global_step=tf.train.get_or_create_global_step(), learning_rate=learning_rate, clip_gradients=hparams.clip_grad_norm or None, gradient_noise_scale=hparams.grad_noise_scale or None, optimizer=opt, summaries=opt_summaries, colocate_gradients_with_ops=True, variables=variables) return train_op
[ "def", "optimize", "(", "loss", ",", "learning_rate", ",", "hparams", ",", "use_tpu", "=", "False", ",", "variables", "=", "None", ")", ":", "loss", "=", "weight_decay_and_noise", "(", "loss", ",", "hparams", ",", "learning_rate", ")", "loss", "=", "tf", ".", "identity", "(", "loss", ",", "name", "=", "\"total_loss\"", ")", "if", "variables", "is", "None", ":", "variables", "=", "tf", ".", "trainable_variables", "(", ")", "# Print trainable variables.", "log_variable_sizes", "(", "variables", ",", "verbose", "=", "hparams", ".", "summarize_vars", ")", "# Print non-trainable variables.", "non_trainable_variables", "=", "list", "(", "set", "(", "tf", ".", "global_variables", "(", ")", ")", "-", "set", "(", "variables", ")", ")", "log_variable_sizes", "(", "non_trainable_variables", ",", "tag", "=", "\"Non-trainable variables\"", ",", "verbose", "=", "hparams", ".", "summarize_vars", ")", "if", "hparams", ".", "summarize_vars", ":", "summarize_variables", "(", "variables", ")", "# Summarize non-trainable variables as well", "summarize_variables", "(", "non_trainable_variables", ",", "tag", "=", "\"Non-trainable variables\"", ")", "diet_vars", "=", "[", "v", "for", "v", "in", "tf", ".", "global_variables", "(", ")", "if", "v", ".", "dtype", "==", "dtypes", ".", "float16_ref", "]", "log_variable_sizes", "(", "diet_vars", ",", "\"Diet Variables\"", ",", "verbose", "=", "hparams", ".", "summarize_vars", ")", "opt", "=", "ConditionalOptimizer", "(", "hparams", ".", "optimizer", ",", "learning_rate", ",", "hparams", ",", "use_tpu", ")", "if", "use_tpu", ":", "opt", "=", "tf", ".", "contrib", ".", "tpu", ".", "CrossShardOptimizer", "(", "opt", ")", "opt_summaries", "=", "[", "]", "if", "common_layers", ".", "should_generate_summaries", "(", ")", ":", "tf", ".", "summary", ".", "scalar", "(", "\"learning_rate\"", ",", "learning_rate", ")", "opt_summaries", ".", "append", "(", "\"loss\"", ")", "if", "hparams", ".", "summarize_grads", ":", "tf", ".", "logging", ".", "info", "(", "\"Summarizing gradients\"", ")", "opt_summaries", ".", "extend", "(", "[", "\"gradients\"", ",", "\"gradient_norm\"", ",", "\"global_gradient_norm\"", "]", ")", "if", "hparams", ".", "clip_grad_norm", ":", "tf", ".", "logging", ".", "info", "(", "\"Clipping gradients, norm: %0.5f\"", ",", "hparams", ".", "clip_grad_norm", ")", "if", "hparams", ".", "grad_noise_scale", ":", "tf", ".", "logging", ".", "info", "(", "\"Adding noise to gradients, noise scale: %0.5f\"", ",", "hparams", ".", "grad_noise_scale", ")", "train_op", "=", "tf", ".", "contrib", ".", "layers", ".", "optimize_loss", "(", "name", "=", "\"training\"", ",", "loss", "=", "loss", ",", "global_step", "=", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", ",", "learning_rate", "=", "learning_rate", ",", "clip_gradients", "=", "hparams", ".", "clip_grad_norm", "or", "None", ",", "gradient_noise_scale", "=", "hparams", ".", "grad_noise_scale", "or", "None", ",", "optimizer", "=", "opt", ",", "summaries", "=", "opt_summaries", ",", "colocate_gradients_with_ops", "=", "True", ",", "variables", "=", "variables", ")", "return", "train_op" ]
Minimize loss.
[ "Minimize", "loss", "." ]
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/optimize.py#L43-L94
0.011184
saltstack/salt
salt/modules/smf_service.py
status
def status(name, sig=None): ''' Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> ''' contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: cmd = '/usr/bin/svcs -H -o STATE {0}'.format(service) line = __salt__['cmd.run'](cmd, python_shell=False) results[service] = line == 'online' if contains_globbing: return results return results[name]
python
def status(name, sig=None): ''' Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> ''' contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: cmd = '/usr/bin/svcs -H -o STATE {0}'.format(service) line = __salt__['cmd.run'](cmd, python_shell=False) results[service] = line == 'online' if contains_globbing: return results return results[name]
[ "def", "status", "(", "name", ",", "sig", "=", "None", ")", ":", "contains_globbing", "=", "bool", "(", "re", ".", "search", "(", "r'\\*|\\?|\\[.+\\]'", ",", "name", ")", ")", "if", "contains_globbing", ":", "services", "=", "fnmatch", ".", "filter", "(", "get_all", "(", ")", ",", "name", ")", "else", ":", "services", "=", "[", "name", "]", "results", "=", "{", "}", "for", "service", "in", "services", ":", "cmd", "=", "'/usr/bin/svcs -H -o STATE {0}'", ".", "format", "(", "service", ")", "line", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "results", "[", "service", "]", "=", "line", "==", "'online'", "if", "contains_globbing", ":", "return", "results", "return", "results", "[", "name", "]" ]
Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name>
[ "Return", "the", "status", "for", "a", "service", ".", "If", "the", "name", "contains", "globbing", "a", "dict", "mapping", "service", "name", "to", "True", "/", "False", "values", "is", "returned", "." ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smf_service.py#L229-L264
0.000946
PaulHancock/Aegean
AegeanTools/regions.py
Region.vec2sky
def vec2sky(cls, vec, degrees=False): """ Convert [x,y,z] vectors into sky coordinates ra,dec Parameters ---------- vec : numpy.array Unit vectors as an array of (x,y,z) degrees Returns ------- sky : numpy.array Sky coordinates as an array of (ra,dec) See Also -------- :func:`AegeanTools.regions.Region.sky2vec` """ theta, phi = hp.vec2ang(vec) ra = phi dec = np.pi/2-theta if degrees: ra = np.degrees(ra) dec = np.degrees(dec) return cls.radec2sky(ra, dec)
python
def vec2sky(cls, vec, degrees=False): """ Convert [x,y,z] vectors into sky coordinates ra,dec Parameters ---------- vec : numpy.array Unit vectors as an array of (x,y,z) degrees Returns ------- sky : numpy.array Sky coordinates as an array of (ra,dec) See Also -------- :func:`AegeanTools.regions.Region.sky2vec` """ theta, phi = hp.vec2ang(vec) ra = phi dec = np.pi/2-theta if degrees: ra = np.degrees(ra) dec = np.degrees(dec) return cls.radec2sky(ra, dec)
[ "def", "vec2sky", "(", "cls", ",", "vec", ",", "degrees", "=", "False", ")", ":", "theta", ",", "phi", "=", "hp", ".", "vec2ang", "(", "vec", ")", "ra", "=", "phi", "dec", "=", "np", ".", "pi", "/", "2", "-", "theta", "if", "degrees", ":", "ra", "=", "np", ".", "degrees", "(", "ra", ")", "dec", "=", "np", ".", "degrees", "(", "dec", ")", "return", "cls", ".", "radec2sky", "(", "ra", ",", "dec", ")" ]
Convert [x,y,z] vectors into sky coordinates ra,dec Parameters ---------- vec : numpy.array Unit vectors as an array of (x,y,z) degrees Returns ------- sky : numpy.array Sky coordinates as an array of (ra,dec) See Also -------- :func:`AegeanTools.regions.Region.sky2vec`
[ "Convert", "[", "x", "y", "z", "]", "vectors", "into", "sky", "coordinates", "ra", "dec" ]
train
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/regions.py#L496-L523
0.003053
maxalbert/tohu
tohu/v2/custom_generator.py
add_new_init_method
def add_new_init_method(obj): """ Replace the existing obj.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do.. """ orig_init = obj.__init__ def new_init(self, *args, **kwargs): logger.debug(f"Initialising new {self}") # Call original __init__ function to ensure we pick up # any tohu generators that are defined there. orig_init(self, *args, **kwargs) # # Find field generator templates and attach spawned copies # field_gens_templates = find_field_generators(self) logger.debug(f'Found {len(field_gens_templates)} field generator template(s):') debug_print_dict(field_gens_templates) def find_orig_parent(dep_gen, origs): """ Find name and instance of the parent of the dependent generator `dep_gen` amongst the generators in `origs`. """ for parent_name, parent in origs.items(): if dep_gen.parent is parent: return parent_name, parent raise RuntimeError(f"Parent of dependent generator {dep_gen} not defined in the same custom generator") logger.debug('Spawning field generator templates...') origs = {} spawned = {} for name, gen in field_gens_templates.items(): if isinstance(gen, IndependentGenerator) and gen in origs.values(): logger.debug(f'Cloning generator {name}={gen} because it is an alias for an existing generator') gen = gen.clone() if isinstance(gen, IndependentGenerator): origs[name] = gen spawned[name] = gen._spawn() logger.debug(f'Spawning generator {gen}. New spawn: {spawned[name]}') elif isinstance(gen, DependentGenerator): orig_parent_name, orig_parent = find_orig_parent(gen, origs) new_parent = spawned[orig_parent_name] #spawned[name] = new_parent.clone() spawned[name] = gen._spawn_and_reattach_parent(new_parent) else: pass self.field_gens = spawned self.__dict__.update(self.field_gens) logger.debug(f'Field generators attached to custom generator instance:') debug_print_dict(self.field_gens) # # Add seed generator # self.seed_generator = SeedGenerator() # # Create class for the items produced by this generator # self.__class__.item_cls = make_item_class_for_custom_generator(self) obj.__init__ = new_init
python
def add_new_init_method(obj): """ Replace the existing obj.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do.. """ orig_init = obj.__init__ def new_init(self, *args, **kwargs): logger.debug(f"Initialising new {self}") # Call original __init__ function to ensure we pick up # any tohu generators that are defined there. orig_init(self, *args, **kwargs) # # Find field generator templates and attach spawned copies # field_gens_templates = find_field_generators(self) logger.debug(f'Found {len(field_gens_templates)} field generator template(s):') debug_print_dict(field_gens_templates) def find_orig_parent(dep_gen, origs): """ Find name and instance of the parent of the dependent generator `dep_gen` amongst the generators in `origs`. """ for parent_name, parent in origs.items(): if dep_gen.parent is parent: return parent_name, parent raise RuntimeError(f"Parent of dependent generator {dep_gen} not defined in the same custom generator") logger.debug('Spawning field generator templates...') origs = {} spawned = {} for name, gen in field_gens_templates.items(): if isinstance(gen, IndependentGenerator) and gen in origs.values(): logger.debug(f'Cloning generator {name}={gen} because it is an alias for an existing generator') gen = gen.clone() if isinstance(gen, IndependentGenerator): origs[name] = gen spawned[name] = gen._spawn() logger.debug(f'Spawning generator {gen}. New spawn: {spawned[name]}') elif isinstance(gen, DependentGenerator): orig_parent_name, orig_parent = find_orig_parent(gen, origs) new_parent = spawned[orig_parent_name] #spawned[name] = new_parent.clone() spawned[name] = gen._spawn_and_reattach_parent(new_parent) else: pass self.field_gens = spawned self.__dict__.update(self.field_gens) logger.debug(f'Field generators attached to custom generator instance:') debug_print_dict(self.field_gens) # # Add seed generator # self.seed_generator = SeedGenerator() # # Create class for the items produced by this generator # self.__class__.item_cls = make_item_class_for_custom_generator(self) obj.__init__ = new_init
[ "def", "add_new_init_method", "(", "obj", ")", ":", "orig_init", "=", "obj", ".", "__init__", "def", "new_init", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "f\"Initialising new {self}\"", ")", "# Call original __init__ function to ensure we pick up", "# any tohu generators that are defined there.", "orig_init", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "#", "# Find field generator templates and attach spawned copies", "#", "field_gens_templates", "=", "find_field_generators", "(", "self", ")", "logger", ".", "debug", "(", "f'Found {len(field_gens_templates)} field generator template(s):'", ")", "debug_print_dict", "(", "field_gens_templates", ")", "def", "find_orig_parent", "(", "dep_gen", ",", "origs", ")", ":", "\"\"\"\n Find name and instance of the parent of the dependent\n generator `dep_gen` amongst the generators in `origs`.\n \"\"\"", "for", "parent_name", ",", "parent", "in", "origs", ".", "items", "(", ")", ":", "if", "dep_gen", ".", "parent", "is", "parent", ":", "return", "parent_name", ",", "parent", "raise", "RuntimeError", "(", "f\"Parent of dependent generator {dep_gen} not defined in the same custom generator\"", ")", "logger", ".", "debug", "(", "'Spawning field generator templates...'", ")", "origs", "=", "{", "}", "spawned", "=", "{", "}", "for", "name", ",", "gen", "in", "field_gens_templates", ".", "items", "(", ")", ":", "if", "isinstance", "(", "gen", ",", "IndependentGenerator", ")", "and", "gen", "in", "origs", ".", "values", "(", ")", ":", "logger", ".", "debug", "(", "f'Cloning generator {name}={gen} because it is an alias for an existing generator'", ")", "gen", "=", "gen", ".", "clone", "(", ")", "if", "isinstance", "(", "gen", ",", "IndependentGenerator", ")", ":", "origs", "[", "name", "]", "=", "gen", "spawned", "[", "name", "]", "=", "gen", ".", "_spawn", "(", ")", "logger", ".", "debug", "(", "f'Spawning generator {gen}. New spawn: {spawned[name]}'", ")", "elif", "isinstance", "(", "gen", ",", "DependentGenerator", ")", ":", "orig_parent_name", ",", "orig_parent", "=", "find_orig_parent", "(", "gen", ",", "origs", ")", "new_parent", "=", "spawned", "[", "orig_parent_name", "]", "#spawned[name] = new_parent.clone()", "spawned", "[", "name", "]", "=", "gen", ".", "_spawn_and_reattach_parent", "(", "new_parent", ")", "else", ":", "pass", "self", ".", "field_gens", "=", "spawned", "self", ".", "__dict__", ".", "update", "(", "self", ".", "field_gens", ")", "logger", ".", "debug", "(", "f'Field generators attached to custom generator instance:'", ")", "debug_print_dict", "(", "self", ".", "field_gens", ")", "#", "# Add seed generator", "#", "self", ".", "seed_generator", "=", "SeedGenerator", "(", ")", "#", "# Create class for the items produced by this generator", "#", "self", ".", "__class__", ".", "item_cls", "=", "make_item_class_for_custom_generator", "(", "self", ")", "obj", ".", "__init__", "=", "new_init" ]
Replace the existing obj.__init__() method with a new one which calls the original one and in addition performs the following actions: (1) Finds all instances of tohu.BaseGenerator in the namespace and collects them in the dictionary `self.field_gens`. (2) ..to do..
[ "Replace", "the", "existing", "obj", ".", "__init__", "()", "method", "with", "a", "new", "one", "which", "calls", "the", "original", "one", "and", "in", "addition", "performs", "the", "following", "actions", ":" ]
train
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator.py#L118-L192
0.00283
gwastro/pycbc
pycbc/detector.py
Detector.optimal_orientation
def optimal_orientation(self, t_gps): """Return the optimal orientation in right ascension and declination for a given GPS time. Parameters ---------- t_gps: float Time in gps seconds Returns ------- ra: float Right ascension that is optimally oriented for the detector dec: float Declination that is optimally oriented for the detector """ ra = self.longitude + (self.gmst_estimate(t_gps) % (2.0*np.pi)) dec = self.latitude return ra, dec
python
def optimal_orientation(self, t_gps): """Return the optimal orientation in right ascension and declination for a given GPS time. Parameters ---------- t_gps: float Time in gps seconds Returns ------- ra: float Right ascension that is optimally oriented for the detector dec: float Declination that is optimally oriented for the detector """ ra = self.longitude + (self.gmst_estimate(t_gps) % (2.0*np.pi)) dec = self.latitude return ra, dec
[ "def", "optimal_orientation", "(", "self", ",", "t_gps", ")", ":", "ra", "=", "self", ".", "longitude", "+", "(", "self", ".", "gmst_estimate", "(", "t_gps", ")", "%", "(", "2.0", "*", "np", ".", "pi", ")", ")", "dec", "=", "self", ".", "latitude", "return", "ra", ",", "dec" ]
Return the optimal orientation in right ascension and declination for a given GPS time. Parameters ---------- t_gps: float Time in gps seconds Returns ------- ra: float Right ascension that is optimally oriented for the detector dec: float Declination that is optimally oriented for the detector
[ "Return", "the", "optimal", "orientation", "in", "right", "ascension", "and", "declination", "for", "a", "given", "GPS", "time", "." ]
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/detector.py#L258-L276
0.003425
riggsd/davies
davies/compass/__init__.py
Project.set_base_location
def set_base_location(self, location): """Configure the project's base location""" self.base_location = location self._utm_zone = location.zone self._utm_datum = location.datum self._utm_convergence = location.convergence
python
def set_base_location(self, location): """Configure the project's base location""" self.base_location = location self._utm_zone = location.zone self._utm_datum = location.datum self._utm_convergence = location.convergence
[ "def", "set_base_location", "(", "self", ",", "location", ")", ":", "self", ".", "base_location", "=", "location", "self", ".", "_utm_zone", "=", "location", ".", "zone", "self", ".", "_utm_datum", "=", "location", ".", "datum", "self", ".", "_utm_convergence", "=", "location", ".", "convergence" ]
Configure the project's base location
[ "Configure", "the", "project", "s", "base", "location" ]
train
https://github.com/riggsd/davies/blob/8566c626202a875947ad01c087300108c68d80b5/davies/compass/__init__.py#L404-L409
0.007663
cihai/cihai
cihai/conversion.py
parse_var
def parse_var(var): """ Returns a tuple consisting of a string and a tag, or None, if none is specified. """ bits = var.split("<", 1) if len(bits) < 2: tag = None else: tag = bits[1] return ucn_to_unicode(bits[0]), tag
python
def parse_var(var): """ Returns a tuple consisting of a string and a tag, or None, if none is specified. """ bits = var.split("<", 1) if len(bits) < 2: tag = None else: tag = bits[1] return ucn_to_unicode(bits[0]), tag
[ "def", "parse_var", "(", "var", ")", ":", "bits", "=", "var", ".", "split", "(", "\"<\"", ",", "1", ")", "if", "len", "(", "bits", ")", "<", "2", ":", "tag", "=", "None", "else", ":", "tag", "=", "bits", "[", "1", "]", "return", "ucn_to_unicode", "(", "bits", "[", "0", "]", ")", ",", "tag" ]
Returns a tuple consisting of a string and a tag, or None, if none is specified.
[ "Returns", "a", "tuple", "consisting", "of", "a", "string", "and", "a", "tag", "or", "None", "if", "none", "is", "specified", "." ]
train
https://github.com/cihai/cihai/blob/43b0c2931da18c1ef1ff1cdd71e4b1c5eca24a41/cihai/conversion.py#L255-L265
0.003759
kalefranz/auxlib
auxlib/decorators.py
memoizemethod
def memoizemethod(method): """ Decorator to cause a method to cache it's results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> class Foo (object): ... @memoizemethod ... def foo(self, x, y=0): ... print('running method with', x, y) ... return x + y + 3 ... >>> foo1 = Foo() >>> foo2 = Foo() >>> foo1.foo(10) running method with 10 0 13 >>> foo1.foo(10) 13 >>> foo2.foo(11, y=7) running method with 11 7 21 >>> foo2.foo(11) running method with 11 0 14 >>> foo2.foo(11, y=7) 21 >>> class Foo (object): ... def __init__(self, lower): ... self.lower = lower ... @memoizemethod ... def range_tuple(self, upper): ... print('running function') ... return tuple(i for i in range(self.lower, upper)) ... @memoizemethod ... def range_iter(self, upper): ... print('running function') ... return (i for i in range(self.lower, upper)) ... >>> foo = Foo(3) >>> foo.range_tuple(6) running function (3, 4, 5) >>> foo.range_tuple(7) running function (3, 4, 5, 6) >>> foo.range_tuple(6) (3, 4, 5) >>> foo.range_iter(6) Traceback (most recent call last): TypeError: Can't memoize a generator or non-hashable object! """ @wraps(method) def _wrapper(self, *args, **kwargs): # NOTE: a __dict__ check is performed here rather than using the # built-in hasattr function because hasattr will look up to an object's # class if the attr is not directly found in the object's dict. That's # bad for this if the class itself has a memoized classmethod for # example that has been called before the memoized instance method, # then the instance method will use the class's result cache, causing # its results to be globally stored rather than on a per instance # basis. if '_memoized_results' not in self.__dict__: self._memoized_results = {} memoized_results = self._memoized_results key = (method.__name__, args, tuple(sorted(kwargs.items()))) if key in memoized_results: return memoized_results[key] else: try: result = method(self, *args, **kwargs) except KeyError as e: if '__wrapped__' in str(e): result = None # is this the right thing to do? happened during py3 conversion else: raise if isinstance(result, GeneratorType) or not isinstance(result, Hashable): raise TypeError("Can't memoize a generator or non-hashable object!") return memoized_results.setdefault(key, result) return _wrapper
python
def memoizemethod(method): """ Decorator to cause a method to cache it's results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> class Foo (object): ... @memoizemethod ... def foo(self, x, y=0): ... print('running method with', x, y) ... return x + y + 3 ... >>> foo1 = Foo() >>> foo2 = Foo() >>> foo1.foo(10) running method with 10 0 13 >>> foo1.foo(10) 13 >>> foo2.foo(11, y=7) running method with 11 7 21 >>> foo2.foo(11) running method with 11 0 14 >>> foo2.foo(11, y=7) 21 >>> class Foo (object): ... def __init__(self, lower): ... self.lower = lower ... @memoizemethod ... def range_tuple(self, upper): ... print('running function') ... return tuple(i for i in range(self.lower, upper)) ... @memoizemethod ... def range_iter(self, upper): ... print('running function') ... return (i for i in range(self.lower, upper)) ... >>> foo = Foo(3) >>> foo.range_tuple(6) running function (3, 4, 5) >>> foo.range_tuple(7) running function (3, 4, 5, 6) >>> foo.range_tuple(6) (3, 4, 5) >>> foo.range_iter(6) Traceback (most recent call last): TypeError: Can't memoize a generator or non-hashable object! """ @wraps(method) def _wrapper(self, *args, **kwargs): # NOTE: a __dict__ check is performed here rather than using the # built-in hasattr function because hasattr will look up to an object's # class if the attr is not directly found in the object's dict. That's # bad for this if the class itself has a memoized classmethod for # example that has been called before the memoized instance method, # then the instance method will use the class's result cache, causing # its results to be globally stored rather than on a per instance # basis. if '_memoized_results' not in self.__dict__: self._memoized_results = {} memoized_results = self._memoized_results key = (method.__name__, args, tuple(sorted(kwargs.items()))) if key in memoized_results: return memoized_results[key] else: try: result = method(self, *args, **kwargs) except KeyError as e: if '__wrapped__' in str(e): result = None # is this the right thing to do? happened during py3 conversion else: raise if isinstance(result, GeneratorType) or not isinstance(result, Hashable): raise TypeError("Can't memoize a generator or non-hashable object!") return memoized_results.setdefault(key, result) return _wrapper
[ "def", "memoizemethod", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# NOTE: a __dict__ check is performed here rather than using the", "# built-in hasattr function because hasattr will look up to an object's", "# class if the attr is not directly found in the object's dict. That's", "# bad for this if the class itself has a memoized classmethod for", "# example that has been called before the memoized instance method,", "# then the instance method will use the class's result cache, causing", "# its results to be globally stored rather than on a per instance", "# basis.", "if", "'_memoized_results'", "not", "in", "self", ".", "__dict__", ":", "self", ".", "_memoized_results", "=", "{", "}", "memoized_results", "=", "self", ".", "_memoized_results", "key", "=", "(", "method", ".", "__name__", ",", "args", ",", "tuple", "(", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", ")", ")", "if", "key", "in", "memoized_results", ":", "return", "memoized_results", "[", "key", "]", "else", ":", "try", ":", "result", "=", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "KeyError", "as", "e", ":", "if", "'__wrapped__'", "in", "str", "(", "e", ")", ":", "result", "=", "None", "# is this the right thing to do? happened during py3 conversion", "else", ":", "raise", "if", "isinstance", "(", "result", ",", "GeneratorType", ")", "or", "not", "isinstance", "(", "result", ",", "Hashable", ")", ":", "raise", "TypeError", "(", "\"Can't memoize a generator or non-hashable object!\"", ")", "return", "memoized_results", ".", "setdefault", "(", "key", ",", "result", ")", "return", "_wrapper" ]
Decorator to cause a method to cache it's results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable. >>> class Foo (object): ... @memoizemethod ... def foo(self, x, y=0): ... print('running method with', x, y) ... return x + y + 3 ... >>> foo1 = Foo() >>> foo2 = Foo() >>> foo1.foo(10) running method with 10 0 13 >>> foo1.foo(10) 13 >>> foo2.foo(11, y=7) running method with 11 7 21 >>> foo2.foo(11) running method with 11 0 14 >>> foo2.foo(11, y=7) 21 >>> class Foo (object): ... def __init__(self, lower): ... self.lower = lower ... @memoizemethod ... def range_tuple(self, upper): ... print('running function') ... return tuple(i for i in range(self.lower, upper)) ... @memoizemethod ... def range_iter(self, upper): ... print('running function') ... return (i for i in range(self.lower, upper)) ... >>> foo = Foo(3) >>> foo.range_tuple(6) running function (3, 4, 5) >>> foo.range_tuple(7) running function (3, 4, 5, 6) >>> foo.range_tuple(6) (3, 4, 5) >>> foo.range_iter(6) Traceback (most recent call last): TypeError: Can't memoize a generator or non-hashable object!
[ "Decorator", "to", "cause", "a", "method", "to", "cache", "it", "s", "results", "in", "self", "for", "each", "combination", "of", "inputs", "and", "return", "the", "cached", "result", "on", "subsequent", "calls", ".", "Does", "not", "support", "named", "arguments", "or", "arg", "values", "that", "are", "not", "hashable", "." ]
train
https://github.com/kalefranz/auxlib/blob/6ff2d6b57d128d0b9ed8f01ad83572e938da064f/auxlib/decorators.py#L65-L147
0.001366
klmitch/requiem
requiem/client.py
RESTClient._make_req
def _make_req(self, method, url, methname, headers=None): """Create a request object for the specified method and url.""" # Build up headers hset = hdrs.HeaderDict() # Walk through our global headers for hdr, value in self._headers.items(): # If it's a callable, call it if callable(value): value = value(methname) else: # OK, just stringify it value = str(value) # If it's meaningful, attach it if value: hset[hdr] = value # Were headers passed in? if headers is not None: # Update from specified headers hset.update(headers) # Hook method to instantiate requests self._debug("Creating request %s.%s(%r, %r, headers=%r)", self._req_class.__module__, self._req_class.__name__, method, url, hset) return self._req_class(method, url, self._client, self._procstack, headers=hset, debug=self._debug)
python
def _make_req(self, method, url, methname, headers=None): """Create a request object for the specified method and url.""" # Build up headers hset = hdrs.HeaderDict() # Walk through our global headers for hdr, value in self._headers.items(): # If it's a callable, call it if callable(value): value = value(methname) else: # OK, just stringify it value = str(value) # If it's meaningful, attach it if value: hset[hdr] = value # Were headers passed in? if headers is not None: # Update from specified headers hset.update(headers) # Hook method to instantiate requests self._debug("Creating request %s.%s(%r, %r, headers=%r)", self._req_class.__module__, self._req_class.__name__, method, url, hset) return self._req_class(method, url, self._client, self._procstack, headers=hset, debug=self._debug)
[ "def", "_make_req", "(", "self", ",", "method", ",", "url", ",", "methname", ",", "headers", "=", "None", ")", ":", "# Build up headers", "hset", "=", "hdrs", ".", "HeaderDict", "(", ")", "# Walk through our global headers", "for", "hdr", ",", "value", "in", "self", ".", "_headers", ".", "items", "(", ")", ":", "# If it's a callable, call it", "if", "callable", "(", "value", ")", ":", "value", "=", "value", "(", "methname", ")", "else", ":", "# OK, just stringify it", "value", "=", "str", "(", "value", ")", "# If it's meaningful, attach it", "if", "value", ":", "hset", "[", "hdr", "]", "=", "value", "# Were headers passed in?", "if", "headers", "is", "not", "None", ":", "# Update from specified headers", "hset", ".", "update", "(", "headers", ")", "# Hook method to instantiate requests", "self", ".", "_debug", "(", "\"Creating request %s.%s(%r, %r, headers=%r)\"", ",", "self", ".", "_req_class", ".", "__module__", ",", "self", ".", "_req_class", ".", "__name__", ",", "method", ",", "url", ",", "hset", ")", "return", "self", ".", "_req_class", "(", "method", ",", "url", ",", "self", ".", "_client", ",", "self", ".", "_procstack", ",", "headers", "=", "hset", ",", "debug", "=", "self", ".", "_debug", ")" ]
Create a request object for the specified method and url.
[ "Create", "a", "request", "object", "for", "the", "specified", "method", "and", "url", "." ]
train
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/client.py#L110-L139
0.00182
pycontribs/jira
jira/client.py
JIRA.create_component
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None, isAssigneeTypeValid=False, ): """Create a component inside a project and return a Resource for it. :param name: name of the component :type name: str :param project: key of the project to create the component in :type project: str :param description: a description of the component :type description: str :param leadUserName: the username of the user responsible for this component :type leadUserName: Optional[str] :param assigneeType: see the ComponentBean.AssigneeType class for valid values :type assigneeType: Optional[str] :param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable (Default: False) :type isAssigneeTypeValid: bool :rtype: Component """ data = { 'name': name, 'project': project, 'isAssigneeTypeValid': isAssigneeTypeValid} if description is not None: data['description'] = description if leadUserName is not None: data['leadUserName'] = leadUserName if assigneeType is not None: data['assigneeType'] = assigneeType url = self._get_url('component') r = self._session.post( url, data=json.dumps(data)) component = Component(self._options, self._session, raw=json_loads(r)) return component
python
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None, isAssigneeTypeValid=False, ): """Create a component inside a project and return a Resource for it. :param name: name of the component :type name: str :param project: key of the project to create the component in :type project: str :param description: a description of the component :type description: str :param leadUserName: the username of the user responsible for this component :type leadUserName: Optional[str] :param assigneeType: see the ComponentBean.AssigneeType class for valid values :type assigneeType: Optional[str] :param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable (Default: False) :type isAssigneeTypeValid: bool :rtype: Component """ data = { 'name': name, 'project': project, 'isAssigneeTypeValid': isAssigneeTypeValid} if description is not None: data['description'] = description if leadUserName is not None: data['leadUserName'] = leadUserName if assigneeType is not None: data['assigneeType'] = assigneeType url = self._get_url('component') r = self._session.post( url, data=json.dumps(data)) component = Component(self._options, self._session, raw=json_loads(r)) return component
[ "def", "create_component", "(", "self", ",", "name", ",", "project", ",", "description", "=", "None", ",", "leadUserName", "=", "None", ",", "assigneeType", "=", "None", ",", "isAssigneeTypeValid", "=", "False", ",", ")", ":", "data", "=", "{", "'name'", ":", "name", ",", "'project'", ":", "project", ",", "'isAssigneeTypeValid'", ":", "isAssigneeTypeValid", "}", "if", "description", "is", "not", "None", ":", "data", "[", "'description'", "]", "=", "description", "if", "leadUserName", "is", "not", "None", ":", "data", "[", "'leadUserName'", "]", "=", "leadUserName", "if", "assigneeType", "is", "not", "None", ":", "data", "[", "'assigneeType'", "]", "=", "assigneeType", "url", "=", "self", ".", "_get_url", "(", "'component'", ")", "r", "=", "self", ".", "_session", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "component", "=", "Component", "(", "self", ".", "_options", ",", "self", ".", "_session", ",", "raw", "=", "json_loads", "(", "r", ")", ")", "return", "component" ]
Create a component inside a project and return a Resource for it. :param name: name of the component :type name: str :param project: key of the project to create the component in :type project: str :param description: a description of the component :type description: str :param leadUserName: the username of the user responsible for this component :type leadUserName: Optional[str] :param assigneeType: see the ComponentBean.AssigneeType class for valid values :type assigneeType: Optional[str] :param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable (Default: False) :type isAssigneeTypeValid: bool :rtype: Component
[ "Create", "a", "component", "inside", "a", "project", "and", "return", "a", "Resource", "for", "it", "." ]
train
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L917-L957
0.007134
networks-lab/metaknowledge
metaknowledge/mkCollection.py
CollectionWithIDs.dropBadEntries
def dropBadEntries(self): """Removes all the bad entries from the collection """ self._collection = set((i for i in self if not i.bad)) self.bad = False self.errors = {}
python
def dropBadEntries(self): """Removes all the bad entries from the collection """ self._collection = set((i for i in self if not i.bad)) self.bad = False self.errors = {}
[ "def", "dropBadEntries", "(", "self", ")", ":", "self", ".", "_collection", "=", "set", "(", "(", "i", "for", "i", "in", "self", "if", "not", "i", ".", "bad", ")", ")", "self", ".", "bad", "=", "False", "self", ".", "errors", "=", "{", "}" ]
Removes all the bad entries from the collection
[ "Removes", "all", "the", "bad", "entries", "from", "the", "collection" ]
train
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L504-L509
0.009569
DataDog/integrations-core
process/datadog_checks/process/process.py
ProcessCheck._filter_by_user
def _filter_by_user(self, user, pids): """ Filter pids by it's username. :param user: string with name of system user :param pids: set of pids to filter :return: set of filtered pids """ filtered_pids = set() for pid in pids: try: proc = psutil.Process(pid) if proc.username() == user: self.log.debug("Collecting pid {} belonging to {}".format(pid, user)) filtered_pids.add(pid) else: self.log.debug("Discarding pid {} not belonging to {}".format(pid, user)) except psutil.NoSuchProcess: pass return filtered_pids
python
def _filter_by_user(self, user, pids): """ Filter pids by it's username. :param user: string with name of system user :param pids: set of pids to filter :return: set of filtered pids """ filtered_pids = set() for pid in pids: try: proc = psutil.Process(pid) if proc.username() == user: self.log.debug("Collecting pid {} belonging to {}".format(pid, user)) filtered_pids.add(pid) else: self.log.debug("Discarding pid {} not belonging to {}".format(pid, user)) except psutil.NoSuchProcess: pass return filtered_pids
[ "def", "_filter_by_user", "(", "self", ",", "user", ",", "pids", ")", ":", "filtered_pids", "=", "set", "(", ")", "for", "pid", "in", "pids", ":", "try", ":", "proc", "=", "psutil", ".", "Process", "(", "pid", ")", "if", "proc", ".", "username", "(", ")", "==", "user", ":", "self", ".", "log", ".", "debug", "(", "\"Collecting pid {} belonging to {}\"", ".", "format", "(", "pid", ",", "user", ")", ")", "filtered_pids", ".", "add", "(", "pid", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Discarding pid {} not belonging to {}\"", ".", "format", "(", "pid", ",", "user", ")", ")", "except", "psutil", ".", "NoSuchProcess", ":", "pass", "return", "filtered_pids" ]
Filter pids by it's username. :param user: string with name of system user :param pids: set of pids to filter :return: set of filtered pids
[ "Filter", "pids", "by", "it", "s", "username", ".", ":", "param", "user", ":", "string", "with", "name", "of", "system", "user", ":", "param", "pids", ":", "set", "of", "pids", "to", "filter", ":", "return", ":", "set", "of", "filtered", "pids" ]
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/process/datadog_checks/process/process.py#L472-L491
0.00545
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/lib/ANUGA/geo_reference.py
Geo_reference.change_points_geo_ref
def change_points_geo_ref(self, points, points_geo_ref=None): """Change the geo reference of a list or numeric array of points to be this reference.(The reference used for this object) If the points do not have a geo ref, assume 'absolute' values """ import copy # remember if we got a list is_list = isinstance(points, list) points = ensure_numeric(points, num.float) # sanity checks if len(points.shape) == 1: #One point has been passed msg = 'Single point must have two elements' assert len(points) == 2, msg points = num.reshape(points, (1,2)) msg = 'Points array must be two dimensional.\n' msg += 'I got %d dimensions' %len(points.shape) assert len(points.shape) == 2, msg msg = 'Input must be an N x 2 array or list of (x,y) values. ' msg += 'I got an %d x %d array' %points.shape assert points.shape[1] == 2, msg # FIXME (Ole): Could also check if zone, xllcorner, yllcorner # are identical in the two geo refs. if points_geo_ref is not self: # If georeferences are different points = copy.copy(points) # Don't destroy input if not points_geo_ref is None: # Convert points to absolute coordinates points[:,0] += points_geo_ref.xllcorner points[:,1] += points_geo_ref.yllcorner # Make points relative to primary geo reference points[:,0] -= self.xllcorner points[:,1] -= self.yllcorner if is_list: points = points.tolist() return points
python
def change_points_geo_ref(self, points, points_geo_ref=None): """Change the geo reference of a list or numeric array of points to be this reference.(The reference used for this object) If the points do not have a geo ref, assume 'absolute' values """ import copy # remember if we got a list is_list = isinstance(points, list) points = ensure_numeric(points, num.float) # sanity checks if len(points.shape) == 1: #One point has been passed msg = 'Single point must have two elements' assert len(points) == 2, msg points = num.reshape(points, (1,2)) msg = 'Points array must be two dimensional.\n' msg += 'I got %d dimensions' %len(points.shape) assert len(points.shape) == 2, msg msg = 'Input must be an N x 2 array or list of (x,y) values. ' msg += 'I got an %d x %d array' %points.shape assert points.shape[1] == 2, msg # FIXME (Ole): Could also check if zone, xllcorner, yllcorner # are identical in the two geo refs. if points_geo_ref is not self: # If georeferences are different points = copy.copy(points) # Don't destroy input if not points_geo_ref is None: # Convert points to absolute coordinates points[:,0] += points_geo_ref.xllcorner points[:,1] += points_geo_ref.yllcorner # Make points relative to primary geo reference points[:,0] -= self.xllcorner points[:,1] -= self.yllcorner if is_list: points = points.tolist() return points
[ "def", "change_points_geo_ref", "(", "self", ",", "points", ",", "points_geo_ref", "=", "None", ")", ":", "import", "copy", "# remember if we got a list", "is_list", "=", "isinstance", "(", "points", ",", "list", ")", "points", "=", "ensure_numeric", "(", "points", ",", "num", ".", "float", ")", "# sanity checks", "if", "len", "(", "points", ".", "shape", ")", "==", "1", ":", "#One point has been passed", "msg", "=", "'Single point must have two elements'", "assert", "len", "(", "points", ")", "==", "2", ",", "msg", "points", "=", "num", ".", "reshape", "(", "points", ",", "(", "1", ",", "2", ")", ")", "msg", "=", "'Points array must be two dimensional.\\n'", "msg", "+=", "'I got %d dimensions'", "%", "len", "(", "points", ".", "shape", ")", "assert", "len", "(", "points", ".", "shape", ")", "==", "2", ",", "msg", "msg", "=", "'Input must be an N x 2 array or list of (x,y) values. '", "msg", "+=", "'I got an %d x %d array'", "%", "points", ".", "shape", "assert", "points", ".", "shape", "[", "1", "]", "==", "2", ",", "msg", "# FIXME (Ole): Could also check if zone, xllcorner, yllcorner", "# are identical in the two geo refs.", "if", "points_geo_ref", "is", "not", "self", ":", "# If georeferences are different", "points", "=", "copy", ".", "copy", "(", "points", ")", "# Don't destroy input", "if", "not", "points_geo_ref", "is", "None", ":", "# Convert points to absolute coordinates", "points", "[", ":", ",", "0", "]", "+=", "points_geo_ref", ".", "xllcorner", "points", "[", ":", ",", "1", "]", "+=", "points_geo_ref", ".", "yllcorner", "# Make points relative to primary geo reference", "points", "[", ":", ",", "0", "]", "-=", "self", ".", "xllcorner", "points", "[", ":", ",", "1", "]", "-=", "self", ".", "yllcorner", "if", "is_list", ":", "points", "=", "points", ".", "tolist", "(", ")", "return", "points" ]
Change the geo reference of a list or numeric array of points to be this reference.(The reference used for this object) If the points do not have a geo ref, assume 'absolute' values
[ "Change", "the", "geo", "reference", "of", "a", "list", "or", "numeric", "array", "of", "points", "to", "be", "this", "reference", ".", "(", "The", "reference", "used", "for", "this", "object", ")", "If", "the", "points", "do", "not", "have", "a", "geo", "ref", "assume", "absolute", "values" ]
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/lib/ANUGA/geo_reference.py#L229-L273
0.007059
pybel/pybel
src/pybel/io/nodelink.py
to_jsons
def to_jsons(graph: BELGraph, **kwargs) -> str: """Dump this graph as a Node-Link JSON object to a string.""" graph_json_str = to_json(graph) return json.dumps(graph_json_str, ensure_ascii=False, **kwargs)
python
def to_jsons(graph: BELGraph, **kwargs) -> str: """Dump this graph as a Node-Link JSON object to a string.""" graph_json_str = to_json(graph) return json.dumps(graph_json_str, ensure_ascii=False, **kwargs)
[ "def", "to_jsons", "(", "graph", ":", "BELGraph", ",", "*", "*", "kwargs", ")", "->", "str", ":", "graph_json_str", "=", "to_json", "(", "graph", ")", "return", "json", ".", "dumps", "(", "graph_json_str", ",", "ensure_ascii", "=", "False", ",", "*", "*", "kwargs", ")" ]
Dump this graph as a Node-Link JSON object to a string.
[ "Dump", "this", "graph", "as", "a", "Node", "-", "Link", "JSON", "object", "to", "a", "string", "." ]
train
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/io/nodelink.py#L59-L62
0.004608
coinbase/coinbase-python
coinbase/wallet/client.py
Client.commit_sell
def commit_sell(self, account_id, sell_id, **params): """https://developers.coinbase.com/api/v2#commit-a-sell""" response = self._post( 'v2', 'accounts', account_id, 'sells', sell_id, 'commit', data=params) return self._make_api_object(response, Sell)
python
def commit_sell(self, account_id, sell_id, **params): """https://developers.coinbase.com/api/v2#commit-a-sell""" response = self._post( 'v2', 'accounts', account_id, 'sells', sell_id, 'commit', data=params) return self._make_api_object(response, Sell)
[ "def", "commit_sell", "(", "self", ",", "account_id", ",", "sell_id", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_post", "(", "'v2'", ",", "'accounts'", ",", "account_id", ",", "'sells'", ",", "sell_id", ",", "'commit'", ",", "data", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "response", ",", "Sell", ")" ]
https://developers.coinbase.com/api/v2#commit-a-sell
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#commit", "-", "a", "-", "sell" ]
train
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L479-L483
0.010453
rkhleics/wagtailmodeladmin
wagtailmodeladmin/options.py
ModelAdmin.edit_view
def edit_view(self, request, object_id): """ Instantiates a class-based view to provide 'edit' functionality for the assigned model, or redirect to Wagtail's edit view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'edit_view_class' attribute. """ kwargs = {'model_admin': self, 'object_id': object_id} view_class = self.edit_view_class return view_class.as_view(**kwargs)(request)
python
def edit_view(self, request, object_id): """ Instantiates a class-based view to provide 'edit' functionality for the assigned model, or redirect to Wagtail's edit view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'edit_view_class' attribute. """ kwargs = {'model_admin': self, 'object_id': object_id} view_class = self.edit_view_class return view_class.as_view(**kwargs)(request)
[ "def", "edit_view", "(", "self", ",", "request", ",", "object_id", ")", ":", "kwargs", "=", "{", "'model_admin'", ":", "self", ",", "'object_id'", ":", "object_id", "}", "view_class", "=", "self", ".", "edit_view_class", "return", "view_class", ".", "as_view", "(", "*", "*", "kwargs", ")", "(", "request", ")" ]
Instantiates a class-based view to provide 'edit' functionality for the assigned model, or redirect to Wagtail's edit view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'edit_view_class' attribute.
[ "Instantiates", "a", "class", "-", "based", "view", "to", "provide", "edit", "functionality", "for", "the", "assigned", "model", "or", "redirect", "to", "Wagtail", "s", "edit", "view", "if", "the", "assigned", "model", "extends", "Page", ".", "The", "view", "class", "used", "can", "be", "overridden", "by", "changing", "the", "edit_view_class", "attribute", "." ]
train
https://github.com/rkhleics/wagtailmodeladmin/blob/7fddc853bab2ff3868b8c7a03329308c55f16358/wagtailmodeladmin/options.py#L354-L363
0.004008
angr/angr
angr/state_plugins/concrete.py
Concrete.sync
def sync(self): """ Handle the switch between the concrete execution and angr. This method takes care of: 1- Synchronize registers. 2- Set a concrete target to the memory backer so the memory reads are redirected in the concrete process memory. 3- If possible restore the SimProcedures with the real addresses inside the concrete process. 4- Set an inspect point to sync the segments register as soon as they are read during the symbolic execution. 5- Flush all the pages loaded until now. :return: """ def _sync_segments(state): """ Segment registers synchronization is on demand as soon as the symbolic execution access a segment register. """ concr_target = state.project.concrete_target if isinstance(state.arch, ArchAMD64): state.project.simos.initialize_segment_register_x64(state, concr_target) elif isinstance(state.arch, ArchX86): gdt = state.project.simos.initialize_gdt_x86(state, concr_target) state.concrete.whitelist.append((gdt.addr, gdt.addr + gdt.limit)) state.inspect.remove_breakpoint('reg_read', bp=state.concrete.fs_register_bp) state.concrete.segment_registers_initialized = True state.concrete.fs_register_bp = None l.debug("Sync the state with the concrete memory inside the Concrete plugin") target = self.state.project.concrete_target # Setting a concrete memory backend self.state.memory.mem._memory_backer.set_concrete_target(target) # Sync angr registers with the one getting from the concrete target # registers that we don't want to concretize. l.debug("Synchronizing general purpose registers") to_sync_register = list(filter(lambda x: x.concrete, self.state.arch.register_list)) for register in to_sync_register: # before let's sync all the subregisters of the current register. # sometimes this can be helpful ( i.e. ymmm0 e xmm0 ) if register.subregisters: subregisters_names = map(lambda x: x[0], register.subregisters) self._sync_registers(subregisters_names, target) # finally let's synchronize the whole register self._sync_registers([register.name], target) if self.synchronize_cle: self._sync_cle(target) # Synchronize the imported functions addresses (.got, IAT) in the # concrete process with ones used in the SimProcedures dictionary if self.state.project._should_use_sim_procedures and not self.state.project.loader.main_object.pic: l.debug("Restoring SimProc using concrete memory") for reloc in self.state.project.loader.main_object.relocs: if reloc.symbol: # consider only reloc with a symbol l.debug("Trying to re-hook SimProc %s", reloc.symbol.name) # l.debug("reloc.rebased_addr: %#x " % reloc.rebased_addr) func_address = target.read_memory(reloc.rebased_addr, self.state.project.arch.bits / 8) func_address = struct.unpack(self.state.project.arch.struct_fmt(), func_address)[0] l.debug("Function address hook is now: %#x ", func_address) self.state.project.rehook_symbol(func_address, reloc.symbol.name) if self.synchronize_cle and not self.state.project.loader.main_object.contains_addr(func_address): old_func_symbol = self.state.project.loader.find_symbol(reloc.symbol.name) if old_func_symbol: # if we actually have a symbol owner_obj = old_func_symbol.owner # calculating the new real address new_relative_address = func_address - owner_obj.mapped_base new_func_symbol = cle.backends.Symbol(owner_obj, old_func_symbol.name, new_relative_address, old_func_symbol.size, old_func_symbol.type) for new_reloc in self.state.project.loader.find_relevant_relocations(old_func_symbol.name): if new_reloc.symbol.name == new_func_symbol.name and \ new_reloc.value != new_func_symbol.rebased_addr: l.debug("Updating CLE symbols metadata, moving %s from 0x%x to 0x%x", new_reloc.symbol.name, new_reloc.value, new_func_symbol.rebased_addr) new_reloc.resolve(new_func_symbol) new_reloc.relocate([]) else: l.debug("SimProc not restored, you are going to simulate also the code of external libraries!") # flush the angr memory in order to synchronize them with the content of the # concrete process memory when a read/write to the page is performed self.state.memory.flush_pages(self.whitelist) l.info("Exiting SimEngineConcrete: simulated address %x concrete address %x ", self.state.addr, target.read_register("pc")) # now we have to register a SimInspect in order to synchronize the segments register # on demand when the symbolic execution accesses it if not self.segment_registers_callback_initialized: segment_register_name = self.state.project.simos.get_segment_register_name() if segment_register_name: self.fs_register_bp = self.state.inspect.b('reg_read', reg_read_offset=segment_register_name, action=_sync_segments) self.segment_registers_callback_initialized = True l.debug("Set SimInspect breakpoint to the new state!") else: l.error("Can't set breakpoint to synchronize segments registers, horrible things will happen.")
python
def sync(self): """ Handle the switch between the concrete execution and angr. This method takes care of: 1- Synchronize registers. 2- Set a concrete target to the memory backer so the memory reads are redirected in the concrete process memory. 3- If possible restore the SimProcedures with the real addresses inside the concrete process. 4- Set an inspect point to sync the segments register as soon as they are read during the symbolic execution. 5- Flush all the pages loaded until now. :return: """ def _sync_segments(state): """ Segment registers synchronization is on demand as soon as the symbolic execution access a segment register. """ concr_target = state.project.concrete_target if isinstance(state.arch, ArchAMD64): state.project.simos.initialize_segment_register_x64(state, concr_target) elif isinstance(state.arch, ArchX86): gdt = state.project.simos.initialize_gdt_x86(state, concr_target) state.concrete.whitelist.append((gdt.addr, gdt.addr + gdt.limit)) state.inspect.remove_breakpoint('reg_read', bp=state.concrete.fs_register_bp) state.concrete.segment_registers_initialized = True state.concrete.fs_register_bp = None l.debug("Sync the state with the concrete memory inside the Concrete plugin") target = self.state.project.concrete_target # Setting a concrete memory backend self.state.memory.mem._memory_backer.set_concrete_target(target) # Sync angr registers with the one getting from the concrete target # registers that we don't want to concretize. l.debug("Synchronizing general purpose registers") to_sync_register = list(filter(lambda x: x.concrete, self.state.arch.register_list)) for register in to_sync_register: # before let's sync all the subregisters of the current register. # sometimes this can be helpful ( i.e. ymmm0 e xmm0 ) if register.subregisters: subregisters_names = map(lambda x: x[0], register.subregisters) self._sync_registers(subregisters_names, target) # finally let's synchronize the whole register self._sync_registers([register.name], target) if self.synchronize_cle: self._sync_cle(target) # Synchronize the imported functions addresses (.got, IAT) in the # concrete process with ones used in the SimProcedures dictionary if self.state.project._should_use_sim_procedures and not self.state.project.loader.main_object.pic: l.debug("Restoring SimProc using concrete memory") for reloc in self.state.project.loader.main_object.relocs: if reloc.symbol: # consider only reloc with a symbol l.debug("Trying to re-hook SimProc %s", reloc.symbol.name) # l.debug("reloc.rebased_addr: %#x " % reloc.rebased_addr) func_address = target.read_memory(reloc.rebased_addr, self.state.project.arch.bits / 8) func_address = struct.unpack(self.state.project.arch.struct_fmt(), func_address)[0] l.debug("Function address hook is now: %#x ", func_address) self.state.project.rehook_symbol(func_address, reloc.symbol.name) if self.synchronize_cle and not self.state.project.loader.main_object.contains_addr(func_address): old_func_symbol = self.state.project.loader.find_symbol(reloc.symbol.name) if old_func_symbol: # if we actually have a symbol owner_obj = old_func_symbol.owner # calculating the new real address new_relative_address = func_address - owner_obj.mapped_base new_func_symbol = cle.backends.Symbol(owner_obj, old_func_symbol.name, new_relative_address, old_func_symbol.size, old_func_symbol.type) for new_reloc in self.state.project.loader.find_relevant_relocations(old_func_symbol.name): if new_reloc.symbol.name == new_func_symbol.name and \ new_reloc.value != new_func_symbol.rebased_addr: l.debug("Updating CLE symbols metadata, moving %s from 0x%x to 0x%x", new_reloc.symbol.name, new_reloc.value, new_func_symbol.rebased_addr) new_reloc.resolve(new_func_symbol) new_reloc.relocate([]) else: l.debug("SimProc not restored, you are going to simulate also the code of external libraries!") # flush the angr memory in order to synchronize them with the content of the # concrete process memory when a read/write to the page is performed self.state.memory.flush_pages(self.whitelist) l.info("Exiting SimEngineConcrete: simulated address %x concrete address %x ", self.state.addr, target.read_register("pc")) # now we have to register a SimInspect in order to synchronize the segments register # on demand when the symbolic execution accesses it if not self.segment_registers_callback_initialized: segment_register_name = self.state.project.simos.get_segment_register_name() if segment_register_name: self.fs_register_bp = self.state.inspect.b('reg_read', reg_read_offset=segment_register_name, action=_sync_segments) self.segment_registers_callback_initialized = True l.debug("Set SimInspect breakpoint to the new state!") else: l.error("Can't set breakpoint to synchronize segments registers, horrible things will happen.")
[ "def", "sync", "(", "self", ")", ":", "def", "_sync_segments", "(", "state", ")", ":", "\"\"\"\n Segment registers synchronization is on demand as soon as the\n symbolic execution access a segment register.\n \"\"\"", "concr_target", "=", "state", ".", "project", ".", "concrete_target", "if", "isinstance", "(", "state", ".", "arch", ",", "ArchAMD64", ")", ":", "state", ".", "project", ".", "simos", ".", "initialize_segment_register_x64", "(", "state", ",", "concr_target", ")", "elif", "isinstance", "(", "state", ".", "arch", ",", "ArchX86", ")", ":", "gdt", "=", "state", ".", "project", ".", "simos", ".", "initialize_gdt_x86", "(", "state", ",", "concr_target", ")", "state", ".", "concrete", ".", "whitelist", ".", "append", "(", "(", "gdt", ".", "addr", ",", "gdt", ".", "addr", "+", "gdt", ".", "limit", ")", ")", "state", ".", "inspect", ".", "remove_breakpoint", "(", "'reg_read'", ",", "bp", "=", "state", ".", "concrete", ".", "fs_register_bp", ")", "state", ".", "concrete", ".", "segment_registers_initialized", "=", "True", "state", ".", "concrete", ".", "fs_register_bp", "=", "None", "l", ".", "debug", "(", "\"Sync the state with the concrete memory inside the Concrete plugin\"", ")", "target", "=", "self", ".", "state", ".", "project", ".", "concrete_target", "# Setting a concrete memory backend", "self", ".", "state", ".", "memory", ".", "mem", ".", "_memory_backer", ".", "set_concrete_target", "(", "target", ")", "# Sync angr registers with the one getting from the concrete target", "# registers that we don't want to concretize.", "l", ".", "debug", "(", "\"Synchronizing general purpose registers\"", ")", "to_sync_register", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", ".", "concrete", ",", "self", ".", "state", ".", "arch", ".", "register_list", ")", ")", "for", "register", "in", "to_sync_register", ":", "# before let's sync all the subregisters of the current register.", "# sometimes this can be helpful ( i.e. ymmm0 e xmm0 )", "if", "register", ".", "subregisters", ":", "subregisters_names", "=", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "register", ".", "subregisters", ")", "self", ".", "_sync_registers", "(", "subregisters_names", ",", "target", ")", "# finally let's synchronize the whole register", "self", ".", "_sync_registers", "(", "[", "register", ".", "name", "]", ",", "target", ")", "if", "self", ".", "synchronize_cle", ":", "self", ".", "_sync_cle", "(", "target", ")", "# Synchronize the imported functions addresses (.got, IAT) in the", "# concrete process with ones used in the SimProcedures dictionary", "if", "self", ".", "state", ".", "project", ".", "_should_use_sim_procedures", "and", "not", "self", ".", "state", ".", "project", ".", "loader", ".", "main_object", ".", "pic", ":", "l", ".", "debug", "(", "\"Restoring SimProc using concrete memory\"", ")", "for", "reloc", "in", "self", ".", "state", ".", "project", ".", "loader", ".", "main_object", ".", "relocs", ":", "if", "reloc", ".", "symbol", ":", "# consider only reloc with a symbol", "l", ".", "debug", "(", "\"Trying to re-hook SimProc %s\"", ",", "reloc", ".", "symbol", ".", "name", ")", "# l.debug(\"reloc.rebased_addr: %#x \" % reloc.rebased_addr)", "func_address", "=", "target", ".", "read_memory", "(", "reloc", ".", "rebased_addr", ",", "self", ".", "state", ".", "project", ".", "arch", ".", "bits", "/", "8", ")", "func_address", "=", "struct", ".", "unpack", "(", "self", ".", "state", ".", "project", ".", "arch", ".", "struct_fmt", "(", ")", ",", "func_address", ")", "[", "0", "]", "l", ".", "debug", "(", "\"Function address hook is now: %#x \"", ",", "func_address", ")", "self", ".", "state", ".", "project", ".", "rehook_symbol", "(", "func_address", ",", "reloc", ".", "symbol", ".", "name", ")", "if", "self", ".", "synchronize_cle", "and", "not", "self", ".", "state", ".", "project", ".", "loader", ".", "main_object", ".", "contains_addr", "(", "func_address", ")", ":", "old_func_symbol", "=", "self", ".", "state", ".", "project", ".", "loader", ".", "find_symbol", "(", "reloc", ".", "symbol", ".", "name", ")", "if", "old_func_symbol", ":", "# if we actually have a symbol", "owner_obj", "=", "old_func_symbol", ".", "owner", "# calculating the new real address", "new_relative_address", "=", "func_address", "-", "owner_obj", ".", "mapped_base", "new_func_symbol", "=", "cle", ".", "backends", ".", "Symbol", "(", "owner_obj", ",", "old_func_symbol", ".", "name", ",", "new_relative_address", ",", "old_func_symbol", ".", "size", ",", "old_func_symbol", ".", "type", ")", "for", "new_reloc", "in", "self", ".", "state", ".", "project", ".", "loader", ".", "find_relevant_relocations", "(", "old_func_symbol", ".", "name", ")", ":", "if", "new_reloc", ".", "symbol", ".", "name", "==", "new_func_symbol", ".", "name", "and", "new_reloc", ".", "value", "!=", "new_func_symbol", ".", "rebased_addr", ":", "l", ".", "debug", "(", "\"Updating CLE symbols metadata, moving %s from 0x%x to 0x%x\"", ",", "new_reloc", ".", "symbol", ".", "name", ",", "new_reloc", ".", "value", ",", "new_func_symbol", ".", "rebased_addr", ")", "new_reloc", ".", "resolve", "(", "new_func_symbol", ")", "new_reloc", ".", "relocate", "(", "[", "]", ")", "else", ":", "l", ".", "debug", "(", "\"SimProc not restored, you are going to simulate also the code of external libraries!\"", ")", "# flush the angr memory in order to synchronize them with the content of the", "# concrete process memory when a read/write to the page is performed", "self", ".", "state", ".", "memory", ".", "flush_pages", "(", "self", ".", "whitelist", ")", "l", ".", "info", "(", "\"Exiting SimEngineConcrete: simulated address %x concrete address %x \"", ",", "self", ".", "state", ".", "addr", ",", "target", ".", "read_register", "(", "\"pc\"", ")", ")", "# now we have to register a SimInspect in order to synchronize the segments register", "# on demand when the symbolic execution accesses it", "if", "not", "self", ".", "segment_registers_callback_initialized", ":", "segment_register_name", "=", "self", ".", "state", ".", "project", ".", "simos", ".", "get_segment_register_name", "(", ")", "if", "segment_register_name", ":", "self", ".", "fs_register_bp", "=", "self", ".", "state", ".", "inspect", ".", "b", "(", "'reg_read'", ",", "reg_read_offset", "=", "segment_register_name", ",", "action", "=", "_sync_segments", ")", "self", ".", "segment_registers_callback_initialized", "=", "True", "l", ".", "debug", "(", "\"Set SimInspect breakpoint to the new state!\"", ")", "else", ":", "l", ".", "error", "(", "\"Can't set breakpoint to synchronize segments registers, horrible things will happen.\"", ")" ]
Handle the switch between the concrete execution and angr. This method takes care of: 1- Synchronize registers. 2- Set a concrete target to the memory backer so the memory reads are redirected in the concrete process memory. 3- If possible restore the SimProcedures with the real addresses inside the concrete process. 4- Set an inspect point to sync the segments register as soon as they are read during the symbolic execution. 5- Flush all the pages loaded until now. :return:
[ "Handle", "the", "switch", "between", "the", "concrete", "execution", "and", "angr", ".", "This", "method", "takes", "care", "of", ":", "1", "-", "Synchronize", "registers", ".", "2", "-", "Set", "a", "concrete", "target", "to", "the", "memory", "backer", "so", "the", "memory", "reads", "are", "redirected", "in", "the", "concrete", "process", "memory", ".", "3", "-", "If", "possible", "restore", "the", "SimProcedures", "with", "the", "real", "addresses", "inside", "the", "concrete", "process", ".", "4", "-", "Set", "an", "inspect", "point", "to", "sync", "the", "segments", "register", "as", "soon", "as", "they", "are", "read", "during", "the", "symbolic", "execution", ".", "5", "-", "Flush", "all", "the", "pages", "loaded", "until", "now", "." ]
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/concrete.py#L60-L178
0.005059
inasafe/inasafe
safe/gui/tools/shake_grid/shake_grid.py
ShakeGrid.mmi_to_contours
def mmi_to_contours(self, force_flag=True, algorithm=USE_ASCII): """Extract contours from the event's tif file. Contours are extracted at a 0.5 MMI interval. The resulting file will be saved in the extract directory. In the easiest use case you can :param force_flag: (Optional). Whether to force the regeneration of contour product. Defaults to False. :type force_flag: bool :param algorithm: (Optional) Which interpolation algorithm to use to create the underlying raster. Defaults to 'nearest'. :type algorithm: str **Only enforced if theForceFlag is true!** :returns: An absolute filesystem path pointing to the generated contour dataset. :exception: ContourCreationError simply do:: shake_grid = ShakeGrid() contour_path = shake_grid.mmi_to_contours() which will return the contour dataset for the latest event on the ftp server. """ LOGGER.debug('mmi_to_contours requested.') # TODO: Use sqlite rather? output_file_base = os.path.join( self.output_dir, '%s-contours-%s.' % (self.output_basename, algorithm)) output_file = output_file_base + 'shp' if os.path.exists(output_file) and force_flag is not True: return output_file elif os.path.exists(output_file): try: os.remove(output_file_base + 'shp') os.remove(output_file_base + 'shx') os.remove(output_file_base + 'dbf') os.remove(output_file_base + 'prj') except OSError: LOGGER.exception( 'Old contour files not deleted' ' - this may indicate a file permissions issue.') tif_path = self.mmi_to_raster(force_flag, algorithm) # Based largely on # http://svn.osgeo.org/gdal/trunk/autotest/alg/contour.py driver = ogr.GetDriverByName('ESRI Shapefile') ogr_dataset = driver.CreateDataSource(output_file) if ogr_dataset is None: # Probably the file existed and could not be overriden raise ContourCreationError( 'Could not create datasource for:\n%s. 
Check that the file ' 'does not already exist and that you do not have file system ' 'permissions issues' % output_file) layer = ogr_dataset.CreateLayer('contour') field_definition = ogr.FieldDefn('ID', ogr.OFTInteger) layer.CreateField(field_definition) field_definition = ogr.FieldDefn('MMI', ogr.OFTReal) layer.CreateField(field_definition) # So we can fix the x pos to the same x coord as centroid of the # feature so labels line up nicely vertically field_definition = ogr.FieldDefn('X', ogr.OFTReal) layer.CreateField(field_definition) # So we can fix the y pos to the min y coord of the whole contour so # labels line up nicely vertically field_definition = ogr.FieldDefn('Y', ogr.OFTReal) layer.CreateField(field_definition) # So that we can set the html hex colour based on its MMI class field_definition = ogr.FieldDefn('RGB', ogr.OFTString) layer.CreateField(field_definition) # So that we can set the label in it roman numeral form field_definition = ogr.FieldDefn('ROMAN', ogr.OFTString) layer.CreateField(field_definition) # So that we can set the label horizontal alignment field_definition = ogr.FieldDefn('ALIGN', ogr.OFTString) layer.CreateField(field_definition) # So that we can set the label vertical alignment field_definition = ogr.FieldDefn('VALIGN', ogr.OFTString) layer.CreateField(field_definition) # So that we can set feature length to filter out small features field_definition = ogr.FieldDefn('LEN', ogr.OFTReal) layer.CreateField(field_definition) tif_dataset = gdal.Open(tif_path, GA_ReadOnly) # see http://gdal.org/java/org/gdal/gdal/gdal.html for these options band = 1 contour_interval = 0.5 contour_base = 0 fixed_level_list = [] use_no_data_flag = 0 no_data_value = -9999 id_field = 0 # first field defined above elevation_field = 1 # second (MMI) field defined above try: gdal.ContourGenerate( tif_dataset.GetRasterBand(band), contour_interval, contour_base, fixed_level_list, use_no_data_flag, no_data_value, layer, id_field, elevation_field) except Exception as e: LOGGER.exception('Contour creation failed') raise ContourCreationError(str(e)) finally: del tif_dataset ogr_dataset.Release() # Copy over the standard .prj file since ContourGenerate does not # create a projection definition projection_path = os.path.join( self.output_dir, '%s-contours-%s.prj' % (self.output_basename, algorithm)) source_projection_path = resources_path( 'converter_data', 'mmi-contours.prj') shutil.copyfile(source_projection_path, projection_path) # Lastly copy over the standard qml (QGIS Style file) qml_path = os.path.join( self.output_dir, '%s-contours-%s.qml' % (self.output_basename, algorithm)) source_qml_path = resources_path('converter_data', 'mmi-contours.qml') shutil.copyfile(source_qml_path, qml_path) # Now update the additional columns - X,Y, ROMAN and RGB try: set_contour_properties(output_file) except InvalidLayerError: raise return output_file
python
def mmi_to_contours(self, force_flag=True, algorithm=USE_ASCII): """Extract contours from the event's tif file. Contours are extracted at a 0.5 MMI interval. The resulting file will be saved in the extract directory. In the easiest use case you can :param force_flag: (Optional). Whether to force the regeneration of contour product. Defaults to False. :type force_flag: bool :param algorithm: (Optional) Which interpolation algorithm to use to create the underlying raster. Defaults to 'nearest'. :type algorithm: str **Only enforced if theForceFlag is true!** :returns: An absolute filesystem path pointing to the generated contour dataset. :exception: ContourCreationError simply do:: shake_grid = ShakeGrid() contour_path = shake_grid.mmi_to_contours() which will return the contour dataset for the latest event on the ftp server. """ LOGGER.debug('mmi_to_contours requested.') # TODO: Use sqlite rather? output_file_base = os.path.join( self.output_dir, '%s-contours-%s.' % (self.output_basename, algorithm)) output_file = output_file_base + 'shp' if os.path.exists(output_file) and force_flag is not True: return output_file elif os.path.exists(output_file): try: os.remove(output_file_base + 'shp') os.remove(output_file_base + 'shx') os.remove(output_file_base + 'dbf') os.remove(output_file_base + 'prj') except OSError: LOGGER.exception( 'Old contour files not deleted' ' - this may indicate a file permissions issue.') tif_path = self.mmi_to_raster(force_flag, algorithm) # Based largely on # http://svn.osgeo.org/gdal/trunk/autotest/alg/contour.py driver = ogr.GetDriverByName('ESRI Shapefile') ogr_dataset = driver.CreateDataSource(output_file) if ogr_dataset is None: # Probably the file existed and could not be overriden raise ContourCreationError( 'Could not create datasource for:\n%s. 
Check that the file ' 'does not already exist and that you do not have file system ' 'permissions issues' % output_file) layer = ogr_dataset.CreateLayer('contour') field_definition = ogr.FieldDefn('ID', ogr.OFTInteger) layer.CreateField(field_definition) field_definition = ogr.FieldDefn('MMI', ogr.OFTReal) layer.CreateField(field_definition) # So we can fix the x pos to the same x coord as centroid of the # feature so labels line up nicely vertically field_definition = ogr.FieldDefn('X', ogr.OFTReal) layer.CreateField(field_definition) # So we can fix the y pos to the min y coord of the whole contour so # labels line up nicely vertically field_definition = ogr.FieldDefn('Y', ogr.OFTReal) layer.CreateField(field_definition) # So that we can set the html hex colour based on its MMI class field_definition = ogr.FieldDefn('RGB', ogr.OFTString) layer.CreateField(field_definition) # So that we can set the label in it roman numeral form field_definition = ogr.FieldDefn('ROMAN', ogr.OFTString) layer.CreateField(field_definition) # So that we can set the label horizontal alignment field_definition = ogr.FieldDefn('ALIGN', ogr.OFTString) layer.CreateField(field_definition) # So that we can set the label vertical alignment field_definition = ogr.FieldDefn('VALIGN', ogr.OFTString) layer.CreateField(field_definition) # So that we can set feature length to filter out small features field_definition = ogr.FieldDefn('LEN', ogr.OFTReal) layer.CreateField(field_definition) tif_dataset = gdal.Open(tif_path, GA_ReadOnly) # see http://gdal.org/java/org/gdal/gdal/gdal.html for these options band = 1 contour_interval = 0.5 contour_base = 0 fixed_level_list = [] use_no_data_flag = 0 no_data_value = -9999 id_field = 0 # first field defined above elevation_field = 1 # second (MMI) field defined above try: gdal.ContourGenerate( tif_dataset.GetRasterBand(band), contour_interval, contour_base, fixed_level_list, use_no_data_flag, no_data_value, layer, id_field, elevation_field) except Exception as e: LOGGER.exception('Contour creation failed') raise ContourCreationError(str(e)) finally: del tif_dataset ogr_dataset.Release() # Copy over the standard .prj file since ContourGenerate does not # create a projection definition projection_path = os.path.join( self.output_dir, '%s-contours-%s.prj' % (self.output_basename, algorithm)) source_projection_path = resources_path( 'converter_data', 'mmi-contours.prj') shutil.copyfile(source_projection_path, projection_path) # Lastly copy over the standard qml (QGIS Style file) qml_path = os.path.join( self.output_dir, '%s-contours-%s.qml' % (self.output_basename, algorithm)) source_qml_path = resources_path('converter_data', 'mmi-contours.qml') shutil.copyfile(source_qml_path, qml_path) # Now update the additional columns - X,Y, ROMAN and RGB try: set_contour_properties(output_file) except InvalidLayerError: raise return output_file
[ "def", "mmi_to_contours", "(", "self", ",", "force_flag", "=", "True", ",", "algorithm", "=", "USE_ASCII", ")", ":", "LOGGER", ".", "debug", "(", "'mmi_to_contours requested.'", ")", "# TODO: Use sqlite rather?", "output_file_base", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_dir", ",", "'%s-contours-%s.'", "%", "(", "self", ".", "output_basename", ",", "algorithm", ")", ")", "output_file", "=", "output_file_base", "+", "'shp'", "if", "os", ".", "path", ".", "exists", "(", "output_file", ")", "and", "force_flag", "is", "not", "True", ":", "return", "output_file", "elif", "os", ".", "path", ".", "exists", "(", "output_file", ")", ":", "try", ":", "os", ".", "remove", "(", "output_file_base", "+", "'shp'", ")", "os", ".", "remove", "(", "output_file_base", "+", "'shx'", ")", "os", ".", "remove", "(", "output_file_base", "+", "'dbf'", ")", "os", ".", "remove", "(", "output_file_base", "+", "'prj'", ")", "except", "OSError", ":", "LOGGER", ".", "exception", "(", "'Old contour files not deleted'", "' - this may indicate a file permissions issue.'", ")", "tif_path", "=", "self", ".", "mmi_to_raster", "(", "force_flag", ",", "algorithm", ")", "# Based largely on", "# http://svn.osgeo.org/gdal/trunk/autotest/alg/contour.py", "driver", "=", "ogr", ".", "GetDriverByName", "(", "'ESRI Shapefile'", ")", "ogr_dataset", "=", "driver", ".", "CreateDataSource", "(", "output_file", ")", "if", "ogr_dataset", "is", "None", ":", "# Probably the file existed and could not be overriden", "raise", "ContourCreationError", "(", "'Could not create datasource for:\\n%s. Check that the file '", "'does not already exist and that you do not have file system '", "'permissions issues'", "%", "output_file", ")", "layer", "=", "ogr_dataset", ".", "CreateLayer", "(", "'contour'", ")", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'ID'", ",", "ogr", ".", "OFTInteger", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'MMI'", ",", "ogr", ".", "OFTReal", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So we can fix the x pos to the same x coord as centroid of the", "# feature so labels line up nicely vertically", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'X'", ",", "ogr", ".", "OFTReal", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So we can fix the y pos to the min y coord of the whole contour so", "# labels line up nicely vertically", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'Y'", ",", "ogr", ".", "OFTReal", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So that we can set the html hex colour based on its MMI class", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'RGB'", ",", "ogr", ".", "OFTString", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So that we can set the label in it roman numeral form", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'ROMAN'", ",", "ogr", ".", "OFTString", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So that we can set the label horizontal alignment", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'ALIGN'", ",", "ogr", ".", "OFTString", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So that we can set the label vertical alignment", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'VALIGN'", ",", "ogr", ".", "OFTString", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "# So that we can set feature 
length to filter out small features", "field_definition", "=", "ogr", ".", "FieldDefn", "(", "'LEN'", ",", "ogr", ".", "OFTReal", ")", "layer", ".", "CreateField", "(", "field_definition", ")", "tif_dataset", "=", "gdal", ".", "Open", "(", "tif_path", ",", "GA_ReadOnly", ")", "# see http://gdal.org/java/org/gdal/gdal/gdal.html for these options", "band", "=", "1", "contour_interval", "=", "0.5", "contour_base", "=", "0", "fixed_level_list", "=", "[", "]", "use_no_data_flag", "=", "0", "no_data_value", "=", "-", "9999", "id_field", "=", "0", "# first field defined above", "elevation_field", "=", "1", "# second (MMI) field defined above", "try", ":", "gdal", ".", "ContourGenerate", "(", "tif_dataset", ".", "GetRasterBand", "(", "band", ")", ",", "contour_interval", ",", "contour_base", ",", "fixed_level_list", ",", "use_no_data_flag", ",", "no_data_value", ",", "layer", ",", "id_field", ",", "elevation_field", ")", "except", "Exception", "as", "e", ":", "LOGGER", ".", "exception", "(", "'Contour creation failed'", ")", "raise", "ContourCreationError", "(", "str", "(", "e", ")", ")", "finally", ":", "del", "tif_dataset", "ogr_dataset", ".", "Release", "(", ")", "# Copy over the standard .prj file since ContourGenerate does not", "# create a projection definition", "projection_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_dir", ",", "'%s-contours-%s.prj'", "%", "(", "self", ".", "output_basename", ",", "algorithm", ")", ")", "source_projection_path", "=", "resources_path", "(", "'converter_data'", ",", "'mmi-contours.prj'", ")", "shutil", ".", "copyfile", "(", "source_projection_path", ",", "projection_path", ")", "# Lastly copy over the standard qml (QGIS Style file)", "qml_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_dir", ",", "'%s-contours-%s.qml'", "%", "(", "self", ".", "output_basename", ",", "algorithm", ")", ")", "source_qml_path", "=", "resources_path", "(", "'converter_data'", ",", "'mmi-contours.qml'", ")", "shutil", ".", "copyfile", "(", "source_qml_path", ",", "qml_path", ")", "# Now update the additional columns - X,Y, ROMAN and RGB", "try", ":", "set_contour_properties", "(", "output_file", ")", "except", "InvalidLayerError", ":", "raise", "return", "output_file" ]
Extract contours from the event's tif file. Contours are extracted at a 0.5 MMI interval. The resulting file will be saved in the extract directory. In the easiest use case you can :param force_flag: (Optional). Whether to force the regeneration of contour product. Defaults to False. :type force_flag: bool :param algorithm: (Optional) Which interpolation algorithm to use to create the underlying raster. Defaults to 'nearest'. :type algorithm: str **Only enforced if theForceFlag is true!** :returns: An absolute filesystem path pointing to the generated contour dataset. :exception: ContourCreationError simply do:: shake_grid = ShakeGrid() contour_path = shake_grid.mmi_to_contours() which will return the contour dataset for the latest event on the ftp server.
[ "Extract", "contours", "from", "the", "event", "s", "tif", "file", "." ]
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/shake_grid/shake_grid.py#L712-L848
0.000336
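The heart of mmi_to_contours above is a single gdal.ContourGenerate call writing into an OGR shapefile layer. A minimal standalone sketch of that step follows; the mmi.tif and contours.shp paths are hypothetical placeholders, not files from the InaSAFE project:

from osgeo import gdal, ogr

raster = gdal.Open('mmi.tif', gdal.GA_ReadOnly)              # hypothetical input MMI raster
driver = ogr.GetDriverByName('ESRI Shapefile')
source = driver.CreateDataSource('contours.shp')             # hypothetical output path
layer = source.CreateLayer('contour')
layer.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))       # field index 0: contour id
layer.CreateField(ogr.FieldDefn('MMI', ogr.OFTReal))         # field index 1: contour level

# 0.5 MMI interval, base 0, no fixed levels, nodata handling disabled (flag 0)
gdal.ContourGenerate(raster.GetRasterBand(1), 0.5, 0, [], 0, -9999, layer, 0, 1)

source = None                                                # flush the shapefile to disk
raster = None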
ziwenxie/netease-dl
netease/start.py
cli
def cli(ctx, timeout, proxy, output, quiet, lyric, again): """A command tool to download NetEase-Music's songs.""" ctx.obj = NetEase(timeout, proxy, output, quiet, lyric, again)
python
def cli(ctx, timeout, proxy, output, quiet, lyric, again): """A command tool to download NetEase-Music's songs.""" ctx.obj = NetEase(timeout, proxy, output, quiet, lyric, again)
[ "def", "cli", "(", "ctx", ",", "timeout", ",", "proxy", ",", "output", ",", "quiet", ",", "lyric", ",", "again", ")", ":", "ctx", ".", "obj", "=", "NetEase", "(", "timeout", ",", "proxy", ",", "output", ",", "quiet", ",", "lyric", ",", "again", ")" ]
A command tool to download NetEase-Music's songs.
[ "A", "command", "tool", "to", "download", "NetEase", "-", "Music", "s", "songs", "." ]
train
https://github.com/ziwenxie/netease-dl/blob/84b226fc07b10f7f66580f0fc69f10356f66b5c3/netease/start.py#L41-L43
0.005405
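The cli callback above follows the standard Click pattern of building a client once and handing it to subcommands through ctx.obj. A self-contained sketch of that pattern; the option names and the Client class here are illustrative assumptions, not the actual netease-dl interface:

import click

class Client(object):
    def __init__(self, timeout, output):
        self.timeout = timeout
        self.output = output

@click.group()
@click.option('--timeout', default=60, help='Request timeout in seconds.')
@click.option('--output', default='.', help='Download directory.')
@click.pass_context
def cli(ctx, timeout, output):
    """Group callback: build the shared client and stash it on the context."""
    ctx.obj = Client(timeout, output)

@cli.command()
@click.pass_obj
def info(client):
    """Subcommand receiving the shared client via pass_obj."""
    click.echo('timeout=%s output=%s' % (client.timeout, client.output))

if __name__ == '__main__':
    cli()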
BerkeleyAutomation/perception
perception/image.py
BinaryImage.inverse
def inverse(self): """ Inverts image (all nonzeros become zeros and vice verse) Returns ------- :obj:`BinaryImage` inverse of this binary image """ data = np.zeros(self.shape).astype(np.uint8) ind = np.where(self.data == 0) data[ind[0], ind[1], ...] = BINARY_IM_MAX_VAL return BinaryImage(data, self._frame)
python
def inverse(self): """ Inverts image (all nonzeros become zeros and vice verse) Returns ------- :obj:`BinaryImage` inverse of this binary image """ data = np.zeros(self.shape).astype(np.uint8) ind = np.where(self.data == 0) data[ind[0], ind[1], ...] = BINARY_IM_MAX_VAL return BinaryImage(data, self._frame)
[ "def", "inverse", "(", "self", ")", ":", "data", "=", "np", ".", "zeros", "(", "self", ".", "shape", ")", ".", "astype", "(", "np", ".", "uint8", ")", "ind", "=", "np", ".", "where", "(", "self", ".", "data", "==", "0", ")", "data", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", ",", "...", "]", "=", "BINARY_IM_MAX_VAL", "return", "BinaryImage", "(", "data", ",", "self", ".", "_frame", ")" ]
Inverts image (all nonzeros become zeros and vice verse) Returns ------- :obj:`BinaryImage` inverse of this binary image
[ "Inverts", "image", "(", "all", "nonzeros", "become", "zeros", "and", "vice", "verse", ")", "Returns", "-------", ":", "obj", ":", "BinaryImage", "inverse", "of", "this", "binary", "image" ]
train
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L2197-L2207
0.005115
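The inversion above is a plain NumPy mask flip; here it is in isolation, assuming BINARY_IM_MAX_VAL is 255 (an assumption based on the name, since the constant's value is not shown in the record):

import numpy as np

BINARY_IM_MAX_VAL = 255                      # assumed value of the perception constant

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = BINARY_IM_MAX_VAL           # a small square of "on" pixels

data = np.zeros(mask.shape).astype(np.uint8)
ind = np.where(mask == 0)
data[ind[0], ind[1]] = BINARY_IM_MAX_VAL     # zeros become max, nonzeros stay zero

print(mask)
print(data)                                  # complement of the original mask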
DataONEorg/d1_python
lib_common/src/d1_common/cert/x509.py
deserialize_pem
def deserialize_pem(cert_pem): """Deserialize PEM (Base64) encoded X.509 v3 certificate. Args: cert_pem: str or bytes PEM (Base64) encoded X.509 v3 certificate Returns: cert_obj: cryptography.Certificate """ if isinstance(cert_pem, str): cert_pem = cert_pem.encode("utf-8") return cryptography.x509.load_pem_x509_certificate( data=cert_pem, backend=cryptography.hazmat.backends.default_backend() )
python
def deserialize_pem(cert_pem): """Deserialize PEM (Base64) encoded X.509 v3 certificate. Args: cert_pem: str or bytes PEM (Base64) encoded X.509 v3 certificate Returns: cert_obj: cryptography.Certificate """ if isinstance(cert_pem, str): cert_pem = cert_pem.encode("utf-8") return cryptography.x509.load_pem_x509_certificate( data=cert_pem, backend=cryptography.hazmat.backends.default_backend() )
[ "def", "deserialize_pem", "(", "cert_pem", ")", ":", "if", "isinstance", "(", "cert_pem", ",", "str", ")", ":", "cert_pem", "=", "cert_pem", ".", "encode", "(", "\"utf-8\"", ")", "return", "cryptography", ".", "x509", ".", "load_pem_x509_certificate", "(", "data", "=", "cert_pem", ",", "backend", "=", "cryptography", ".", "hazmat", ".", "backends", ".", "default_backend", "(", ")", ")" ]
Deserialize PEM (Base64) encoded X.509 v3 certificate. Args: cert_pem: str or bytes PEM (Base64) encoded X.509 v3 certificate Returns: cert_obj: cryptography.Certificate
[ "Deserialize", "PEM", "(", "Base64", ")", "encoded", "X", ".", "509", "v3", "certificate", "." ]
train
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/x509.py#L268-L283
0.002155
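A short usage sketch for deserialize_pem; the import path follows the file path above (d1_common/cert/x509.py) and cert.pem is a hypothetical local file:

from d1_common.cert import x509 as d1_x509

with open('cert.pem', 'rb') as f:            # hypothetical PEM file on disk
    cert_obj = d1_x509.deserialize_pem(f.read())

# cert_obj is a cryptography.x509.Certificate
print(cert_obj.serial_number)
print(cert_obj.not_valid_after)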
myyang/django-unixtimestampfield
unixtimestampfield/fields.py
OrdinalPatchMixin.to_naive_datetime
def to_naive_datetime(self, value): """ from value to datetime with tzinfo format (datetime.datetime instance) """ if isinstance(value, (six.integer_types, float, six.string_types)): try: return self.from_number(value) except ValueError: return self.datetime_str_to_datetime(value) if isinstance(value, datetime.datetime): return value raise exceptions.ValidationError( "Unable to convert value: '%s' to python data type" % value, code="invalid_datetime" )
python
def to_naive_datetime(self, value): """ from value to datetime with tzinfo format (datetime.datetime instance) """ if isinstance(value, (six.integer_types, float, six.string_types)): try: return self.from_number(value) except ValueError: return self.datetime_str_to_datetime(value) if isinstance(value, datetime.datetime): return value raise exceptions.ValidationError( "Unable to convert value: '%s' to python data type" % value, code="invalid_datetime" )
[ "def", "to_naive_datetime", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "six", ".", "integer_types", ",", "float", ",", "six", ".", "string_types", ")", ")", ":", "try", ":", "return", "self", ".", "from_number", "(", "value", ")", "except", "ValueError", ":", "return", "self", ".", "datetime_str_to_datetime", "(", "value", ")", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "value", "raise", "exceptions", ".", "ValidationError", "(", "\"Unable to convert value: '%s' to python data type\"", "%", "value", ",", "code", "=", "\"invalid_datetime\"", ")" ]
from value to datetime with tzinfo format (datetime.datetime instance)
[ "from", "value", "to", "datetime", "with", "tzinfo", "format", "(", "datetime", ".", "datetime", "instance", ")" ]
train
https://github.com/myyang/django-unixtimestampfield/blob/d647681cd628d1a5cdde8dcbb025bcb9612e9b24/unixtimestampfield/fields.py#L329-L345
0.0033
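to_naive_datetime dispatches on the value type: numbers and numeric strings go through from_number, other strings through datetime_str_to_datetime, and datetimes pass through. A rough standalone sketch of that dispatch, with both helpers replaced by assumed stdlib equivalents:

import datetime

def to_naive_datetime(value):
    if isinstance(value, (int, float, str)):
        try:
            # stand-in for the mixin's from_number helper
            return datetime.datetime.utcfromtimestamp(float(value))
        except ValueError:
            # stand-in for datetime_str_to_datetime; the format here is an assumption
            return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
    if isinstance(value, datetime.datetime):
        return value
    raise ValueError('Unable to convert value: %r' % (value,))

print(to_naive_datetime(0))                        # 1970-01-01 00:00:00
print(to_naive_datetime('2020-01-01 12:00:00'))    # parsed from string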
numenta/htmresearch
projects/l2_pooling/noise_tolerance_l2.py
createRandomObjectDescriptions
def createRandomObjectDescriptions(numObjects, numLocationsPerObject, featurePool=("A", "B", "C")): """ Returns {"Object 1": [(0, "C"), (1, "B"), (2, "C"), ...], "Object 2": [(0, "C"), (1, "A"), (2, "B"), ...]} """ return dict(("Object %d" % i, zip(xrange(numLocationsPerObject), [random.choice(featurePool) for _ in xrange(numLocationsPerObject)])) for i in xrange(1, numObjects + 1))
python
def createRandomObjectDescriptions(numObjects, numLocationsPerObject, featurePool=("A", "B", "C")): """ Returns {"Object 1": [(0, "C"), (1, "B"), (2, "C"), ...], "Object 2": [(0, "C"), (1, "A"), (2, "B"), ...]} """ return dict(("Object %d" % i, zip(xrange(numLocationsPerObject), [random.choice(featurePool) for _ in xrange(numLocationsPerObject)])) for i in xrange(1, numObjects + 1))
[ "def", "createRandomObjectDescriptions", "(", "numObjects", ",", "numLocationsPerObject", ",", "featurePool", "=", "(", "\"A\"", ",", "\"B\"", ",", "\"C\"", ")", ")", ":", "return", "dict", "(", "(", "\"Object %d\"", "%", "i", ",", "zip", "(", "xrange", "(", "numLocationsPerObject", ")", ",", "[", "random", ".", "choice", "(", "featurePool", ")", "for", "_", "in", "xrange", "(", "numLocationsPerObject", ")", "]", ")", ")", "for", "i", "in", "xrange", "(", "1", ",", "numObjects", "+", "1", ")", ")" ]
Returns {"Object 1": [(0, "C"), (1, "B"), (2, "C"), ...], "Object 2": [(0, "C"), (1, "A"), (2, "B"), ...]}
[ "Returns", "{", "Object", "1", ":", "[", "(", "0", "C", ")", "(", "1", "B", ")", "(", "2", "C", ")", "...", "]", "Object", "2", ":", "[", "(", "0", "C", ")", "(", "1", "A", ")", "(", "2", "B", ")", "...", "]", "}" ]
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/noise_tolerance_l2.py#L45-L56
0.005535
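The helper above is Python 2 code (xrange, list-returning zip). An equivalent Python 3 sketch, for reference:

import random

def create_random_object_descriptions(num_objects, num_locations, feature_pool=("A", "B", "C")):
    """Python 3 port of the helper: {"Object 1": [(0, "C"), (1, "B"), ...], ...}"""
    return {"Object %d" % i: list(zip(range(num_locations),
                                      [random.choice(feature_pool)
                                       for _ in range(num_locations)]))
            for i in range(1, num_objects + 1)}

print(create_random_object_descriptions(2, 3))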
ddorn/superprompt
superprompt/core.py
prompt_file
def prompt_file(prompt, default=None, must_exist=True, is_dir=False, show_default=True, prompt_suffix=': ', color=None): """ Prompt a filename using using glob for autocompetion. If must_exist is True (default) then you can be sure that the value returned is an existing filename or directory name. If is_dir is True, this will show only the directories for the completion. """ if must_exist: while True: r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default, prompt_suffix=prompt_suffix, color=color) if os.path.exists(r): break print('This path does not exist.') else: r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default, prompt_suffix=prompt_suffix, color=color) return r
python
def prompt_file(prompt, default=None, must_exist=True, is_dir=False, show_default=True, prompt_suffix=': ', color=None): """ Prompt a filename using using glob for autocompetion. If must_exist is True (default) then you can be sure that the value returned is an existing filename or directory name. If is_dir is True, this will show only the directories for the completion. """ if must_exist: while True: r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default, prompt_suffix=prompt_suffix, color=color) if os.path.exists(r): break print('This path does not exist.') else: r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default, prompt_suffix=prompt_suffix, color=color) return r
[ "def", "prompt_file", "(", "prompt", ",", "default", "=", "None", ",", "must_exist", "=", "True", ",", "is_dir", "=", "False", ",", "show_default", "=", "True", ",", "prompt_suffix", "=", "': '", ",", "color", "=", "None", ")", ":", "if", "must_exist", ":", "while", "True", ":", "r", "=", "prompt_autocomplete", "(", "prompt", ",", "path_complete", "(", "is_dir", ")", ",", "default", ",", "show_default", "=", "show_default", ",", "prompt_suffix", "=", "prompt_suffix", ",", "color", "=", "color", ")", "if", "os", ".", "path", ".", "exists", "(", "r", ")", ":", "break", "print", "(", "'This path does not exist.'", ")", "else", ":", "r", "=", "prompt_autocomplete", "(", "prompt", ",", "path_complete", "(", "is_dir", ")", ",", "default", ",", "show_default", "=", "show_default", ",", "prompt_suffix", "=", "prompt_suffix", ",", "color", "=", "color", ")", "return", "r" ]
Prompt a filename using using glob for autocompetion. If must_exist is True (default) then you can be sure that the value returned is an existing filename or directory name. If is_dir is True, this will show only the directories for the completion.
[ "Prompt", "a", "filename", "using", "using", "glob", "for", "autocompetion", "." ]
train
https://github.com/ddorn/superprompt/blob/f2ee13a71c0523663ca1740738b545e2ab1eab20/superprompt/core.py#L104-L125
0.005319
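A usage sketch for prompt_file; the import path follows the file path above (superprompt/core.py), and the prompt text is a placeholder:

from superprompt.core import prompt_file

# Ask interactively for an existing directory, with glob-based tab completion.
path = prompt_file('Project directory', default='.', must_exist=True, is_dir=True)
print('Selected:', path)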
bharadwajyarlagadda/bingmaps
bingmaps/apiservices/elevations.py
ElevationsApi.zoomlevel
def zoomlevel(self): """Retrieves zoomlevel from the output response Returns: zoomlevel (namedtuple): A namedtuple of zoomlevel from the output response """ resources = self.get_resource() zoomlevel = namedtuple('zoomlevel', 'zoomLevel') try: return [zoomlevel(resource['zoomLevel']) for resource in resources] except TypeError: try: if isinstance(resources['ElevationData'], dict): return zoomlevel(resources['ElevationData']['ZoomLevel']) except KeyError: try: if isinstance(resources['SeaLevelData'], dict): zoom = resources['SeaLevelData']['ZoomLevel'] return zoomlevel(zoom) except KeyError: print(KeyError)
python
def zoomlevel(self): """Retrieves zoomlevel from the output response Returns: zoomlevel (namedtuple): A namedtuple of zoomlevel from the output response """ resources = self.get_resource() zoomlevel = namedtuple('zoomlevel', 'zoomLevel') try: return [zoomlevel(resource['zoomLevel']) for resource in resources] except TypeError: try: if isinstance(resources['ElevationData'], dict): return zoomlevel(resources['ElevationData']['ZoomLevel']) except KeyError: try: if isinstance(resources['SeaLevelData'], dict): zoom = resources['SeaLevelData']['ZoomLevel'] return zoomlevel(zoom) except KeyError: print(KeyError)
[ "def", "zoomlevel", "(", "self", ")", ":", "resources", "=", "self", ".", "get_resource", "(", ")", "zoomlevel", "=", "namedtuple", "(", "'zoomlevel'", ",", "'zoomLevel'", ")", "try", ":", "return", "[", "zoomlevel", "(", "resource", "[", "'zoomLevel'", "]", ")", "for", "resource", "in", "resources", "]", "except", "TypeError", ":", "try", ":", "if", "isinstance", "(", "resources", "[", "'ElevationData'", "]", ",", "dict", ")", ":", "return", "zoomlevel", "(", "resources", "[", "'ElevationData'", "]", "[", "'ZoomLevel'", "]", ")", "except", "KeyError", ":", "try", ":", "if", "isinstance", "(", "resources", "[", "'SeaLevelData'", "]", ",", "dict", ")", ":", "zoom", "=", "resources", "[", "'SeaLevelData'", "]", "[", "'ZoomLevel'", "]", "return", "zoomlevel", "(", "zoom", ")", "except", "KeyError", ":", "print", "(", "KeyError", ")" ]
Retrieves zoomlevel from the output response Returns: zoomlevel (namedtuple): A namedtuple of zoomlevel from the output response
[ "Retrieves", "zoomlevel", "from", "the", "output", "response" ]
train
https://github.com/bharadwajyarlagadda/bingmaps/blob/6bb3cdadfb121aaff96704509cedff2710a62b6d/bingmaps/apiservices/elevations.py#L165-L187
0.00221
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
smooth_differentiation
def smooth_differentiation(x, y, weigths=None, order=5, smoothness=3, derivation=1): '''Returns the dy/dx(x) with the fit and differentiation of a spline curve Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like ''' if (len(x) != len(y)): raise ValueError("x, y must have the same length") f = splrep(x, y, w=weigths, k=order, s=smoothness) # spline function return splev(x, f, der=derivation)
python
def smooth_differentiation(x, y, weigths=None, order=5, smoothness=3, derivation=1): '''Returns the dy/dx(x) with the fit and differentiation of a spline curve Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like ''' if (len(x) != len(y)): raise ValueError("x, y must have the same length") f = splrep(x, y, w=weigths, k=order, s=smoothness) # spline function return splev(x, f, der=derivation)
[ "def", "smooth_differentiation", "(", "x", ",", "y", ",", "weigths", "=", "None", ",", "order", "=", "5", ",", "smoothness", "=", "3", ",", "derivation", "=", "1", ")", ":", "if", "(", "len", "(", "x", ")", "!=", "len", "(", "y", ")", ")", ":", "raise", "ValueError", "(", "\"x, y must have the same length\"", ")", "f", "=", "splrep", "(", "x", ",", "y", ",", "w", "=", "weigths", ",", "k", "=", "order", ",", "s", "=", "smoothness", ")", "# spline function", "return", "splev", "(", "x", ",", "f", ",", "der", "=", "derivation", ")" ]
Returns the dy/dx(x) with the fit and differentiation of a spline curve Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like
[ "Returns", "the", "dy", "/", "dx", "(", "x", ")", "with", "the", "fit", "and", "differentiation", "of", "a", "spline", "curve" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L627-L642
0.004107
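A self-contained check of the spline-derivative idea, using the same splrep/splev calls and the same order/smoothness defaults on a noisy sine:

import numpy as np
from scipy.interpolate import splrep, splev

x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x) + np.random.normal(scale=0.01, size=x.size)   # noisy samples

tck = splrep(x, y, k=5, s=3)        # spline fit, order 5, smoothness 3 (as above)
dy_dx = splev(x, tck, der=1)        # first derivative of the spline

print(np.max(np.abs(dy_dx - np.cos(x))))   # roughly cos(x) when the smoothing is reasonable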
sorgerlab/indra
indra/sources/eidos/cli.py
run_eidos
def run_eidos(endpoint, *args): """Run a given enpoint of Eidos through the command line. Parameters ---------- endpoint : str The class within the Eidos package to run, for instance 'apps.ExtractFromDirectory' will run 'org.clulab.wm.eidos.apps.ExtractFromDirectory' *args Any further arguments to be passed as inputs to the class being run. """ # Make the full path to the class that should be used call_class = '%s.%s' % (eidos_package, endpoint) # Assemble the command line command and append optonal args cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args) logger.info('Running Eidos with command "%s"' % (' '.join(cmd))) subprocess.call(cmd)
python
def run_eidos(endpoint, *args): """Run a given enpoint of Eidos through the command line. Parameters ---------- endpoint : str The class within the Eidos package to run, for instance 'apps.ExtractFromDirectory' will run 'org.clulab.wm.eidos.apps.ExtractFromDirectory' *args Any further arguments to be passed as inputs to the class being run. """ # Make the full path to the class that should be used call_class = '%s.%s' % (eidos_package, endpoint) # Assemble the command line command and append optonal args cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args) logger.info('Running Eidos with command "%s"' % (' '.join(cmd))) subprocess.call(cmd)
[ "def", "run_eidos", "(", "endpoint", ",", "*", "args", ")", ":", "# Make the full path to the class that should be used", "call_class", "=", "'%s.%s'", "%", "(", "eidos_package", ",", "endpoint", ")", "# Assemble the command line command and append optonal args", "cmd", "=", "[", "'java'", ",", "'-Xmx12G'", ",", "'-cp'", ",", "eip", ",", "call_class", "]", "+", "list", "(", "args", ")", "logger", ".", "info", "(", "'Running Eidos with command \"%s\"'", "%", "(", "' '", ".", "join", "(", "cmd", ")", ")", ")", "subprocess", ".", "call", "(", "cmd", ")" ]
Run a given enpoint of Eidos through the command line. Parameters ---------- endpoint : str The class within the Eidos package to run, for instance 'apps.ExtractFromDirectory' will run 'org.clulab.wm.eidos.apps.ExtractFromDirectory' *args Any further arguments to be passed as inputs to the class being run.
[ "Run", "a", "given", "enpoint", "of", "Eidos", "through", "the", "command", "line", "." ]
train
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/cli.py#L20-L38
0.001339
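A usage sketch built around the endpoint named in the docstring; the two directory arguments (and their order) are assumptions about what ExtractFromDirectory expects, and the paths are placeholders:

from indra.sources.eidos.cli import run_eidos

# Assumed invocation: extract from a folder of text files into an output folder.
run_eidos('apps.ExtractFromDirectory', '/path/to/txt_dir', '/path/to/output_dir')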
rushter/heamy
heamy/dataset.py
Dataset.to_dense
def to_dense(self): """Convert sparse Dataset to dense matrix.""" if hasattr(self._X_train, 'todense'): self._X_train = self._X_train.todense() self._X_test = self._X_test.todense()
python
def to_dense(self): """Convert sparse Dataset to dense matrix.""" if hasattr(self._X_train, 'todense'): self._X_train = self._X_train.todense() self._X_test = self._X_test.todense()
[ "def", "to_dense", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "_X_train", ",", "'todense'", ")", ":", "self", ".", "_X_train", "=", "self", ".", "_X_train", ".", "todense", "(", ")", "self", ".", "_X_test", "=", "self", ".", "_X_test", ".", "todense", "(", ")" ]
Convert sparse Dataset to dense matrix.
[ "Convert", "sparse", "Dataset", "to", "dense", "matrix", "." ]
train
https://github.com/rushter/heamy/blob/c330854cee3c547417eb353a4a4a23331b40b4bc/heamy/dataset.py#L336-L340
0.00905
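to_dense relies only on the stored matrices exposing a todense() method, as SciPy sparse matrices do; the underlying behaviour in isolation:

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.eye(3))
print(hasattr(X, 'todense'))    # True, so the Dataset would convert it
print(X.todense())              # dense matrix view of the sparse data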
saltstack/salt
salt/modules/boto3_elasticache.py
create_cache_cluster
def create_cache_cluster(name, wait=600, security_groups=None, region=None, key=None, keyid=None, profile=None, **args): ''' Create a cache cluster. Example: .. code-block:: bash salt myminion boto3_elasticache.create_cache_cluster name=myCacheCluster \ Engine=redis \ CacheNodeType=cache.t2.micro \ NumCacheNodes=1 \ SecurityGroupIds='[sg-11223344]' \ CacheSubnetGroupName=myCacheSubnetGroup ''' if security_groups: if not isinstance(security_groups, list): security_groups = [security_groups] sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region, key=key, keyid=keyid, profile=profile) if 'SecurityGroupIds' not in args: args['SecurityGroupIds'] = [] args['SecurityGroupIds'] += sgs args = dict([(k, v) for k, v in args.items() if not k.startswith('_')]) return _create_resource(name, name_param='CacheClusterId', desc='cache cluster', res_type='cache_cluster', wait=wait, status_param='CacheClusterStatus', region=region, key=key, keyid=keyid, profile=profile, **args)
python
def create_cache_cluster(name, wait=600, security_groups=None, region=None, key=None, keyid=None, profile=None, **args): ''' Create a cache cluster. Example: .. code-block:: bash salt myminion boto3_elasticache.create_cache_cluster name=myCacheCluster \ Engine=redis \ CacheNodeType=cache.t2.micro \ NumCacheNodes=1 \ SecurityGroupIds='[sg-11223344]' \ CacheSubnetGroupName=myCacheSubnetGroup ''' if security_groups: if not isinstance(security_groups, list): security_groups = [security_groups] sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region, key=key, keyid=keyid, profile=profile) if 'SecurityGroupIds' not in args: args['SecurityGroupIds'] = [] args['SecurityGroupIds'] += sgs args = dict([(k, v) for k, v in args.items() if not k.startswith('_')]) return _create_resource(name, name_param='CacheClusterId', desc='cache cluster', res_type='cache_cluster', wait=wait, status_param='CacheClusterStatus', region=region, key=key, keyid=keyid, profile=profile, **args)
[ "def", "create_cache_cluster", "(", "name", ",", "wait", "=", "600", ",", "security_groups", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "args", ")", ":", "if", "security_groups", ":", "if", "not", "isinstance", "(", "security_groups", ",", "list", ")", ":", "security_groups", "=", "[", "security_groups", "]", "sgs", "=", "__salt__", "[", "'boto_secgroup.convert_to_group_ids'", "]", "(", "groups", "=", "security_groups", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "'SecurityGroupIds'", "not", "in", "args", ":", "args", "[", "'SecurityGroupIds'", "]", "=", "[", "]", "args", "[", "'SecurityGroupIds'", "]", "+=", "sgs", "args", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", "if", "not", "k", ".", "startswith", "(", "'_'", ")", "]", ")", "return", "_create_resource", "(", "name", ",", "name_param", "=", "'CacheClusterId'", ",", "desc", "=", "'cache cluster'", ",", "res_type", "=", "'cache_cluster'", ",", "wait", "=", "wait", ",", "status_param", "=", "'CacheClusterStatus'", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "*", "*", "args", ")" ]
Create a cache cluster. Example: .. code-block:: bash salt myminion boto3_elasticache.create_cache_cluster name=myCacheCluster \ Engine=redis \ CacheNodeType=cache.t2.micro \ NumCacheNodes=1 \ SecurityGroupIds='[sg-11223344]' \ CacheSubnetGroupName=myCacheSubnetGroup
[ "Create", "a", "cache", "cluster", "." ]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_elasticache.py#L314-L341
0.006431
lmcinnes/umap
umap/umap_.py
UMAP.transform
def transform(self, X): """Transform X into the existing embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) New data to be transformed. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the new data in low-dimensional space. """ # If we fit just a single instance then error if self.embedding_.shape[0] == 1: raise ValueError('Transform unavailable when model was fit with' 'only a single data sample.') # If we just have the original input then short circuit things X = check_array(X, dtype=np.float32, accept_sparse="csr") x_hash = joblib.hash(X) if x_hash == self._input_hash: return self.embedding_ if self._sparse_data: raise ValueError("Transform not available for sparse input.") elif self.metric == 'precomputed': raise ValueError("Transform of new data not available for " "precomputed metric.") X = check_array(X, dtype=np.float32, order="C") random_state = check_random_state(self.transform_seed) rng_state = random_state.randint(INT32_MIN, INT32_MAX, 3).astype(np.int64) if self._small_data: dmat = pairwise_distances( X, self._raw_data, metric=self.metric, **self._metric_kwds ) indices = np.argpartition(dmat, self._n_neighbors)[:, :self._n_neighbors] dmat_shortened = submatrix(dmat, indices, self._n_neighbors) indices_sorted = np.argsort(dmat_shortened) indices = submatrix(indices, indices_sorted, self._n_neighbors) dists = submatrix(dmat_shortened, indices_sorted, self._n_neighbors) else: init = initialise_search( self._rp_forest, self._raw_data, X, int(self._n_neighbors * self.transform_queue_size), self._random_init, self._tree_init, rng_state, ) result = self._search( self._raw_data, self._search_graph.indptr, self._search_graph.indices, init, X, ) indices, dists = deheap_sort(result) indices = indices[:, : self._n_neighbors] dists = dists[:, : self._n_neighbors] adjusted_local_connectivity = max(0, self.local_connectivity - 1.0) sigmas, rhos = smooth_knn_dist( dists, self._n_neighbors, local_connectivity=adjusted_local_connectivity ) rows, cols, vals = compute_membership_strengths(indices, dists, sigmas, rhos) graph = scipy.sparse.coo_matrix( (vals, (rows, cols)), shape=(X.shape[0], self._raw_data.shape[0]) ) # This was a very specially constructed graph with constant degree. # That lets us do fancy unpacking by reshaping the csr matrix indices # and data. Doing so relies on the constant degree assumption! csr_graph = normalize(graph.tocsr(), norm="l1") inds = csr_graph.indices.reshape(X.shape[0], self._n_neighbors) weights = csr_graph.data.reshape(X.shape[0], self._n_neighbors) embedding = init_transform(inds, weights, self.embedding_) if self.n_epochs is None: # For smaller datasets we can use more epochs if graph.shape[0] <= 10000: n_epochs = 100 else: n_epochs = 30 else: n_epochs = self.n_epochs // 3.0 graph.data[graph.data < (graph.data.max() / float(n_epochs))] = 0.0 graph.eliminate_zeros() epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs) head = graph.row tail = graph.col embedding = optimize_layout( embedding, self.embedding_, head, tail, n_epochs, graph.shape[1], epochs_per_sample, self._a, self._b, rng_state, self.repulsion_strength, self._initial_alpha, self.negative_sample_rate, verbose=self.verbose, ) return embedding
python
def transform(self, X): """Transform X into the existing embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) New data to be transformed. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the new data in low-dimensional space. """ # If we fit just a single instance then error if self.embedding_.shape[0] == 1: raise ValueError('Transform unavailable when model was fit with' 'only a single data sample.') # If we just have the original input then short circuit things X = check_array(X, dtype=np.float32, accept_sparse="csr") x_hash = joblib.hash(X) if x_hash == self._input_hash: return self.embedding_ if self._sparse_data: raise ValueError("Transform not available for sparse input.") elif self.metric == 'precomputed': raise ValueError("Transform of new data not available for " "precomputed metric.") X = check_array(X, dtype=np.float32, order="C") random_state = check_random_state(self.transform_seed) rng_state = random_state.randint(INT32_MIN, INT32_MAX, 3).astype(np.int64) if self._small_data: dmat = pairwise_distances( X, self._raw_data, metric=self.metric, **self._metric_kwds ) indices = np.argpartition(dmat, self._n_neighbors)[:, :self._n_neighbors] dmat_shortened = submatrix(dmat, indices, self._n_neighbors) indices_sorted = np.argsort(dmat_shortened) indices = submatrix(indices, indices_sorted, self._n_neighbors) dists = submatrix(dmat_shortened, indices_sorted, self._n_neighbors) else: init = initialise_search( self._rp_forest, self._raw_data, X, int(self._n_neighbors * self.transform_queue_size), self._random_init, self._tree_init, rng_state, ) result = self._search( self._raw_data, self._search_graph.indptr, self._search_graph.indices, init, X, ) indices, dists = deheap_sort(result) indices = indices[:, : self._n_neighbors] dists = dists[:, : self._n_neighbors] adjusted_local_connectivity = max(0, self.local_connectivity - 1.0) sigmas, rhos = smooth_knn_dist( dists, self._n_neighbors, local_connectivity=adjusted_local_connectivity ) rows, cols, vals = compute_membership_strengths(indices, dists, sigmas, rhos) graph = scipy.sparse.coo_matrix( (vals, (rows, cols)), shape=(X.shape[0], self._raw_data.shape[0]) ) # This was a very specially constructed graph with constant degree. # That lets us do fancy unpacking by reshaping the csr matrix indices # and data. Doing so relies on the constant degree assumption! csr_graph = normalize(graph.tocsr(), norm="l1") inds = csr_graph.indices.reshape(X.shape[0], self._n_neighbors) weights = csr_graph.data.reshape(X.shape[0], self._n_neighbors) embedding = init_transform(inds, weights, self.embedding_) if self.n_epochs is None: # For smaller datasets we can use more epochs if graph.shape[0] <= 10000: n_epochs = 100 else: n_epochs = 30 else: n_epochs = self.n_epochs // 3.0 graph.data[graph.data < (graph.data.max() / float(n_epochs))] = 0.0 graph.eliminate_zeros() epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs) head = graph.row tail = graph.col embedding = optimize_layout( embedding, self.embedding_, head, tail, n_epochs, graph.shape[1], epochs_per_sample, self._a, self._b, rng_state, self.repulsion_strength, self._initial_alpha, self.negative_sample_rate, verbose=self.verbose, ) return embedding
[ "def", "transform", "(", "self", ",", "X", ")", ":", "# If we fit just a single instance then error", "if", "self", ".", "embedding_", ".", "shape", "[", "0", "]", "==", "1", ":", "raise", "ValueError", "(", "'Transform unavailable when model was fit with'", "'only a single data sample.'", ")", "# If we just have the original input then short circuit things", "X", "=", "check_array", "(", "X", ",", "dtype", "=", "np", ".", "float32", ",", "accept_sparse", "=", "\"csr\"", ")", "x_hash", "=", "joblib", ".", "hash", "(", "X", ")", "if", "x_hash", "==", "self", ".", "_input_hash", ":", "return", "self", ".", "embedding_", "if", "self", ".", "_sparse_data", ":", "raise", "ValueError", "(", "\"Transform not available for sparse input.\"", ")", "elif", "self", ".", "metric", "==", "'precomputed'", ":", "raise", "ValueError", "(", "\"Transform of new data not available for \"", "\"precomputed metric.\"", ")", "X", "=", "check_array", "(", "X", ",", "dtype", "=", "np", ".", "float32", ",", "order", "=", "\"C\"", ")", "random_state", "=", "check_random_state", "(", "self", ".", "transform_seed", ")", "rng_state", "=", "random_state", ".", "randint", "(", "INT32_MIN", ",", "INT32_MAX", ",", "3", ")", ".", "astype", "(", "np", ".", "int64", ")", "if", "self", ".", "_small_data", ":", "dmat", "=", "pairwise_distances", "(", "X", ",", "self", ".", "_raw_data", ",", "metric", "=", "self", ".", "metric", ",", "*", "*", "self", ".", "_metric_kwds", ")", "indices", "=", "np", ".", "argpartition", "(", "dmat", ",", "self", ".", "_n_neighbors", ")", "[", ":", ",", ":", "self", ".", "_n_neighbors", "]", "dmat_shortened", "=", "submatrix", "(", "dmat", ",", "indices", ",", "self", ".", "_n_neighbors", ")", "indices_sorted", "=", "np", ".", "argsort", "(", "dmat_shortened", ")", "indices", "=", "submatrix", "(", "indices", ",", "indices_sorted", ",", "self", ".", "_n_neighbors", ")", "dists", "=", "submatrix", "(", "dmat_shortened", ",", "indices_sorted", ",", "self", ".", "_n_neighbors", ")", "else", ":", "init", "=", "initialise_search", "(", "self", ".", "_rp_forest", ",", "self", ".", "_raw_data", ",", "X", ",", "int", "(", "self", ".", "_n_neighbors", "*", "self", ".", "transform_queue_size", ")", ",", "self", ".", "_random_init", ",", "self", ".", "_tree_init", ",", "rng_state", ",", ")", "result", "=", "self", ".", "_search", "(", "self", ".", "_raw_data", ",", "self", ".", "_search_graph", ".", "indptr", ",", "self", ".", "_search_graph", ".", "indices", ",", "init", ",", "X", ",", ")", "indices", ",", "dists", "=", "deheap_sort", "(", "result", ")", "indices", "=", "indices", "[", ":", ",", ":", "self", ".", "_n_neighbors", "]", "dists", "=", "dists", "[", ":", ",", ":", "self", ".", "_n_neighbors", "]", "adjusted_local_connectivity", "=", "max", "(", "0", ",", "self", ".", "local_connectivity", "-", "1.0", ")", "sigmas", ",", "rhos", "=", "smooth_knn_dist", "(", "dists", ",", "self", ".", "_n_neighbors", ",", "local_connectivity", "=", "adjusted_local_connectivity", ")", "rows", ",", "cols", ",", "vals", "=", "compute_membership_strengths", "(", "indices", ",", "dists", ",", "sigmas", ",", "rhos", ")", "graph", "=", "scipy", ".", "sparse", ".", "coo_matrix", "(", "(", "vals", ",", "(", "rows", ",", "cols", ")", ")", ",", "shape", "=", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "_raw_data", ".", "shape", "[", "0", "]", ")", ")", "# This was a very specially constructed graph with constant degree.", "# That lets us do fancy unpacking by reshaping the csr matrix indices", "# and data. 
Doing so relies on the constant degree assumption!", "csr_graph", "=", "normalize", "(", "graph", ".", "tocsr", "(", ")", ",", "norm", "=", "\"l1\"", ")", "inds", "=", "csr_graph", ".", "indices", ".", "reshape", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "_n_neighbors", ")", "weights", "=", "csr_graph", ".", "data", ".", "reshape", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "_n_neighbors", ")", "embedding", "=", "init_transform", "(", "inds", ",", "weights", ",", "self", ".", "embedding_", ")", "if", "self", ".", "n_epochs", "is", "None", ":", "# For smaller datasets we can use more epochs", "if", "graph", ".", "shape", "[", "0", "]", "<=", "10000", ":", "n_epochs", "=", "100", "else", ":", "n_epochs", "=", "30", "else", ":", "n_epochs", "=", "self", ".", "n_epochs", "//", "3.0", "graph", ".", "data", "[", "graph", ".", "data", "<", "(", "graph", ".", "data", ".", "max", "(", ")", "/", "float", "(", "n_epochs", ")", ")", "]", "=", "0.0", "graph", ".", "eliminate_zeros", "(", ")", "epochs_per_sample", "=", "make_epochs_per_sample", "(", "graph", ".", "data", ",", "n_epochs", ")", "head", "=", "graph", ".", "row", "tail", "=", "graph", ".", "col", "embedding", "=", "optimize_layout", "(", "embedding", ",", "self", ".", "embedding_", ",", "head", ",", "tail", ",", "n_epochs", ",", "graph", ".", "shape", "[", "1", "]", ",", "epochs_per_sample", ",", "self", ".", "_a", ",", "self", ".", "_b", ",", "rng_state", ",", "self", ".", "repulsion_strength", ",", "self", ".", "_initial_alpha", ",", "self", ".", "negative_sample_rate", ",", "verbose", "=", "self", ".", "verbose", ",", ")", "return", "embedding" ]
Transform X into the existing embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) New data to be transformed. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the new data in low-dimensional space.
[ "Transform", "X", "into", "the", "existing", "embedded", "space", "and", "return", "that", "transformed", "output", "." ]
train
https://github.com/lmcinnes/umap/blob/bbb01c03ba49f7bff8f77fd662d00e50d6686c77/umap/umap_.py#L1592-L1712
0.001107
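Typical fit/transform usage for the method above, on a small scikit-learn dataset:

import umap
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

X_train, X_test = train_test_split(load_digits().data, random_state=42)

reducer = umap.UMAP(n_neighbors=15, random_state=42).fit(X_train)   # learn the embedding
test_embedding = reducer.transform(X_test)                          # map unseen points into it
print(test_embedding.shape)                                         # (n_test_samples, 2)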
elastic/elasticsearch-dsl-py
examples/alias_migration.py
migrate
def migrate(move_data=True, update_alias=True): """ Upgrade function that creates a new index for the data. Optionally it also can (and by default will) reindex previous copy of the data into the new index (specify ``move_data=False`` to skip this step) and update the alias to point to the latest index (set ``update_alias=False`` to skip). Note that while this function is running the application can still perform any and all searches without any loss of functionality. It should, however, not perform any writes at this time as those might be lost. """ # construct a new index name by appending current timestamp next_index = PATTERN.replace('*', datetime.now().strftime('%Y%m%d%H%M%S%f')) # get the low level connection es = connections.get_connection() # create new index, it will use the settings from the template es.indices.create(index=next_index) if move_data: # move data from current alias to the new index es.reindex( body={"source": {"index": ALIAS}, "dest": {"index": next_index}}, request_timeout=3600 ) # refresh the index to make the changes visible es.indices.refresh(index=next_index) if update_alias: # repoint the alias to point to the newly created index es.indices.update_aliases(body={ 'actions': [ {"remove": {"alias": ALIAS, "index": PATTERN}}, {"add": {"alias": ALIAS, "index": next_index}}, ] })
python
def migrate(move_data=True, update_alias=True): """ Upgrade function that creates a new index for the data. Optionally it also can (and by default will) reindex previous copy of the data into the new index (specify ``move_data=False`` to skip this step) and update the alias to point to the latest index (set ``update_alias=False`` to skip). Note that while this function is running the application can still perform any and all searches without any loss of functionality. It should, however, not perform any writes at this time as those might be lost. """ # construct a new index name by appending current timestamp next_index = PATTERN.replace('*', datetime.now().strftime('%Y%m%d%H%M%S%f')) # get the low level connection es = connections.get_connection() # create new index, it will use the settings from the template es.indices.create(index=next_index) if move_data: # move data from current alias to the new index es.reindex( body={"source": {"index": ALIAS}, "dest": {"index": next_index}}, request_timeout=3600 ) # refresh the index to make the changes visible es.indices.refresh(index=next_index) if update_alias: # repoint the alias to point to the newly created index es.indices.update_aliases(body={ 'actions': [ {"remove": {"alias": ALIAS, "index": PATTERN}}, {"add": {"alias": ALIAS, "index": next_index}}, ] })
[ "def", "migrate", "(", "move_data", "=", "True", ",", "update_alias", "=", "True", ")", ":", "# construct a new index name by appending current timestamp", "next_index", "=", "PATTERN", ".", "replace", "(", "'*'", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y%m%d%H%M%S%f'", ")", ")", "# get the low level connection", "es", "=", "connections", ".", "get_connection", "(", ")", "# create new index, it will use the settings from the template", "es", ".", "indices", ".", "create", "(", "index", "=", "next_index", ")", "if", "move_data", ":", "# move data from current alias to the new index", "es", ".", "reindex", "(", "body", "=", "{", "\"source\"", ":", "{", "\"index\"", ":", "ALIAS", "}", ",", "\"dest\"", ":", "{", "\"index\"", ":", "next_index", "}", "}", ",", "request_timeout", "=", "3600", ")", "# refresh the index to make the changes visible", "es", ".", "indices", ".", "refresh", "(", "index", "=", "next_index", ")", "if", "update_alias", ":", "# repoint the alias to point to the newly created index", "es", ".", "indices", ".", "update_aliases", "(", "body", "=", "{", "'actions'", ":", "[", "{", "\"remove\"", ":", "{", "\"alias\"", ":", "ALIAS", ",", "\"index\"", ":", "PATTERN", "}", "}", ",", "{", "\"add\"", ":", "{", "\"alias\"", ":", "ALIAS", ",", "\"index\"", ":", "next_index", "}", "}", ",", "]", "}", ")" ]
Upgrade function that creates a new index for the data. Optionally it also can (and by default will) reindex previous copy of the data into the new index (specify ``move_data=False`` to skip this step) and update the alias to point to the latest index (set ``update_alias=False`` to skip). Note that while this function is running the application can still perform any and all searches without any loss of functionality. It should, however, not perform any writes at this time as those might be lost.
[ "Upgrade", "function", "that", "creates", "a", "new", "index", "for", "the", "data", ".", "Optionally", "it", "also", "can", "(", "and", "by", "default", "will", ")", "reindex", "previous", "copy", "of", "the", "data", "into", "the", "new", "index", "(", "specify", "move_data", "=", "False", "to", "skip", "this", "step", ")", "and", "update", "the", "alias", "to", "point", "to", "the", "latest", "index", "(", "set", "update_alias", "=", "False", "to", "skip", ")", "." ]
train
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/examples/alias_migration.py#L70-L106
0.001946
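The migrate() helper in the record above implements the timestamped-index-plus-alias rotation pattern; a minimal usage sketch, assuming migrate, ALIAS and PATTERN are importable from the alias_migration example module and that its index template has already been registered (the host below is a placeholder):

from elasticsearch_dsl import connections

# Register the default connection that migrate() will pick up internally.
connections.create_connection(hosts=["http://localhost:9200"])  # placeholder host

# Create the next timestamped index, reindex the alias into it, and repoint the alias.
migrate(move_data=True, update_alias=True)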
pgmpy/pgmpy
pgmpy/readwrite/XMLBeliefNetwork.py
XBNWriter.set_variables
def set_variables(self, data): """ Set variables for the network. Parameters ---------- data: dict dict for variable in the form of example as shown. Examples -------- >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter >>> writer = XBNWriter() >>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495', ... 'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer', ... 'STATES': ['Present', 'Absent']} ... 'b': {'TYPE': 'discrete', 'XPOS': '11290', ... 'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase', ... 'STATES': ['Present', 'Absent']}}) """ variables = etree.SubElement(self.bnmodel, "VARIABLES") for var in sorted(data): variable = etree.SubElement(variables, 'VAR', attrib={'NAME': var, 'TYPE': data[var]['TYPE'], 'XPOS': data[var]['XPOS'], 'YPOS': data[var]['YPOS']}) etree.SubElement(variable, 'DESCRIPTION', attrib={'DESCRIPTION': data[var]['DESCRIPTION']}) for state in data[var]['STATES']: etree.SubElement(variable, 'STATENAME').text = state
python
def set_variables(self, data): """ Set variables for the network. Parameters ---------- data: dict dict for variable in the form of example as shown. Examples -------- >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter >>> writer = XBNWriter() >>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495', ... 'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer', ... 'STATES': ['Present', 'Absent']} ... 'b': {'TYPE': 'discrete', 'XPOS': '11290', ... 'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase', ... 'STATES': ['Present', 'Absent']}}) """ variables = etree.SubElement(self.bnmodel, "VARIABLES") for var in sorted(data): variable = etree.SubElement(variables, 'VAR', attrib={'NAME': var, 'TYPE': data[var]['TYPE'], 'XPOS': data[var]['XPOS'], 'YPOS': data[var]['YPOS']}) etree.SubElement(variable, 'DESCRIPTION', attrib={'DESCRIPTION': data[var]['DESCRIPTION']}) for state in data[var]['STATES']: etree.SubElement(variable, 'STATENAME').text = state
[ "def", "set_variables", "(", "self", ",", "data", ")", ":", "variables", "=", "etree", ".", "SubElement", "(", "self", ".", "bnmodel", ",", "\"VARIABLES\"", ")", "for", "var", "in", "sorted", "(", "data", ")", ":", "variable", "=", "etree", ".", "SubElement", "(", "variables", ",", "'VAR'", ",", "attrib", "=", "{", "'NAME'", ":", "var", ",", "'TYPE'", ":", "data", "[", "var", "]", "[", "'TYPE'", "]", ",", "'XPOS'", ":", "data", "[", "var", "]", "[", "'XPOS'", "]", ",", "'YPOS'", ":", "data", "[", "var", "]", "[", "'YPOS'", "]", "}", ")", "etree", ".", "SubElement", "(", "variable", ",", "'DESCRIPTION'", ",", "attrib", "=", "{", "'DESCRIPTION'", ":", "data", "[", "var", "]", "[", "'DESCRIPTION'", "]", "}", ")", "for", "state", "in", "data", "[", "var", "]", "[", "'STATES'", "]", ":", "etree", ".", "SubElement", "(", "variable", ",", "'STATENAME'", ")", ".", "text", "=", "state" ]
Set variables for the network. Parameters ---------- data: dict dict for variable in the form of example as shown. Examples -------- >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter >>> writer = XBNWriter() >>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495', ... 'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer', ... 'STATES': ['Present', 'Absent']} ... 'b': {'TYPE': 'discrete', 'XPOS': '11290', ... 'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase', ... 'STATES': ['Present', 'Absent']}})
[ "Set", "variables", "for", "the", "network", "." ]
train
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBeliefNetwork.py#L340-L366
0.005025
pandas-dev/pandas
pandas/core/internals/blocks.py
Block.replace
def replace(self, to_replace, value, inplace=False, filter=None, regex=False, convert=True): """replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, 'inplace') original_to_replace = to_replace # try to replace, if we raise an error, convert to ObjectBlock and # retry try: values, to_replace = self._try_coerce_args(self.values, to_replace) mask = missing.mask_missing(values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False blocks = self.putmask(mask, value, inplace=inplace) if convert: blocks = [b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks] return blocks except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise # try again with a compatible block block = self.astype(object) return block.replace(to_replace=original_to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert)
python
def replace(self, to_replace, value, inplace=False, filter=None, regex=False, convert=True): """replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, 'inplace') original_to_replace = to_replace # try to replace, if we raise an error, convert to ObjectBlock and # retry try: values, to_replace = self._try_coerce_args(self.values, to_replace) mask = missing.mask_missing(values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False blocks = self.putmask(mask, value, inplace=inplace) if convert: blocks = [b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks] return blocks except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise # try again with a compatible block block = self.astype(object) return block.replace(to_replace=original_to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert)
[ "def", "replace", "(", "self", ",", "to_replace", ",", "value", ",", "inplace", "=", "False", ",", "filter", "=", "None", ",", "regex", "=", "False", ",", "convert", "=", "True", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "original_to_replace", "=", "to_replace", "# try to replace, if we raise an error, convert to ObjectBlock and", "# retry", "try", ":", "values", ",", "to_replace", "=", "self", ".", "_try_coerce_args", "(", "self", ".", "values", ",", "to_replace", ")", "mask", "=", "missing", ".", "mask_missing", "(", "values", ",", "to_replace", ")", "if", "filter", "is", "not", "None", ":", "filtered_out", "=", "~", "self", ".", "mgr_locs", ".", "isin", "(", "filter", ")", "mask", "[", "filtered_out", ".", "nonzero", "(", ")", "[", "0", "]", "]", "=", "False", "blocks", "=", "self", ".", "putmask", "(", "mask", ",", "value", ",", "inplace", "=", "inplace", ")", "if", "convert", ":", "blocks", "=", "[", "b", ".", "convert", "(", "by_item", "=", "True", ",", "numeric", "=", "False", ",", "copy", "=", "not", "inplace", ")", "for", "b", "in", "blocks", "]", "return", "blocks", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# GH 22083, TypeError or ValueError occurred within error handling", "# causes infinite loop. Cast and retry only if not objectblock.", "if", "is_object_dtype", "(", "self", ")", ":", "raise", "# try again with a compatible block", "block", "=", "self", ".", "astype", "(", "object", ")", "return", "block", ".", "replace", "(", "to_replace", "=", "original_to_replace", ",", "value", "=", "value", ",", "inplace", "=", "inplace", ",", "filter", "=", "filter", ",", "regex", "=", "regex", ",", "convert", "=", "convert", ")" ]
replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility.
[ "replace", "the", "to_replace", "value", "with", "value", "possible", "to", "create", "new", "blocks", "here", "this", "is", "just", "a", "call", "to", "putmask", ".", "regex", "is", "not", "used", "here", ".", "It", "is", "used", "in", "ObjectBlocks", ".", "It", "is", "here", "for", "API", "compatibility", "." ]
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L731-L769
0.001674
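Block.replace in the record above boils down to "build a mask of the values to replace, putmask the replacement in, and fall back to object dtype on failure"; a simplified standalone sketch of that idea using plain NumPy (not the pandas-internal Block API) might look like this:

import numpy as np

def replace_values(values, to_replace, new_value):
    # Analogue of missing.mask_missing(): flag the positions to overwrite.
    arr = np.asarray(values, dtype=object).copy()
    mask = arr == to_replace
    # Analogue of putmask(): write the replacement only where the mask is True.
    arr[mask] = new_value
    return arr

replace_values([1, 2, 2, 3], 2, 9)   # -> array([1, 9, 9, 3], dtype=object)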
CLARIAH/grlc
src/swagger.py
build_spec
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]): """Build grlc specification for the given github user / repo.""" loader = grlc.utils.getLoader(user, repo, sha=sha, prov=prov) files = loader.fetchFiles() raw_repo_uri = loader.getRawRepoUri() # Fetch all .rq files items = [] allowed_ext = ["rq", "sparql", "json", "tpf"] for c in files: glogger.debug('>>>>>>>>>>>>>>>>>>>>>>>>>c_name: {}'.format(c['name'])) extension = c['name'].split('.')[-1] if extension in allowed_ext: call_name = c['name'].split('.')[0] # Retrieve extra metadata from the query decorators query_text = loader.getTextFor(c) item = None if extension == "json": query_text = json.loads(query_text) if extension in ["rq", "sparql", "json"]: glogger.debug("===================================================================") glogger.debug("Processing SPARQL query: {}".format(c['name'])) glogger.debug("===================================================================") item = process_sparql_query_text(query_text, loader, call_name, extraMetadata) elif "tpf" == extension: glogger.debug("===================================================================") glogger.debug("Processing TPF query: {}".format(c['name'])) glogger.debug("===================================================================") item = process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata) else: glogger.info("Ignoring unsupported source call name: {}".format(c['name'])) if item: items.append(item) return items
python
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]): """Build grlc specification for the given github user / repo.""" loader = grlc.utils.getLoader(user, repo, sha=sha, prov=prov) files = loader.fetchFiles() raw_repo_uri = loader.getRawRepoUri() # Fetch all .rq files items = [] allowed_ext = ["rq", "sparql", "json", "tpf"] for c in files: glogger.debug('>>>>>>>>>>>>>>>>>>>>>>>>>c_name: {}'.format(c['name'])) extension = c['name'].split('.')[-1] if extension in allowed_ext: call_name = c['name'].split('.')[0] # Retrieve extra metadata from the query decorators query_text = loader.getTextFor(c) item = None if extension == "json": query_text = json.loads(query_text) if extension in ["rq", "sparql", "json"]: glogger.debug("===================================================================") glogger.debug("Processing SPARQL query: {}".format(c['name'])) glogger.debug("===================================================================") item = process_sparql_query_text(query_text, loader, call_name, extraMetadata) elif "tpf" == extension: glogger.debug("===================================================================") glogger.debug("Processing TPF query: {}".format(c['name'])) glogger.debug("===================================================================") item = process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata) else: glogger.info("Ignoring unsupported source call name: {}".format(c['name'])) if item: items.append(item) return items
[ "def", "build_spec", "(", "user", ",", "repo", ",", "sha", "=", "None", ",", "prov", "=", "None", ",", "extraMetadata", "=", "[", "]", ")", ":", "loader", "=", "grlc", ".", "utils", ".", "getLoader", "(", "user", ",", "repo", ",", "sha", "=", "sha", ",", "prov", "=", "prov", ")", "files", "=", "loader", ".", "fetchFiles", "(", ")", "raw_repo_uri", "=", "loader", ".", "getRawRepoUri", "(", ")", "# Fetch all .rq files", "items", "=", "[", "]", "allowed_ext", "=", "[", "\"rq\"", ",", "\"sparql\"", ",", "\"json\"", ",", "\"tpf\"", "]", "for", "c", "in", "files", ":", "glogger", ".", "debug", "(", "'>>>>>>>>>>>>>>>>>>>>>>>>>c_name: {}'", ".", "format", "(", "c", "[", "'name'", "]", ")", ")", "extension", "=", "c", "[", "'name'", "]", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "extension", "in", "allowed_ext", ":", "call_name", "=", "c", "[", "'name'", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "# Retrieve extra metadata from the query decorators", "query_text", "=", "loader", ".", "getTextFor", "(", "c", ")", "item", "=", "None", "if", "extension", "==", "\"json\"", ":", "query_text", "=", "json", ".", "loads", "(", "query_text", ")", "if", "extension", "in", "[", "\"rq\"", ",", "\"sparql\"", ",", "\"json\"", "]", ":", "glogger", ".", "debug", "(", "\"===================================================================\"", ")", "glogger", ".", "debug", "(", "\"Processing SPARQL query: {}\"", ".", "format", "(", "c", "[", "'name'", "]", ")", ")", "glogger", ".", "debug", "(", "\"===================================================================\"", ")", "item", "=", "process_sparql_query_text", "(", "query_text", ",", "loader", ",", "call_name", ",", "extraMetadata", ")", "elif", "\"tpf\"", "==", "extension", ":", "glogger", ".", "debug", "(", "\"===================================================================\"", ")", "glogger", ".", "debug", "(", "\"Processing TPF query: {}\"", ".", "format", "(", "c", "[", "'name'", "]", ")", ")", "glogger", ".", "debug", "(", "\"===================================================================\"", ")", "item", "=", "process_tpf_query_text", "(", "query_text", ",", "raw_repo_uri", ",", "call_name", ",", "extraMetadata", ")", "else", ":", "glogger", ".", "info", "(", "\"Ignoring unsupported source call name: {}\"", ".", "format", "(", "c", "[", "'name'", "]", ")", ")", "if", "item", ":", "items", ".", "append", "(", "item", ")", "return", "items" ]
Build grlc specification for the given github user / repo.
[ "Build", "grlc", "specification", "for", "the", "given", "github", "user", "/", "repo", "." ]
train
https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/swagger.py#L107-L146
0.004324
Esri/ArcREST
src/arcrest/manageorg/_parameters.py
InvitationList.addUser
def addUser(self, username, password, firstname, lastname, email, role): """adds a user to the invitation list""" self._invites.append({ "username":username, "password":password, "firstname":firstname, "lastname":lastname, "fullname":"%s %s" % (firstname, lastname), "email":email, "role":role })
python
def addUser(self, username, password, firstname, lastname, email, role): """adds a user to the invitation list""" self._invites.append({ "username":username, "password":password, "firstname":firstname, "lastname":lastname, "fullname":"%s %s" % (firstname, lastname), "email":email, "role":role })
[ "def", "addUser", "(", "self", ",", "username", ",", "password", ",", "firstname", ",", "lastname", ",", "email", ",", "role", ")", ":", "self", ".", "_invites", ".", "append", "(", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "\"firstname\"", ":", "firstname", ",", "\"lastname\"", ":", "lastname", ",", "\"fullname\"", ":", "\"%s %s\"", "%", "(", "firstname", ",", "lastname", ")", ",", "\"email\"", ":", "email", ",", "\"role\"", ":", "role", "}", ")" ]
adds a user to the invitation list
[ "adds", "a", "user", "to", "the", "invitation", "list" ]
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_parameters.py#L17-L29
0.025229
ktbyers/netmiko
netmiko/fortinet/fortinet_ssh.py
FortinetSSH.disable_paging
def disable_paging(self, delay_factor=1): """Disable paging is only available with specific roles so it may fail.""" check_command = "get system status | grep Virtual" output = self.send_command_timing(check_command) self.allow_disable_global = True self.vdoms = False self._output_mode = "more" if "Virtual domain configuration: enable" in output: self.vdoms = True vdom_additional_command = "config global" output = self.send_command_timing(vdom_additional_command, delay_factor=2) if "Command fail" in output: self.allow_disable_global = False self.remote_conn.close() self.establish_connection(width=100, height=1000) new_output = "" if self.allow_disable_global: self._retrieve_output_mode() disable_paging_commands = [ "config system console", "set output standard", "end", ] # There is an extra 'end' required if in multi-vdoms are enabled if self.vdoms: disable_paging_commands.append("end") outputlist = [ self.send_command_timing(command, delay_factor=2) for command in disable_paging_commands ] # Should test output is valid new_output = self.RETURN.join(outputlist) return output + new_output
python
def disable_paging(self, delay_factor=1): """Disable paging is only available with specific roles so it may fail.""" check_command = "get system status | grep Virtual" output = self.send_command_timing(check_command) self.allow_disable_global = True self.vdoms = False self._output_mode = "more" if "Virtual domain configuration: enable" in output: self.vdoms = True vdom_additional_command = "config global" output = self.send_command_timing(vdom_additional_command, delay_factor=2) if "Command fail" in output: self.allow_disable_global = False self.remote_conn.close() self.establish_connection(width=100, height=1000) new_output = "" if self.allow_disable_global: self._retrieve_output_mode() disable_paging_commands = [ "config system console", "set output standard", "end", ] # There is an extra 'end' required if in multi-vdoms are enabled if self.vdoms: disable_paging_commands.append("end") outputlist = [ self.send_command_timing(command, delay_factor=2) for command in disable_paging_commands ] # Should test output is valid new_output = self.RETURN.join(outputlist) return output + new_output
[ "def", "disable_paging", "(", "self", ",", "delay_factor", "=", "1", ")", ":", "check_command", "=", "\"get system status | grep Virtual\"", "output", "=", "self", ".", "send_command_timing", "(", "check_command", ")", "self", ".", "allow_disable_global", "=", "True", "self", ".", "vdoms", "=", "False", "self", ".", "_output_mode", "=", "\"more\"", "if", "\"Virtual domain configuration: enable\"", "in", "output", ":", "self", ".", "vdoms", "=", "True", "vdom_additional_command", "=", "\"config global\"", "output", "=", "self", ".", "send_command_timing", "(", "vdom_additional_command", ",", "delay_factor", "=", "2", ")", "if", "\"Command fail\"", "in", "output", ":", "self", ".", "allow_disable_global", "=", "False", "self", ".", "remote_conn", ".", "close", "(", ")", "self", ".", "establish_connection", "(", "width", "=", "100", ",", "height", "=", "1000", ")", "new_output", "=", "\"\"", "if", "self", ".", "allow_disable_global", ":", "self", ".", "_retrieve_output_mode", "(", ")", "disable_paging_commands", "=", "[", "\"config system console\"", ",", "\"set output standard\"", ",", "\"end\"", ",", "]", "# There is an extra 'end' required if in multi-vdoms are enabled", "if", "self", ".", "vdoms", ":", "disable_paging_commands", ".", "append", "(", "\"end\"", ")", "outputlist", "=", "[", "self", ".", "send_command_timing", "(", "command", ",", "delay_factor", "=", "2", ")", "for", "command", "in", "disable_paging_commands", "]", "# Should test output is valid", "new_output", "=", "self", ".", "RETURN", ".", "join", "(", "outputlist", ")", "return", "output", "+", "new_output" ]
Disable paging is only available with specific roles so it may fail.
[ "Disable", "paging", "is", "only", "available", "with", "specific", "roles", "so", "it", "may", "fail", "." ]
train
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/fortinet/fortinet_ssh.py#L27-L62
0.00269
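disable_paging() in the record above is normally invoked for you while the SSH session is being prepared; a hedged connection sketch through netmiko's public ConnectHandler entry point (the address and credentials are placeholders):

from netmiko import ConnectHandler

fortigate = {
    "device_type": "fortinet",   # selects the FortinetSSH driver
    "host": "192.0.2.10",        # placeholder management address
    "username": "admin",         # placeholder credentials
    "password": "secret",
}

conn = ConnectHandler(**fortigate)             # paging is disabled during session setup
print(conn.send_command("get system status"))
conn.disconnect()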
eddieantonio/perfection
perfection/getty.py
arrange_rows
def arrange_rows(row_queue, t): """ Takes a priority queue as generated by place_items_in_square(). Arranges the items from its conceptual square to one list. Returns both the resultant vector, plus the displacement vector, to be used in the final output hash function. >>> rows = [(2, 1, [(0, 1), (1, 5)]), (3, 3, [(1, 7)])] >>> result, displacements = arrange_rows(rows, 4) >>> result (1, 5, 7) >>> displacements (None, 0, None, 1) >>> rows = [(1, 1, [(0, 1), (2, 7)]), (2, 2, [(1, 5)])] >>> result, displacements = arrange_rows(rows, 3) >>> result (1, 5, 7) >>> displacements (None, 0, 0) """ # Create a set of all of the unoccupied columns. max_columns = t ** 2 cols = ((x, True) for x in range(max_columns)) unoccupied_columns = collections.OrderedDict(cols) # Create the resultant and displacement vectors. result = [None] * max_columns displacements = [None] * t while row_queue: # Get the next row to place. _inverse_length, y, row = heapq.heappop(row_queue) offset = find_first_fit(unoccupied_columns, row, max_columns) # Calculate the offset of the first item. first_item_x = row[0][0] displacements[y] = offset for x, item in row: actual_x = x + offset result[actual_x] = item del unoccupied_columns[actual_x] return tuple(trim_nones_from_right(result)), tuple(displacements)
python
def arrange_rows(row_queue, t): """ Takes a priority queue as generated by place_items_in_square(). Arranges the items from its conceptual square to one list. Returns both the resultant vector, plus the displacement vector, to be used in the final output hash function. >>> rows = [(2, 1, [(0, 1), (1, 5)]), (3, 3, [(1, 7)])] >>> result, displacements = arrange_rows(rows, 4) >>> result (1, 5, 7) >>> displacements (None, 0, None, 1) >>> rows = [(1, 1, [(0, 1), (2, 7)]), (2, 2, [(1, 5)])] >>> result, displacements = arrange_rows(rows, 3) >>> result (1, 5, 7) >>> displacements (None, 0, 0) """ # Create a set of all of the unoccupied columns. max_columns = t ** 2 cols = ((x, True) for x in range(max_columns)) unoccupied_columns = collections.OrderedDict(cols) # Create the resultant and displacement vectors. result = [None] * max_columns displacements = [None] * t while row_queue: # Get the next row to place. _inverse_length, y, row = heapq.heappop(row_queue) offset = find_first_fit(unoccupied_columns, row, max_columns) # Calculate the offset of the first item. first_item_x = row[0][0] displacements[y] = offset for x, item in row: actual_x = x + offset result[actual_x] = item del unoccupied_columns[actual_x] return tuple(trim_nones_from_right(result)), tuple(displacements)
[ "def", "arrange_rows", "(", "row_queue", ",", "t", ")", ":", "# Create a set of all of the unoccupied columns.", "max_columns", "=", "t", "**", "2", "cols", "=", "(", "(", "x", ",", "True", ")", "for", "x", "in", "range", "(", "max_columns", ")", ")", "unoccupied_columns", "=", "collections", ".", "OrderedDict", "(", "cols", ")", "# Create the resultant and displacement vectors.", "result", "=", "[", "None", "]", "*", "max_columns", "displacements", "=", "[", "None", "]", "*", "t", "while", "row_queue", ":", "# Get the next row to place.", "_inverse_length", ",", "y", ",", "row", "=", "heapq", ".", "heappop", "(", "row_queue", ")", "offset", "=", "find_first_fit", "(", "unoccupied_columns", ",", "row", ",", "max_columns", ")", "# Calculate the offset of the first item.", "first_item_x", "=", "row", "[", "0", "]", "[", "0", "]", "displacements", "[", "y", "]", "=", "offset", "for", "x", ",", "item", "in", "row", ":", "actual_x", "=", "x", "+", "offset", "result", "[", "actual_x", "]", "=", "item", "del", "unoccupied_columns", "[", "actual_x", "]", "return", "tuple", "(", "trim_nones_from_right", "(", "result", ")", ")", ",", "tuple", "(", "displacements", ")" ]
Takes a priority queue as generated by place_items_in_square(). Arranges the items from its conceptual square to one list. Returns both the resultant vector, plus the displacement vector, to be used in the final output hash function. >>> rows = [(2, 1, [(0, 1), (1, 5)]), (3, 3, [(1, 7)])] >>> result, displacements = arrange_rows(rows, 4) >>> result (1, 5, 7) >>> displacements (None, 0, None, 1) >>> rows = [(1, 1, [(0, 1), (2, 7)]), (2, 2, [(1, 5)])] >>> result, displacements = arrange_rows(rows, 3) >>> result (1, 5, 7) >>> displacements (None, 0, 0)
[ "Takes", "a", "priority", "queue", "as", "generated", "by", "place_items_in_square", "()", ".", "Arranges", "the", "items", "from", "its", "conceptual", "square", "to", "one", "list", ".", "Returns", "both", "the", "resultant", "vector", "plus", "the", "displacement", "vector", "to", "be", "used", "in", "the", "final", "output", "hash", "function", "." ]
train
https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L157-L202
0.000669
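The (result, displacements) pair built by arrange_rows() in the record above feeds a displacement-table lookup; a hedged sketch of that final step, assuming the caller has already mapped a key to its (x, y) cell as done elsewhere in getty.py:

def lookup(x, y, result, displacements):
    # Shift the column by the row's displacement to find the slot in the flat vector.
    return result[x + displacements[y]]

result = (1, 5, 7)
displacements = (None, 0, None, 1)
assert lookup(1, 1, result, displacements) == 5   # matches the first doctest above
assert lookup(1, 3, result, displacements) == 7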
totalgood/pugnlp
src/pugnlp/util.py
hist_from_values_list
def hist_from_values_list(values_list, fillers=(None,), normalize=False, cumulative=False, to_str=False, sep=',', min_bin=None, max_bin=None): """Compute an emprical histogram, PMF or CDF in a list of lists or a csv string Only works for discrete (integer) values (doesn't bin real values). `fillers`: list or tuple of values to ignore in computing the histogram >>> hist_from_values_list([1,1,2,1,1,1,2,3,2,4,4,5,7,7,9]) # doctest: +NORMALIZE_WHITESPACE [(1, 5), (2, 3), (3, 1), (4, 2), (5, 1), (6, 0), (7, 2), (8, 0), (9, 1)] >>> hist_from_values_list([(1,9),(1,8),(2,),(1,),(1,4),(2,5),(3,3),(5,0),(2,2)]) # doctest: +NORMALIZE_WHITESPACE [[(1, 4), (2, 3), (3, 1), (4, 0), (5, 1)], [(0, 1), (1, 0), ... (6, 0), (7, 0), (8, 1), (9, 1)]] >>> hist_from_values_list(transposed_matrix([(8,),(1,3,5),(2,),(3,4,5,8)])) # doctest: +NORMALIZE_WHITESPACE [[(8, 1)], [(1, 1), (2, 0), (3, 1), (4, 0), (5, 1)], [(2, 1)], [(3, 1), (4, 1), (5, 1), (6, 0), (7, 0), (8, 1)]] """ value_types = tuple([int, float] + [type(filler) for filler in fillers]) if all(isinstance(value, value_types) for value in values_list): # ignore all fillers and convert all floats to ints when doing counting counters = [Counter(int(value) for value in values_list if isinstance(value, (int, float)))] elif all(len(row) == 1 for row in values_list) and all(isinstance(row[0], value_types) for row in values_list): return hist_from_values_list([values[0] for values in values_list], fillers=fillers, normalize=normalize, cumulative=cumulative, to_str=to_str, sep=sep, min_bin=min_bin, max_bin=max_bin) else: # assume it's a row-wise table (list of rows) return [ hist_from_values_list(col, fillers=fillers, normalize=normalize, cumulative=cumulative, to_str=to_str, sep=sep, min_bin=min_bin, max_bin=max_bin) for col in transposed_matrix(values_list) ] if not values_list: return [] intkeys_list = [[c for c in counts if (isinstance(c, int) or (isinstance(c, float) and int(c) == c))] for counts in counters] try: min_bin = int(min_bin) except (IndexError, ValueError, AttributeError, TypeError): min_bin = min(min(intkeys) for intkeys in intkeys_list) try: max_bin = int(max_bin) except (IndexError, ValueError, AttributeError, TypeError): max_bin = max(max(intkeys) for intkeys in intkeys_list) # FIXME: this looks slow and hazardous (like it's ignore min/max bin): # TODO: reuse min(intkeys) min_bin = max(min_bin, min((min(intkeys) if intkeys else 0) for intkeys in intkeys_list)) max_bin = min(max_bin, max((max(intkeys) if intkeys else 0) for intkeys in intkeys_list)) histograms = [] for intkeys, counts in zip(intkeys_list, counters): histograms += [OrderedDict()] if not intkeys: continue if normalize: N = sum(counts[c] for c in intkeys) for c in intkeys: counts[c] = float(counts[c]) / N if cumulative: for i in range(min_bin, max_bin + 1): histograms[-1][i] = counts.get(i, 0) + histograms[-1].get(i - 1, 0) else: for i in range(min_bin, max_bin + 1): histograms[-1][i] = counts.get(i, 0) if not histograms: histograms = [OrderedDict()] # fill in the zero counts between the integer bins of the histogram aligned_histograms = [] for i in range(min_bin, max_bin + 1): aligned_histograms += [tuple([i] + [hist.get(i, 0) for hist in histograms])] if to_str: # FIXME: add header row return str_from_table(aligned_histograms, sep=sep, max_rows=365 * 2 + 1) return aligned_histograms
python
def hist_from_values_list(values_list, fillers=(None,), normalize=False, cumulative=False, to_str=False, sep=',', min_bin=None, max_bin=None): """Compute an emprical histogram, PMF or CDF in a list of lists or a csv string Only works for discrete (integer) values (doesn't bin real values). `fillers`: list or tuple of values to ignore in computing the histogram >>> hist_from_values_list([1,1,2,1,1,1,2,3,2,4,4,5,7,7,9]) # doctest: +NORMALIZE_WHITESPACE [(1, 5), (2, 3), (3, 1), (4, 2), (5, 1), (6, 0), (7, 2), (8, 0), (9, 1)] >>> hist_from_values_list([(1,9),(1,8),(2,),(1,),(1,4),(2,5),(3,3),(5,0),(2,2)]) # doctest: +NORMALIZE_WHITESPACE [[(1, 4), (2, 3), (3, 1), (4, 0), (5, 1)], [(0, 1), (1, 0), ... (6, 0), (7, 0), (8, 1), (9, 1)]] >>> hist_from_values_list(transposed_matrix([(8,),(1,3,5),(2,),(3,4,5,8)])) # doctest: +NORMALIZE_WHITESPACE [[(8, 1)], [(1, 1), (2, 0), (3, 1), (4, 0), (5, 1)], [(2, 1)], [(3, 1), (4, 1), (5, 1), (6, 0), (7, 0), (8, 1)]] """ value_types = tuple([int, float] + [type(filler) for filler in fillers]) if all(isinstance(value, value_types) for value in values_list): # ignore all fillers and convert all floats to ints when doing counting counters = [Counter(int(value) for value in values_list if isinstance(value, (int, float)))] elif all(len(row) == 1 for row in values_list) and all(isinstance(row[0], value_types) for row in values_list): return hist_from_values_list([values[0] for values in values_list], fillers=fillers, normalize=normalize, cumulative=cumulative, to_str=to_str, sep=sep, min_bin=min_bin, max_bin=max_bin) else: # assume it's a row-wise table (list of rows) return [ hist_from_values_list(col, fillers=fillers, normalize=normalize, cumulative=cumulative, to_str=to_str, sep=sep, min_bin=min_bin, max_bin=max_bin) for col in transposed_matrix(values_list) ] if not values_list: return [] intkeys_list = [[c for c in counts if (isinstance(c, int) or (isinstance(c, float) and int(c) == c))] for counts in counters] try: min_bin = int(min_bin) except (IndexError, ValueError, AttributeError, TypeError): min_bin = min(min(intkeys) for intkeys in intkeys_list) try: max_bin = int(max_bin) except (IndexError, ValueError, AttributeError, TypeError): max_bin = max(max(intkeys) for intkeys in intkeys_list) # FIXME: this looks slow and hazardous (like it's ignore min/max bin): # TODO: reuse min(intkeys) min_bin = max(min_bin, min((min(intkeys) if intkeys else 0) for intkeys in intkeys_list)) max_bin = min(max_bin, max((max(intkeys) if intkeys else 0) for intkeys in intkeys_list)) histograms = [] for intkeys, counts in zip(intkeys_list, counters): histograms += [OrderedDict()] if not intkeys: continue if normalize: N = sum(counts[c] for c in intkeys) for c in intkeys: counts[c] = float(counts[c]) / N if cumulative: for i in range(min_bin, max_bin + 1): histograms[-1][i] = counts.get(i, 0) + histograms[-1].get(i - 1, 0) else: for i in range(min_bin, max_bin + 1): histograms[-1][i] = counts.get(i, 0) if not histograms: histograms = [OrderedDict()] # fill in the zero counts between the integer bins of the histogram aligned_histograms = [] for i in range(min_bin, max_bin + 1): aligned_histograms += [tuple([i] + [hist.get(i, 0) for hist in histograms])] if to_str: # FIXME: add header row return str_from_table(aligned_histograms, sep=sep, max_rows=365 * 2 + 1) return aligned_histograms
[ "def", "hist_from_values_list", "(", "values_list", ",", "fillers", "=", "(", "None", ",", ")", ",", "normalize", "=", "False", ",", "cumulative", "=", "False", ",", "to_str", "=", "False", ",", "sep", "=", "','", ",", "min_bin", "=", "None", ",", "max_bin", "=", "None", ")", ":", "value_types", "=", "tuple", "(", "[", "int", ",", "float", "]", "+", "[", "type", "(", "filler", ")", "for", "filler", "in", "fillers", "]", ")", "if", "all", "(", "isinstance", "(", "value", ",", "value_types", ")", "for", "value", "in", "values_list", ")", ":", "# ignore all fillers and convert all floats to ints when doing counting", "counters", "=", "[", "Counter", "(", "int", "(", "value", ")", "for", "value", "in", "values_list", "if", "isinstance", "(", "value", ",", "(", "int", ",", "float", ")", ")", ")", "]", "elif", "all", "(", "len", "(", "row", ")", "==", "1", "for", "row", "in", "values_list", ")", "and", "all", "(", "isinstance", "(", "row", "[", "0", "]", ",", "value_types", ")", "for", "row", "in", "values_list", ")", ":", "return", "hist_from_values_list", "(", "[", "values", "[", "0", "]", "for", "values", "in", "values_list", "]", ",", "fillers", "=", "fillers", ",", "normalize", "=", "normalize", ",", "cumulative", "=", "cumulative", ",", "to_str", "=", "to_str", ",", "sep", "=", "sep", ",", "min_bin", "=", "min_bin", ",", "max_bin", "=", "max_bin", ")", "else", ":", "# assume it's a row-wise table (list of rows)", "return", "[", "hist_from_values_list", "(", "col", ",", "fillers", "=", "fillers", ",", "normalize", "=", "normalize", ",", "cumulative", "=", "cumulative", ",", "to_str", "=", "to_str", ",", "sep", "=", "sep", ",", "min_bin", "=", "min_bin", ",", "max_bin", "=", "max_bin", ")", "for", "col", "in", "transposed_matrix", "(", "values_list", ")", "]", "if", "not", "values_list", ":", "return", "[", "]", "intkeys_list", "=", "[", "[", "c", "for", "c", "in", "counts", "if", "(", "isinstance", "(", "c", ",", "int", ")", "or", "(", "isinstance", "(", "c", ",", "float", ")", "and", "int", "(", "c", ")", "==", "c", ")", ")", "]", "for", "counts", "in", "counters", "]", "try", ":", "min_bin", "=", "int", "(", "min_bin", ")", "except", "(", "IndexError", ",", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "min_bin", "=", "min", "(", "min", "(", "intkeys", ")", "for", "intkeys", "in", "intkeys_list", ")", "try", ":", "max_bin", "=", "int", "(", "max_bin", ")", "except", "(", "IndexError", ",", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "max_bin", "=", "max", "(", "max", "(", "intkeys", ")", "for", "intkeys", "in", "intkeys_list", ")", "# FIXME: this looks slow and hazardous (like it's ignore min/max bin):", "# TODO: reuse min(intkeys)", "min_bin", "=", "max", "(", "min_bin", ",", "min", "(", "(", "min", "(", "intkeys", ")", "if", "intkeys", "else", "0", ")", "for", "intkeys", "in", "intkeys_list", ")", ")", "max_bin", "=", "min", "(", "max_bin", ",", "max", "(", "(", "max", "(", "intkeys", ")", "if", "intkeys", "else", "0", ")", "for", "intkeys", "in", "intkeys_list", ")", ")", "histograms", "=", "[", "]", "for", "intkeys", ",", "counts", "in", "zip", "(", "intkeys_list", ",", "counters", ")", ":", "histograms", "+=", "[", "OrderedDict", "(", ")", "]", "if", "not", "intkeys", ":", "continue", "if", "normalize", ":", "N", "=", "sum", "(", "counts", "[", "c", "]", "for", "c", "in", "intkeys", ")", "for", "c", "in", "intkeys", ":", "counts", "[", "c", "]", "=", "float", "(", "counts", "[", "c", "]", ")", "/", "N", "if", "cumulative", ":", "for", "i", 
"in", "range", "(", "min_bin", ",", "max_bin", "+", "1", ")", ":", "histograms", "[", "-", "1", "]", "[", "i", "]", "=", "counts", ".", "get", "(", "i", ",", "0", ")", "+", "histograms", "[", "-", "1", "]", ".", "get", "(", "i", "-", "1", ",", "0", ")", "else", ":", "for", "i", "in", "range", "(", "min_bin", ",", "max_bin", "+", "1", ")", ":", "histograms", "[", "-", "1", "]", "[", "i", "]", "=", "counts", ".", "get", "(", "i", ",", "0", ")", "if", "not", "histograms", ":", "histograms", "=", "[", "OrderedDict", "(", ")", "]", "# fill in the zero counts between the integer bins of the histogram", "aligned_histograms", "=", "[", "]", "for", "i", "in", "range", "(", "min_bin", ",", "max_bin", "+", "1", ")", ":", "aligned_histograms", "+=", "[", "tuple", "(", "[", "i", "]", "+", "[", "hist", ".", "get", "(", "i", ",", "0", ")", "for", "hist", "in", "histograms", "]", ")", "]", "if", "to_str", ":", "# FIXME: add header row", "return", "str_from_table", "(", "aligned_histograms", ",", "sep", "=", "sep", ",", "max_rows", "=", "365", "*", "2", "+", "1", ")", "return", "aligned_histograms" ]
Compute an emprical histogram, PMF or CDF in a list of lists or a csv string Only works for discrete (integer) values (doesn't bin real values). `fillers`: list or tuple of values to ignore in computing the histogram >>> hist_from_values_list([1,1,2,1,1,1,2,3,2,4,4,5,7,7,9]) # doctest: +NORMALIZE_WHITESPACE [(1, 5), (2, 3), (3, 1), (4, 2), (5, 1), (6, 0), (7, 2), (8, 0), (9, 1)] >>> hist_from_values_list([(1,9),(1,8),(2,),(1,),(1,4),(2,5),(3,3),(5,0),(2,2)]) # doctest: +NORMALIZE_WHITESPACE [[(1, 4), (2, 3), (3, 1), (4, 0), (5, 1)], [(0, 1), (1, 0), ... (6, 0), (7, 0), (8, 1), (9, 1)]] >>> hist_from_values_list(transposed_matrix([(8,),(1,3,5),(2,),(3,4,5,8)])) # doctest: +NORMALIZE_WHITESPACE [[(8, 1)], [(1, 1), (2, 0), (3, 1), (4, 0), (5, 1)], [(2, 1)], [(3, 1), (4, 1), (5, 1), (6, 0), (7, 0), (8, 1)]]
[ "Compute", "an", "emprical", "histogram", "PMF", "or", "CDF", "in", "a", "list", "of", "lists", "or", "a", "csv", "string" ]
train
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1014-L1091
0.005083
nicodv/kmodes
kmodes/util/__init__.py
get_max_value_key
def get_max_value_key(dic): """Gets the key for the maximum value in a dict.""" v = np.array(list(dic.values())) k = np.array(list(dic.keys())) maxima = np.where(v == np.max(v))[0] if len(maxima) == 1: return k[maxima[0]] else: # In order to be consistent, always selects the minimum key # (guaranteed to be unique) when there are multiple maximum values. return k[maxima[np.argmin(k[maxima])]]
python
def get_max_value_key(dic): """Gets the key for the maximum value in a dict.""" v = np.array(list(dic.values())) k = np.array(list(dic.keys())) maxima = np.where(v == np.max(v))[0] if len(maxima) == 1: return k[maxima[0]] else: # In order to be consistent, always selects the minimum key # (guaranteed to be unique) when there are multiple maximum values. return k[maxima[np.argmin(k[maxima])]]
[ "def", "get_max_value_key", "(", "dic", ")", ":", "v", "=", "np", ".", "array", "(", "list", "(", "dic", ".", "values", "(", ")", ")", ")", "k", "=", "np", ".", "array", "(", "list", "(", "dic", ".", "keys", "(", ")", ")", ")", "maxima", "=", "np", ".", "where", "(", "v", "==", "np", ".", "max", "(", "v", ")", ")", "[", "0", "]", "if", "len", "(", "maxima", ")", "==", "1", ":", "return", "k", "[", "maxima", "[", "0", "]", "]", "else", ":", "# In order to be consistent, always selects the minimum key", "# (guaranteed to be unique) when there are multiple maximum values.", "return", "k", "[", "maxima", "[", "np", ".", "argmin", "(", "k", "[", "maxima", "]", ")", "]", "]" ]
Gets the key for the maximum value in a dict.
[ "Gets", "the", "key", "for", "the", "maximum", "value", "in", "a", "dict", "." ]
train
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/util/__init__.py#L12-L23
0.002217
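A quick usage sketch for get_max_value_key() in the record above, showing the tie-breaking behaviour described in its comment:

get_max_value_key({0: 4, 1: 7, 2: 3})   # -> 1, the key of the unique maximum
get_max_value_key({0: 5, 3: 5, 2: 1})   # -> 0, ties resolved to the smallest key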
MacHu-GWU/sqlalchemy_mate-project
sqlalchemy_mate/credential.py
EngineCreator.create_mysql_oursql
def create_mysql_oursql(self, **kwargs): """ :rtype: Engine """ return self._ce( self._ccs(self.DialectAndDriver.mysql_oursql), **kwargs )
python
def create_mysql_oursql(self, **kwargs): """ :rtype: Engine """ return self._ce( self._ccs(self.DialectAndDriver.mysql_oursql), **kwargs )
[ "def", "create_mysql_oursql", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_ce", "(", "self", ".", "_ccs", "(", "self", ".", "DialectAndDriver", ".", "mysql_oursql", ")", ",", "*", "*", "kwargs", ")" ]
:rtype: Engine
[ ":", "rtype", ":", "Engine" ]
train
https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L386-L392
0.010526
pyca/pynacl
src/nacl/bindings/crypto_core.py
crypto_core_ed25519_is_valid_point
def crypto_core_ed25519_is_valid_point(p): """ Check if ``p`` represents a point on the edwards25519 curve, in canonical form, on the main subgroup, and that the point doesn't have a small order. :param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence representing a point on the edwards25519 curve :type p: bytes :return: point validity :rtype: bool """ ensure(isinstance(p, bytes) and len(p) == crypto_core_ed25519_BYTES, 'Point must be a crypto_core_ed25519_BYTES long bytes sequence', raising=exc.TypeError) rc = lib.crypto_core_ed25519_is_valid_point(p) return rc == 1
python
def crypto_core_ed25519_is_valid_point(p): """ Check if ``p`` represents a point on the edwards25519 curve, in canonical form, on the main subgroup, and that the point doesn't have a small order. :param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence representing a point on the edwards25519 curve :type p: bytes :return: point validity :rtype: bool """ ensure(isinstance(p, bytes) and len(p) == crypto_core_ed25519_BYTES, 'Point must be a crypto_core_ed25519_BYTES long bytes sequence', raising=exc.TypeError) rc = lib.crypto_core_ed25519_is_valid_point(p) return rc == 1
[ "def", "crypto_core_ed25519_is_valid_point", "(", "p", ")", ":", "ensure", "(", "isinstance", "(", "p", ",", "bytes", ")", "and", "len", "(", "p", ")", "==", "crypto_core_ed25519_BYTES", ",", "'Point must be a crypto_core_ed25519_BYTES long bytes sequence'", ",", "raising", "=", "exc", ".", "TypeError", ")", "rc", "=", "lib", ".", "crypto_core_ed25519_is_valid_point", "(", "p", ")", "return", "rc", "==", "1" ]
Check if ``p`` represents a point on the edwards25519 curve, in canonical form, on the main subgroup, and that the point doesn't have a small order. :param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence representing a point on the edwards25519 curve :type p: bytes :return: point validity :rtype: bool
[ "Check", "if", "p", "represents", "a", "point", "on", "the", "edwards25519", "curve", "in", "canonical", "form", "on", "the", "main", "subgroup", "and", "that", "the", "point", "doesn", "t", "have", "a", "small", "order", "." ]
train
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_core.py#L25-L42
0.001493
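A hedged usage sketch for the binding above; it requires a libsodium build that exposes the ed25519 core functions, and the all-zero encoding is used only as a convenient example of input that is expected to be rejected:

from nacl.bindings import crypto_core_ed25519_is_valid_point

# 32 zero bytes encode a small-order point, so validation is expected to return False.
print(crypto_core_ed25519_is_valid_point(b"\x00" * 32))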
moonso/loqusdb
loqusdb/plugins/mongo/variant.py
VariantMixin.add_variant
def add_variant(self, variant): """Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary """ LOG.debug("Upserting variant: {0}".format(variant.get('_id'))) update = self._get_update(variant) message = self.db.variant.update_one( {'_id': variant['_id']}, update, upsert=True ) if message.modified_count == 1: LOG.debug("Variant %s was updated", variant.get('_id')) else: LOG.debug("Variant was added to database for first time") return
python
def add_variant(self, variant): """Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary """ LOG.debug("Upserting variant: {0}".format(variant.get('_id'))) update = self._get_update(variant) message = self.db.variant.update_one( {'_id': variant['_id']}, update, upsert=True ) if message.modified_count == 1: LOG.debug("Variant %s was updated", variant.get('_id')) else: LOG.debug("Variant was added to database for first time") return
[ "def", "add_variant", "(", "self", ",", "variant", ")", ":", "LOG", ".", "debug", "(", "\"Upserting variant: {0}\"", ".", "format", "(", "variant", ".", "get", "(", "'_id'", ")", ")", ")", "update", "=", "self", ".", "_get_update", "(", "variant", ")", "message", "=", "self", ".", "db", ".", "variant", ".", "update_one", "(", "{", "'_id'", ":", "variant", "[", "'_id'", "]", "}", ",", "update", ",", "upsert", "=", "True", ")", "if", "message", ".", "modified_count", "==", "1", ":", "LOG", ".", "debug", "(", "\"Variant %s was updated\"", ",", "variant", ".", "get", "(", "'_id'", ")", ")", "else", ":", "LOG", ".", "debug", "(", "\"Variant was added to database for first time\"", ")", "return" ]
Add a variant to the variant collection If the variant exists we update the count else we insert a new variant object. Args: variant (dict): A variant dictionary
[ "Add", "a", "variant", "to", "the", "variant", "collection", "If", "the", "variant", "exists", "we", "update", "the", "count", "else", "we", "insert", "a", "new", "variant", "object", ".", "Args", ":", "variant", "(", "dict", ")", ":", "A", "variant", "dictionary" ]
train
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L46-L68
0.01054
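add_variant() in the record above is an upsert with a counting update; a standalone pymongo sketch of the same pattern, where the $inc document merely stands in for the _get_update() helper that is not shown in this record, and the database and variant id are hypothetical:

from pymongo import MongoClient

variants = MongoClient()["loqusdb_demo"].variant      # hypothetical database name
variant = {"_id": "1_880086_T_C"}                      # hypothetical variant id

variants.update_one(
    {"_id": variant["_id"]},
    {"$inc": {"observations": 1}},   # bump the count on every new observation
    upsert=True,                     # the first observation inserts the document
)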
nameko/nameko
nameko/exceptions.py
get_module_path
def get_module_path(exc_type): """ Return the dotted module path of `exc_type`, including the class name. e.g.:: >>> get_module_path(MethodNotFound) >>> "nameko.exceptions.MethodNotFound" """ module = inspect.getmodule(exc_type) return "{}.{}".format(module.__name__, exc_type.__name__)
python
def get_module_path(exc_type): """ Return the dotted module path of `exc_type`, including the class name. e.g.:: >>> get_module_path(MethodNotFound) >>> "nameko.exceptions.MethodNotFound" """ module = inspect.getmodule(exc_type) return "{}.{}".format(module.__name__, exc_type.__name__)
[ "def", "get_module_path", "(", "exc_type", ")", ":", "module", "=", "inspect", ".", "getmodule", "(", "exc_type", ")", "return", "\"{}.{}\"", ".", "format", "(", "module", ".", "__name__", ",", "exc_type", ".", "__name__", ")" ]
Return the dotted module path of `exc_type`, including the class name. e.g.:: >>> get_module_path(MethodNotFound) >>> "nameko.exceptions.MethodNotFound"
[ "Return", "the", "dotted", "module", "path", "of", "exc_type", "including", "the", "class", "name", "." ]
train
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/exceptions.py#L38-L48
0.003077
PyCQA/pydocstyle
src/pydocstyle/violations.py
ErrorRegistry.get_error_codes
def get_error_codes(cls) -> Iterable[str]: """Yield all registered codes.""" for group in cls.groups: for error in group.errors: yield error.code
python
def get_error_codes(cls) -> Iterable[str]: """Yield all registered codes.""" for group in cls.groups: for error in group.errors: yield error.code
[ "def", "get_error_codes", "(", "cls", ")", "->", "Iterable", "[", "str", "]", ":", "for", "group", "in", "cls", ".", "groups", ":", "for", "error", "in", "group", ".", "errors", ":", "yield", "error", ".", "code" ]
Yield all registered codes.
[ "Yield", "all", "registered", "codes", "." ]
train
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/violations.py#L146-L150
0.010582
IBMStreams/pypi.streamsx
streamsx/rest.py
StreamsConnection.resource_url
def resource_url(self): """str: Root URL for IBM Streams REST API""" self._resource_url = self._resource_url or st.get_rest_api() return self._resource_url
python
def resource_url(self): """str: Root URL for IBM Streams REST API""" self._resource_url = self._resource_url or st.get_rest_api() return self._resource_url
[ "def", "resource_url", "(", "self", ")", ":", "self", ".", "_resource_url", "=", "self", ".", "_resource_url", "or", "st", ".", "get_rest_api", "(", ")", "return", "self", ".", "_resource_url" ]
str: Root URL for IBM Streams REST API
[ "str", ":", "Root", "URL", "for", "IBM", "Streams", "REST", "API" ]
train
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest.py#L139-L142
0.011173
UCL-INGI/INGInious
inginious/agent/docker_agent/__init__.py
DockerAgent.handle_student_job_closing
async def handle_student_job_closing(self, container_id, retval): """ Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the associated grading container """ try: self._logger.debug("Closing student %s", container_id) try: job_id, parent_container_id, socket_id, write_stream = self._student_containers_running[container_id] del self._student_containers_running[container_id] except asyncio.CancelledError: raise except: self._logger.warning("Student container %s that has finished(p1) was not launched by this agent", str(container_id), exc_info=True) return # Delete remaining student containers if job_id in self._student_containers_for_job: # if it does not exists, then the parent container has closed self._student_containers_for_job[job_id].remove(container_id) killed = await self._timeout_watcher.was_killed(container_id) if container_id in self._containers_killed: killed = self._containers_killed[container_id] del self._containers_killed[container_id] if killed == "timeout": retval = 253 elif killed == "overflow": retval = 252 try: await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": retval, "socket_id": socket_id}) except asyncio.CancelledError: raise except: pass # parent container closed # Do not forget to remove the container try: await self._docker.remove_container(container_id) except asyncio.CancelledError: raise except: pass # ignore except asyncio.CancelledError: raise except: self._logger.exception("Exception in handle_student_job_closing")
python
async def handle_student_job_closing(self, container_id, retval): """ Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the associated grading container """ try: self._logger.debug("Closing student %s", container_id) try: job_id, parent_container_id, socket_id, write_stream = self._student_containers_running[container_id] del self._student_containers_running[container_id] except asyncio.CancelledError: raise except: self._logger.warning("Student container %s that has finished(p1) was not launched by this agent", str(container_id), exc_info=True) return # Delete remaining student containers if job_id in self._student_containers_for_job: # if it does not exists, then the parent container has closed self._student_containers_for_job[job_id].remove(container_id) killed = await self._timeout_watcher.was_killed(container_id) if container_id in self._containers_killed: killed = self._containers_killed[container_id] del self._containers_killed[container_id] if killed == "timeout": retval = 253 elif killed == "overflow": retval = 252 try: await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": retval, "socket_id": socket_id}) except asyncio.CancelledError: raise except: pass # parent container closed # Do not forget to remove the container try: await self._docker.remove_container(container_id) except asyncio.CancelledError: raise except: pass # ignore except asyncio.CancelledError: raise except: self._logger.exception("Exception in handle_student_job_closing")
[ "async", "def", "handle_student_job_closing", "(", "self", ",", "container_id", ",", "retval", ")", ":", "try", ":", "self", ".", "_logger", ".", "debug", "(", "\"Closing student %s\"", ",", "container_id", ")", "try", ":", "job_id", ",", "parent_container_id", ",", "socket_id", ",", "write_stream", "=", "self", ".", "_student_containers_running", "[", "container_id", "]", "del", "self", ".", "_student_containers_running", "[", "container_id", "]", "except", "asyncio", ".", "CancelledError", ":", "raise", "except", ":", "self", ".", "_logger", ".", "warning", "(", "\"Student container %s that has finished(p1) was not launched by this agent\"", ",", "str", "(", "container_id", ")", ",", "exc_info", "=", "True", ")", "return", "# Delete remaining student containers", "if", "job_id", "in", "self", ".", "_student_containers_for_job", ":", "# if it does not exists, then the parent container has closed", "self", ".", "_student_containers_for_job", "[", "job_id", "]", ".", "remove", "(", "container_id", ")", "killed", "=", "await", "self", ".", "_timeout_watcher", ".", "was_killed", "(", "container_id", ")", "if", "container_id", "in", "self", ".", "_containers_killed", ":", "killed", "=", "self", ".", "_containers_killed", "[", "container_id", "]", "del", "self", ".", "_containers_killed", "[", "container_id", "]", "if", "killed", "==", "\"timeout\"", ":", "retval", "=", "253", "elif", "killed", "==", "\"overflow\"", ":", "retval", "=", "252", "try", ":", "await", "self", ".", "_write_to_container_stdin", "(", "write_stream", ",", "{", "\"type\"", ":", "\"run_student_retval\"", ",", "\"retval\"", ":", "retval", ",", "\"socket_id\"", ":", "socket_id", "}", ")", "except", "asyncio", ".", "CancelledError", ":", "raise", "except", ":", "pass", "# parent container closed", "# Do not forget to remove the container", "try", ":", "await", "self", ".", "_docker", ".", "remove_container", "(", "container_id", ")", "except", "asyncio", ".", "CancelledError", ":", "raise", "except", ":", "pass", "# ignore", "except", "asyncio", ".", "CancelledError", ":", "raise", "except", ":", "self", ".", "_logger", ".", "exception", "(", "\"Exception in handle_student_job_closing\"", ")" ]
Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the associated grading container
[ "Handle", "a", "closing", "student", "container", ".", "Do", "some", "cleaning", "verify", "memory", "limits", "timeouts", "...", "and", "returns", "data", "to", "the", "associated", "grading", "container" ]
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/agent/docker_agent/__init__.py#L452-L499
0.005194
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/cluster.py
Cluster.update
def update(self): """Update this cluster. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_update_cluster] :end-before: [END bigtable_update_cluster] .. note:: Updates the ``serve_nodes``. If you'd like to change them before updating, reset the values via .. code:: python cluster.serve_nodes = 8 before calling :meth:`update`. :type location: :str:``CreationOnly`` :param location: The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects/<project>/locations/<zone>``. :type serve_nodes: :int :param serve_nodes: The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation. """ client = self._instance._client # We are passing `None` for second argument location. # Location is set only at the time of creation of a cluster # and can not be changed after cluster has been created. return client.instance_admin_client.update_cluster( self.name, self.serve_nodes, None )
python
def update(self): """Update this cluster. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_update_cluster] :end-before: [END bigtable_update_cluster] .. note:: Updates the ``serve_nodes``. If you'd like to change them before updating, reset the values via .. code:: python cluster.serve_nodes = 8 before calling :meth:`update`. :type location: :str:``CreationOnly`` :param location: The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects/<project>/locations/<zone>``. :type serve_nodes: :int :param serve_nodes: The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation. """ client = self._instance._client # We are passing `None` for second argument location. # Location is set only at the time of creation of a cluster # and can not be changed after cluster has been created. return client.instance_admin_client.update_cluster( self.name, self.serve_nodes, None )
[ "def", "update", "(", "self", ")", ":", "client", "=", "self", ".", "_instance", ".", "_client", "# We are passing `None` for second argument location.", "# Location is set only at the time of creation of a cluster", "# and can not be changed after cluster has been created.", "return", "client", ".", "instance_admin_client", ".", "update_cluster", "(", "self", ".", "name", ",", "self", ".", "serve_nodes", ",", "None", ")" ]
Update this cluster. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_update_cluster] :end-before: [END bigtable_update_cluster] .. note:: Updates the ``serve_nodes``. If you'd like to change them before updating, reset the values via .. code:: python cluster.serve_nodes = 8 before calling :meth:`update`. :type location: :str:``CreationOnly`` :param location: The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects/<project>/locations/<zone>``. :type serve_nodes: :int :param serve_nodes: The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation.
[ "Update", "this", "cluster", "." ]
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/cluster.py#L269-L311
0.001265
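A minimal usage sketch for the Cluster.update record above. It assumes the usual google-cloud-bigtable admin client and accessors (Client, instance(), cluster(), reload(), Operation.result()), which are not shown in the record; the project, instance and cluster IDs are placeholders.
from google.cloud import bigtable

client = bigtable.Client(project="my-project", admin=True)  # placeholder project id
instance = client.instance("my-instance")                   # placeholder instance id
cluster = instance.cluster("my-cluster")                    # placeholder cluster id
cluster.reload()              # pull the cluster's current state from the API
cluster.serve_nodes = 8       # change the node count before calling update(), as the docstring notes
operation = cluster.update()  # long-running operation wrapping UpdateCluster
operation.result()            # block until the resize completes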
Ouranosinc/xclim
xclim/checks.py
valid_daily_mean_discharge
def valid_daily_mean_discharge(comp): r"""Decorator to check that a computation runs on valid discharge data.""" @wraps(comp) def func(q, **kwds): check_valid_discharge(q) return comp(q, **kwds) return func
python
def valid_daily_mean_discharge(comp): r"""Decorator to check that a computation runs on valid discharge data.""" @wraps(comp) def func(q, **kwds): check_valid_discharge(q) return comp(q, **kwds) return func
[ "def", "valid_daily_mean_discharge", "(", "comp", ")", ":", "@", "wraps", "(", "comp", ")", "def", "func", "(", "q", ",", "*", "*", "kwds", ")", ":", "check_valid_discharge", "(", "q", ")", "return", "comp", "(", "q", ",", "*", "*", "kwds", ")", "return", "func" ]
r"""Decorator to check that a computation runs on valid discharge data.
[ "r", "Decorator", "to", "check", "that", "a", "computation", "runs", "on", "valid", "discharge", "data", "." ]
train
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/checks.py#L121-L129
0.004167
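A sketch of how the valid_daily_mean_discharge decorator above is applied in practice; the wrapped indicator below is invented for illustration and is not part of xclim.
from xclim.checks import valid_daily_mean_discharge  # import path taken from the record

@valid_daily_mean_discharge
def annual_max_discharge(q, freq="YS"):
    """Yearly maximum of daily mean discharge (illustrative indicator only)."""
    return q.resample(time=freq).max()

# Calling annual_max_discharge(q) now runs check_valid_discharge(q) first, so q must be a
# properly annotated daily streamflow DataArray; extra options have to be passed by keyword
# because the wrapper forwards them as **kwds.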
MacHu-GWU/single_file_module-project
sfm/ziplib.py
decompress
def decompress(obj, return_type="bytes"): """ De-compress it to it's original. :param obj: Compressed object, could be bytes or str. :param return_type: if bytes, then return bytes; if str, then use base64.b64decode; if obj, then use pickle.loads return an object. """ if isinstance(obj, binary_type): b = zlib.decompress(obj) elif isinstance(obj, string_types): b = zlib.decompress(base64.b64decode(obj.encode("utf-8"))) else: raise TypeError("input cannot be anything other than str and bytes!") if return_type == "bytes": return b elif return_type == "str": return b.decode("utf-8") elif return_type == "obj": return pickle.loads(b) else: raise ValueError( "'return_type' has to be one of 'bytes', 'str' or 'obj'!")
python
def decompress(obj, return_type="bytes"): """ De-compress it to it's original. :param obj: Compressed object, could be bytes or str. :param return_type: if bytes, then return bytes; if str, then use base64.b64decode; if obj, then use pickle.loads return an object. """ if isinstance(obj, binary_type): b = zlib.decompress(obj) elif isinstance(obj, string_types): b = zlib.decompress(base64.b64decode(obj.encode("utf-8"))) else: raise TypeError("input cannot be anything other than str and bytes!") if return_type == "bytes": return b elif return_type == "str": return b.decode("utf-8") elif return_type == "obj": return pickle.loads(b) else: raise ValueError( "'return_type' has to be one of 'bytes', 'str' or 'obj'!")
[ "def", "decompress", "(", "obj", ",", "return_type", "=", "\"bytes\"", ")", ":", "if", "isinstance", "(", "obj", ",", "binary_type", ")", ":", "b", "=", "zlib", ".", "decompress", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "string_types", ")", ":", "b", "=", "zlib", ".", "decompress", "(", "base64", ".", "b64decode", "(", "obj", ".", "encode", "(", "\"utf-8\"", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "\"input cannot be anything other than str and bytes!\"", ")", "if", "return_type", "==", "\"bytes\"", ":", "return", "b", "elif", "return_type", "==", "\"str\"", ":", "return", "b", ".", "decode", "(", "\"utf-8\"", ")", "elif", "return_type", "==", "\"obj\"", ":", "return", "pickle", ".", "loads", "(", "b", ")", "else", ":", "raise", "ValueError", "(", "\"'return_type' has to be one of 'bytes', 'str' or 'obj'!\"", ")" ]
De-compress it to it's original. :param obj: Compressed object, could be bytes or str. :param return_type: if bytes, then return bytes; if str, then use base64.b64decode; if obj, then use pickle.loads return an object.
[ "De", "-", "compress", "it", "to", "it", "s", "original", "." ]
train
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L76-L99
0.001185
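A round-trip sketch for the decompress helper above, building its two documented input forms directly with zlib/base64/pickle; the import path is inferred from the record's file path.
import base64
import pickle
import zlib

from sfm.ziplib import decompress  # inferred from sfm/ziplib.py

payload = {"answer": 42}
as_bytes = zlib.compress(pickle.dumps(payload))      # the bytes form the docstring describes
as_str = base64.b64encode(as_bytes).decode("utf-8")  # the str form: base64 text of the same data

assert decompress(as_bytes, return_type="obj") == payload
assert decompress(as_str, return_type="obj") == payload
print(decompress(zlib.compress(b"hello"), return_type="str"))  # -> hello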
etcher-be/emiz
emiz/avwx/remarks.py
pressure_tendency
def pressure_tendency(code: str, unit: str = 'mb') -> str: """ Translates a 5-digit pressure outlook code Ex: 50123 -> 12.3 mb: Increasing, then decreasing """ width, precision = int(code[2:4]), code[4] return ('3-hour pressure difference: +/- ' f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}')
python
def pressure_tendency(code: str, unit: str = 'mb') -> str: """ Translates a 5-digit pressure outlook code Ex: 50123 -> 12.3 mb: Increasing, then decreasing """ width, precision = int(code[2:4]), code[4] return ('3-hour pressure difference: +/- ' f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}')
[ "def", "pressure_tendency", "(", "code", ":", "str", ",", "unit", ":", "str", "=", "'mb'", ")", "->", "str", ":", "width", ",", "precision", "=", "int", "(", "code", "[", "2", ":", "4", "]", ")", ",", "code", "[", "4", "]", "return", "(", "'3-hour pressure difference: +/- '", "f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}'", ")" ]
Translates a 5-digit pressure outlook code Ex: 50123 -> 12.3 mb: Increasing, then decreasing
[ "Translates", "a", "5", "-", "digit", "pressure", "outlook", "code" ]
train
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/remarks.py#L31-L39
0.00289
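The docstring's own example spelled out as a call. The import path is inferred from the record's file path, and the expected wording comes from that example; the PRESSURE_TENDENCIES lookup table itself is not shown in the record.
from emiz.avwx.remarks import pressure_tendency  # inferred from emiz/avwx/remarks.py

# In '50123', the second digit ('0') selects the tendency text, while '12' and '3' give 12.3 mb
print(pressure_tendency("50123"))
# expected: 3-hour pressure difference: +/- 12.3 mb - Increasing, then decreasing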
dask/dask-kubernetes
dask_kubernetes/objects.py
_set_k8s_attribute
def _set_k8s_attribute(obj, attribute, value): """ Set a specific value on a kubernetes object's attribute obj an object from Kubernetes Python API client attribute Should be a Kubernetes API style attribute (with camelCase) value Can be anything (string, list, dict, k8s objects) that can be accepted by the k8s python client """ current_value = None attribute_name = None # All k8s python client objects have an 'attribute_map' property # which has as keys python style attribute names (api_client) # and as values the kubernetes JSON API style attribute names # (apiClient). We want to allow users to use the JSON API style attribute # names only. for python_attribute, json_attribute in obj.attribute_map.items(): if json_attribute == attribute: attribute_name = python_attribute break else: raise ValueError('Attribute must be one of {}'.format(obj.attribute_map.values())) if hasattr(obj, attribute_name): current_value = getattr(obj, attribute_name) if current_value is not None: # This will ensure that current_value is something JSONable, # so a dict, list, or scalar current_value = SERIALIZATION_API_CLIENT.sanitize_for_serialization( current_value ) if isinstance(current_value, dict): # Deep merge our dictionaries! setattr(obj, attribute_name, merge_dictionaries(current_value, value)) elif isinstance(current_value, list): # Just append lists setattr(obj, attribute_name, current_value + value) else: # Replace everything else setattr(obj, attribute_name, value)
python
def _set_k8s_attribute(obj, attribute, value): """ Set a specific value on a kubernetes object's attribute obj an object from Kubernetes Python API client attribute Should be a Kubernetes API style attribute (with camelCase) value Can be anything (string, list, dict, k8s objects) that can be accepted by the k8s python client """ current_value = None attribute_name = None # All k8s python client objects have an 'attribute_map' property # which has as keys python style attribute names (api_client) # and as values the kubernetes JSON API style attribute names # (apiClient). We want to allow users to use the JSON API style attribute # names only. for python_attribute, json_attribute in obj.attribute_map.items(): if json_attribute == attribute: attribute_name = python_attribute break else: raise ValueError('Attribute must be one of {}'.format(obj.attribute_map.values())) if hasattr(obj, attribute_name): current_value = getattr(obj, attribute_name) if current_value is not None: # This will ensure that current_value is something JSONable, # so a dict, list, or scalar current_value = SERIALIZATION_API_CLIENT.sanitize_for_serialization( current_value ) if isinstance(current_value, dict): # Deep merge our dictionaries! setattr(obj, attribute_name, merge_dictionaries(current_value, value)) elif isinstance(current_value, list): # Just append lists setattr(obj, attribute_name, current_value + value) else: # Replace everything else setattr(obj, attribute_name, value)
[ "def", "_set_k8s_attribute", "(", "obj", ",", "attribute", ",", "value", ")", ":", "current_value", "=", "None", "attribute_name", "=", "None", "# All k8s python client objects have an 'attribute_map' property", "# which has as keys python style attribute names (api_client)", "# and as values the kubernetes JSON API style attribute names", "# (apiClient). We want to allow users to use the JSON API style attribute", "# names only.", "for", "python_attribute", ",", "json_attribute", "in", "obj", ".", "attribute_map", ".", "items", "(", ")", ":", "if", "json_attribute", "==", "attribute", ":", "attribute_name", "=", "python_attribute", "break", "else", ":", "raise", "ValueError", "(", "'Attribute must be one of {}'", ".", "format", "(", "obj", ".", "attribute_map", ".", "values", "(", ")", ")", ")", "if", "hasattr", "(", "obj", ",", "attribute_name", ")", ":", "current_value", "=", "getattr", "(", "obj", ",", "attribute_name", ")", "if", "current_value", "is", "not", "None", ":", "# This will ensure that current_value is something JSONable,", "# so a dict, list, or scalar", "current_value", "=", "SERIALIZATION_API_CLIENT", ".", "sanitize_for_serialization", "(", "current_value", ")", "if", "isinstance", "(", "current_value", ",", "dict", ")", ":", "# Deep merge our dictionaries!", "setattr", "(", "obj", ",", "attribute_name", ",", "merge_dictionaries", "(", "current_value", ",", "value", ")", ")", "elif", "isinstance", "(", "current_value", ",", "list", ")", ":", "# Just append lists", "setattr", "(", "obj", ",", "attribute_name", ",", "current_value", "+", "value", ")", "else", ":", "# Replace everything else", "setattr", "(", "obj", ",", "attribute_name", ",", "value", ")" ]
Set a specific value on a kubernetes object's attribute obj an object from Kubernetes Python API client attribute Should be a Kubernetes API style attribute (with camelCase) value Can be anything (string, list, dict, k8s objects) that can be accepted by the k8s python client
[ "Set", "a", "specific", "value", "on", "a", "kubernetes", "object", "s", "attribute" ]
train
https://github.com/dask/dask-kubernetes/blob/8a4883ecd902460b446bb1f43ed97efe398a135e/dask_kubernetes/objects.py#L20-L64
0.001154
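A small sketch of the merge semantics described above: dicts deep-merge, lists append, scalars replace. _set_k8s_attribute is a private helper, so calling it directly is purely illustrative; the pod is built with the standard kubernetes client models, and the result shown in the comment assumes merge_dictionaries deep-merges as its name suggests.
from kubernetes import client
from dask_kubernetes.objects import _set_k8s_attribute  # private helper, imported only to illustrate

pod = client.V1Pod(metadata=client.V1ObjectMeta(labels={"app": "dask"}))
# 'metadata' already holds a value, so the extra labels are deep-merged into the existing dict
_set_k8s_attribute(pod, "metadata", {"labels": {"team": "analytics"}})
print(pod.metadata)  # expected: {'labels': {'app': 'dask', 'team': 'analytics'}}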
dpgaspar/Flask-AppBuilder
flask_appbuilder/views.py
RestCRUDView.show_item_dict
def show_item_dict(self, item): """Returns a json-able dict for show""" d = {} for col in self.show_columns: v = getattr(item, col) if not isinstance(v, (int, float, string_types)): v = str(v) d[col] = v return d
python
def show_item_dict(self, item): """Returns a json-able dict for show""" d = {} for col in self.show_columns: v = getattr(item, col) if not isinstance(v, (int, float, string_types)): v = str(v) d[col] = v return d
[ "def", "show_item_dict", "(", "self", ",", "item", ")", ":", "d", "=", "{", "}", "for", "col", "in", "self", ".", "show_columns", ":", "v", "=", "getattr", "(", "item", ",", "col", ")", "if", "not", "isinstance", "(", "v", ",", "(", "int", ",", "float", ",", "string_types", ")", ")", ":", "v", "=", "str", "(", "v", ")", "d", "[", "col", "]", "=", "v", "return", "d" ]
Returns a json-able dict for show
[ "Returns", "a", "json", "-", "able", "dict", "for", "show" ]
train
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/views.py#L268-L276
0.006757
ming060/robotframework-uiautomatorlibrary
uiautomatorlibrary/Mobile.py
Mobile.scroll_backward_vertically
def scroll_backward_vertically(self, steps=10, *args, **selectors): """ Perform scroll backward (vertically)action on the object which has *selectors* attributes. Return whether the object can be Scroll or not. See `Scroll Forward Vertically` for more details. """ return self.device(**selectors).scroll.vert.backward(steps=steps)
python
def scroll_backward_vertically(self, steps=10, *args, **selectors): """ Perform scroll backward (vertically)action on the object which has *selectors* attributes. Return whether the object can be Scroll or not. See `Scroll Forward Vertically` for more details. """ return self.device(**selectors).scroll.vert.backward(steps=steps)
[ "def", "scroll_backward_vertically", "(", "self", ",", "steps", "=", "10", ",", "*", "args", ",", "*", "*", "selectors", ")", ":", "return", "self", ".", "device", "(", "*", "*", "selectors", ")", ".", "scroll", ".", "vert", ".", "backward", "(", "steps", "=", "steps", ")" ]
Perform scroll backward (vertically)action on the object which has *selectors* attributes. Return whether the object can be Scroll or not. See `Scroll Forward Vertically` for more details.
[ "Perform", "scroll", "backward", "(", "vertically", ")", "action", "on", "the", "object", "which", "has", "*", "selectors", "*", "attributes", "." ]
train
https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L500-L508
0.007895
Alignak-monitoring/alignak
alignak/external_command.py
ExternalCommandManager.send_custom_host_notification
def send_custom_host_notification(self, host, options, author, comment): """DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment> :param host: host to send notif for :type host: alignak.object.host.Host :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None """ logger.warning("The external command 'SEND_CUSTOM_HOST_NOTIFICATION' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SEND_CUSTOM_HOST_NOTIFICATION: this command is not implemented!'))
python
def send_custom_host_notification(self, host, options, author, comment): """DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment> :param host: host to send notif for :type host: alignak.object.host.Host :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None """ logger.warning("The external command 'SEND_CUSTOM_HOST_NOTIFICATION' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SEND_CUSTOM_HOST_NOTIFICATION: this command is not implemented!'))
[ "def", "send_custom_host_notification", "(", "self", ",", "host", ",", "options", ",", "author", ",", "comment", ")", ":", "logger", ".", "warning", "(", "\"The external command 'SEND_CUSTOM_HOST_NOTIFICATION' \"", "\"is not currently implemented in Alignak. If you really need it, \"", "\"request for its implementation in the project repository: \"", "\"https://github.com/Alignak-monitoring/alignak\"", ")", "self", ".", "send_an_element", "(", "make_monitoring_log", "(", "'warning'", ",", "'SEND_CUSTOM_HOST_NOTIFICATION: this command is not implemented!'", ")", ")" ]
DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment> :param host: host to send notif for :type host: alignak.object.host.Host :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None
[ "DOES", "NOTHING", "(", "Should", "send", "a", "custom", "notification", ")", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3793-L3814
0.004695
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/connection_plugins/fireball.py
Connection.exec_command
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the remote host ''' vvv("EXEC COMMAND %s" % cmd) if self.runner.sudo and sudoable: raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).") data = dict( mode='command', cmd=cmd, tmp_path=tmp_path, executable=executable, ) data = utils.jsonify(data) data = utils.encrypt(self.key, data) self.socket.send(data) response = self.socket.recv() response = utils.decrypt(self.key, response) response = utils.parse_json(response) return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
python
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the remote host ''' vvv("EXEC COMMAND %s" % cmd) if self.runner.sudo and sudoable: raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).") data = dict( mode='command', cmd=cmd, tmp_path=tmp_path, executable=executable, ) data = utils.jsonify(data) data = utils.encrypt(self.key, data) self.socket.send(data) response = self.socket.recv() response = utils.decrypt(self.key, response) response = utils.parse_json(response) return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
[ "def", "exec_command", "(", "self", ",", "cmd", ",", "tmp_path", ",", "sudo_user", ",", "sudoable", "=", "False", ",", "executable", "=", "'/bin/sh'", ")", ":", "vvv", "(", "\"EXEC COMMAND %s\"", "%", "cmd", ")", "if", "self", ".", "runner", ".", "sudo", "and", "sudoable", ":", "raise", "errors", ".", "AnsibleError", "(", "\"fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).\"", ")", "data", "=", "dict", "(", "mode", "=", "'command'", ",", "cmd", "=", "cmd", ",", "tmp_path", "=", "tmp_path", ",", "executable", "=", "executable", ",", ")", "data", "=", "utils", ".", "jsonify", "(", "data", ")", "data", "=", "utils", ".", "encrypt", "(", "self", ".", "key", ",", "data", ")", "self", ".", "socket", ".", "send", "(", "data", ")", "response", "=", "self", ".", "socket", ".", "recv", "(", ")", "response", "=", "utils", ".", "decrypt", "(", "self", ".", "key", ",", "response", ")", "response", "=", "utils", ".", "parse_json", "(", "response", ")", "return", "(", "response", ".", "get", "(", "'rc'", ",", "None", ")", ",", "''", ",", "response", ".", "get", "(", "'stdout'", ",", "''", ")", ",", "response", ".", "get", "(", "'stderr'", ",", "''", ")", ")" ]
run a command on the remote host
[ "run", "a", "command", "on", "the", "remote", "host" ]
train
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/connection_plugins/fireball.py#L70-L92
0.010369
chapel-lang/sphinxcontrib-chapeldomain
sphinxcontrib/chapeldomain.py
ChapelDomain._make_module_refnode
def _make_module_refnode(self, builder, fromdocname, name, contnode): """Helper function to generate new xref node based on current environment. """ # Get additional info for modules. docname, synopsis, platform, deprecated = self.data['modules'][name] title = name if synopsis: title += ': ' + synopsis if deprecated: title += _(' (deprecated)') if platform: title += ' (' + platform + ')' return make_refnode(builder, fromdocname, docname, 'module-' + name, contnode, title)
python
def _make_module_refnode(self, builder, fromdocname, name, contnode): """Helper function to generate new xref node based on current environment. """ # Get additional info for modules. docname, synopsis, platform, deprecated = self.data['modules'][name] title = name if synopsis: title += ': ' + synopsis if deprecated: title += _(' (deprecated)') if platform: title += ' (' + platform + ')' return make_refnode(builder, fromdocname, docname, 'module-' + name, contnode, title)
[ "def", "_make_module_refnode", "(", "self", ",", "builder", ",", "fromdocname", ",", "name", ",", "contnode", ")", ":", "# Get additional info for modules.", "docname", ",", "synopsis", ",", "platform", ",", "deprecated", "=", "self", ".", "data", "[", "'modules'", "]", "[", "name", "]", "title", "=", "name", "if", "synopsis", ":", "title", "+=", "': '", "+", "synopsis", "if", "deprecated", ":", "title", "+=", "_", "(", "' (deprecated)'", ")", "if", "platform", ":", "title", "+=", "' ('", "+", "platform", "+", "')'", "return", "make_refnode", "(", "builder", ",", "fromdocname", ",", "docname", ",", "'module-'", "+", "name", ",", "contnode", ",", "title", ")" ]
Helper function to generate new xref node based on current environment.
[ "Helper", "function", "to", "generate", "new", "xref", "node", "based", "on", "current", "environment", "." ]
train
https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L975-L989
0.003226
Esri/ArcREST
src/arcrest/ags/_imageservice.py
ImageService.getSamples
def getSamples(self,geometry,geometryType="esriGeometryPoint", sampleDistance=None,sampleCount=None,mosaicRule=None, pixelSize=None,returnFirstValueOnly=None,interpolation=None, outFields=None): """ The getSamples operation is performed on an image service resource. The getSamples operation is supported by both mosaic dataset and raster dataset image services. The result of this operation includes sample point locations, pixel values, and corresponding spatial resolutions of the source data for a given geometry. When the input geometry is a polyline, envelope, or polygon, sampling is based on sampleCount or sampleDistance; when the input geometry is a point or multipoint, the point or points are used directly. The number of sample locations in the response is based on the sampleDistance or sampleCount parameter and cannot exceed the limit of the image service (the default is 1000, which is an approximate limit). Inputs: geometry - A geometry that defines the location(s) to be sampled. The structure of the geometry is the same as the structure of the JSON geometry objects returned by the ArcGIS REST API. Applicable geometry types are point, multipoint, polyline, polygon, and envelope. When spatialReference is omitted in the input geometry, it will be assumed to be the spatial reference of the image service. geometryType - The type of geometry specified by the geometry parameter. The geometry type can be point, multipoint, polyline, polygon, or envelope. Values: esriGeometryPoint | esriGeometryMultipoint | esriGeometryPolyline | esriGeometryPolygon | esriGeometryEnvelope sampleDistance - The distance interval used to sample points from the provided path. The unit is the same as the input geometry. If neither sampleCount nor sampleDistance is provided, no densification can be done for paths (polylines), and a default sampleCount (100) is used for areas (polygons or envelopes). sampleCount - The approximate number of sample locations from the provided path. If neither sampleCount nor sampleDistance is provided, no densification can be done for paths (polylines), and a default sampleCount (100) is used for areas (polygons or envelopes). mosaicRule - Specifies the mosaic rule defining the image sort order. Additional filtering can be applied to the where clause and FIDs of a mosaic rule. pixelSize - The raster that is visible at the specified pixel size in the mosaic dataset will be used for sampling. If pixelSize is not specified, the service's pixel size is used. The structure of the esri_codephpixelSize parameter is the same as the structure of the point object returned by the ArcGIS REST API. In addition to the JSON structure, you can specify the pixel size with a simple comma-separated syntax. returnFirstValueOnly - Indicates whether to return all values at a point, or return the first non-NoData value based on the current mosaic rule. The default is true. interpolation - This parameter was added at 10.3. The resampling method. Default is nearest neighbor. outFields - This parameter was added at 10.3. The list of fields to be included in the response. This list is a comma-delimited list of field names. You can also specify the wildcard character (*) as the value of this parameter to include all the field values in the results. """ url = self._url + "/getSamples" params = { "f" : "json", "geometry" : geometry, "geometryType": geometryType } if not sampleDistance is None: params["sampleDistance"] = sampleDistance if not sampleCount is None: params["sampleCount"] = sampleCount if not mosaicRule is None: params["mosaicRule"] = mosaicRule if not pixelSize is None: params["pixelSize"] = pixelSize if not returnFirstValueOnly is None: params["returnFirstValueOnly"] = returnFirstValueOnly if not interpolation is None: params["interpolation"] = interpolation if not outFields is None: params["outFields"] = outFields return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
python
def getSamples(self,geometry,geometryType="esriGeometryPoint", sampleDistance=None,sampleCount=None,mosaicRule=None, pixelSize=None,returnFirstValueOnly=None,interpolation=None, outFields=None): """ The getSamples operation is performed on an image service resource. The getSamples operation is supported by both mosaic dataset and raster dataset image services. The result of this operation includes sample point locations, pixel values, and corresponding spatial resolutions of the source data for a given geometry. When the input geometry is a polyline, envelope, or polygon, sampling is based on sampleCount or sampleDistance; when the input geometry is a point or multipoint, the point or points are used directly. The number of sample locations in the response is based on the sampleDistance or sampleCount parameter and cannot exceed the limit of the image service (the default is 1000, which is an approximate limit). Inputs: geometry - A geometry that defines the location(s) to be sampled. The structure of the geometry is the same as the structure of the JSON geometry objects returned by the ArcGIS REST API. Applicable geometry types are point, multipoint, polyline, polygon, and envelope. When spatialReference is omitted in the input geometry, it will be assumed to be the spatial reference of the image service. geometryType - The type of geometry specified by the geometry parameter. The geometry type can be point, multipoint, polyline, polygon, or envelope. Values: esriGeometryPoint | esriGeometryMultipoint | esriGeometryPolyline | esriGeometryPolygon | esriGeometryEnvelope sampleDistance - The distance interval used to sample points from the provided path. The unit is the same as the input geometry. If neither sampleCount nor sampleDistance is provided, no densification can be done for paths (polylines), and a default sampleCount (100) is used for areas (polygons or envelopes). sampleCount - The approximate number of sample locations from the provided path. If neither sampleCount nor sampleDistance is provided, no densification can be done for paths (polylines), and a default sampleCount (100) is used for areas (polygons or envelopes). mosaicRule - Specifies the mosaic rule defining the image sort order. Additional filtering can be applied to the where clause and FIDs of a mosaic rule. pixelSize - The raster that is visible at the specified pixel size in the mosaic dataset will be used for sampling. If pixelSize is not specified, the service's pixel size is used. The structure of the esri_codephpixelSize parameter is the same as the structure of the point object returned by the ArcGIS REST API. In addition to the JSON structure, you can specify the pixel size with a simple comma-separated syntax. returnFirstValueOnly - Indicates whether to return all values at a point, or return the first non-NoData value based on the current mosaic rule. The default is true. interpolation - This parameter was added at 10.3. The resampling method. Default is nearest neighbor. outFields - This parameter was added at 10.3. The list of fields to be included in the response. This list is a comma-delimited list of field names. You can also specify the wildcard character (*) as the value of this parameter to include all the field values in the results. """ url = self._url + "/getSamples" params = { "f" : "json", "geometry" : geometry, "geometryType": geometryType } if not sampleDistance is None: params["sampleDistance"] = sampleDistance if not sampleCount is None: params["sampleCount"] = sampleCount if not mosaicRule is None: params["mosaicRule"] = mosaicRule if not pixelSize is None: params["pixelSize"] = pixelSize if not returnFirstValueOnly is None: params["returnFirstValueOnly"] = returnFirstValueOnly if not interpolation is None: params["interpolation"] = interpolation if not outFields is None: params["outFields"] = outFields return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "getSamples", "(", "self", ",", "geometry", ",", "geometryType", "=", "\"esriGeometryPoint\"", ",", "sampleDistance", "=", "None", ",", "sampleCount", "=", "None", ",", "mosaicRule", "=", "None", ",", "pixelSize", "=", "None", ",", "returnFirstValueOnly", "=", "None", ",", "interpolation", "=", "None", ",", "outFields", "=", "None", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/getSamples\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"geometry\"", ":", "geometry", ",", "\"geometryType\"", ":", "geometryType", "}", "if", "not", "sampleDistance", "is", "None", ":", "params", "[", "\"sampleDistance\"", "]", "=", "sampleDistance", "if", "not", "sampleCount", "is", "None", ":", "params", "[", "\"sampleCount\"", "]", "=", "sampleCount", "if", "not", "mosaicRule", "is", "None", ":", "params", "[", "\"mosaicRule\"", "]", "=", "mosaicRule", "if", "not", "pixelSize", "is", "None", ":", "params", "[", "\"pixelSize\"", "]", "=", "pixelSize", "if", "not", "returnFirstValueOnly", "is", "None", ":", "params", "[", "\"returnFirstValueOnly\"", "]", "=", "returnFirstValueOnly", "if", "not", "interpolation", "is", "None", ":", "params", "[", "\"interpolation\"", "]", "=", "interpolation", "if", "not", "outFields", "is", "None", ":", "params", "[", "\"outFields\"", "]", "=", "outFields", "return", "self", ".", "_get", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
The getSamples operation is performed on an image service resource. The getSamples operation is supported by both mosaic dataset and raster dataset image services. The result of this operation includes sample point locations, pixel values, and corresponding spatial resolutions of the source data for a given geometry. When the input geometry is a polyline, envelope, or polygon, sampling is based on sampleCount or sampleDistance; when the input geometry is a point or multipoint, the point or points are used directly. The number of sample locations in the response is based on the sampleDistance or sampleCount parameter and cannot exceed the limit of the image service (the default is 1000, which is an approximate limit). Inputs: geometry - A geometry that defines the location(s) to be sampled. The structure of the geometry is the same as the structure of the JSON geometry objects returned by the ArcGIS REST API. Applicable geometry types are point, multipoint, polyline, polygon, and envelope. When spatialReference is omitted in the input geometry, it will be assumed to be the spatial reference of the image service. geometryType - The type of geometry specified by the geometry parameter. The geometry type can be point, multipoint, polyline, polygon, or envelope. Values: esriGeometryPoint | esriGeometryMultipoint | esriGeometryPolyline | esriGeometryPolygon | esriGeometryEnvelope sampleDistance - The distance interval used to sample points from the provided path. The unit is the same as the input geometry. If neither sampleCount nor sampleDistance is provided, no densification can be done for paths (polylines), and a default sampleCount (100) is used for areas (polygons or envelopes). sampleCount - The approximate number of sample locations from the provided path. If neither sampleCount nor sampleDistance is provided, no densification can be done for paths (polylines), and a default sampleCount (100) is used for areas (polygons or envelopes). mosaicRule - Specifies the mosaic rule defining the image sort order. Additional filtering can be applied to the where clause and FIDs of a mosaic rule. pixelSize - The raster that is visible at the specified pixel size in the mosaic dataset will be used for sampling. If pixelSize is not specified, the service's pixel size is used. The structure of the esri_codephpixelSize parameter is the same as the structure of the point object returned by the ArcGIS REST API. In addition to the JSON structure, you can specify the pixel size with a simple comma-separated syntax. returnFirstValueOnly - Indicates whether to return all values at a point, or return the first non-NoData value based on the current mosaic rule. The default is true. interpolation - This parameter was added at 10.3. The resampling method. Default is nearest neighbor. outFields - This parameter was added at 10.3. The list of fields to be included in the response. This list is a comma-delimited list of field names. You can also specify the wildcard character (*) as the value of this parameter to include all the field values in the results.
[ "The", "getSamples", "operation", "is", "performed", "on", "an", "image", "service", "resource", ".", "The", "getSamples", "operation", "is", "supported", "by", "both", "mosaic", "dataset", "and", "raster", "dataset", "image", "services", ".", "The", "result", "of", "this", "operation", "includes", "sample", "point", "locations", "pixel", "values", "and", "corresponding", "spatial", "resolutions", "of", "the", "source", "data", "for", "a", "given", "geometry", ".", "When", "the", "input", "geometry", "is", "a", "polyline", "envelope", "or", "polygon", "sampling", "is", "based", "on", "sampleCount", "or", "sampleDistance", ";", "when", "the", "input", "geometry", "is", "a", "point", "or", "multipoint", "the", "point", "or", "points", "are", "used", "directly", ".", "The", "number", "of", "sample", "locations", "in", "the", "response", "is", "based", "on", "the", "sampleDistance", "or", "sampleCount", "parameter", "and", "cannot", "exceed", "the", "limit", "of", "the", "image", "service", "(", "the", "default", "is", "1000", "which", "is", "an", "approximate", "limit", ")", "." ]
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_imageservice.py#L1146-L1240
0.006397
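A usage sketch for ImageService.getSamples above. It assumes img_service is an already-constructed arcrest.ags.ImageService; the coordinates are made up, and the response keys ('samples', 'location', 'value') follow the public ArcGIS REST getSamples documentation rather than anything shown in the record.
# assume `img_service` is an already-constructed arcrest.ags.ImageService instance
point = {"x": -118.15, "y": 33.80, "spatialReference": {"wkid": 4326}}  # made-up sample location
resp = img_service.getSamples(
    geometry=point,
    geometryType="esriGeometryPoint",
    returnFirstValueOnly=True,
    outFields="*",
)
for sample in resp.get("samples", []):  # key names assumed from the ArcGIS REST docs
    print(sample.get("location"), sample.get("value"))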
dropbox/stone
stone/frontend/ir_generator.py
IRGenerator._add_imports_to_env
def _add_imports_to_env(self, raw_api): """ Scans raw parser output for import declarations. Checks if the imports are valid, and then creates a reference to the namespace in the environment. Args: raw_api (Tuple[Namespace, List[stone.stone.parser._Element]]): Namespace paired with raw parser output. """ for namespace, desc in raw_api: for item in desc: if isinstance(item, AstImport): if namespace.name == item.target: raise InvalidSpec('Cannot import current namespace.', item.lineno, item.path) if item.target not in self.api.namespaces: raise InvalidSpec( 'Namespace %s is not defined in any spec.' % quote(item.target), item.lineno, item.path) env = self._get_or_create_env(namespace.name) imported_env = self._get_or_create_env(item.target) if namespace.name in imported_env: # Block circular imports. The Python backend can't # easily generate code for circular references. raise InvalidSpec( 'Circular import of namespaces %s and %s ' 'detected.' % (quote(namespace.name), quote(item.target)), item.lineno, item.path) env[item.target] = imported_env
python
def _add_imports_to_env(self, raw_api): """ Scans raw parser output for import declarations. Checks if the imports are valid, and then creates a reference to the namespace in the environment. Args: raw_api (Tuple[Namespace, List[stone.stone.parser._Element]]): Namespace paired with raw parser output. """ for namespace, desc in raw_api: for item in desc: if isinstance(item, AstImport): if namespace.name == item.target: raise InvalidSpec('Cannot import current namespace.', item.lineno, item.path) if item.target not in self.api.namespaces: raise InvalidSpec( 'Namespace %s is not defined in any spec.' % quote(item.target), item.lineno, item.path) env = self._get_or_create_env(namespace.name) imported_env = self._get_or_create_env(item.target) if namespace.name in imported_env: # Block circular imports. The Python backend can't # easily generate code for circular references. raise InvalidSpec( 'Circular import of namespaces %s and %s ' 'detected.' % (quote(namespace.name), quote(item.target)), item.lineno, item.path) env[item.target] = imported_env
[ "def", "_add_imports_to_env", "(", "self", ",", "raw_api", ")", ":", "for", "namespace", ",", "desc", "in", "raw_api", ":", "for", "item", "in", "desc", ":", "if", "isinstance", "(", "item", ",", "AstImport", ")", ":", "if", "namespace", ".", "name", "==", "item", ".", "target", ":", "raise", "InvalidSpec", "(", "'Cannot import current namespace.'", ",", "item", ".", "lineno", ",", "item", ".", "path", ")", "if", "item", ".", "target", "not", "in", "self", ".", "api", ".", "namespaces", ":", "raise", "InvalidSpec", "(", "'Namespace %s is not defined in any spec.'", "%", "quote", "(", "item", ".", "target", ")", ",", "item", ".", "lineno", ",", "item", ".", "path", ")", "env", "=", "self", ".", "_get_or_create_env", "(", "namespace", ".", "name", ")", "imported_env", "=", "self", ".", "_get_or_create_env", "(", "item", ".", "target", ")", "if", "namespace", ".", "name", "in", "imported_env", ":", "# Block circular imports. The Python backend can't", "# easily generate code for circular references.", "raise", "InvalidSpec", "(", "'Circular import of namespaces %s and %s '", "'detected.'", "%", "(", "quote", "(", "namespace", ".", "name", ")", ",", "quote", "(", "item", ".", "target", ")", ")", ",", "item", ".", "lineno", ",", "item", ".", "path", ")", "env", "[", "item", ".", "target", "]", "=", "imported_env" ]
Scans raw parser output for import declarations. Checks if the imports are valid, and then creates a reference to the namespace in the environment. Args: raw_api (Tuple[Namespace, List[stone.stone.parser._Element]]): Namespace paired with raw parser output.
[ "Scans", "raw", "parser", "output", "for", "import", "declarations", ".", "Checks", "if", "the", "imports", "are", "valid", "and", "then", "creates", "a", "reference", "to", "the", "namespace", "in", "the", "environment", "." ]
train
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L422-L453
0.001212
sloria/read_env
read_env.py
parse_env
def parse_env(content): """Parse the content of a .env file (a line-delimited KEY=value format) into a dictionary mapping keys to values. """ values = {} for line in content.splitlines(): lexer = shlex.shlex(line, posix=True) tokens = list(lexer) # parses the assignment statement if len(tokens) < 3: continue name, op = tokens[:2] value = ''.join(tokens[2:]) if op != '=': continue if not _ITEM_RE.match(name): continue value = value.replace(r'\n', '\n') value = value.replace(r'\t', '\t') values[name] = value return values
python
def parse_env(content): """Parse the content of a .env file (a line-delimited KEY=value format) into a dictionary mapping keys to values. """ values = {} for line in content.splitlines(): lexer = shlex.shlex(line, posix=True) tokens = list(lexer) # parses the assignment statement if len(tokens) < 3: continue name, op = tokens[:2] value = ''.join(tokens[2:]) if op != '=': continue if not _ITEM_RE.match(name): continue value = value.replace(r'\n', '\n') value = value.replace(r'\t', '\t') values[name] = value return values
[ "def", "parse_env", "(", "content", ")", ":", "values", "=", "{", "}", "for", "line", "in", "content", ".", "splitlines", "(", ")", ":", "lexer", "=", "shlex", ".", "shlex", "(", "line", ",", "posix", "=", "True", ")", "tokens", "=", "list", "(", "lexer", ")", "# parses the assignment statement", "if", "len", "(", "tokens", ")", "<", "3", ":", "continue", "name", ",", "op", "=", "tokens", "[", ":", "2", "]", "value", "=", "''", ".", "join", "(", "tokens", "[", "2", ":", "]", ")", "if", "op", "!=", "'='", ":", "continue", "if", "not", "_ITEM_RE", ".", "match", "(", "name", ")", ":", "continue", "value", "=", "value", ".", "replace", "(", "r'\\n'", ",", "'\\n'", ")", "value", "=", "value", ".", "replace", "(", "r'\\t'", ",", "'\\t'", ")", "values", "[", "name", "]", "=", "value", "return", "values" ]
Parse the content of a .env file (a line-delimited KEY=value format) into a dictionary mapping keys to values.
[ "Parse", "the", "content", "of", "a", ".", "env", "file", "(", "a", "line", "-", "delimited", "KEY", "=", "value", "format", ")", "into", "a", "dictionary", "mapping", "keys", "to", "values", "." ]
train
https://github.com/sloria/read_env/blob/90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605/read_env.py#L51-L75
0.002954
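A quick sketch of the parse_env behaviour documented above: comments and malformed lines are skipped, quoting follows shell rules, and literal \n sequences are expanded to real newlines (quote the value so the shell-style lexer keeps the backslash). parse_env is assumed importable from the read_env module named in the record.
from read_env import parse_env  # module name taken from the record

content = '''
# comments and lines without NAME=value are ignored
DEBUG=true
SECRET_KEY='s3cr3t value'
GREETING="hello\\nworld"
'''
env = parse_env(content)
print(env["DEBUG"])       # true
print(env["SECRET_KEY"])  # s3cr3t value
print(env["GREETING"])    # hello / world on two lines, because the \n sequence was expanded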
PyPSA/PyPSA
pypsa/descriptors.py
get_switchable_as_dense
def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None): """ Return a Dataframe for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these components rather than network.components.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu') """ df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: index = index.intersection(inds) varying_i = varying_i.intersection(inds) fixed_i = fixed_i.intersection(inds) if snapshots is None: snapshots = network.snapshots return (pd.concat([ pd.DataFrame(np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0), index=snapshots, columns=fixed_i), pnl[attr].loc[snapshots, varying_i] ], axis=1, sort=False).reindex(columns=index))
python
def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None): """ Return a Dataframe for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these components rather than network.components.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu') """ df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: index = index.intersection(inds) varying_i = varying_i.intersection(inds) fixed_i = fixed_i.intersection(inds) if snapshots is None: snapshots = network.snapshots return (pd.concat([ pd.DataFrame(np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0), index=snapshots, columns=fixed_i), pnl[attr].loc[snapshots, varying_i] ], axis=1, sort=False).reindex(columns=index))
[ "def", "get_switchable_as_dense", "(", "network", ",", "component", ",", "attr", ",", "snapshots", "=", "None", ",", "inds", "=", "None", ")", ":", "df", "=", "network", ".", "df", "(", "component", ")", "pnl", "=", "network", ".", "pnl", "(", "component", ")", "index", "=", "df", ".", "index", "varying_i", "=", "pnl", "[", "attr", "]", ".", "columns", "fixed_i", "=", "df", ".", "index", ".", "difference", "(", "varying_i", ")", "if", "inds", "is", "not", "None", ":", "index", "=", "index", ".", "intersection", "(", "inds", ")", "varying_i", "=", "varying_i", ".", "intersection", "(", "inds", ")", "fixed_i", "=", "fixed_i", ".", "intersection", "(", "inds", ")", "if", "snapshots", "is", "None", ":", "snapshots", "=", "network", ".", "snapshots", "return", "(", "pd", ".", "concat", "(", "[", "pd", ".", "DataFrame", "(", "np", ".", "repeat", "(", "[", "df", ".", "loc", "[", "fixed_i", ",", "attr", "]", ".", "values", "]", ",", "len", "(", "snapshots", ")", ",", "axis", "=", "0", ")", ",", "index", "=", "snapshots", ",", "columns", "=", "fixed_i", ")", ",", "pnl", "[", "attr", "]", ".", "loc", "[", "snapshots", ",", "varying_i", "]", "]", ",", "axis", "=", "1", ",", "sort", "=", "False", ")", ".", "reindex", "(", "columns", "=", "index", ")", ")" ]
Return a Dataframe for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these components rather than network.components.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu')
[ "Return", "a", "Dataframe", "for", "a", "time", "-", "varying", "component", "attribute", "with", "values", "for", "all", "non", "-", "time", "-", "varying", "components", "filled", "in", "with", "the", "default", "values", "for", "the", "attribute", "." ]
train
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/descriptors.py#L152-L198
0.002789
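Expanding the docstring's one-line example above into a runnable sketch; it assumes a PyPSA version in which Network.add accepts a pandas Series for time-varying attributes.
import pandas as pd
import pypsa
from pypsa.descriptors import get_switchable_as_dense

n = pypsa.Network()
n.set_snapshots(pd.date_range("2019-01-01", periods=3, freq="H"))
n.add("Bus", "bus")
n.add("Generator", "wind", bus="bus", p_nom=10,
      p_max_pu=pd.Series([0.2, 0.5, 0.9], index=n.snapshots))  # time-varying generator
n.add("Generator", "gas", bus="bus", p_nom=10)                 # static generator, default p_max_pu
print(get_switchable_as_dense(n, "Generator", "p_max_pu"))
# one column per generator: 'wind' follows the series, 'gas' is filled with its static default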
ejhigson/nestcheck
nestcheck/data_processing.py
process_error_helper
def process_error_helper(root, base_dir, process_func, errors_to_handle=(), **func_kwargs): """Wrapper which applies process_func and handles some common errors so one bad run does not spoil the whole batch. Useful errors to handle include: OSError: if you are not sure if all the files exist AssertionError: if some of the many assertions fail for known reasons; for example is there are occasional problems decomposing runs into threads due to limited numerical precision in logls. Parameters ---------- root: str File root. base_dir: str Directory containing file. process_func: func Function for processing file. errors_to_handle: error type or tuple of error types Errors to catch without throwing an exception. func_kwargs: dict Kwargs to pass to process_func. Returns ------- run: dict Nested sampling run dict (see the module docstring for more details) or, if an error occured, a dict containing its type and the file root. """ try: return process_func(root, base_dir, **func_kwargs) except errors_to_handle as err: run = {'error': type(err).__name__, 'output': {'file_root': root}} return run
python
def process_error_helper(root, base_dir, process_func, errors_to_handle=(), **func_kwargs): """Wrapper which applies process_func and handles some common errors so one bad run does not spoil the whole batch. Useful errors to handle include: OSError: if you are not sure if all the files exist AssertionError: if some of the many assertions fail for known reasons; for example is there are occasional problems decomposing runs into threads due to limited numerical precision in logls. Parameters ---------- root: str File root. base_dir: str Directory containing file. process_func: func Function for processing file. errors_to_handle: error type or tuple of error types Errors to catch without throwing an exception. func_kwargs: dict Kwargs to pass to process_func. Returns ------- run: dict Nested sampling run dict (see the module docstring for more details) or, if an error occured, a dict containing its type and the file root. """ try: return process_func(root, base_dir, **func_kwargs) except errors_to_handle as err: run = {'error': type(err).__name__, 'output': {'file_root': root}} return run
[ "def", "process_error_helper", "(", "root", ",", "base_dir", ",", "process_func", ",", "errors_to_handle", "=", "(", ")", ",", "*", "*", "func_kwargs", ")", ":", "try", ":", "return", "process_func", "(", "root", ",", "base_dir", ",", "*", "*", "func_kwargs", ")", "except", "errors_to_handle", "as", "err", ":", "run", "=", "{", "'error'", ":", "type", "(", "err", ")", ".", "__name__", ",", "'output'", ":", "{", "'file_root'", ":", "root", "}", "}", "return", "run" ]
Wrapper which applies process_func and handles some common errors so one bad run does not spoil the whole batch. Useful errors to handle include: OSError: if you are not sure if all the files exist AssertionError: if some of the many assertions fail for known reasons; for example is there are occasional problems decomposing runs into threads due to limited numerical precision in logls. Parameters ---------- root: str File root. base_dir: str Directory containing file. process_func: func Function for processing file. errors_to_handle: error type or tuple of error types Errors to catch without throwing an exception. func_kwargs: dict Kwargs to pass to process_func. Returns ------- run: dict Nested sampling run dict (see the module docstring for more details) or, if an error occured, a dict containing its type and the file root.
[ "Wrapper", "which", "applies", "process_func", "and", "handles", "some", "common", "errors", "so", "one", "bad", "run", "does", "not", "spoil", "the", "whole", "batch", "." ]
train
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L172-L209
0.000761
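A usage sketch for process_error_helper above; the file root and directory are placeholders, process_polychord_run is assumed to be the companion reader in the same module, and the 'logl' key follows the nestcheck run format referenced by the docstring.
import nestcheck.data_processing as dp

run = dp.process_error_helper(
    "gaussian_3d_run_1",                     # placeholder file root
    "chains",                                # placeholder base_dir
    dp.process_polychord_run,                # assumed companion per-run processing function
    errors_to_handle=(OSError, AssertionError),
)
if "error" in run:
    print("skipped", run["output"]["file_root"], "because of", run["error"])
else:
    print("loaded run with", run["logl"].shape[0], "samples")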
HPENetworking/PYHPEIMC
archived/pyhpimc.py
get_vm_host_info
def get_vm_host_info(hostId): """ function takes hostId as input to RESTFUL call to HP IMC :param hostId: int or string of HostId of Hypervisor host :return:list of dictionatires contraining the VM Host information for the target hypervisor """ global r if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() get_vm_host_info_url = "/imcrs/vrm/host?hostId=" + str(hostId) f_url = url + get_vm_host_info_url payload = None r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents # print(r.status_code) if r.status_code == 200: if len(r.text) > 0: return json.loads(r.text) elif r.status_code == 204: print("Device is not a supported Hypervisor") return "Device is not a supported Hypervisor" else: print("An Error has occured")
python
def get_vm_host_info(hostId): """ function takes hostId as input to RESTFUL call to HP IMC :param hostId: int or string of HostId of Hypervisor host :return:list of dictionatires contraining the VM Host information for the target hypervisor """ global r if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() get_vm_host_info_url = "/imcrs/vrm/host?hostId=" + str(hostId) f_url = url + get_vm_host_info_url payload = None r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents # print(r.status_code) if r.status_code == 200: if len(r.text) > 0: return json.loads(r.text) elif r.status_code == 204: print("Device is not a supported Hypervisor") return "Device is not a supported Hypervisor" else: print("An Error has occured")
[ "def", "get_vm_host_info", "(", "hostId", ")", ":", "global", "r", "if", "auth", "is", "None", "or", "url", "is", "None", ":", "# checks to see if the imc credentials are already available", "set_imc_creds", "(", ")", "get_vm_host_info_url", "=", "\"/imcrs/vrm/host?hostId=\"", "+", "str", "(", "hostId", ")", "f_url", "=", "url", "+", "get_vm_host_info_url", "payload", "=", "None", "r", "=", "requests", ".", "get", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ")", "# creates the URL using the payload variable as the contents", "# print(r.status_code)", "if", "r", ".", "status_code", "==", "200", ":", "if", "len", "(", "r", ".", "text", ")", ">", "0", ":", "return", "json", ".", "loads", "(", "r", ".", "text", ")", "elif", "r", ".", "status_code", "==", "204", ":", "print", "(", "\"Device is not a supported Hypervisor\"", ")", "return", "\"Device is not a supported Hypervisor\"", "else", ":", "print", "(", "\"An Error has occured\"", ")" ]
function takes hostId as input to RESTFUL call to HP IMC :param hostId: int or string of HostId of Hypervisor host :return:list of dictionatires contraining the VM Host information for the target hypervisor
[ "function", "takes", "hostId", "as", "input", "to", "RESTFUL", "call", "to", "HP", "IMC", ":", "param", "hostId", ":", "int", "or", "string", "of", "HostId", "of", "Hypervisor", "host", ":", "return", ":", "list", "of", "dictionatires", "contraining", "the", "VM", "Host", "information", "for", "the", "target", "hypervisor" ]
train
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/archived/pyhpimc.py#L642-L664
0.004115
rhayes777/PyAutoFit
autofit/tools/pipeline.py
ResultsCollection.from_phase
def from_phase(self, phase_name): """ Returns the result of a previous phase by its name Parameters ---------- phase_name: str The name of a previous phase Returns ------- result: Result The result of that phase Raises ------ exc.PipelineException If no phase with the expected result is found """ try: return self.__result_dict[phase_name] except KeyError: raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join( self.__result_dict.keys())))
python
def from_phase(self, phase_name): """ Returns the result of a previous phase by its name Parameters ---------- phase_name: str The name of a previous phase Returns ------- result: Result The result of that phase Raises ------ exc.PipelineException If no phase with the expected result is found """ try: return self.__result_dict[phase_name] except KeyError: raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join( self.__result_dict.keys())))
[ "def", "from_phase", "(", "self", ",", "phase_name", ")", ":", "try", ":", "return", "self", ".", "__result_dict", "[", "phase_name", "]", "except", "KeyError", ":", "raise", "exc", ".", "PipelineException", "(", "\"No previous phase named {} found in results ({})\"", ".", "format", "(", "phase_name", ",", "\", \"", ".", "join", "(", "self", ".", "__result_dict", ".", "keys", "(", ")", ")", ")", ")" ]
Returns the result of a previous phase by its name Parameters ---------- phase_name: str The name of a previous phase Returns ------- result: Result The result of that phase Raises ------ exc.PipelineException If no phase with the expected result is found
[ "Returns", "the", "result", "of", "a", "previous", "phase", "by", "its", "name" ]
train
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L74-L97
0.004373
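A short sketch of the method above in use; the no-argument ResultsCollection constructor and the phase name are assumptions, and the empty collection is used deliberately to show the error path.

from autofit.tools.pipeline import ResultsCollection

results = ResultsCollection()          # assumed no-argument constructor
try:
    results.from_phase("phase_1")      # hypothetical phase name, not present in the collection
except Exception as error:             # raised as exc.PipelineException by the code above
    print(error)                       # "No previous phase named phase_1 found in results ()"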
resonai/ybt
yabt/caching.py
save_target_in_cache
def save_target_in_cache(target: Target, build_context): """Save `target` to build cache for future reuse. The target hash is used to determine its cache location, where the target metadata and artifacts metadata are seriazlied to JSON. In addition, relevant artifacts produced by the target are copied under the artifacts cache dir by their content hash. TODO: pruning policy to limit cache size. """ cache_dir = build_context.conf.get_cache_dir(target, build_context) if isdir(cache_dir): rmtree(cache_dir) makedirs(cache_dir) logger.debug('Saving target metadata in cache under {}', cache_dir) # write target metadata with open(join(cache_dir, 'target.json'), 'w') as meta_file: meta_file.write(target.json(build_context)) # copy artifacts to artifact cache by hash artifacts = target.artifacts.get_all() artifact_hashes = {} for artifact_type, artifact_map in artifacts.items(): if artifact_type in (AT.docker_image,): continue for dst_path, src_path in artifact_map.items(): artifact_hashes[dst_path] = hash_tree(src_path) # not caching "app" artifacts, since they're part # of the source tree if artifact_type not in _NO_CACHE_TYPES: copy_artifact(src_path, artifact_hashes[dst_path], build_context.conf) # serialize target artifacts metadata + hashes artifacts_desc = { artifact_type.name: [{'dst': dst_path, 'src': src_path, 'hash': artifact_hashes.get(dst_path)} for dst_path, src_path in artifact_map.items()] for artifact_type, artifact_map in artifacts.items() } with open(join(cache_dir, 'artifacts.json'), 'w') as artifacts_meta_file: artifacts_meta_file.write(json.dumps(artifacts_desc, indent=4, sort_keys=True)) # copying the summary dict so I can modify it without mutating the target summary = dict(target.summary) summary['name'] = target.name summary['artifacts_hash'] = hash_tree(join(cache_dir, 'artifacts.json')) if summary.get('created') is None: summary['created'] = time() write_summary(summary, cache_dir)
python
def save_target_in_cache(target: Target, build_context): """Save `target` to build cache for future reuse. The target hash is used to determine its cache location, where the target metadata and artifacts metadata are seriazlied to JSON. In addition, relevant artifacts produced by the target are copied under the artifacts cache dir by their content hash. TODO: pruning policy to limit cache size. """ cache_dir = build_context.conf.get_cache_dir(target, build_context) if isdir(cache_dir): rmtree(cache_dir) makedirs(cache_dir) logger.debug('Saving target metadata in cache under {}', cache_dir) # write target metadata with open(join(cache_dir, 'target.json'), 'w') as meta_file: meta_file.write(target.json(build_context)) # copy artifacts to artifact cache by hash artifacts = target.artifacts.get_all() artifact_hashes = {} for artifact_type, artifact_map in artifacts.items(): if artifact_type in (AT.docker_image,): continue for dst_path, src_path in artifact_map.items(): artifact_hashes[dst_path] = hash_tree(src_path) # not caching "app" artifacts, since they're part # of the source tree if artifact_type not in _NO_CACHE_TYPES: copy_artifact(src_path, artifact_hashes[dst_path], build_context.conf) # serialize target artifacts metadata + hashes artifacts_desc = { artifact_type.name: [{'dst': dst_path, 'src': src_path, 'hash': artifact_hashes.get(dst_path)} for dst_path, src_path in artifact_map.items()] for artifact_type, artifact_map in artifacts.items() } with open(join(cache_dir, 'artifacts.json'), 'w') as artifacts_meta_file: artifacts_meta_file.write(json.dumps(artifacts_desc, indent=4, sort_keys=True)) # copying the summary dict so I can modify it without mutating the target summary = dict(target.summary) summary['name'] = target.name summary['artifacts_hash'] = hash_tree(join(cache_dir, 'artifacts.json')) if summary.get('created') is None: summary['created'] = time() write_summary(summary, cache_dir)
[ "def", "save_target_in_cache", "(", "target", ":", "Target", ",", "build_context", ")", ":", "cache_dir", "=", "build_context", ".", "conf", ".", "get_cache_dir", "(", "target", ",", "build_context", ")", "if", "isdir", "(", "cache_dir", ")", ":", "rmtree", "(", "cache_dir", ")", "makedirs", "(", "cache_dir", ")", "logger", ".", "debug", "(", "'Saving target metadata in cache under {}'", ",", "cache_dir", ")", "# write target metadata", "with", "open", "(", "join", "(", "cache_dir", ",", "'target.json'", ")", ",", "'w'", ")", "as", "meta_file", ":", "meta_file", ".", "write", "(", "target", ".", "json", "(", "build_context", ")", ")", "# copy artifacts to artifact cache by hash", "artifacts", "=", "target", ".", "artifacts", ".", "get_all", "(", ")", "artifact_hashes", "=", "{", "}", "for", "artifact_type", ",", "artifact_map", "in", "artifacts", ".", "items", "(", ")", ":", "if", "artifact_type", "in", "(", "AT", ".", "docker_image", ",", ")", ":", "continue", "for", "dst_path", ",", "src_path", "in", "artifact_map", ".", "items", "(", ")", ":", "artifact_hashes", "[", "dst_path", "]", "=", "hash_tree", "(", "src_path", ")", "# not caching \"app\" artifacts, since they're part", "# of the source tree", "if", "artifact_type", "not", "in", "_NO_CACHE_TYPES", ":", "copy_artifact", "(", "src_path", ",", "artifact_hashes", "[", "dst_path", "]", ",", "build_context", ".", "conf", ")", "# serialize target artifacts metadata + hashes", "artifacts_desc", "=", "{", "artifact_type", ".", "name", ":", "[", "{", "'dst'", ":", "dst_path", ",", "'src'", ":", "src_path", ",", "'hash'", ":", "artifact_hashes", ".", "get", "(", "dst_path", ")", "}", "for", "dst_path", ",", "src_path", "in", "artifact_map", ".", "items", "(", ")", "]", "for", "artifact_type", ",", "artifact_map", "in", "artifacts", ".", "items", "(", ")", "}", "with", "open", "(", "join", "(", "cache_dir", ",", "'artifacts.json'", ")", ",", "'w'", ")", "as", "artifacts_meta_file", ":", "artifacts_meta_file", ".", "write", "(", "json", ".", "dumps", "(", "artifacts_desc", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "# copying the summary dict so I can modify it without mutating the target", "summary", "=", "dict", "(", "target", ".", "summary", ")", "summary", "[", "'name'", "]", "=", "target", ".", "name", "summary", "[", "'artifacts_hash'", "]", "=", "hash_tree", "(", "join", "(", "cache_dir", ",", "'artifacts.json'", ")", ")", "if", "summary", ".", "get", "(", "'created'", ")", "is", "None", ":", "summary", "[", "'created'", "]", "=", "time", "(", ")", "write_summary", "(", "summary", ",", "cache_dir", ")" ]
Save `target` to build cache for future reuse.

The target hash is used to determine its cache location, where the target metadata and artifacts metadata are serialized to JSON. In addition, relevant artifacts produced by the target are copied under the artifacts cache dir by their content hash.

TODO: pruning policy to limit cache size.
[ "Save", "target", "to", "build", "cache", "for", "future", "reuse", "." ]
train
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/caching.py#L239-L287
0.000438
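A rough sketch of where the function above might be called from; the build_context.targets attribute is hypothetical and stands in for however yabt actually iterates over built targets.

from yabt.caching import save_target_in_cache

def cache_built_targets(build_context):
    # hypothetical: loop over whatever collection of built Target objects the context holds
    for target in build_context.targets:
        save_target_in_cache(target, build_context)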
christophertbrown/bioscripts
ctbBio/rRNA_copies.py
rna_bases
def rna_bases(rna_cov, scaffold, bases, line): """ determine if read overlaps with rna, if so count bases """ start = int(line[3]) stop = start + bases - 1 if scaffold not in rna_cov: return rna_cov for pos in rna_cov[scaffold][2]: ol = get_overlap([start, stop], pos) rna_cov[scaffold][0] += ol return rna_cov
python
def rna_bases(rna_cov, scaffold, bases, line): """ determine if read overlaps with rna, if so count bases """ start = int(line[3]) stop = start + bases - 1 if scaffold not in rna_cov: return rna_cov for pos in rna_cov[scaffold][2]: ol = get_overlap([start, stop], pos) rna_cov[scaffold][0] += ol return rna_cov
[ "def", "rna_bases", "(", "rna_cov", ",", "scaffold", ",", "bases", ",", "line", ")", ":", "start", "=", "int", "(", "line", "[", "3", "]", ")", "stop", "=", "start", "+", "bases", "-", "1", "if", "scaffold", "not", "in", "rna_cov", ":", "return", "rna_cov", "for", "pos", "in", "rna_cov", "[", "scaffold", "]", "[", "2", "]", ":", "ol", "=", "get_overlap", "(", "[", "start", ",", "stop", "]", ",", "pos", ")", "rna_cov", "[", "scaffold", "]", "[", "0", "]", "+=", "ol", "return", "rna_cov" ]
determine if read overlaps with rna, if so count bases
[ "determine", "if", "read", "overlaps", "with", "rna", "if", "so", "count", "bases" ]
train
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L18-L29
0.002732
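A small sketch of the expected inputs, inferred from the code above; the scaffold name, rRNA coordinates and SAM fields are made-up values, and the import path is assumed from the file path in the record.

from ctbBio.rRNA_copies import rna_bases

# rna_cov maps scaffold -> [bases overlapping rRNA, <other coverage info>, list of rRNA (start, stop) regions]
rna_cov = {'scaffold_1': [0, 0, [[100, 250]]]}

# stand-in for a split SAM line; only field 3 (the 1-based mapping position) is read by rna_bases
sam_fields = ['read_1', '0', 'scaffold_1', '150']

rna_cov = rna_bases(rna_cov, 'scaffold_1', 100, sam_fields)
print(rna_cov['scaffold_1'][0])  # bases of this 100 bp read that overlap the rRNA region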
goldmann/docker-squash
docker_squash/image.py
Image._parse_image_name
def _parse_image_name(self, image): """ Parses the provided image name and splits it in the name and tag part, if possible. If no tag is provided 'latest' is used. """ if ':' in image and '/' not in image.split(':')[-1]: image_tag = image.split(':')[-1] image_name = image[:-(len(image_tag) + 1)] else: image_tag = "latest" image_name = image return (image_name, image_tag)
python
def _parse_image_name(self, image): """ Parses the provided image name and splits it in the name and tag part, if possible. If no tag is provided 'latest' is used. """ if ':' in image and '/' not in image.split(':')[-1]: image_tag = image.split(':')[-1] image_name = image[:-(len(image_tag) + 1)] else: image_tag = "latest" image_name = image return (image_name, image_tag)
[ "def", "_parse_image_name", "(", "self", ",", "image", ")", ":", "if", "':'", "in", "image", "and", "'/'", "not", "in", "image", ".", "split", "(", "':'", ")", "[", "-", "1", "]", ":", "image_tag", "=", "image", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "image_name", "=", "image", "[", ":", "-", "(", "len", "(", "image_tag", ")", "+", "1", ")", "]", "else", ":", "image_tag", "=", "\"latest\"", "image_name", "=", "image", "return", "(", "image_name", ",", "image_tag", ")" ]
Parses the provided image name and splits it in the name and tag part, if possible. If no tag is provided 'latest' is used.
[ "Parses", "the", "provided", "image", "name", "and", "splits", "it", "in", "the", "name", "and", "tag", "part", "if", "possible", ".", "If", "no", "tag", "is", "provided", "latest", "is", "used", "." ]
train
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L405-L418
0.004115
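The tag-splitting rule above is self-contained, so it can be illustrated with a standalone copy of the method body; the image names are arbitrary examples.

def parse_image_name(image):
    # standalone copy of Image._parse_image_name, for illustration outside the class
    if ':' in image and '/' not in image.split(':')[-1]:
        image_tag = image.split(':')[-1]
        image_name = image[:-(len(image_tag) + 1)]
    else:
        image_tag = "latest"
        image_name = image
    return (image_name, image_tag)

print(parse_image_name('registry.example.com:5000/app'))     # ('registry.example.com:5000/app', 'latest')
print(parse_image_name('registry.example.com:5000/app:v1'))  # ('registry.example.com:5000/app', 'v1')
print(parse_image_name('ubuntu'))                             # ('ubuntu', 'latest')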
wonambi-python/wonambi
wonambi/widgets/notes.py
Notes.get_cycle_mrkr
def get_cycle_mrkr(self, end=False): """Mark cycle start or end. Parameters ---------- end : bool If True, marks a cycle end; otherwise, it's a cycle start """ if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return window_start = self.parent.value('window_start') window_length = self.parent.value('window_length') try: self.annot.set_cycle_mrkr(window_start, end=end) except KeyError: msg = ('The start of the window does not correspond to any epoch ' 'in sleep scoring file') self.parent.statusBar().showMessage(msg) lg.debug(msg) else: bound = 'start' if end: bound = 'end' lg.info('User marked ' + str(window_start) + ' as cycle ' + bound) self.parent.overview.mark_cycles(window_start, window_length, end=end)
python
def get_cycle_mrkr(self, end=False): """Mark cycle start or end. Parameters ---------- end : bool If True, marks a cycle end; otherwise, it's a cycle start """ if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return window_start = self.parent.value('window_start') window_length = self.parent.value('window_length') try: self.annot.set_cycle_mrkr(window_start, end=end) except KeyError: msg = ('The start of the window does not correspond to any epoch ' 'in sleep scoring file') self.parent.statusBar().showMessage(msg) lg.debug(msg) else: bound = 'start' if end: bound = 'end' lg.info('User marked ' + str(window_start) + ' as cycle ' + bound) self.parent.overview.mark_cycles(window_start, window_length, end=end)
[ "def", "get_cycle_mrkr", "(", "self", ",", "end", "=", "False", ")", ":", "if", "self", ".", "annot", "is", "None", ":", "# remove if buttons are disabled", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "'No score file loaded'", ")", "return", "window_start", "=", "self", ".", "parent", ".", "value", "(", "'window_start'", ")", "window_length", "=", "self", ".", "parent", ".", "value", "(", "'window_length'", ")", "try", ":", "self", ".", "annot", ".", "set_cycle_mrkr", "(", "window_start", ",", "end", "=", "end", ")", "except", "KeyError", ":", "msg", "=", "(", "'The start of the window does not correspond to any epoch '", "'in sleep scoring file'", ")", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "msg", ")", "lg", ".", "debug", "(", "msg", ")", "else", ":", "bound", "=", "'start'", "if", "end", ":", "bound", "=", "'end'", "lg", ".", "info", "(", "'User marked '", "+", "str", "(", "window_start", ")", "+", "' as cycle '", "+", "bound", ")", "self", ".", "parent", ".", "overview", ".", "mark_cycles", "(", "window_start", ",", "window_length", ",", "end", "=", "end", ")" ]
Mark cycle start or end. Parameters ---------- end : bool If True, marks a cycle end; otherwise, it's a cycle start
[ "Mark", "cycle", "start", "or", "end", "." ]
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L995-L1027
0.001803
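A brief sketch of how the method above would be driven from elsewhere in the GUI; 'notes' is assumed to be an existing Notes widget with an annotation file loaded.

notes.get_cycle_mrkr()           # mark the start of the currently displayed window as a cycle start
# ... navigate to a later window ...
notes.get_cycle_mrkr(end=True)   # mark that window as the cycle end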
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/enrollment/apis/public_api_api.py
PublicAPIApi.get_device_enrollments
def get_device_enrollments(self, **kwargs): # noqa: E501 """Get enrollment list. # noqa: E501 Provides a list of pending and claimed enrollments. **Example usage:** ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments ``` With query parameters: ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ 'https://api.us-east-1.mbedcloud.com/v3/device-enrollments?limit=10' ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_device_enrollments(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: Number of results to be returned. Between 2 and 1000, inclusive. :param str after: Entity ID to fetch after. :param str order: ASC or DESC :param str include: Comma-separated additional data to return. Currently supported: total_count. :return: EnrollmentIdentities If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_device_enrollments_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_device_enrollments_with_http_info(**kwargs) # noqa: E501 return data
python
def get_device_enrollments(self, **kwargs): # noqa: E501 """Get enrollment list. # noqa: E501 Provides a list of pending and claimed enrollments. **Example usage:** ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments ``` With query parameters: ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ 'https://api.us-east-1.mbedcloud.com/v3/device-enrollments?limit=10' ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_device_enrollments(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: Number of results to be returned. Between 2 and 1000, inclusive. :param str after: Entity ID to fetch after. :param str order: ASC or DESC :param str include: Comma-separated additional data to return. Currently supported: total_count. :return: EnrollmentIdentities If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_device_enrollments_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_device_enrollments_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "get_device_enrollments", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_device_enrollments_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_device_enrollments_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Get enrollment list. # noqa: E501 Provides a list of pending and claimed enrollments. **Example usage:** ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments ``` With query parameters: ``` curl -X GET \\ -H 'Authorization: Bearer <valid access token>' \\ 'https://api.us-east-1.mbedcloud.com/v3/device-enrollments?limit=10' ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_device_enrollments(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: Number of results to be returned. Between 2 and 1000, inclusive. :param str after: Entity ID to fetch after. :param str order: ASC or DESC :param str include: Comma-separated additional data to return. Currently supported: total_count. :return: EnrollmentIdentities If the method is called asynchronously, returns the request thread.
[ "Get", "enrollment", "list", ".", "#", "noqa", ":", "E501" ]
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/enrollment/apis/public_api_api.py#L729-L752
0.001325
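The docstring above already shows the asynchronous form; a synchronous sketch, with the import path inferred from the file path in the record and client configuration assumed to happen elsewhere, might look like this.

from mbed_cloud._backends.enrollment.apis.public_api_api import PublicAPIApi

api = PublicAPIApi()                                      # assumes API-key configuration is done elsewhere
page = api.get_device_enrollments(limit=10, order='ASC')  # synchronous call, parameters from the docstring
print(page)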
slaveofcode/pycrawler
pycrawler/page.py
extract_js_links
def extract_js_links(bs4): """Extracting js links from BeautifulSoup object :param bs4: `BeautifulSoup` :return: `list` List of links """ links = extract_links(bs4) real_js = [anchor for anchor in links if anchor.endswith(('.js', '.JS'))] js_tags = [anchor['src'] for anchor in bs4.select('script[type="text/javascript"]') if anchor.has_attr('src')] return list(set(real_js+js_tags))
python
def extract_js_links(bs4): """Extracting js links from BeautifulSoup object :param bs4: `BeautifulSoup` :return: `list` List of links """ links = extract_links(bs4) real_js = [anchor for anchor in links if anchor.endswith(('.js', '.JS'))] js_tags = [anchor['src'] for anchor in bs4.select('script[type="text/javascript"]') if anchor.has_attr('src')] return list(set(real_js+js_tags))
[ "def", "extract_js_links", "(", "bs4", ")", ":", "links", "=", "extract_links", "(", "bs4", ")", "real_js", "=", "[", "anchor", "for", "anchor", "in", "links", "if", "anchor", ".", "endswith", "(", "(", "'.js'", ",", "'.JS'", ")", ")", "]", "js_tags", "=", "[", "anchor", "[", "'src'", "]", "for", "anchor", "in", "bs4", ".", "select", "(", "'script[type=\"text/javascript\"]'", ")", "if", "anchor", ".", "has_attr", "(", "'src'", ")", "]", "return", "list", "(", "set", "(", "real_js", "+", "js_tags", ")", ")" ]
Extracting js links from BeautifulSoup object :param bs4: `BeautifulSoup` :return: `list` List of links
[ "Extracting", "js", "links", "from", "BeautifulSoup", "object" ]
train
https://github.com/slaveofcode/pycrawler/blob/6d19b5b378f42f9586e2d3a0d0c013cb03c82f6d/pycrawler/page.py#L182-L196
0.004598
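A usage sketch for the function above, assuming extract_links in the same module returns the href values of anchor tags; the HTML is a made-up fragment and the import path is inferred from the record.

from bs4 import BeautifulSoup
from pycrawler.page import extract_js_links

html = '''
<html><head>
  <script type="text/javascript" src="/static/app.js"></script>
</head>
<body><a href="https://cdn.example.com/lib.min.js">lib</a></body></html>
'''
soup = BeautifulSoup(html, 'html.parser')
print(extract_js_links(soup))  # e.g. ['https://cdn.example.com/lib.min.js', '/static/app.js'] (order may vary)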