Columns:
text: string (lengths 89 to 104k)
code_tokens: sequence
avg_line_len: float64 (range 7.91 to 980)
score: float64 (range 0 to 630)
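The four columns above describe a code-corpus record: the raw function source, its token sequence, and two per-record numbers. As a minimal sketch (not taken from the source), the snippet below builds one such record as a plain dictionary and recomputes an average line length for it; the helper name, the toy record, and the assumption that avg_line_len is the mean character count of the source lines are illustrative only.

def mean_line_length(source):
    # Average character count per line; assumed (not confirmed) to be how avg_line_len is defined.
    lines = source.splitlines() or [source]
    return sum(len(line) for line in lines) / len(lines)

# Hypothetical record following the schema above (values are made up).
record = {
    "text": "def add(a, b):\n    return a + b\n",
    "code_tokens": ["def", "add", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b"],
    "avg_line_len": None,
    "score": 0,
}
record["avg_line_len"] = mean_line_length(record["text"])
print(record["avg_line_len"])  # 15.0 for this toy record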
def round(self, ndigits=0):
    """
    Rounds the amount using the current ``Decimal`` rounding algorithm.
    """
    if ndigits is None:
        ndigits = 0
    return self.__class__(
        amount=self.amount.quantize(Decimal('1e' + str(-ndigits))),
        currency=self.currency)
[ "def", "round", "(", "self", ",", "ndigits", "=", "0", ")", ":", "if", "ndigits", "is", "None", ":", "ndigits", "=", "0", "return", "self", ".", "__class__", "(", "amount", "=", "self", ".", "amount", ".", "quantize", "(", "Decimal", "(", "'1e'", "+", "str", "(", "-", "ndigits", ")", ")", ")", ",", "currency", "=", "self", ".", "currency", ")" ]
34.444444
13.777778
def _make_input(self, action, old_quat):
    """
    Helper function that returns a dictionary with keys dpos, rotation from a
    raw input array. The first three elements are taken to be displacement in
    position, and a quaternion indicating the change in rotation with respect
    to @old_quat.
    """
    return {
        "dpos": action[:3],
        # IK controller takes an absolute orientation in robot base frame
        "rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
    }
[ "def", "_make_input", "(", "self", ",", "action", ",", "old_quat", ")", ":", "return", "{", "\"dpos\"", ":", "action", "[", ":", "3", "]", ",", "# IK controller takes an absolute orientation in robot base frame", "\"rotation\"", ":", "T", ".", "quat2mat", "(", "T", ".", "quat_multiply", "(", "old_quat", ",", "action", "[", "3", ":", "7", "]", ")", ")", ",", "}" ]
47.909091
24.818182
def bump(args: argparse.Namespace) -> None:
    """
    :args: An argparse.Namespace object.

    This function is bound to the 'bump' sub-command. It increments the
    version integer of the user's choice ('major', 'minor', or 'patch').
    """
    try:
        last_tag = last_git_release_tag(git_tags())
    except NoGitTagsException:
        print(SemVer(0, 1, 0))
        exit(0)
    last_ver = git_tag_to_semver(last_tag)
    if args.type == 'patch':
        print(last_ver.bump_patch())
    elif args.type == 'minor':
        print(last_ver.bump_minor())
    elif args.type == 'major':
        print(last_ver.bump_major())
[ "def", "bump", "(", "args", ":", "argparse", ".", "Namespace", ")", "->", "None", ":", "try", ":", "last_tag", "=", "last_git_release_tag", "(", "git_tags", "(", ")", ")", "except", "NoGitTagsException", ":", "print", "(", "SemVer", "(", "0", ",", "1", ",", "0", ")", ")", "exit", "(", "0", ")", "last_ver", "=", "git_tag_to_semver", "(", "last_tag", ")", "if", "args", ".", "type", "==", "'patch'", ":", "print", "(", "last_ver", ".", "bump_patch", "(", ")", ")", "elif", "args", ".", "type", "==", "'minor'", ":", "print", "(", "last_ver", ".", "bump_minor", "(", ")", ")", "elif", "args", ".", "type", "==", "'major'", ":", "print", "(", "last_ver", ".", "bump_major", "(", ")", ")" ]
29.142857
15.238095
def stitch_block_rows(block_list):
    '''
    Stitches blocks together into a single block rowwise. These blocks are 2D
    tables usually generated from tableproc. The final block will be of
    dimensions (sum(num_rows), max(num_cols)).
    '''
    stitched = list(itertools.chain(*block_list))
    max_length = max(len(row) for row in stitched)
    for row in stitched:
        if len(row) < max_length:
            row += [None] * (max_length - len(row))
    return stitched
[ "def", "stitch_block_rows", "(", "block_list", ")", ":", "stitched", "=", "list", "(", "itertools", ".", "chain", "(", "*", "block_list", ")", ")", "max_length", "=", "max", "(", "len", "(", "row", ")", "for", "row", "in", "stitched", ")", "for", "row", "in", "stitched", ":", "if", "len", "(", "row", ")", "<", "max_length", ":", "row", "+=", "[", "None", "]", "*", "(", "max_length", "-", "len", "(", "row", ")", ")", "return", "stitched" ]
43.181818
23.363636
def load_stock_links(self):
    """ Read stock links into the model """
    links = self.__get_session().query(dal.AssetClassStock).all()
    for entity in links:
        # log(DEBUG, f"adding {entity.symbol} to {entity.assetclassid}")
        # mapping
        stock: Stock = Stock(entity.symbol)
        # find parent classes by id and assign children
        parent: AssetClass = self.model.get_class_by_id(entity.assetclassid)
        if parent:
            # Assign to parent.
            parent.stocks.append(stock)
            # Add to index for easy reference
            self.model.stocks.append(stock)
[ "def", "load_stock_links", "(", "self", ")", ":", "links", "=", "self", ".", "__get_session", "(", ")", ".", "query", "(", "dal", ".", "AssetClassStock", ")", ".", "all", "(", ")", "for", "entity", "in", "links", ":", "# log(DEBUG, f\"adding {entity.symbol} to {entity.assetclassid}\")", "# mapping", "stock", ":", "Stock", "=", "Stock", "(", "entity", ".", "symbol", ")", "# find parent classes by id and assign children", "parent", ":", "AssetClass", "=", "self", ".", "model", ".", "get_class_by_id", "(", "entity", ".", "assetclassid", ")", "if", "parent", ":", "# Assign to parent.", "parent", ".", "stocks", ".", "append", "(", "stock", ")", "# Add to index for easy reference", "self", ".", "model", ".", "stocks", ".", "append", "(", "stock", ")" ]
46.428571
15.5
def freeze(wait, force_kill):
    '''Freeze manager.'''
    if wait and force_kill:
        print('You cannot use both --wait and --force-kill options '
              'at the same time.', file=sys.stderr)
        return
    with Session() as session:
        if wait:
            while True:
                resp = session.Manager.status()
                active_sessions_num = resp['active_sessions']
                if active_sessions_num == 0:
                    break
                print_wait('Waiting for all sessions terminated... ({0} left)'
                           .format(active_sessions_num))
                time.sleep(3)
            print_done('All sessions are terminated.')
        if force_kill:
            print_wait('Killing all sessions...')
        session.Manager.freeze(force_kill=force_kill)
        if force_kill:
            print_done('All sessions are killed.')
        print('Manager is successfully frozen.')
[ "def", "freeze", "(", "wait", ",", "force_kill", ")", ":", "if", "wait", "and", "force_kill", ":", "print", "(", "'You cannot use both --wait and --force-kill options '", "'at the same time.'", ",", "file", "=", "sys", ".", "stderr", ")", "return", "with", "Session", "(", ")", "as", "session", ":", "if", "wait", ":", "while", "True", ":", "resp", "=", "session", ".", "Manager", ".", "status", "(", ")", "active_sessions_num", "=", "resp", "[", "'active_sessions'", "]", "if", "active_sessions_num", "==", "0", ":", "break", "print_wait", "(", "'Waiting for all sessions terminated... ({0} left)'", ".", "format", "(", "active_sessions_num", ")", ")", "time", ".", "sleep", "(", "3", ")", "print_done", "(", "'All sessions are terminated.'", ")", "if", "force_kill", ":", "print_wait", "(", "'Killing all sessions...'", ")", "session", ".", "Manager", ".", "freeze", "(", "force_kill", "=", "force_kill", ")", "if", "force_kill", ":", "print_done", "(", "'All sessions are killed.'", ")", "print", "(", "'Manager is successfully frozen.'", ")" ]
32.892857
19.892857
def read_tx_body(ptr, tx):
    """
    Returns {'ins': [...], 'outs': [...]}
    """
    _obj = {"ins": [], "outs": [], 'locktime': None}
    # number of inputs
    ins = read_var_int(ptr, tx)
    # all inputs
    for i in range(ins):
        _obj["ins"].append({
            "outpoint": {
                "hash": read_bytes(ptr, tx, 32)[::-1],
                "index": read_as_int(ptr, tx, 4)
            },
            "script": read_var_string(ptr, tx),
            "sequence": read_as_int(ptr, tx, 4)
        })
    # number of outputs
    outs = read_var_int(ptr, tx)
    # all outputs
    for i in range(outs):
        _obj["outs"].append({
            "value": read_as_int(ptr, tx, 8),
            "script": read_var_string(ptr, tx)
        })
    return _obj
[ "def", "read_tx_body", "(", "ptr", ",", "tx", ")", ":", "_obj", "=", "{", "\"ins\"", ":", "[", "]", ",", "\"outs\"", ":", "[", "]", ",", "'locktime'", ":", "None", "}", "# number of inputs", "ins", "=", "read_var_int", "(", "ptr", ",", "tx", ")", "# all inputs", "for", "i", "in", "range", "(", "ins", ")", ":", "_obj", "[", "\"ins\"", "]", ".", "append", "(", "{", "\"outpoint\"", ":", "{", "\"hash\"", ":", "read_bytes", "(", "ptr", ",", "tx", ",", "32", ")", "[", ":", ":", "-", "1", "]", ",", "\"index\"", ":", "read_as_int", "(", "ptr", ",", "tx", ",", "4", ")", "}", ",", "\"script\"", ":", "read_var_string", "(", "ptr", ",", "tx", ")", ",", "\"sequence\"", ":", "read_as_int", "(", "ptr", ",", "tx", ",", "4", ")", "}", ")", "# number of outputs", "outs", "=", "read_var_int", "(", "ptr", ",", "tx", ")", "# all outputs", "for", "i", "in", "range", "(", "outs", ")", ":", "_obj", "[", "\"outs\"", "]", ".", "append", "(", "{", "\"value\"", ":", "read_as_int", "(", "ptr", ",", "tx", ",", "8", ")", ",", "\"script\"", ":", "read_var_string", "(", "ptr", ",", "tx", ")", "}", ")", "return", "_obj" ]
23.903226
17.83871
def _try_b32_decode(v):
    """
    Attempt to decode a b32-encoded username which is sometimes generated by
    internal Globus components.

    The expectation is that the string is a valid ID, username, or
    b32-encoded name. Therefore, we can do some simple checking on it.

    If it does not appear to be formatted correctly, return None.
    """
    # should start with "u_"
    if not v.startswith("u_"):
        return None
    # usernames have @ , we want to allow `u_foo@example.com`
    # b32 names never have @
    if "@" in v:
        return None
    # trim "u_"
    v = v[2:]
    # wrong length
    if len(v) != 26:
        return None
    # append padding and uppercase so that b32decode will work
    v = v.upper() + (6 * "=")
    # try to decode
    try:
        return str(uuid.UUID(bytes=base64.b32decode(v)))
    # if it fails, I guess it's a username? Not much left to do
    except ValueError:
        return None
[ "def", "_try_b32_decode", "(", "v", ")", ":", "# should start with \"u_\"", "if", "not", "v", ".", "startswith", "(", "\"u_\"", ")", ":", "return", "None", "# usernames have @ , we want to allow `u_foo@example.com`", "# b32 names never have @", "if", "\"@\"", "in", "v", ":", "return", "None", "# trim \"u_\"", "v", "=", "v", "[", "2", ":", "]", "# wrong length", "if", "len", "(", "v", ")", "!=", "26", ":", "return", "None", "# append padding and uppercase so that b32decode will work", "v", "=", "v", ".", "upper", "(", ")", "+", "(", "6", "*", "\"=\"", ")", "# try to decode", "try", ":", "return", "str", "(", "uuid", ".", "UUID", "(", "bytes", "=", "base64", ".", "b32decode", "(", "v", ")", ")", ")", "# if it fails, I guess it's a username? Not much left to do", "except", "ValueError", ":", "return", "None" ]
28.40625
21.96875
def get_config_tuple_from_egrc(egrc_path):
    """
    Create a Config named tuple from the values specified in the .egrc.
    Expands any paths as necessary.

    egrc_path must exist and point a file.

    If not present in the .egrc, properties of the Config are returned as
    None.
    """
    with open(egrc_path, 'r') as egrc:
        try:
            config = ConfigParser.RawConfigParser()
        except AttributeError:
            config = ConfigParser()
        config.readfp(egrc)

        # default to None
        examples_dir = None
        custom_dir = None
        use_color = None
        pager_cmd = None
        squeeze = None
        subs = None
        editor_cmd = None

        if config.has_option(DEFAULT_SECTION, EG_EXAMPLES_DIR):
            examples_dir = config.get(DEFAULT_SECTION, EG_EXAMPLES_DIR)
            examples_dir = get_expanded_path(examples_dir)

        if config.has_option(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR):
            custom_dir = config.get(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR)
            custom_dir = get_expanded_path(custom_dir)

        if config.has_option(DEFAULT_SECTION, USE_COLOR):
            use_color_raw = config.get(DEFAULT_SECTION, USE_COLOR)
            use_color = _parse_bool_from_raw_egrc_value(use_color_raw)

        if config.has_option(DEFAULT_SECTION, PAGER_CMD):
            pager_cmd_raw = config.get(DEFAULT_SECTION, PAGER_CMD)
            pager_cmd = ast.literal_eval(pager_cmd_raw)

        if config.has_option(DEFAULT_SECTION, EDITOR_CMD):
            editor_cmd_raw = config.get(DEFAULT_SECTION, EDITOR_CMD)
            editor_cmd = ast.literal_eval(editor_cmd_raw)

        color_config = get_custom_color_config_from_egrc(config)

        if config.has_option(DEFAULT_SECTION, SQUEEZE):
            squeeze_raw = config.get(DEFAULT_SECTION, SQUEEZE)
            squeeze = _parse_bool_from_raw_egrc_value(squeeze_raw)

        if config.has_section(SUBSTITUTION_SECTION):
            subs = get_substitutions_from_config(config)

        return Config(
            examples_dir=examples_dir,
            custom_dir=custom_dir,
            color_config=color_config,
            use_color=use_color,
            pager_cmd=pager_cmd,
            editor_cmd=editor_cmd,
            squeeze=squeeze,
            subs=subs,
        )
[ "def", "get_config_tuple_from_egrc", "(", "egrc_path", ")", ":", "with", "open", "(", "egrc_path", ",", "'r'", ")", "as", "egrc", ":", "try", ":", "config", "=", "ConfigParser", ".", "RawConfigParser", "(", ")", "except", "AttributeError", ":", "config", "=", "ConfigParser", "(", ")", "config", ".", "readfp", "(", "egrc", ")", "# default to None", "examples_dir", "=", "None", "custom_dir", "=", "None", "use_color", "=", "None", "pager_cmd", "=", "None", "squeeze", "=", "None", "subs", "=", "None", "editor_cmd", "=", "None", "if", "config", ".", "has_option", "(", "DEFAULT_SECTION", ",", "EG_EXAMPLES_DIR", ")", ":", "examples_dir", "=", "config", ".", "get", "(", "DEFAULT_SECTION", ",", "EG_EXAMPLES_DIR", ")", "examples_dir", "=", "get_expanded_path", "(", "examples_dir", ")", "if", "config", ".", "has_option", "(", "DEFAULT_SECTION", ",", "CUSTOM_EXAMPLES_DIR", ")", ":", "custom_dir", "=", "config", ".", "get", "(", "DEFAULT_SECTION", ",", "CUSTOM_EXAMPLES_DIR", ")", "custom_dir", "=", "get_expanded_path", "(", "custom_dir", ")", "if", "config", ".", "has_option", "(", "DEFAULT_SECTION", ",", "USE_COLOR", ")", ":", "use_color_raw", "=", "config", ".", "get", "(", "DEFAULT_SECTION", ",", "USE_COLOR", ")", "use_color", "=", "_parse_bool_from_raw_egrc_value", "(", "use_color_raw", ")", "if", "config", ".", "has_option", "(", "DEFAULT_SECTION", ",", "PAGER_CMD", ")", ":", "pager_cmd_raw", "=", "config", ".", "get", "(", "DEFAULT_SECTION", ",", "PAGER_CMD", ")", "pager_cmd", "=", "ast", ".", "literal_eval", "(", "pager_cmd_raw", ")", "if", "config", ".", "has_option", "(", "DEFAULT_SECTION", ",", "EDITOR_CMD", ")", ":", "editor_cmd_raw", "=", "config", ".", "get", "(", "DEFAULT_SECTION", ",", "EDITOR_CMD", ")", "editor_cmd", "=", "ast", ".", "literal_eval", "(", "editor_cmd_raw", ")", "color_config", "=", "get_custom_color_config_from_egrc", "(", "config", ")", "if", "config", ".", "has_option", "(", "DEFAULT_SECTION", ",", "SQUEEZE", ")", ":", "squeeze_raw", "=", "config", ".", "get", "(", "DEFAULT_SECTION", ",", "SQUEEZE", ")", "squeeze", "=", "_parse_bool_from_raw_egrc_value", "(", "squeeze_raw", ")", "if", "config", ".", "has_section", "(", "SUBSTITUTION_SECTION", ")", ":", "subs", "=", "get_substitutions_from_config", "(", "config", ")", "return", "Config", "(", "examples_dir", "=", "examples_dir", ",", "custom_dir", "=", "custom_dir", ",", "color_config", "=", "color_config", ",", "use_color", "=", "use_color", ",", "pager_cmd", "=", "pager_cmd", ",", "editor_cmd", "=", "editor_cmd", ",", "squeeze", "=", "squeeze", ",", "subs", "=", "subs", ",", ")" ]
35.171875
20.921875
def tag_autocomplete_js(format_string=None):
    """format_string should be ``app_label model counts``

    renders 'tagging_ext/tag_autocomplete_js.html"""
    if format_string:
        context_list = format_string.split(' ')
        context = {
            'app_label': context_list[0], 'model': context_list[1],
            'counts': context_list[2]
        }
    else:
        context = {}
    return render_to_string('tagging_ext/tagging_autocomplete_js.html', context)
[ "def", "tag_autocomplete_js", "(", "format_string", "=", "None", ")", ":", "if", "format_string", ":", "context_list", "=", "format_string", ".", "split", "(", "' '", ")", "context", "=", "{", "'app_label'", ":", "context_list", "[", "0", "]", ",", "'model'", ":", "context_list", "[", "1", "]", ",", "'counts'", ":", "context_list", "[", "2", "]", "}", "else", ":", "context", "=", "{", "}", "return", "render_to_string", "(", "'tagging_ext/tagging_autocomplete_js.html'", ",", "context", ")" ]
37.583333
21.5
def compute_integrated_acquisition_withGradients(acquisition, x):
    '''
    Used to compute the acquisition function with gradients when samples of
    the hyper-parameters have been generated (used in GP_MCMC model).

    :param acquisition: acquisition function with GpyOpt model type GP_MCMC.
    :param x: location where the acquisition is evaluated.
    '''
    acqu_x = 0
    d_acqu_x = 0

    for i in range(acquisition.model.num_hmc_samples):
        acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
        acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)
        acqu_x += acqu_x_sample
        d_acqu_x += d_acqu_x_sample

    acqu_x = acqu_x / acquisition.model.num_hmc_samples
    d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples

    return acqu_x, d_acqu_x
[ "def", "compute_integrated_acquisition_withGradients", "(", "acquisition", ",", "x", ")", ":", "acqu_x", "=", "0", "d_acqu_x", "=", "0", "for", "i", "in", "range", "(", "acquisition", ".", "model", ".", "num_hmc_samples", ")", ":", "acquisition", ".", "model", ".", "model", ".", "kern", "[", ":", "]", "=", "acquisition", ".", "model", ".", "hmc_samples", "[", "i", ",", ":", "]", "acqu_x_sample", ",", "d_acqu_x_sample", "=", "acquisition", ".", "acquisition_function_withGradients", "(", "x", ")", "acqu_x", "+=", "acqu_x_sample", "d_acqu_x", "+=", "d_acqu_x_sample", "acqu_x", "=", "acqu_x", "/", "acquisition", ".", "model", ".", "num_hmc_samples", "d_acqu_x", "=", "d_acqu_x", "/", "acquisition", ".", "model", ".", "num_hmc_samples", "return", "acqu_x", ",", "d_acqu_x" ]
38.380952
31.047619
def _render_item(self, depth, key, value=None, **settings):
    """
    Format single list item.
    """
    strptrn = self.INDENT * depth
    lchar = self.lchar(settings[self.SETTING_LIST_STYLE])

    s = self._es_text(settings, settings[self.SETTING_LIST_FORMATING])
    lchar = self.fmt_text(lchar, **s)

    strptrn = "{}"
    if value is not None:
        strptrn += ": {}"
    s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING])
    strptrn = self.fmt_text(strptrn.format(key, value), **s)

    return '{} {} {}'.format(self.INDENT * depth, lchar, strptrn)
[ "def", "_render_item", "(", "self", ",", "depth", ",", "key", ",", "value", "=", "None", ",", "*", "*", "settings", ")", ":", "strptrn", "=", "self", ".", "INDENT", "*", "depth", "lchar", "=", "self", ".", "lchar", "(", "settings", "[", "self", ".", "SETTING_LIST_STYLE", "]", ")", "s", "=", "self", ".", "_es_text", "(", "settings", ",", "settings", "[", "self", ".", "SETTING_LIST_FORMATING", "]", ")", "lchar", "=", "self", ".", "fmt_text", "(", "lchar", ",", "*", "*", "s", ")", "strptrn", "=", "\"{}\"", "if", "value", "is", "not", "None", ":", "strptrn", "+=", "\": {}\"", "s", "=", "self", ".", "_es_text", "(", "settings", ",", "settings", "[", "self", ".", "SETTING_TEXT_FORMATING", "]", ")", "strptrn", "=", "self", ".", "fmt_text", "(", "strptrn", ".", "format", "(", "key", ",", "value", ")", ",", "*", "*", "s", ")", "return", "'{} {} {}'", ".", "format", "(", "self", ".", "INDENT", "*", "depth", ",", "lchar", ",", "strptrn", ")" ]
36.235294
19.647059
def _expand_formula_(formula_string):
    """
    Accounts for the many ways a user may write a formula string, and returns
    an expanded chemical formula string.

    Assumptions:
    -The Chemical Formula string it is supplied is well-written, and has no
     hanging parethneses
    -The number of repeats occurs after the elemental symbol or ) ] character
     EXCEPT in the case of a hydrate where it is assumed to be in front of the
     first element
    -All hydrates explicitly use the · symbol
    -Only (, (,[, ], ., · are "important" symbols to intrepreting the string.
    -IONS ARE NOT HANDLED

    :param formula_string: a messy chemical formula string
    :return: a non-emperical but expanded formula string
    """
    formula_string = re.sub(r'[^A-Za-z0-9\(\)\[\]\·\.]+', '', formula_string)
    hydrate_pos = formula_string.find('·')
    if hydrate_pos >= 0:
        formula_string = _expand_hydrate_(hydrate_pos, formula_string)
    search_result = re.search(
        r'(?:[\(\[]([A-Za-z0-9]+)[\)\]](\d*))', formula_string)
    if search_result is None:
        return formula_string
    this_start = search_result.start()
    this_end = search_result.end()
    this_string = search_result.group()
    this_expansion_array = re.findall(
        r'(?:[\(\[]([A-Za-z0-9]+)[\)\]](\d*))', this_string)
    for a in this_expansion_array:
        if a[1] == "":
            a = (a[0], 1)
        parenth_expanded = ""
        multiplier = float(a[1])
        element_array = re.findall('[A-Z][^A-Z]*', a[0])
        for e in element_array:
            occurance_array = re.findall('[0-9][^0-9]*', e)
            if len(occurance_array) == 0:
                occurance_array.append(1)
            for o in occurance_array:
                symbol = re.findall('[A-Z][a-z]*', e)
                total_num = float(o) * multiplier
                if total_num.is_integer():
                    total_num = int(total_num)
                total_str = str(total_num)
                if total_str == "1":
                    total_str = ""
                new_string = symbol[0] + total_str
                parenth_expanded += new_string
        formula_string = formula_string[0:this_start] + \
            parenth_expanded + formula_string[this_end:]
    return _expand_formula_(formula_string)
[ "def", "_expand_formula_", "(", "formula_string", ")", ":", "formula_string", "=", "re", ".", "sub", "(", "r'[^A-Za-z0-9\\(\\)\\[\\]\\·\\.]+',", " ", "',", " ", "ormula_string)", "", "hydrate_pos", "=", "formula_string", ".", "find", "(", "'·')", "", "if", "hydrate_pos", ">=", "0", ":", "formula_string", "=", "_expand_hydrate_", "(", "hydrate_pos", ",", "formula_string", ")", "search_result", "=", "re", ".", "search", "(", "r'(?:[\\(\\[]([A-Za-z0-9]+)[\\)\\]](\\d*))'", ",", "formula_string", ")", "if", "search_result", "is", "None", ":", "return", "formula_string", "this_start", "=", "search_result", ".", "start", "(", ")", "this_end", "=", "search_result", ".", "end", "(", ")", "this_string", "=", "search_result", ".", "group", "(", ")", "this_expansion_array", "=", "re", ".", "findall", "(", "r'(?:[\\(\\[]([A-Za-z0-9]+)[\\)\\]](\\d*))'", ",", "this_string", ")", "for", "a", "in", "this_expansion_array", ":", "if", "a", "[", "1", "]", "==", "\"\"", ":", "a", "=", "(", "a", "[", "0", "]", ",", "1", ")", "parenth_expanded", "=", "\"\"", "multiplier", "=", "float", "(", "a", "[", "1", "]", ")", "element_array", "=", "re", ".", "findall", "(", "'[A-Z][^A-Z]*'", ",", "a", "[", "0", "]", ")", "for", "e", "in", "element_array", ":", "occurance_array", "=", "re", ".", "findall", "(", "'[0-9][^0-9]*'", ",", "e", ")", "if", "len", "(", "occurance_array", ")", "==", "0", ":", "occurance_array", ".", "append", "(", "1", ")", "for", "o", "in", "occurance_array", ":", "symbol", "=", "re", ".", "findall", "(", "'[A-Z][a-z]*'", ",", "e", ")", "total_num", "=", "float", "(", "o", ")", "*", "multiplier", "if", "total_num", ".", "is_integer", "(", ")", ":", "total_num", "=", "int", "(", "total_num", ")", "total_str", "=", "str", "(", "total_num", ")", "if", "total_str", "==", "\"1\"", ":", "total_str", "=", "\"\"", "new_string", "=", "symbol", "[", "0", "]", "+", "total_str", "parenth_expanded", "+=", "new_string", "formula_string", "=", "formula_string", "[", "0", ":", "this_start", "]", "+", "parenth_expanded", "+", "formula_string", "[", "this_end", ":", "]", "return", "_expand_formula_", "(", "formula_string", ")" ]
44.94
15.78
def run_location(self, value):
    """The run_location property.

    Args:
        value (string). the property value.
    """
    if value == self._defaults['runLocation'] and 'runLocation' in self._values:
        del self._values['runLocation']
    else:
        self._values['runLocation'] = value
[ "def", "run_location", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'runLocation'", "]", "and", "'runLocation'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'runLocation'", "]", "else", ":", "self", ".", "_values", "[", "'runLocation'", "]", "=", "value" ]
33.3
15.7
def compute_file_hashes(file_path, hashes=frozenset(['md5'])):
    """
    Digests data read from file denoted by file_path.
    """
    if not os.path.exists(file_path):
        logging.warning("%s does not exist" % file_path)
        return
    else:
        logging.debug("Computing [%s] hashes for file [%s]" %
                      (','.join(hashes), file_path))

    try:
        with open(file_path, 'rb') as fd:
            return compute_hashes(fd, hashes)
    except (IOError, OSError) as e:
        logging.warning("Error while calculating digest(s) for file %s: %s" %
                        (file_path, str(e)))
        raise
[ "def", "compute_file_hashes", "(", "file_path", ",", "hashes", "=", "frozenset", "(", "[", "'md5'", "]", ")", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "logging", ".", "warning", "(", "\"%s does not exist\"", "%", "file_path", ")", "return", "else", ":", "logging", ".", "debug", "(", "\"Computing [%s] hashes for file [%s]\"", "%", "(", "','", ".", "join", "(", "hashes", ")", ",", "file_path", ")", ")", "try", ":", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "fd", ":", "return", "compute_hashes", "(", "fd", ",", "hashes", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e", ":", "logging", ".", "warning", "(", "\"Error while calculating digest(s) for file %s: %s\"", "%", "(", "file_path", ",", "str", "(", "e", ")", ")", ")", "raise" ]
36.25
20.875
def refresh_products(self, **kwargs):
    """
    Refresh a product's cached info.

    Basically calls product_get with the passed arguments, and tries to
    intelligently update our product cache.

    For example, if we already have cached info for product=foo, and you
    pass in names=["bar", "baz"], the new cache will have info for products
    foo, bar, baz. Individual product fields are also updated.
    """
    for product in self.product_get(**kwargs):
        updated = False
        for current in self._cache.products[:]:
            if (current.get("id", -1) != product.get("id", -2) and
                    current.get("name", -1) != product.get("name", -2)):
                continue

            _nested_update(current, product)
            updated = True
            break

        if not updated:
            self._cache.products.append(product)
[ "def", "refresh_products", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "product", "in", "self", ".", "product_get", "(", "*", "*", "kwargs", ")", ":", "updated", "=", "False", "for", "current", "in", "self", ".", "_cache", ".", "products", "[", ":", "]", ":", "if", "(", "current", ".", "get", "(", "\"id\"", ",", "-", "1", ")", "!=", "product", ".", "get", "(", "\"id\"", ",", "-", "2", ")", "and", "current", ".", "get", "(", "\"name\"", ",", "-", "1", ")", "!=", "product", ".", "get", "(", "\"name\"", ",", "-", "2", ")", ")", ":", "continue", "_nested_update", "(", "current", ",", "product", ")", "updated", "=", "True", "break", "if", "not", "updated", ":", "self", ".", "_cache", ".", "products", ".", "append", "(", "product", ")" ]
40.217391
18.652174
def set_result(self, job_id, result):
    """Set the result for a job.

    This will overwrite any existing results for the job.

    Args:
        job_id: The ID of the WorkItem to set the result for.
        result: A WorkResult indicating the result of the job.

    Raises:
        KeyError: If there is no work-item with a matching job-id.
    """
    with self._conn:
        try:
            self._conn.execute(
                '''
                REPLACE INTO results
                VALUES (?, ?, ?, ?, ?)
                ''',
                _work_result_to_row(job_id, result))
        except sqlite3.IntegrityError as exc:
            raise KeyError('Can not add result with job-id {}'.format(
                job_id)) from exc
[ "def", "set_result", "(", "self", ",", "job_id", ",", "result", ")", ":", "with", "self", ".", "_conn", ":", "try", ":", "self", ".", "_conn", ".", "execute", "(", "'''\n REPLACE INTO results\n VALUES (?, ?, ?, ?, ?)\n '''", ",", "_work_result_to_row", "(", "job_id", ",", "result", ")", ")", "except", "sqlite3", ".", "IntegrityError", "as", "exc", ":", "raise", "KeyError", "(", "'Can not add result with job-id {}'", ".", "format", "(", "job_id", ")", ")", "from", "exc" ]
35
18.318182
def ethnicities_clean():
    """
    Get dictionary of unformatted ethnicity types mapped to clean
    corresponding ethnicity strings
    """
    eths_clean = {}
    fname = pkg_resources.resource_filename(__name__, 'resources/Ethnicity_Groups.csv')
    with open(fname, 'rU') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        first = []
        for row in reader:
            if first:
                for i in range(len(first)):
                    if first[i] and row[i]:
                        eths_clean[first[i]] = row[i]
                first = []
            else:
                first = deepcopy(row)
    return eths_clean
[ "def", "ethnicities_clean", "(", ")", ":", "eths_clean", "=", "{", "}", "fname", "=", "pkg_resources", ".", "resource_filename", "(", "__name__", ",", "'resources/Ethnicity_Groups.csv'", ")", "with", "open", "(", "fname", ",", "'rU'", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "','", ")", "first", "=", "[", "]", "for", "row", "in", "reader", ":", "if", "first", ":", "for", "i", "in", "range", "(", "len", "(", "first", ")", ")", ":", "if", "first", "[", "i", "]", "and", "row", "[", "i", "]", ":", "eths_clean", "[", "first", "[", "i", "]", "]", "=", "row", "[", "i", "]", "first", "=", "[", "]", "else", ":", "first", "=", "deepcopy", "(", "row", ")", "return", "eths_clean" ]
32.411765
17.764706
def generate_table_from(data):
    "Output a nicely formatted ascii table"
    table = Texttable(max_width=120)
    table.add_row(["view", "method", "status", "count", "minimum", "maximum",
                   "mean", "stdev", "queries", "querytime"])
    table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"])
    for item in sorted(data):
        mean = round(sum(data[item]['times']) / data[item]['count'], 3)
        mean_sql = round(sum(data[item]['sql']) / data[item]['count'], 3)
        mean_sqltime = round(sum(data[item]['sqltime']) / data[item]['count'], 3)
        sdsq = sum([(i - mean) ** 2 for i in data[item]['times']])
        try:
            stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5)
        except ZeroDivisionError:
            stdev = '0.00'
        minimum = "%.2f" % min(data[item]['times'])
        maximum = "%.2f" % max(data[item]['times'])
        table.add_row([data[item]['view'], data[item]['method'],
                       data[item]['status'], data[item]['count'], minimum,
                       maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime])
    return table.draw()
[ "def", "generate_table_from", "(", "data", ")", ":", "table", "=", "Texttable", "(", "max_width", "=", "120", ")", "table", ".", "add_row", "(", "[", "\"view\"", ",", "\"method\"", ",", "\"status\"", ",", "\"count\"", ",", "\"minimum\"", ",", "\"maximum\"", ",", "\"mean\"", ",", "\"stdev\"", ",", "\"queries\"", ",", "\"querytime\"", "]", ")", "table", ".", "set_cols_align", "(", "[", "\"l\"", ",", "\"l\"", ",", "\"l\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", "]", ")", "for", "item", "in", "sorted", "(", "data", ")", ":", "mean", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "mean_sql", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'sql'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "mean_sqltime", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'sqltime'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "sdsq", "=", "sum", "(", "[", "(", "i", "-", "mean", ")", "**", "2", "for", "i", "in", "data", "[", "item", "]", "[", "'times'", "]", "]", ")", "try", ":", "stdev", "=", "'%.2f'", "%", "(", "(", "sdsq", "/", "(", "len", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "-", "1", ")", ")", "**", ".5", ")", "except", "ZeroDivisionError", ":", "stdev", "=", "'0.00'", "minimum", "=", "\"%.2f\"", "%", "min", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "maximum", "=", "\"%.2f\"", "%", "max", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "table", ".", "add_row", "(", "[", "data", "[", "item", "]", "[", "'view'", "]", ",", "data", "[", "item", "]", "[", "'method'", "]", ",", "data", "[", "item", "]", "[", "'status'", "]", ",", "data", "[", "item", "]", "[", "'count'", "]", ",", "minimum", ",", "maximum", ",", "'%.3f'", "%", "mean", ",", "stdev", ",", "mean_sql", ",", "mean_sqltime", "]", ")", "return", "table", ".", "draw", "(", ")" ]
46.565217
31.173913
def appliance_time_and_locale_configuration(self):
    """
    Gets the ApplianceTimeAndLocaleConfiguration API client.

    Returns:
        ApplianceTimeAndLocaleConfiguration:
    """
    if not self.__appliance_time_and_locale_configuration:
        self.__appliance_time_and_locale_configuration = ApplianceTimeAndLocaleConfiguration(self.__connection)
    return self.__appliance_time_and_locale_configuration
[ "def", "appliance_time_and_locale_configuration", "(", "self", ")", ":", "if", "not", "self", ".", "__appliance_time_and_locale_configuration", ":", "self", ".", "__appliance_time_and_locale_configuration", "=", "ApplianceTimeAndLocaleConfiguration", "(", "self", ".", "__connection", ")", "return", "self", ".", "__appliance_time_and_locale_configuration" ]
43.8
22.4
def tagReportCallback(llrpMsg):
    """Function to run each time the reader reports seeing tags."""
    global tagReport
    tags = llrpMsg.msgdict['RO_ACCESS_REPORT']['TagReportData']
    if len(tags):
        logger.info('saw tag(s): %s', pprint.pformat(tags))
    else:
        logger.info('no tags seen')
        return
    for tag in tags:
        tagReport += tag['TagSeenCount'][0]
        if "OpSpecResult" in tag:
            # copy the binary data to the standard output stream
            data = tag["OpSpecResult"].get("ReadData")
            if data:
                if sys.version_info.major < 3:
                    sys.stdout.write(data)
                else:
                    sys.stdout.buffer.write(data)  # bytes
                logger.debug("hex data: %s", binascii.hexlify(data))
[ "def", "tagReportCallback", "(", "llrpMsg", ")", ":", "global", "tagReport", "tags", "=", "llrpMsg", ".", "msgdict", "[", "'RO_ACCESS_REPORT'", "]", "[", "'TagReportData'", "]", "if", "len", "(", "tags", ")", ":", "logger", ".", "info", "(", "'saw tag(s): %s'", ",", "pprint", ".", "pformat", "(", "tags", ")", ")", "else", ":", "logger", ".", "info", "(", "'no tags seen'", ")", "return", "for", "tag", "in", "tags", ":", "tagReport", "+=", "tag", "[", "'TagSeenCount'", "]", "[", "0", "]", "if", "\"OpSpecResult\"", "in", "tag", ":", "# copy the binary data to the standard output stream", "data", "=", "tag", "[", "\"OpSpecResult\"", "]", ".", "get", "(", "\"ReadData\"", ")", "if", "data", ":", "if", "sys", ".", "version_info", ".", "major", "<", "3", ":", "sys", ".", "stdout", ".", "write", "(", "data", ")", "else", ":", "sys", ".", "stdout", ".", "buffer", ".", "write", "(", "data", ")", "# bytes", "logger", ".", "debug", "(", "\"hex data: %s\"", ",", "binascii", ".", "hexlify", "(", "data", ")", ")" ]
40.15
16.8
def _get_deltas(self, rake):
    """
    Return the value of deltas (delta_R, delta_S, delta_V, delta_I),
    as defined in "Table 5: Model 1" pag 198
    """
    # delta_R = 1 for reverse focal mechanism (45<rake<135)
    # and for interface events, 0 for all other events
    # delta_S = 1 for Strike-slip focal mechanisms (0<=rake<=45) or
    # (135<=rake<=180) or (-45<=rake<=0), 0 for all other events
    # delta_V = 1 for TVZ events, 0 for all other events
    # delta_I = 1 for interface events, 0 for all other events
    # All deltas = 0 for Model 3: Deep Region, pag 198
    delta_R, delta_S = 0, 0
    delta_V, delta_I = 0, 0
    if rake > 45.0 and rake < 135.0:
        delta_R = 1
    if (rake >= 0.0 and rake <= 45.0) or \
            (rake >= 135 and rake <= 180.0) or \
            (rake >= -180.0 and rake <= -135.0) or \
            (rake >= -45.0 and rake < 0.0):
        delta_S = 1
    return delta_R, delta_S, delta_V, delta_I
[ "def", "_get_deltas", "(", "self", ",", "rake", ")", ":", "# delta_R = 1 for reverse focal mechanism (45<rake<135)", "# and for interface events, 0 for all other events", "# delta_S = 1 for Strike-slip focal mechanisms (0<=rake<=45) or", "# (135<=rake<=180) or (-45<=rake<=0), 0 for all other events", "# delta_V = 1 for TVZ events, 0 for all other events", "# delta_I = 1 for interface events, 0 for all other events", "# All deltas = 0 for Model 3: Deep Region, pag 198", "delta_R", ",", "delta_S", "=", "0", ",", "0", "delta_V", ",", "delta_I", "=", "0", ",", "0", "if", "rake", ">", "45.0", "and", "rake", "<", "135.0", ":", "delta_R", "=", "1", "if", "(", "rake", ">=", "0.0", "and", "rake", "<=", "45.0", ")", "or", "(", "rake", ">=", "135", "and", "rake", "<=", "180.0", ")", "or", "(", "rake", ">=", "-", "180.0", "and", "rake", "<=", "-", "135.0", ")", "or", "(", "rake", ">=", "-", "45.0", "and", "rake", "<", "0.0", ")", ":", "delta_S", "=", "1", "return", "delta_R", ",", "delta_S", ",", "delta_V", ",", "delta_I" ]
36.925926
18.62963
def minimize_matrix(self):
    """
    This method finds and returns the permutations that produce the lowest
    ewald sum calls recursive function to iterate through permutations
    """
    if self._algo == EwaldMinimizer.ALGO_FAST or \
            self._algo == EwaldMinimizer.ALGO_BEST_FIRST:
        return self._recurse(self._matrix, self._m_list,
                             set(range(len(self._matrix))))
[ "def", "minimize_matrix", "(", "self", ")", ":", "if", "self", ".", "_algo", "==", "EwaldMinimizer", ".", "ALGO_FAST", "or", "self", ".", "_algo", "==", "EwaldMinimizer", ".", "ALGO_BEST_FIRST", ":", "return", "self", ".", "_recurse", "(", "self", ".", "_matrix", ",", "self", ".", "_m_list", ",", "set", "(", "range", "(", "len", "(", "self", ".", "_matrix", ")", ")", ")", ")" ]
49.555556
19.111111
def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None,
                cmap=None, format=None, origin=None):
    """ This method saves the image from a numpy array using matplotlib

    :param fname: Location and name of the image file to be saved.
    :param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
    :param vmin: matplotlib vmin
    :param vmax: matplotlib vmax
    :param cmap: matplotlib color map
    :param format: matplotlib format
    :param origin: matplotlib origin

    This method will return True if successful
    """
    from matplotlib.backends.backend_agg \
        import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    from pylab import cm

    if pixel_array is None:
        pixel_array = self.numpy

    if cmap is None:
        cmap = cm.bone
    fig = Figure(figsize=pixel_array.shape[::-1], dpi=1, frameon=False)
    canvas = FigureCanvas(fig)
    fig.figimage(pixel_array, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)
    fig.savefig(fname, dpi=1, format=format)
    return True
[ "def", "save_as_plt", "(", "self", ",", "fname", ",", "pixel_array", "=", "None", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "cmap", "=", "None", ",", "format", "=", "None", ",", "origin", "=", "None", ")", ":", "from", "matplotlib", ".", "backends", ".", "backend_agg", "import", "FigureCanvasAgg", "as", "FigureCanvas", "from", "matplotlib", ".", "figure", "import", "Figure", "from", "pylab", "import", "cm", "if", "pixel_array", "is", "None", ":", "pixel_array", "=", "self", ".", "numpy", "if", "cmap", "is", "None", ":", "cmap", "=", "cm", ".", "bone", "fig", "=", "Figure", "(", "figsize", "=", "pixel_array", ".", "shape", "[", ":", ":", "-", "1", "]", ",", "dpi", "=", "1", ",", "frameon", "=", "False", ")", "canvas", "=", "FigureCanvas", "(", "fig", ")", "fig", ".", "figimage", "(", "pixel_array", ",", "cmap", "=", "cmap", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ",", "origin", "=", "origin", ")", "fig", ".", "savefig", "(", "fname", ",", "dpi", "=", "1", ",", "format", "=", "format", ")", "return", "True" ]
37.9
14.566667
def process_full_position(data, header, var_only=False):
    """
    Return genetic data when all alleles called on same line.

    Returns an array containing one item, a tuple of five items:
        (string) chromosome
        (string) start position (1-based)
        (array of strings) matching dbSNP entries
        (string) reference allele sequence
        (array of strings) the genome's allele sequences
    """
    feature_type = data[header['varType']]
    # Skip unmatchable, uncovered, or pseudoautosomal-in-X
    if (feature_type == 'no-ref' or
            feature_type.startswith('PAR-called-in-X')):
        return None
    if var_only and feature_type in ['no-call', 'ref']:
        return None

    filters = []
    if feature_type == 'no-call':
        filters.append('NOCALL')
    if 'varQuality' in header:
        if 'VQLOW' in data[header['varQuality']]:
            filters.append('VQLOW')
    else:
        var_filter = data[header['varFilter']]
        if var_filter and not var_filter == "PASS":
            filters = filters + var_filter.split(';')

    chrom = data[header['chromosome']]
    start = data[header['begin']]
    ref_allele = data[header['reference']]
    alleles = [data[header['alleleSeq']]]
    dbsnp_data = []
    dbsnp_data = data[header['xRef']].split(';')
    assert data[header['ploidy']] in ['1', '2']
    if feature_type == 'ref' or feature_type == 'no-call':
        return [{'chrom': chrom,
                 'start': start,
                 'dbsnp_data': dbsnp_data,
                 'ref_seq': ref_allele,
                 'alleles': alleles,
                 'allele_count': data[header['ploidy']],
                 'filters': filters,
                 'end': data[header['end']]}]
    else:
        return [{'chrom': chrom,
                 'start': start,
                 'dbsnp_data': dbsnp_data,
                 'ref_seq': ref_allele,
                 'alleles': alleles,
                 'allele_count': data[header['ploidy']],
                 'filters': filters}]
[ "def", "process_full_position", "(", "data", ",", "header", ",", "var_only", "=", "False", ")", ":", "feature_type", "=", "data", "[", "header", "[", "'varType'", "]", "]", "# Skip unmatchable, uncovered, or pseudoautosomal-in-X", "if", "(", "feature_type", "==", "'no-ref'", "or", "feature_type", ".", "startswith", "(", "'PAR-called-in-X'", ")", ")", ":", "return", "None", "if", "var_only", "and", "feature_type", "in", "[", "'no-call'", ",", "'ref'", "]", ":", "return", "None", "filters", "=", "[", "]", "if", "feature_type", "==", "'no-call'", ":", "filters", ".", "append", "(", "'NOCALL'", ")", "if", "'varQuality'", "in", "header", ":", "if", "'VQLOW'", "in", "data", "[", "header", "[", "'varQuality'", "]", "]", ":", "filters", ".", "append", "(", "'VQLOW'", ")", "else", ":", "var_filter", "=", "data", "[", "header", "[", "'varFilter'", "]", "]", "if", "var_filter", "and", "not", "var_filter", "==", "\"PASS\"", ":", "filters", "=", "filters", "+", "var_filter", ".", "split", "(", "';'", ")", "chrom", "=", "data", "[", "header", "[", "'chromosome'", "]", "]", "start", "=", "data", "[", "header", "[", "'begin'", "]", "]", "ref_allele", "=", "data", "[", "header", "[", "'reference'", "]", "]", "alleles", "=", "[", "data", "[", "header", "[", "'alleleSeq'", "]", "]", "]", "dbsnp_data", "=", "[", "]", "dbsnp_data", "=", "data", "[", "header", "[", "'xRef'", "]", "]", ".", "split", "(", "';'", ")", "assert", "data", "[", "header", "[", "'ploidy'", "]", "]", "in", "[", "'1'", ",", "'2'", "]", "if", "feature_type", "==", "'ref'", "or", "feature_type", "==", "'no-call'", ":", "return", "[", "{", "'chrom'", ":", "chrom", ",", "'start'", ":", "start", ",", "'dbsnp_data'", ":", "dbsnp_data", ",", "'ref_seq'", ":", "ref_allele", ",", "'alleles'", ":", "alleles", ",", "'allele_count'", ":", "data", "[", "header", "[", "'ploidy'", "]", "]", ",", "'filters'", ":", "filters", ",", "'end'", ":", "data", "[", "header", "[", "'end'", "]", "]", "}", "]", "else", ":", "return", "[", "{", "'chrom'", ":", "chrom", ",", "'start'", ":", "start", ",", "'dbsnp_data'", ":", "dbsnp_data", ",", "'ref_seq'", ":", "ref_allele", ",", "'alleles'", ":", "alleles", ",", "'allele_count'", ":", "data", "[", "header", "[", "'ploidy'", "]", "]", ",", "'filters'", ":", "filters", "}", "]" ]
37.056604
12.264151
def diff_aff(self):
    """Symmetric diffusion affinity matrix

    Return or calculate the symmetric diffusion affinity matrix

    .. math:: A(x,y) = K(x,y) (d(x) d(y))^{-1/2}

    where :math:`d` is the degrees (row sums of the kernel.)

    Returns
    -------

    diff_aff : array-like, shape=[n_samples, n_samples]
        symmetric diffusion affinity matrix defined as a
        doubly-stochastic form of the kernel matrix
    """
    row_degrees = np.array(self.kernel.sum(axis=1)).reshape(-1, 1)
    col_degrees = np.array(self.kernel.sum(axis=0)).reshape(1, -1)
    if sparse.issparse(self.kernel):
        return self.kernel.multiply(1 / np.sqrt(row_degrees)).multiply(
            1 / np.sqrt(col_degrees))
    else:
        return (self.kernel / np.sqrt(row_degrees)) / np.sqrt(col_degrees)
[ "def", "diff_aff", "(", "self", ")", ":", "row_degrees", "=", "np", ".", "array", "(", "self", ".", "kernel", ".", "sum", "(", "axis", "=", "1", ")", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "col_degrees", "=", "np", ".", "array", "(", "self", ".", "kernel", ".", "sum", "(", "axis", "=", "0", ")", ")", ".", "reshape", "(", "1", ",", "-", "1", ")", "if", "sparse", ".", "issparse", "(", "self", ".", "kernel", ")", ":", "return", "self", ".", "kernel", ".", "multiply", "(", "1", "/", "np", ".", "sqrt", "(", "row_degrees", ")", ")", ".", "multiply", "(", "1", "/", "np", ".", "sqrt", "(", "col_degrees", ")", ")", "else", ":", "return", "(", "self", ".", "kernel", "/", "np", ".", "sqrt", "(", "row_degrees", ")", ")", "/", "np", ".", "sqrt", "(", "col_degrees", ")" ]
36.956522
23.869565
def seed_url(self):
    """A URL that can be used to open the page.

    The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then
    appended to :py:attr:`base_url` unless the template results in an
    absolute URL.

    :return: URL that can be used to open the page.
    :rtype: str
    """
    url = self.base_url
    if self.URL_TEMPLATE is not None:
        url = urlparse.urljoin(
            self.base_url, self.URL_TEMPLATE.format(**self.url_kwargs)
        )
    if not url:
        return None
    url_parts = list(urlparse.urlparse(url))
    query = urlparse.parse_qsl(url_parts[4])
    for k, v in self.url_kwargs.items():
        if v is None:
            continue
        if "{{{}}}".format(k) not in str(self.URL_TEMPLATE):
            for i in iterable(v):
                query.append((k, i))
    url_parts[4] = urlencode(query)
    return urlparse.urlunparse(url_parts)
[ "def", "seed_url", "(", "self", ")", ":", "url", "=", "self", ".", "base_url", "if", "self", ".", "URL_TEMPLATE", "is", "not", "None", ":", "url", "=", "urlparse", ".", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "URL_TEMPLATE", ".", "format", "(", "*", "*", "self", ".", "url_kwargs", ")", ")", "if", "not", "url", ":", "return", "None", "url_parts", "=", "list", "(", "urlparse", ".", "urlparse", "(", "url", ")", ")", "query", "=", "urlparse", ".", "parse_qsl", "(", "url_parts", "[", "4", "]", ")", "for", "k", ",", "v", "in", "self", ".", "url_kwargs", ".", "items", "(", ")", ":", "if", "v", "is", "None", ":", "continue", "if", "\"{{{}}}\"", ".", "format", "(", "k", ")", "not", "in", "str", "(", "self", ".", "URL_TEMPLATE", ")", ":", "for", "i", "in", "iterable", "(", "v", ")", ":", "query", ".", "append", "(", "(", "k", ",", "i", ")", ")", "url_parts", "[", "4", "]", "=", "urlencode", "(", "query", ")", "return", "urlparse", ".", "urlunparse", "(", "url_parts", ")" ]
30.21875
19.46875
def main(*argv):
    """ main driver of program """
    try:
        adminUsername = argv[0]
        adminPassword = argv[1]
        siteURL = argv[2]
        username = argv[3]
        groupName = argv[4]
        #   Logic
        #
        #   Connect to AGOL
        #
        sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
        admin = arcrest.manageorg.Administration(securityHandler=sh)
        #   Get the group ID
        #
        community = admin.community
        groupId = community.getGroupIDs(groupNames=[groupName])[0]
        #   Add the User to the Group
        #
        groups = community.groups
        res = groups.group(groupId=groupId).addUsersToGroups(users=username)
        if len(res['notAdded']) == 0:
            arcpy.SetParameterAsText(5, True)
        else:
            arcpy.SetParameterAsText(5, False)
    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
        arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError, f_e:
        messages = f_e.args[0]
        arcpy.AddError("error in function: %s" % messages["function"])
        arcpy.AddError("error on line: %s" % messages["line"])
        arcpy.AddError("error in file name: %s" % messages["filename"])
        arcpy.AddError("with error message: %s" % messages["synerror"])
        arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
[ "def", "main", "(", "*", "argv", ")", ":", "try", ":", "adminUsername", "=", "argv", "[", "0", "]", "adminPassword", "=", "argv", "[", "1", "]", "siteURL", "=", "argv", "[", "2", "]", "username", "=", "argv", "[", "3", "]", "groupName", "=", "argv", "[", "4", "]", "# Logic", "#", "# Connect to AGOL", "#", "sh", "=", "arcrest", ".", "AGOLTokenSecurityHandler", "(", "adminUsername", ",", "adminPassword", ")", "admin", "=", "arcrest", ".", "manageorg", ".", "Administration", "(", "securityHandler", "=", "sh", ")", "# Get the group ID", "#", "community", "=", "admin", ".", "community", "groupId", "=", "community", ".", "getGroupIDs", "(", "groupNames", "=", "[", "groupName", "]", ")", "[", "0", "]", "# Add the User to the Group", "#", "groups", "=", "community", ".", "groups", "res", "=", "groups", ".", "group", "(", "groupId", "=", "groupId", ")", ".", "addUsersToGroups", "(", "users", "=", "username", ")", "if", "len", "(", "res", "[", "'notAdded'", "]", ")", "==", "0", ":", "arcpy", ".", "SetParameterAsText", "(", "5", ",", "True", ")", "else", ":", "arcpy", ".", "SetParameterAsText", "(", "5", ",", "False", ")", "except", "arcpy", ".", "ExecuteError", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "line", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "filename", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "synerror", ")", "arcpy", ".", "AddError", "(", "\"ArcPy Error Message: %s\"", "%", "arcpy", ".", "GetMessages", "(", "2", ")", ")", "except", "FunctionError", ",", "f_e", ":", "messages", "=", "f_e", ".", "args", "[", "0", "]", "arcpy", ".", "AddError", "(", "\"error in function: %s\"", "%", "messages", "[", "\"function\"", "]", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "messages", "[", "\"line\"", "]", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "messages", "[", "\"filename\"", "]", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "messages", "[", "\"synerror\"", "]", ")", "arcpy", ".", "AddError", "(", "\"ArcPy Error Message: %s\"", "%", "messages", "[", "\"arc\"", "]", ")", "except", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "line", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "filename", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "synerror", ")" ]
40.272727
18.181818
def rows(self):
    """
    Return/yield tuples or lists corresponding to each row to be inserted.
    """
    with self.input().open('r') as fobj:
        for line in fobj:
            yield line.strip('\n').split('\t')
[ "def", "rows", "(", "self", ")", ":", "with", "self", ".", "input", "(", ")", ".", "open", "(", "'r'", ")", "as", "fobj", ":", "for", "line", "in", "fobj", ":", "yield", "line", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\t'", ")" ]
34
12.571429
def move_up(self):
    """Move up one level in the hierarchy, unless already on top."""
    if self.current_item.parent is not None:
        self.current_item = self.current_item.parent
        for f in self._hooks["up"]:
            f(self)
        if self.current_item is self.root:
            for f in self._hooks["top"]:
                f(self)
    return self
[ "def", "move_up", "(", "self", ")", ":", "if", "self", ".", "current_item", ".", "parent", "is", "not", "None", ":", "self", ".", "current_item", "=", "self", ".", "current_item", ".", "parent", "for", "f", "in", "self", ".", "_hooks", "[", "\"up\"", "]", ":", "f", "(", "self", ")", "if", "self", ".", "current_item", "is", "self", ".", "root", ":", "for", "f", "in", "self", ".", "_hooks", "[", "\"top\"", "]", ":", "f", "(", "self", ")", "return", "self" ]
33.818182
13.818182
def get_time_index_range(self, date_search_start=None,
                         date_search_end=None,
                         time_index_start=None,
                         time_index_end=None,
                         time_index=None):
    """
    Generates a time index range based on time bounds given.
    This is useful for subset data extraction.

    Parameters
    ----------
    date_search_start: :obj:`datetime.datetime`, optional
        This is a datetime object with the date of the minimum date for
        starting.
    date_search_end: :obj:`datetime.datetime`, optional
        This is a datetime object with the date of the maximum date for
        ending.
    time_index_start: int, optional
        This is the index of the start of the time array subset. Useful
        for the old file version.
    time_index_end: int, optional
        This is the index of the end of the time array subset. Useful for
        the old file version.
    time_index: int, optional
        This is the index of time to return in the case that your code
        only wants one index. Used internally.

    Returns
    -------
    :obj:`numpy.array`:
        This is an array of time indices used to extract a subset of data.

    CF-Compliant Qout File Example:

    .. code:: python

        from datetime import datetime
        from RAPIDpy import RAPIDDataset

        path_to_rapid_qout = '/path/to/Qout.nc'

        with RAPIDDataset(path_to_rapid_qout) as qout_nc:
            time_index_range = qout_nc.get_time_index_range(
                date_search_start=datetime(1980, 1, 1),
                date_search_end=datetime(1980, 12, 11))

    Legacy Qout File Example:

    .. code:: python

        from datetime import datetime
        from RAPIDpy import RAPIDDataset

        path_to_rapid_qout = '/path/to/Qout.nc'

        with RAPIDDataset(path_to_rapid_qout,
                          datetime_simulation_start=datetime(1980, 1, 1),
                          simulation_time_step_seconds=3600) as qout_nc:
            time_index_range = qout_nc.get_time_index_range(
                date_search_start=datetime(1980, 1, 1),
                date_search_end=datetime(1980, 12, 11))
    """
    # get the range of time based on datetime range
    time_range = None
    if ((self.is_time_variable_valid() or self._is_legacy_time_valid())
            and (date_search_start is not None
                 or date_search_end is not None)):
        log("Determining time range ({0} to {1})"
            "...".format(date_search_start, date_search_end),
            "INFO")
        time_array = self.get_time_array()
        if date_search_start is not None:
            date_search_start_utc = date_search_start
            if self.out_tzinfo is not None:
                date_search_start_utc = self.out_tzinfo \
                    .localize(date_search_start) \
                    .astimezone(utc) \
                    .replace(tzinfo=None)
            seconds_start = (date_search_start_utc -
                             datetime.datetime(1970, 1, 1)).total_seconds()
            time_range = np.where(time_array >= seconds_start)[0]

        if date_search_end is not None:
            date_search_end_utc = date_search_end
            if self.out_tzinfo is not None:
                date_search_end_utc = self.out_tzinfo \
                    .localize(date_search_end) \
                    .astimezone(utc) \
                    .replace(tzinfo=None)
            seconds_end = (date_search_end_utc -
                           datetime.datetime(1970, 1, 1)).total_seconds()
            if time_range is not None:
                time_range = np.intersect1d(
                    time_range,
                    np.where(time_array <= seconds_end)[0])
            else:
                time_range = np.where(time_array <= seconds_end)[0]

    # get the range of time based on time index range
    elif time_index_start is not None or time_index_end is not None:
        if time_index_start is None:
            time_index_start = 0
        if time_index_end is None:
            time_index_end = self.size_time
        time_range = range(time_index_start, time_index_end)

    # get only one time step
    elif time_index is not None:
        time_range = [time_index]

    # return all
    else:
        time_range = range(self.size_time)

    return time_range
[ "def", "get_time_index_range", "(", "self", ",", "date_search_start", "=", "None", ",", "date_search_end", "=", "None", ",", "time_index_start", "=", "None", ",", "time_index_end", "=", "None", ",", "time_index", "=", "None", ")", ":", "# get the range of time based on datetime range", "time_range", "=", "None", "if", "(", "(", "self", ".", "is_time_variable_valid", "(", ")", "or", "self", ".", "_is_legacy_time_valid", "(", ")", ")", "and", "(", "date_search_start", "is", "not", "None", "or", "date_search_end", "is", "not", "None", ")", ")", ":", "log", "(", "\"Determining time range ({0} to {1})\"", "\"...\"", ".", "format", "(", "date_search_start", ",", "date_search_end", ")", ",", "\"INFO\"", ")", "time_array", "=", "self", ".", "get_time_array", "(", ")", "if", "date_search_start", "is", "not", "None", ":", "date_search_start_utc", "=", "date_search_start", "if", "self", ".", "out_tzinfo", "is", "not", "None", ":", "date_search_start_utc", "=", "self", ".", "out_tzinfo", ".", "localize", "(", "date_search_start", ")", ".", "astimezone", "(", "utc", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "seconds_start", "=", "(", "date_search_start_utc", "-", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", ")", ".", "total_seconds", "(", ")", "time_range", "=", "np", ".", "where", "(", "time_array", ">=", "seconds_start", ")", "[", "0", "]", "if", "date_search_end", "is", "not", "None", ":", "date_search_end_utc", "=", "date_search_end", "if", "self", ".", "out_tzinfo", "is", "not", "None", ":", "date_search_end_utc", "=", "self", ".", "out_tzinfo", ".", "localize", "(", "date_search_end", ")", ".", "astimezone", "(", "utc", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "seconds_end", "=", "(", "date_search_end_utc", "-", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", ")", ".", "total_seconds", "(", ")", "if", "time_range", "is", "not", "None", ":", "time_range", "=", "np", ".", "intersect1d", "(", "time_range", ",", "np", ".", "where", "(", "time_array", "<=", "seconds_end", ")", "[", "0", "]", ")", "else", ":", "time_range", "=", "np", ".", "where", "(", "time_array", "<=", "seconds_end", ")", "[", "0", "]", "# get the range of time based on time index range", "elif", "time_index_start", "is", "not", "None", "or", "time_index_end", "is", "not", "None", ":", "if", "time_index_start", "is", "None", ":", "time_index_start", "=", "0", "if", "time_index_end", "is", "None", ":", "time_index_end", "=", "self", ".", "size_time", "time_range", "=", "range", "(", "time_index_start", ",", "time_index_end", ")", "# get only one time step", "elif", "time_index", "is", "not", "None", ":", "time_range", "=", "[", "time_index", "]", "# return all", "else", ":", "time_range", "=", "range", "(", "self", ".", "size_time", ")", "return", "time_range" ]
40.94958
20.563025
def deploy_app(self, site_folder, runtime_type=''): ''' a method to deploy a static html page to heroku using php ''' title = '%s.deploy_php' % self.__class__.__name__ # validate inputs input_fields = { 'site_folder': site_folder, 'runtime_type': runtime_type } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # verify app subdomain if not self.subdomain: raise Exception('You must access a subdomain before you can deploy to heroku. Try: %s.access()' % self.__class__.__name__) # validate existence of site folder from os import path if not path.exists(site_folder): raise ValueError('%s is not a valid path on localhost.' % site_folder) # validate existence of proper runtime file runtime_file = 'index.html' static_build = False if runtime_type == 'php': runtime_file = 'index.php' elif runtime_type in ('ruby', 'java', 'python', 'jingo'): runtime_file = 'Procfile' elif runtime_type == 'node': runtime_file = 'package.json' else: runtime_type = 'html' static_build = True build_file = path.join(site_folder, runtime_file) if not path.exists(build_file): raise Exception('%s must contain an %s file to build a %s app.' % (site_folder, runtime_file, runtime_type)) if runtime_type == 'python': req_file = path.join(site_folder, 'requirements.txt') if not path.exists(req_file): raise Exception('%s must contain a requirements.txt file to build a python app.' % site_folder) if runtime_type == 'jingo': req_file = path.join(site_folder, 'package.json') if not path.exists(req_file): raise Exception('%s must contain a package.json file to build a jingo app.' % site_folder) # validate container plugin from os import devnull from subprocess import check_output self.printer('Checking heroku plugin requirements ... ', flush=True) sys_command = 'heroku plugins' heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8') if heroku_plugins.find('heroku-builds') == -1: self.printer('ERROR') raise Exception( 'heroku builds plugin required. Try: heroku plugins:install heroku-builds') self.printer('done.') # construct temporary folder self.printer('Creating temporary files ... ', flush=True) from shutil import copytree, move, ignore_patterns from os import makedirs from time import time from labpack import __module__ from labpack.storage.appdata import appdataClient client_kwargs = { 'collection_name': 'TempFiles', 'prod_name': __module__ } tempfiles_client = appdataClient(**client_kwargs) temp_folder = path.join(tempfiles_client.collection_folder, 'heroku%s' % time()) # define cleanup function def _cleanup_temp(): self.printer('Cleaning up temporary files ... ', flush=True) from shutil import rmtree rmtree(temp_folder, ignore_errors=True) self.printer('done.') # copy site to temporary folder try: makedirs(temp_folder) site_root, site_name = path.split(path.abspath(site_folder)) build_path = path.join(temp_folder, site_name) copytree(site_folder, build_path, ignore=ignore_patterns('*node_modules/*','*.lab/*')) if static_build: index_path = path.join(build_path, 'index.html') home_path = path.join(build_path, 'home.html') compose_path = path.join(build_path, 'compose.json') php_path = path.join(build_path, 'index.php') with open(compose_path, 'wt') as f: f.write('{}') f.close() with open(php_path, 'wt') as f: f.write('<?php include_once("home.html"); ?>') f.close() move(index_path, home_path) except: self.printer('ERROR') _cleanup_temp() raise self.printer('done.') # deploy site to heroku self.printer('Deploying %s to heroku ... ' % site_folder, flush=True) try: sys_command = 'cd %s; heroku builds:create -a %s' % (temp_folder, self.subdomain) self._handle_command(sys_command, print_pipe=True) except: self.printer('ERROR') raise finally: _cleanup_temp() self.printer('Deployment complete.') return True
[ "def", "deploy_app", "(", "self", ",", "site_folder", ",", "runtime_type", "=", "''", ")", ":", "title", "=", "'%s.deploy_php'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input_fields", "=", "{", "'site_folder'", ":", "site_folder", ",", "'runtime_type'", ":", "runtime_type", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# verify app subdomain\r", "if", "not", "self", ".", "subdomain", ":", "raise", "Exception", "(", "'You must access a subdomain before you can deploy to heroku. Try: %s.access()'", "%", "self", ".", "__class__", ".", "__name__", ")", "# validate existence of site folder\r", "from", "os", "import", "path", "if", "not", "path", ".", "exists", "(", "site_folder", ")", ":", "raise", "ValueError", "(", "'%s is not a valid path on localhost.'", "%", "site_folder", ")", "# validate existence of proper runtime file\r", "runtime_file", "=", "'index.html'", "static_build", "=", "False", "if", "runtime_type", "==", "'php'", ":", "runtime_file", "=", "'index.php'", "elif", "runtime_type", "in", "(", "'ruby'", ",", "'java'", ",", "'python'", ",", "'jingo'", ")", ":", "runtime_file", "=", "'Procfile'", "elif", "runtime_type", "==", "'node'", ":", "runtime_file", "=", "'package.json'", "else", ":", "runtime_type", "=", "'html'", "static_build", "=", "True", "build_file", "=", "path", ".", "join", "(", "site_folder", ",", "runtime_file", ")", "if", "not", "path", ".", "exists", "(", "build_file", ")", ":", "raise", "Exception", "(", "'%s must contain an %s file to build a %s app.'", "%", "(", "site_folder", ",", "runtime_file", ",", "runtime_type", ")", ")", "if", "runtime_type", "==", "'python'", ":", "req_file", "=", "path", ".", "join", "(", "site_folder", ",", "'requirements.txt'", ")", "if", "not", "path", ".", "exists", "(", "req_file", ")", ":", "raise", "Exception", "(", "'%s must contain a requirements.txt file to build a python app.'", "%", "site_folder", ")", "if", "runtime_type", "==", "'jingo'", ":", "req_file", "=", "path", ".", "join", "(", "site_folder", ",", "'package.json'", ")", "if", "not", "path", ".", "exists", "(", "req_file", ")", ":", "raise", "Exception", "(", "'%s must contain a package.json file to build a jingo app.'", "%", "site_folder", ")", "# validate container plugin\r", "from", "os", "import", "devnull", "from", "subprocess", "import", "check_output", "self", ".", "printer", "(", "'Checking heroku plugin requirements ... '", ",", "flush", "=", "True", ")", "sys_command", "=", "'heroku plugins'", "heroku_plugins", "=", "check_output", "(", "sys_command", ",", "shell", "=", "True", ",", "stderr", "=", "open", "(", "devnull", ",", "'wb'", ")", ")", ".", "decode", "(", "'utf-8'", ")", "if", "heroku_plugins", ".", "find", "(", "'heroku-builds'", ")", "==", "-", "1", ":", "self", ".", "printer", "(", "'ERROR'", ")", "raise", "Exception", "(", "'heroku builds plugin required. Try: heroku plugins:install heroku-builds'", ")", "self", ".", "printer", "(", "'done.'", ")", "# construct temporary folder\r", "self", ".", "printer", "(", "'Creating temporary files ... 
'", ",", "flush", "=", "True", ")", "from", "shutil", "import", "copytree", ",", "move", ",", "ignore_patterns", "from", "os", "import", "makedirs", "from", "time", "import", "time", "from", "labpack", "import", "__module__", "from", "labpack", ".", "storage", ".", "appdata", "import", "appdataClient", "client_kwargs", "=", "{", "'collection_name'", ":", "'TempFiles'", ",", "'prod_name'", ":", "__module__", "}", "tempfiles_client", "=", "appdataClient", "(", "*", "*", "client_kwargs", ")", "temp_folder", "=", "path", ".", "join", "(", "tempfiles_client", ".", "collection_folder", ",", "'heroku%s'", "%", "time", "(", ")", ")", "# define cleanup function\r", "def", "_cleanup_temp", "(", ")", ":", "self", ".", "printer", "(", "'Cleaning up temporary files ... '", ",", "flush", "=", "True", ")", "from", "shutil", "import", "rmtree", "rmtree", "(", "temp_folder", ",", "ignore_errors", "=", "True", ")", "self", ".", "printer", "(", "'done.'", ")", "# copy site to temporary folder\r", "try", ":", "makedirs", "(", "temp_folder", ")", "site_root", ",", "site_name", "=", "path", ".", "split", "(", "path", ".", "abspath", "(", "site_folder", ")", ")", "build_path", "=", "path", ".", "join", "(", "temp_folder", ",", "site_name", ")", "copytree", "(", "site_folder", ",", "build_path", ",", "ignore", "=", "ignore_patterns", "(", "'*node_modules/*'", ",", "'*.lab/*'", ")", ")", "if", "static_build", ":", "index_path", "=", "path", ".", "join", "(", "build_path", ",", "'index.html'", ")", "home_path", "=", "path", ".", "join", "(", "build_path", ",", "'home.html'", ")", "compose_path", "=", "path", ".", "join", "(", "build_path", ",", "'compose.json'", ")", "php_path", "=", "path", ".", "join", "(", "build_path", ",", "'index.php'", ")", "with", "open", "(", "compose_path", ",", "'wt'", ")", "as", "f", ":", "f", ".", "write", "(", "'{}'", ")", "f", ".", "close", "(", ")", "with", "open", "(", "php_path", ",", "'wt'", ")", "as", "f", ":", "f", ".", "write", "(", "'<?php include_once(\"home.html\"); ?>'", ")", "f", ".", "close", "(", ")", "move", "(", "index_path", ",", "home_path", ")", "except", ":", "self", ".", "printer", "(", "'ERROR'", ")", "_cleanup_temp", "(", ")", "raise", "self", ".", "printer", "(", "'done.'", ")", "# deploy site to heroku\r", "self", ".", "printer", "(", "'Deploying %s to heroku ... '", "%", "site_folder", ",", "flush", "=", "True", ")", "try", ":", "sys_command", "=", "'cd %s; heroku builds:create -a %s'", "%", "(", "temp_folder", ",", "self", ".", "subdomain", ")", "self", ".", "_handle_command", "(", "sys_command", ",", "print_pipe", "=", "True", ")", "except", ":", "self", ".", "printer", "(", "'ERROR'", ")", "raise", "finally", ":", "_cleanup_temp", "(", ")", "self", ".", "printer", "(", "'Deployment complete.'", ")", "return", "True" ]
41.958333
20.275
def resample(self, indexer: Optional[Mapping[Hashable, str]] = None, skipna=None, closed: Optional[str] = None, label: Optional[str] = None, base: int = 0, keep_attrs: Optional[bool] = None, loffset=None, **indexer_kwargs: str): """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : {dim: freq}, optional Mapping from the dimension name to resample frequency. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. closed : 'left' or 'right', optional Side of each interval to treat as closed. label : 'left or 'right', optional Side of each interval to use for labeling. base : int, optional For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '24H' frequency, base could range from 0 through 23. loffset : timedelta or str, optional Offset used to adjust the resampled time labels. Some pandas date offset strings are supported. keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **indexer_kwargs : {dim: freq} The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : same type as caller This object resampled. Examples -------- Downsample monthly time-series data to seasonal data: >>> da = xr.DataArray(np.linspace(0, 11, num=12), ... coords=[pd.date_range('15/12/1999', ... periods=12, freq=pd.DateOffset(months=1))], ... dims='time') >>> da <xarray.DataArray (time: 12)> array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ... >>> da.resample(time="QS-DEC").mean() <xarray.DataArray (time: 4)> array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01 Upsample monthly time-series data to daily data: >>> da.resample(time='1D').interpolate('linear') <xarray.DataArray (time: 337)> array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ]) Coordinates: * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ... Limit scope of upsampling method >>> da.resample(time='1D').nearest(tolerance='1D') <xarray.DataArray (time: 337)> array([ 0., 0., nan, ..., nan, 11., 11.]) Coordinates: * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15 References ---------- .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases """ # noqa # TODO support non-string indexer after removing the old API. from .dataarray import DataArray from .resample import RESAMPLE_DIM from ..coding.cftimeindex import CFTimeIndex if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) # note: the second argument (now 'skipna') use to be 'dim' if ((skipna is not None and not isinstance(skipna, bool)) or ('how' in indexer_kwargs and 'how' not in self.dims) or ('dim' in indexer_kwargs and 'dim' not in self.dims)): raise TypeError( 'resample() no longer supports the `how` or ' '`dim` arguments. Instead call methods on resample ' "objects, e.g., data.resample(time='1D').mean()") indexer = either_dict_or_kwargs(indexer, indexer_kwargs, 'resample') if len(indexer) != 1: raise ValueError( "Resampling only supported along single dimensions." ) dim, freq = next(iter(indexer.items())) dim_name = dim dim_coord = self[dim] if isinstance(self.indexes[dim_name], CFTimeIndex): from .resample_cftime import CFTimeGrouper grouper = CFTimeGrouper(freq, closed, label, base, loffset) else: # TODO: to_offset() call required for pandas==0.19.2 grouper = pd.Grouper(freq=freq, closed=closed, label=label, base=base, loffset=pd.tseries.frequencies.to_offset( loffset)) group = DataArray(dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM) resampler = self._resample_cls(self, group=group, dim=dim_name, grouper=grouper, resample_dim=RESAMPLE_DIM) return resampler
[ "def", "resample", "(", "self", ",", "indexer", ":", "Optional", "[", "Mapping", "[", "Hashable", ",", "str", "]", "]", "=", "None", ",", "skipna", "=", "None", ",", "closed", ":", "Optional", "[", "str", "]", "=", "None", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ",", "base", ":", "int", "=", "0", ",", "keep_attrs", ":", "Optional", "[", "bool", "]", "=", "None", ",", "loffset", "=", "None", ",", "*", "*", "indexer_kwargs", ":", "str", ")", ":", "# noqa", "# TODO support non-string indexer after removing the old API.", "from", ".", "dataarray", "import", "DataArray", "from", ".", "resample", "import", "RESAMPLE_DIM", "from", ".", ".", "coding", ".", "cftimeindex", "import", "CFTimeIndex", "if", "keep_attrs", "is", "None", ":", "keep_attrs", "=", "_get_keep_attrs", "(", "default", "=", "False", ")", "# note: the second argument (now 'skipna') use to be 'dim'", "if", "(", "(", "skipna", "is", "not", "None", "and", "not", "isinstance", "(", "skipna", ",", "bool", ")", ")", "or", "(", "'how'", "in", "indexer_kwargs", "and", "'how'", "not", "in", "self", ".", "dims", ")", "or", "(", "'dim'", "in", "indexer_kwargs", "and", "'dim'", "not", "in", "self", ".", "dims", ")", ")", ":", "raise", "TypeError", "(", "'resample() no longer supports the `how` or '", "'`dim` arguments. Instead call methods on resample '", "\"objects, e.g., data.resample(time='1D').mean()\"", ")", "indexer", "=", "either_dict_or_kwargs", "(", "indexer", ",", "indexer_kwargs", ",", "'resample'", ")", "if", "len", "(", "indexer", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Resampling only supported along single dimensions.\"", ")", "dim", ",", "freq", "=", "next", "(", "iter", "(", "indexer", ".", "items", "(", ")", ")", ")", "dim_name", "=", "dim", "dim_coord", "=", "self", "[", "dim", "]", "if", "isinstance", "(", "self", ".", "indexes", "[", "dim_name", "]", ",", "CFTimeIndex", ")", ":", "from", ".", "resample_cftime", "import", "CFTimeGrouper", "grouper", "=", "CFTimeGrouper", "(", "freq", ",", "closed", ",", "label", ",", "base", ",", "loffset", ")", "else", ":", "# TODO: to_offset() call required for pandas==0.19.2", "grouper", "=", "pd", ".", "Grouper", "(", "freq", "=", "freq", ",", "closed", "=", "closed", ",", "label", "=", "label", ",", "base", "=", "base", ",", "loffset", "=", "pd", ".", "tseries", ".", "frequencies", ".", "to_offset", "(", "loffset", ")", ")", "group", "=", "DataArray", "(", "dim_coord", ",", "coords", "=", "dim_coord", ".", "coords", ",", "dims", "=", "dim_coord", ".", "dims", ",", "name", "=", "RESAMPLE_DIM", ")", "resampler", "=", "self", ".", "_resample_cls", "(", "self", ",", "group", "=", "group", ",", "dim", "=", "dim_name", ",", "grouper", "=", "grouper", ",", "resample_dim", "=", "RESAMPLE_DIM", ")", "return", "resampler" ]
43.129032
22.072581
def stop_button_click_handler(self): """Method to handle what to do when the stop button is pressed""" self.stop_button.setDisabled(True) # Interrupt computations or stop debugging if not self.shellwidget._reading: self.interrupt_kernel() else: self.shellwidget.write_to_stdin('exit')
[ "def", "stop_button_click_handler", "(", "self", ")", ":", "self", ".", "stop_button", ".", "setDisabled", "(", "True", ")", "# Interrupt computations or stop debugging\r", "if", "not", "self", ".", "shellwidget", ".", "_reading", ":", "self", ".", "interrupt_kernel", "(", ")", "else", ":", "self", ".", "shellwidget", ".", "write_to_stdin", "(", "'exit'", ")" ]
43.5
7.5
def follow(self, login): """Make the authenticated user follow login. :param str login: (required), user to follow :returns: bool """ resp = False if login: url = self._build_url('user', 'following', login) resp = self._boolean(self._put(url), 204, 404) return resp
[ "def", "follow", "(", "self", ",", "login", ")", ":", "resp", "=", "False", "if", "login", ":", "url", "=", "self", ".", "_build_url", "(", "'user'", ",", "'following'", ",", "login", ")", "resp", "=", "self", ".", "_boolean", "(", "self", ".", "_put", "(", "url", ")", ",", "204", ",", "404", ")", "return", "resp" ]
30.545455
17.181818
def create_metric_definition(self, metric_type, metric_id, **tags): """ Create metric definition with custom definition. **tags should be a set of tags, such as units, env .. :param metric_type: MetricType of the new definition :param metric_id: metric_id is the string index of the created metric :param tags: Key/Value tag values of the new metric """ item = { 'id': metric_id } if len(tags) > 0: # We have some arguments to pass.. data_retention = tags.pop('dataRetention', None) if data_retention is not None: item['dataRetention'] = data_retention if len(tags) > 0: item['tags'] = tags json_data = json.dumps(item, indent=2) try: self._post(self._get_url(metric_type), json_data) except HawkularError as e: if e.code == 409: return False raise e return True
[ "def", "create_metric_definition", "(", "self", ",", "metric_type", ",", "metric_id", ",", "*", "*", "tags", ")", ":", "item", "=", "{", "'id'", ":", "metric_id", "}", "if", "len", "(", "tags", ")", ">", "0", ":", "# We have some arguments to pass..", "data_retention", "=", "tags", ".", "pop", "(", "'dataRetention'", ",", "None", ")", "if", "data_retention", "is", "not", "None", ":", "item", "[", "'dataRetention'", "]", "=", "data_retention", "if", "len", "(", "tags", ")", ">", "0", ":", "item", "[", "'tags'", "]", "=", "tags", "json_data", "=", "json", ".", "dumps", "(", "item", ",", "indent", "=", "2", ")", "try", ":", "self", ".", "_post", "(", "self", ".", "_get_url", "(", "metric_type", ")", ",", "json_data", ")", "except", "HawkularError", "as", "e", ":", "if", "e", ".", "code", "==", "409", ":", "return", "False", "raise", "e", "return", "True" ]
34.821429
19.392857
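The create_metric_definition record above builds its request payload by treating the metric id as mandatory, promoting an optional dataRetention keyword to a top-level field, and folding the remaining keywords into a tag map before JSON-encoding. A minimal standalone sketch of that payload-building step using only the standard library; the helper name and the sample tag values are illustrative, not taken from the Hawkular client:

import json

def build_metric_definition(metric_id, **tags):
    # The identifier is always present.
    item = {'id': metric_id}
    # 'dataRetention' is pulled out as a top-level field rather than a tag.
    data_retention = tags.pop('dataRetention', None)
    if data_retention is not None:
        item['dataRetention'] = data_retention
    # Whatever keywords remain become the tag map.
    if tags:
        item['tags'] = tags
    return json.dumps(item, indent=2)

print(build_metric_definition('free_memory', dataRetention=7, units='MB', env='prod'))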
def set(self, key: Any, value: Any) -> None: """ Sets the value of a key to a supplied value """ if key is not None: self[key] = value
[ "def", "set", "(", "self", ",", "key", ":", "Any", ",", "value", ":", "Any", ")", "->", "None", ":", "if", "key", "is", "not", "None", ":", "self", "[", "key", "]", "=", "value" ]
39.75
7
async def send_heartbeat(self, name): """Send a heartbeat for a service. Args: name (string): The name of the service to send a heartbeat for """ await self.send_command(OPERATIONS.CMD_HEARTBEAT, {'name': name}, MESSAGES.HeartbeatResponse, timeout=5.0)
[ "async", "def", "send_heartbeat", "(", "self", ",", "name", ")", ":", "await", "self", ".", "send_command", "(", "OPERATIONS", ".", "CMD_HEARTBEAT", ",", "{", "'name'", ":", "name", "}", ",", "MESSAGES", ".", "HeartbeatResponse", ",", "timeout", "=", "5.0", ")" ]
35.777778
23.222222
def _brahmic(data, scheme_map, **kw): """Transliterate `data` with the given `scheme_map`. This function is used when the source scheme is a Brahmic scheme. :param data: the data to transliterate :param scheme_map: a dict that maps between characters in the old scheme and characters in the new scheme """ if scheme_map.from_scheme.name == northern.GURMUKHI: data = northern.GurmukhiScheme.replace_tippi(text=data) marks = scheme_map.marks virama = scheme_map.virama consonants = scheme_map.consonants non_marks_viraama = scheme_map.non_marks_viraama to_roman = scheme_map.to_scheme.is_roman max_key_length_from_scheme = scheme_map.max_key_length_from_scheme buf = [] i = 0 to_roman_had_consonant = found = False append = buf.append # logging.debug(pprint.pformat(scheme_map.consonants)) # We dont just translate each brAhmic character one after another in order to prefer concise transliterations when possible - for example ज्ञ -> jn in optitrans rather than j~n. while i <= len(data): # The longest token in the source scheme has length `max_key_length_from_scheme`. Iterate # over `data` while taking `max_key_length_from_scheme` characters at a time. If we don`t # find the character group in our scheme map, lop off a character and # try again. # # If we've finished reading through `data`, then `token` will be empty # and the loop below will be skipped. token = data[i:i + max_key_length_from_scheme] while token: if len(token) == 1: if token in marks: append(marks[token]) found = True elif token in virama: append(virama[token]) found = True else: if to_roman_had_consonant: append('a') append(non_marks_viraama.get(token, token)) found = True else: if token in non_marks_viraama: if to_roman_had_consonant: append('a') append(non_marks_viraama.get(token)) found = True if found: to_roman_had_consonant = to_roman and token in consonants i += len(token) break else: token = token[:-1] # Continuing the outer while loop. # We've exhausted the token; this must be some other character. Due to # the implicit 'a', we must explicitly end any lingering consonants # before we can handle the current token. if not found: if to_roman_had_consonant: append(next(iter(virama.values()))) if i < len(data): append(data[i]) to_roman_had_consonant = False i += 1 found = False if to_roman_had_consonant: append('a') return ''.join(buf)
[ "def", "_brahmic", "(", "data", ",", "scheme_map", ",", "*", "*", "kw", ")", ":", "if", "scheme_map", ".", "from_scheme", ".", "name", "==", "northern", ".", "GURMUKHI", ":", "data", "=", "northern", ".", "GurmukhiScheme", ".", "replace_tippi", "(", "text", "=", "data", ")", "marks", "=", "scheme_map", ".", "marks", "virama", "=", "scheme_map", ".", "virama", "consonants", "=", "scheme_map", ".", "consonants", "non_marks_viraama", "=", "scheme_map", ".", "non_marks_viraama", "to_roman", "=", "scheme_map", ".", "to_scheme", ".", "is_roman", "max_key_length_from_scheme", "=", "scheme_map", ".", "max_key_length_from_scheme", "buf", "=", "[", "]", "i", "=", "0", "to_roman_had_consonant", "=", "found", "=", "False", "append", "=", "buf", ".", "append", "# logging.debug(pprint.pformat(scheme_map.consonants))", "# We dont just translate each brAhmic character one after another in order to prefer concise transliterations when possible - for example ज्ञ -> jn in optitrans rather than j~n.", "while", "i", "<=", "len", "(", "data", ")", ":", "# The longest token in the source scheme has length `max_key_length_from_scheme`. Iterate", "# over `data` while taking `max_key_length_from_scheme` characters at a time. If we don`t", "# find the character group in our scheme map, lop off a character and", "# try again.", "#", "# If we've finished reading through `data`, then `token` will be empty", "# and the loop below will be skipped.", "token", "=", "data", "[", "i", ":", "i", "+", "max_key_length_from_scheme", "]", "while", "token", ":", "if", "len", "(", "token", ")", "==", "1", ":", "if", "token", "in", "marks", ":", "append", "(", "marks", "[", "token", "]", ")", "found", "=", "True", "elif", "token", "in", "virama", ":", "append", "(", "virama", "[", "token", "]", ")", "found", "=", "True", "else", ":", "if", "to_roman_had_consonant", ":", "append", "(", "'a'", ")", "append", "(", "non_marks_viraama", ".", "get", "(", "token", ",", "token", ")", ")", "found", "=", "True", "else", ":", "if", "token", "in", "non_marks_viraama", ":", "if", "to_roman_had_consonant", ":", "append", "(", "'a'", ")", "append", "(", "non_marks_viraama", ".", "get", "(", "token", ")", ")", "found", "=", "True", "if", "found", ":", "to_roman_had_consonant", "=", "to_roman", "and", "token", "in", "consonants", "i", "+=", "len", "(", "token", ")", "break", "else", ":", "token", "=", "token", "[", ":", "-", "1", "]", "# Continuing the outer while loop.", "# We've exhausted the token; this must be some other character. Due to", "# the implicit 'a', we must explicitly end any lingering consonants", "# before we can handle the current token.", "if", "not", "found", ":", "if", "to_roman_had_consonant", ":", "append", "(", "next", "(", "iter", "(", "virama", ".", "values", "(", ")", ")", ")", ")", "if", "i", "<", "len", "(", "data", ")", ":", "append", "(", "data", "[", "i", "]", ")", "to_roman_had_consonant", "=", "False", "i", "+=", "1", "found", "=", "False", "if", "to_roman_had_consonant", ":", "append", "(", "'a'", ")", "return", "''", ".", "join", "(", "buf", ")" ]
34.064103
20.871795
def p_CommentOrEmptyLineList(p): ''' CommentOrEmptyLineList : | CommentOrEmptyLine | CommentOrEmptyLineList CommentOrEmptyLine ''' if len(p) <= 1: p[0] = CommentOrEmptyLineList(None, None) elif len(p) <= 2: p[0] = CommentOrEmptyLineList(None, p[1]) else: p[0] = CommentOrEmptyLineList(p[1], p[2])
[ "def", "p_CommentOrEmptyLineList", "(", "p", ")", ":", "if", "len", "(", "p", ")", "<=", "1", ":", "p", "[", "0", "]", "=", "CommentOrEmptyLineList", "(", "None", ",", "None", ")", "elif", "len", "(", "p", ")", "<=", "2", ":", "p", "[", "0", "]", "=", "CommentOrEmptyLineList", "(", "None", ",", "p", "[", "1", "]", ")", "else", ":", "p", "[", "0", "]", "=", "CommentOrEmptyLineList", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ")" ]
32.333333
18.5
def _compute_projection(self, X, W): """Compute the LPP projection matrix Parameters ---------- X : array_like, (n_samples, n_features) The input data W : array_like or sparse matrix, (n_samples, n_samples) The precomputed adjacency matrix Returns ------- P : ndarray, (n_features, self.n_components) The matrix encoding the locality preserving projection """ # TODO: check W input; handle sparse case X = check_array(X) D = np.diag(W.sum(1)) L = D - W evals, evecs = eigh_robust(np.dot(X.T, np.dot(L, X)), np.dot(X.T, np.dot(D, X)), eigvals=(0, self.n_components - 1)) return evecs
[ "def", "_compute_projection", "(", "self", ",", "X", ",", "W", ")", ":", "# TODO: check W input; handle sparse case", "X", "=", "check_array", "(", "X", ")", "D", "=", "np", ".", "diag", "(", "W", ".", "sum", "(", "1", ")", ")", "L", "=", "D", "-", "W", "evals", ",", "evecs", "=", "eigh_robust", "(", "np", ".", "dot", "(", "X", ".", "T", ",", "np", ".", "dot", "(", "L", ",", "X", ")", ")", ",", "np", ".", "dot", "(", "X", ".", "T", ",", "np", ".", "dot", "(", "D", ",", "X", ")", ")", ",", "eigvals", "=", "(", "0", ",", "self", ".", "n_components", "-", "1", ")", ")", "return", "evecs" ]
32.833333
18.875
def start_event_loop_qt4(app=None): """Start the qt4 event loop in a consistent manner.""" if app is None: app = get_app_qt4(['']) if not is_event_loop_running_qt4(app): app._in_event_loop = True app.exec_() app._in_event_loop = False else: app._in_event_loop = True
[ "def", "start_event_loop_qt4", "(", "app", "=", "None", ")", ":", "if", "app", "is", "None", ":", "app", "=", "get_app_qt4", "(", "[", "''", "]", ")", "if", "not", "is_event_loop_running_qt4", "(", "app", ")", ":", "app", ".", "_in_event_loop", "=", "True", "app", ".", "exec_", "(", ")", "app", ".", "_in_event_loop", "=", "False", "else", ":", "app", ".", "_in_event_loop", "=", "True" ]
31.3
10.9
def focusOutEvent( self, event ): """ Overloads the focus out event to cancel editing when the widget loses focus. :param event | <QFocusEvent> """ super(XNavigationEdit, self).focusOutEvent(event) self.cancelEdit()
[ "def", "focusOutEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XNavigationEdit", ",", "self", ")", ".", "focusOutEvent", "(", "event", ")", "self", ".", "cancelEdit", "(", ")" ]
28.5
16.7
def updateCurrentValue(self, value): """ Disables snapping during the current value update to ensure a smooth transition for node animations. Since this can only be called via code, we don't need to worry about snapping to the grid for a user. """ xsnap = None ysnap = None if value != self.endValue(): xsnap = self.targetObject().isXSnappedToGrid() ysnap = self.targetObject().isYSnappedToGrid() self.targetObject().setXSnapToGrid(False) self.targetObject().setYSnapToGrid(False) super(XNodeAnimation, self).updateCurrentValue(value) if value != self.endValue(): self.targetObject().setXSnapToGrid(xsnap) self.targetObject().setYSnapToGrid(ysnap)
[ "def", "updateCurrentValue", "(", "self", ",", "value", ")", ":", "xsnap", "=", "None", "ysnap", "=", "None", "if", "value", "!=", "self", ".", "endValue", "(", ")", ":", "xsnap", "=", "self", ".", "targetObject", "(", ")", ".", "isXSnappedToGrid", "(", ")", "ysnap", "=", "self", ".", "targetObject", "(", ")", ".", "isYSnappedToGrid", "(", ")", "self", ".", "targetObject", "(", ")", ".", "setXSnapToGrid", "(", "False", ")", "self", ".", "targetObject", "(", ")", ".", "setYSnapToGrid", "(", "False", ")", "super", "(", "XNodeAnimation", ",", "self", ")", ".", "updateCurrentValue", "(", "value", ")", "if", "value", "!=", "self", ".", "endValue", "(", ")", ":", "self", ".", "targetObject", "(", ")", ".", "setXSnapToGrid", "(", "xsnap", ")", "self", ".", "targetObject", "(", ")", ".", "setYSnapToGrid", "(", "ysnap", ")" ]
39.047619
18.571429
def get_votes_comment(self, *args, **kwargs): """ :allowed_param: 'commentId' """ return bind_api( api=self, path='/comments/{commentId}/votes', payload_type='vote', payload_list=True, allowed_param=['commentId'] )(*args, **kwargs)
[ "def", "get_votes_comment", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "bind_api", "(", "api", "=", "self", ",", "path", "=", "'/comments/{commentId}/votes'", ",", "payload_type", "=", "'vote'", ",", "payload_list", "=", "True", ",", "allowed_param", "=", "[", "'commentId'", "]", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
31.4
8
def pec(self, value): """True if Packet Error Codes (PEC) are enabled""" pec = bool(value) if pec != self._pec: if ioctl(self._fd, SMBUS.I2C_PEC, pec): raise IOError(ffi.errno) self._pec = pec
[ "def", "pec", "(", "self", ",", "value", ")", ":", "pec", "=", "bool", "(", "value", ")", "if", "pec", "!=", "self", ".", "_pec", ":", "if", "ioctl", "(", "self", ".", "_fd", ",", "SMBUS", ".", "I2C_PEC", ",", "pec", ")", ":", "raise", "IOError", "(", "ffi", ".", "errno", ")", "self", ".", "_pec", "=", "pec" ]
35.714286
10
def other_set_producer(socket, which_set, image_archive, patch_archive, groundtruth): """Push image files read from the valid/test set TAR to a socket. Parameters ---------- socket : :class:`zmq.Socket` PUSH socket on which to send images. which_set : str Which set of images is being processed. One of 'train', 'valid', 'test'. Used for extracting the appropriate images from the patch archive. image_archive : str or file-like object The filename or file-handle for the TAR archive containing images. patch_archive : str or file-like object Filename or file handle for the TAR archive of patch images. groundtruth : iterable Iterable container containing scalar 0-based class index for each image, sorted by filename. """ patch_images = extract_patch_images(patch_archive, which_set) num_patched = 0 with tar_open(image_archive) as tar: filenames = sorted(info.name for info in tar if info.isfile()) images = (load_from_tar_or_patch(tar, filename, patch_images) for filename in filenames) pathless_filenames = (os.path.split(fn)[-1] for fn in filenames) image_iterator = equizip(images, pathless_filenames, groundtruth) for (image_data, patched), filename, class_index in image_iterator: if patched: num_patched += 1 socket.send_pyobj((filename, class_index), zmq.SNDMORE) socket.send(image_data, copy=False) if num_patched != len(patch_images): raise Exception
[ "def", "other_set_producer", "(", "socket", ",", "which_set", ",", "image_archive", ",", "patch_archive", ",", "groundtruth", ")", ":", "patch_images", "=", "extract_patch_images", "(", "patch_archive", ",", "which_set", ")", "num_patched", "=", "0", "with", "tar_open", "(", "image_archive", ")", "as", "tar", ":", "filenames", "=", "sorted", "(", "info", ".", "name", "for", "info", "in", "tar", "if", "info", ".", "isfile", "(", ")", ")", "images", "=", "(", "load_from_tar_or_patch", "(", "tar", ",", "filename", ",", "patch_images", ")", "for", "filename", "in", "filenames", ")", "pathless_filenames", "=", "(", "os", ".", "path", ".", "split", "(", "fn", ")", "[", "-", "1", "]", "for", "fn", "in", "filenames", ")", "image_iterator", "=", "equizip", "(", "images", ",", "pathless_filenames", ",", "groundtruth", ")", "for", "(", "image_data", ",", "patched", ")", ",", "filename", ",", "class_index", "in", "image_iterator", ":", "if", "patched", ":", "num_patched", "+=", "1", "socket", ".", "send_pyobj", "(", "(", "filename", ",", "class_index", ")", ",", "zmq", ".", "SNDMORE", ")", "socket", ".", "send", "(", "image_data", ",", "copy", "=", "False", ")", "if", "num_patched", "!=", "len", "(", "patch_images", ")", ":", "raise", "Exception" ]
44.111111
19.333333
def fill_key_info(self, key_info, signature_method): """ Fills the KeyInfo node :param key_info: KeyInfo node :type key_info: lxml.etree.Element :param signature_method: Signature node to use :type signature_method: str :return: None """ x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP) if x509_data is not None: self.fill_x509_data(x509_data) key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP) if key_name is not None and self.key_name is not None: key_name.text = self.key_name key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP) if key_value is not None: key_value.text = '\n' signature = constants.TransformUsageSignatureMethod[ signature_method ] key = self.public_key if self.public_key is None: key = self.private_key.public_key() if not isinstance( key, signature['method'].public_key_class ): raise Exception('Key not compatible with signature method') signature['method'].key_value(key_value, key)
[ "def", "fill_key_info", "(", "self", ",", "key_info", ",", "signature_method", ")", ":", "x509_data", "=", "key_info", ".", "find", "(", "'ds:X509Data'", ",", "namespaces", "=", "constants", ".", "NS_MAP", ")", "if", "x509_data", "is", "not", "None", ":", "self", ".", "fill_x509_data", "(", "x509_data", ")", "key_name", "=", "key_info", ".", "find", "(", "'ds:KeyName'", ",", "namespaces", "=", "constants", ".", "NS_MAP", ")", "if", "key_name", "is", "not", "None", "and", "self", ".", "key_name", "is", "not", "None", ":", "key_name", ".", "text", "=", "self", ".", "key_name", "key_value", "=", "key_info", ".", "find", "(", "'ds:KeyValue'", ",", "namespaces", "=", "constants", ".", "NS_MAP", ")", "if", "key_value", "is", "not", "None", ":", "key_value", ".", "text", "=", "'\\n'", "signature", "=", "constants", ".", "TransformUsageSignatureMethod", "[", "signature_method", "]", "key", "=", "self", ".", "public_key", "if", "self", ".", "public_key", "is", "None", ":", "key", "=", "self", ".", "private_key", ".", "public_key", "(", ")", "if", "not", "isinstance", "(", "key", ",", "signature", "[", "'method'", "]", ".", "public_key_class", ")", ":", "raise", "Exception", "(", "'Key not compatible with signature method'", ")", "signature", "[", "'method'", "]", ".", "key_value", "(", "key_value", ",", "key", ")" ]
42.62069
14
def kill(self, unique_id, configs=None): """ Issues a kill -9 to the specified process calls the deployers get_pid function for the process. If no pid_file/pid_keyword is specified a generic grep of ps aux command is executed on remote machine based on process parameters which may not be reliable if more process are running with similar name :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGKILL, configs)
[ "def", "kill", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "_send_signal", "(", "unique_id", ",", "signal", ".", "SIGKILL", ",", "configs", ")" ]
52
23.555556
def fit(self, values, bins): """ Fit the transform using the given values (in our case ic50s). Parameters ---------- values : ic50 values bins : bins for the cumulative distribution function Anything that can be passed to numpy.histogram's "bins" argument can be used here. """ assert self.cdf is None assert self.bin_edges is None assert len(values) > 0 (hist, self.bin_edges) = numpy.histogram(values, bins=bins) self.cdf = numpy.ones(len(hist) + 3) * numpy.nan self.cdf[0] = 0.0 self.cdf[1] = 0.0 self.cdf[-1] = 100.0 numpy.cumsum(hist * 100.0 / numpy.sum(hist), out=self.cdf[2:-1]) assert not numpy.isnan(self.cdf).any()
[ "def", "fit", "(", "self", ",", "values", ",", "bins", ")", ":", "assert", "self", ".", "cdf", "is", "None", "assert", "self", ".", "bin_edges", "is", "None", "assert", "len", "(", "values", ")", ">", "0", "(", "hist", ",", "self", ".", "bin_edges", ")", "=", "numpy", ".", "histogram", "(", "values", ",", "bins", "=", "bins", ")", "self", ".", "cdf", "=", "numpy", ".", "ones", "(", "len", "(", "hist", ")", "+", "3", ")", "*", "numpy", ".", "nan", "self", ".", "cdf", "[", "0", "]", "=", "0.0", "self", ".", "cdf", "[", "1", "]", "=", "0.0", "self", ".", "cdf", "[", "-", "1", "]", "=", "100.0", "numpy", ".", "cumsum", "(", "hist", "*", "100.0", "/", "numpy", ".", "sum", "(", "hist", ")", ",", "out", "=", "self", ".", "cdf", "[", "2", ":", "-", "1", "]", ")", "assert", "not", "numpy", ".", "isnan", "(", "self", ".", "cdf", ")", ".", "any", "(", ")" ]
36.428571
16.619048
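The fit record above turns a histogram of values into a percentile lookup table by cumulative summation, with padding entries pinned to 0 and 100. A small self-contained NumPy sketch of the same idea; the sample values and the simpler padding used here are illustrative assumptions, not the library's exact layout:

import numpy as np

values = np.array([10.0, 50.0, 200.0, 500.0, 900.0])
hist, bin_edges = np.histogram(values, bins=4)

# Percentage of observations at or below each bin edge:
# 0 at the leftmost edge, 100 at the rightmost edge.
cdf = np.concatenate([[0.0], np.cumsum(hist) * 100.0 / hist.sum()])
print(bin_edges)
print(cdf)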
def to_glyphs_background_image(self, ufo_glyph, layer): """Copy the background image from the UFO Glyph to the GSLayer.""" ufo_image = ufo_glyph.image if ufo_image.fileName is None: return image = self.glyphs_module.GSBackgroundImage() image.path = ufo_image.fileName image.transform = Transform(*ufo_image.transformation) if CROP_KEY in ufo_glyph.lib: x, y, w, h = ufo_glyph.lib[CROP_KEY] image.crop = Rect(Point(x, y), Size(w, h)) if LOCKED_KEY in ufo_glyph.lib: image.locked = ufo_glyph.lib[LOCKED_KEY] if ALPHA_KEY in ufo_glyph.lib: image.alpha = ufo_glyph.lib[ALPHA_KEY] layer.backgroundImage = image
[ "def", "to_glyphs_background_image", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "ufo_image", "=", "ufo_glyph", ".", "image", "if", "ufo_image", ".", "fileName", "is", "None", ":", "return", "image", "=", "self", ".", "glyphs_module", ".", "GSBackgroundImage", "(", ")", "image", ".", "path", "=", "ufo_image", ".", "fileName", "image", ".", "transform", "=", "Transform", "(", "*", "ufo_image", ".", "transformation", ")", "if", "CROP_KEY", "in", "ufo_glyph", ".", "lib", ":", "x", ",", "y", ",", "w", ",", "h", "=", "ufo_glyph", ".", "lib", "[", "CROP_KEY", "]", "image", ".", "crop", "=", "Rect", "(", "Point", "(", "x", ",", "y", ")", ",", "Size", "(", "w", ",", "h", ")", ")", "if", "LOCKED_KEY", "in", "ufo_glyph", ".", "lib", ":", "image", ".", "locked", "=", "ufo_glyph", ".", "lib", "[", "LOCKED_KEY", "]", "if", "ALPHA_KEY", "in", "ufo_glyph", ".", "lib", ":", "image", ".", "alpha", "=", "ufo_glyph", ".", "lib", "[", "ALPHA_KEY", "]", "layer", ".", "backgroundImage", "=", "image" ]
41.875
8.875
def items(self, folder_id, subfolder_id, ann_id=None): '''Yields an unodered generator of items in a subfolder. The generator yields items, which are represented by a tuple of ``content_id`` and ``subtopic_id``. The format of these identifiers is unspecified. By default (with ``ann_id=None``), subfolders are shown for all anonymous users. Optionally, ``ann_id`` can be set to a username, which restricts the list to only subfolders owned by that user. :param str folder_id: Folder id :param str subfolder_id: Subfolder id :param str ann_id: Username :rtype: generator of ``(content_id, subtopic_id)`` ''' self.assert_valid_folder_id(folder_id) self.assert_valid_folder_id(subfolder_id) ann_id = self._annotator(ann_id) folder_cid = self.wrap_folder_content_id(ann_id, folder_id) subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id) ident = (folder_cid, subfolder_sid) if self.store.get(folder_cid) is None: raise KeyError(folder_id) for lab in self.label_store.directly_connected(ident): cid = lab.other(folder_cid) subid = lab.subtopic_for(cid) yield (cid, subid)
[ "def", "items", "(", "self", ",", "folder_id", ",", "subfolder_id", ",", "ann_id", "=", "None", ")", ":", "self", ".", "assert_valid_folder_id", "(", "folder_id", ")", "self", ".", "assert_valid_folder_id", "(", "subfolder_id", ")", "ann_id", "=", "self", ".", "_annotator", "(", "ann_id", ")", "folder_cid", "=", "self", ".", "wrap_folder_content_id", "(", "ann_id", ",", "folder_id", ")", "subfolder_sid", "=", "self", ".", "wrap_subfolder_subtopic_id", "(", "subfolder_id", ")", "ident", "=", "(", "folder_cid", ",", "subfolder_sid", ")", "if", "self", ".", "store", ".", "get", "(", "folder_cid", ")", "is", "None", ":", "raise", "KeyError", "(", "folder_id", ")", "for", "lab", "in", "self", ".", "label_store", ".", "directly_connected", "(", "ident", ")", ":", "cid", "=", "lab", ".", "other", "(", "folder_cid", ")", "subid", "=", "lab", ".", "subtopic_for", "(", "cid", ")", "yield", "(", "cid", ",", "subid", ")" ]
43.413793
18.172414
def from_json(json): """Creates a Track from a JSON file. No preprocessing is done. Arguments: json: map with the keys: name (optional) and segments. Return: A track instance """ segments = [Segment.from_json(s) for s in json['segments']] return Track(json['name'], segments).compute_metrics()
[ "def", "from_json", "(", "json", ")", ":", "segments", "=", "[", "Segment", ".", "from_json", "(", "s", ")", "for", "s", "in", "json", "[", "'segments'", "]", "]", "return", "Track", "(", "json", "[", "'name'", "]", ",", "segments", ")", ".", "compute_metrics", "(", ")" ]
30.333333
20.083333
def phrase_replace(self, replace_dict): """ Replace phrases with single token, mapping defined in replace_dict """ def r(tokens): text = ' ' + ' '.join(tokens) for k, v in replace_dict.items(): text = text.replace(" " + k + " ", " " + v + " ") return text.split() self.stems = list(map(r, self.stems))
[ "def", "phrase_replace", "(", "self", ",", "replace_dict", ")", ":", "def", "r", "(", "tokens", ")", ":", "text", "=", "' '", "+", "' '", ".", "join", "(", "tokens", ")", "for", "k", ",", "v", "in", "replace_dict", ".", "items", "(", ")", ":", "text", "=", "text", ".", "replace", "(", "\" \"", "+", "k", "+", "\" \"", ",", "\" \"", "+", "v", "+", "\" \"", ")", "return", "text", ".", "split", "(", ")", "self", ".", "stems", "=", "list", "(", "map", "(", "r", ",", "self", ".", "stems", ")", ")" ]
35
17.166667
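phrase_replace above relies on a simple trick for whole-phrase substitution over token lists: join the tokens with spaces, pad the search and replacement phrases with spaces so only exact token boundaries match, then split again. A tiny standalone version of that trick; the token list and replacement map are made-up examples:

def phrase_replace_tokens(tokens, replace_dict):
    # Leading space mirrors the padding used in the record above.
    text = ' ' + ' '.join(tokens)
    for k, v in replace_dict.items():
        text = text.replace(' ' + k + ' ', ' ' + v + ' ')
    return text.split()

print(phrase_replace_tokens(['new', 'york', 'city', 'weather'],
                            {'new york': 'new_york'}))
# -> ['new_york', 'city', 'weather']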
def unexpanduser(path): r""" Replaces home directory with '~' """ homedir = expanduser('~') if path.startswith(homedir): path = '~' + path[len(homedir):] return path
[ "def", "unexpanduser", "(", "path", ")", ":", "homedir", "=", "expanduser", "(", "'~'", ")", "if", "path", ".", "startswith", "(", "homedir", ")", ":", "path", "=", "'~'", "+", "path", "[", "len", "(", "homedir", ")", ":", "]", "return", "path" ]
23.75
8.125
def load_segment(self, f, is_irom_segment=False): """ Load the next segment from the image file """ file_offs = f.tell() (offset, size) = struct.unpack('<II', f.read(8)) self.warn_if_unusual_segment(offset, size, is_irom_segment) segment_data = f.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) segment = ImageSegment(offset, segment_data, file_offs) self.segments.append(segment) return segment
[ "def", "load_segment", "(", "self", ",", "f", ",", "is_irom_segment", "=", "False", ")", ":", "file_offs", "=", "f", ".", "tell", "(", ")", "(", "offset", ",", "size", ")", "=", "struct", ".", "unpack", "(", "'<II'", ",", "f", ".", "read", "(", "8", ")", ")", "self", ".", "warn_if_unusual_segment", "(", "offset", ",", "size", ",", "is_irom_segment", ")", "segment_data", "=", "f", ".", "read", "(", "size", ")", "if", "len", "(", "segment_data", ")", "<", "size", ":", "raise", "FatalError", "(", "'End of file reading segment 0x%x, length %d (actual length %d)'", "%", "(", "offset", ",", "size", ",", "len", "(", "segment_data", ")", ")", ")", "segment", "=", "ImageSegment", "(", "offset", ",", "segment_data", ",", "file_offs", ")", "self", ".", "segments", ".", "append", "(", "segment", ")", "return", "segment" ]
52.727273
18.818182
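load_segment above reads an 8-byte header as two little-endian unsigned 32-bit integers (load offset and payload size) and then reads exactly that many payload bytes, raising if the stream is truncated. A minimal sketch of the same header parse with the standard struct module; the byte string is a made-up example, not a real firmware image:

import io
import struct

blob = struct.pack('<II', 0x40100000, 4) + b'\xde\xad\xbe\xef'
f = io.BytesIO(blob)

offset, size = struct.unpack('<II', f.read(8))
segment_data = f.read(size)
if len(segment_data) < size:
    raise ValueError('End of file reading segment 0x%x, length %d' % (offset, size))
print(hex(offset), size, segment_data)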
def load_values(self): """ Go through the env var map, transferring the values to this object as attributes. :raises: RuntimeError if a required env var isn't defined. """ for config_name, evar in self.evar_defs.items(): if evar.is_required and evar.name not in os.environ: raise RuntimeError(( "Missing required environment variable: {evar_name}\n" "{help_txt}" ).format(evar_name=evar.name, help_txt=evar.help_txt)) # Env var is present. Transfer its value over. if evar.name in os.environ: self[config_name] = os.environ.get(evar.name) else: self[config_name] = evar.default_val # Perform any validations or transformations. for filter in evar.filters: current_val = self.get(config_name) new_val = filter(current_val, evar) self[config_name] = new_val # This is the top-level filter that is often useful for checking # the values of related env vars (instead of individual validation). self._filter_all()
[ "def", "load_values", "(", "self", ")", ":", "for", "config_name", ",", "evar", "in", "self", ".", "evar_defs", ".", "items", "(", ")", ":", "if", "evar", ".", "is_required", "and", "evar", ".", "name", "not", "in", "os", ".", "environ", ":", "raise", "RuntimeError", "(", "(", "\"Missing required environment variable: {evar_name}\\n\"", "\"{help_txt}\"", ")", ".", "format", "(", "evar_name", "=", "evar", ".", "name", ",", "help_txt", "=", "evar", ".", "help_txt", ")", ")", "# Env var is present. Transfer its value over.", "if", "evar", ".", "name", "in", "os", ".", "environ", ":", "self", "[", "config_name", "]", "=", "os", ".", "environ", ".", "get", "(", "evar", ".", "name", ")", "else", ":", "self", "[", "config_name", "]", "=", "evar", ".", "default_val", "# Perform any validations or transformations.", "for", "filter", "in", "evar", ".", "filters", ":", "current_val", "=", "self", ".", "get", "(", "config_name", ")", "new_val", "=", "filter", "(", "current_val", ",", "evar", ")", "self", "[", "config_name", "]", "=", "new_val", "# This is the top-level filter that is often useful for checking", "# the values of related env vars (instead of individual validation).", "self", ".", "_filter_all", "(", ")" ]
43.703704
18.222222
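load_values above walks a map of environment-variable definitions, failing fast when a required variable is missing and falling back to a default otherwise. A stripped-down sketch of that loop with plain os.environ; the EnvVar tuple, variable names, and defaults are illustrative placeholders rather than the original evar class:

import os
from collections import namedtuple

EnvVar = namedtuple('EnvVar', 'name is_required default_val')

evar_defs = {
    'db_host': EnvVar('APP_DB_HOST', True, None),
    'db_port': EnvVar('APP_DB_PORT', False, '5432'),
}

config = {}
for config_name, evar in evar_defs.items():
    # Required variables must be present, otherwise bail out early.
    if evar.is_required and evar.name not in os.environ:
        raise RuntimeError('Missing required environment variable: %s' % evar.name)
    config[config_name] = os.environ.get(evar.name, evar.default_val)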
def _dispense_during_transfer(self, vol, loc, **kwargs): """ Performs a :any:`dispense` when running a :any:`transfer`, and optionally a :any:`mix`, :any:`touch_tip`, and/or :any:`blow_out` afterwards. """ mix_after = kwargs.get('mix_after', (0, 0)) rate = kwargs.get('rate', 1) air_gap = kwargs.get('air_gap', 0) well, _ = unpack_location(loc) if air_gap: self.dispense(air_gap, well.top(5), rate=rate) self.dispense(vol, loc, rate=rate) self._mix_during_transfer(mix_after, well, **kwargs)
[ "def", "_dispense_during_transfer", "(", "self", ",", "vol", ",", "loc", ",", "*", "*", "kwargs", ")", ":", "mix_after", "=", "kwargs", ".", "get", "(", "'mix_after'", ",", "(", "0", ",", "0", ")", ")", "rate", "=", "kwargs", ".", "get", "(", "'rate'", ",", "1", ")", "air_gap", "=", "kwargs", ".", "get", "(", "'air_gap'", ",", "0", ")", "well", ",", "_", "=", "unpack_location", "(", "loc", ")", "if", "air_gap", ":", "self", ".", "dispense", "(", "air_gap", ",", "well", ".", "top", "(", "5", ")", ",", "rate", "=", "rate", ")", "self", ".", "dispense", "(", "vol", ",", "loc", ",", "rate", "=", "rate", ")", "self", ".", "_mix_during_transfer", "(", "mix_after", ",", "well", ",", "*", "*", "kwargs", ")" ]
36.625
14.25
def cred_def_id(self, issuer_did: str, schema_seq_no: int) -> str: """ Return credential definition identifier for input issuer DID and schema sequence number. :param issuer_did: DID of credential definition issuer :param schema_seq_no: schema sequence number :return: credential definition identifier """ return '{}:3:CL:{}{}'.format( # 3 marks indy cred def id, CL is sig type issuer_did, schema_seq_no, self.cd_id_tag(True))
[ "def", "cred_def_id", "(", "self", ",", "issuer_did", ":", "str", ",", "schema_seq_no", ":", "int", ")", "->", "str", ":", "return", "'{}:3:CL:{}{}'", ".", "format", "(", "# 3 marks indy cred def id, CL is sig type", "issuer_did", ",", "schema_seq_no", ",", "self", ".", "cd_id_tag", "(", "True", ")", ")" ]
39.230769
21.846154
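cred_def_id above is pure string formatting: issuer DID, the fixed '3:CL' marker (credential definition, CL signature type), the schema sequence number, and whatever the tag helper appends. A quick sketch of the resulting shape, under the simplifying assumption that the tag helper contributes ':tag'; the DID value is a common sample identifier, not real data:

def cred_def_id(issuer_did, schema_seq_no, tag=':tag'):
    # '3' marks an indy credential definition, 'CL' is the signature type.
    return '{}:3:CL:{}{}'.format(issuer_did, schema_seq_no, tag)

print(cred_def_id('WgWxqztrNooG92RXvxSTWv', 15))
# -> WgWxqztrNooG92RXvxSTWv:3:CL:15:tag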
def apply_attribute(node: Node, attr: XmlAttr): """Maps xml attribute to instance node property and setups bindings""" setter = get_setter(attr) stripped_value = attr.value.strip() if attr.value else '' if is_expression(stripped_value): (binding_type, expr_body) = parse_expression(stripped_value) binder().apply(binding_type, node=node, attr=attr, modifier=setter, expr_body=expr_body) else: setter(node, attr.name, attr.value)
[ "def", "apply_attribute", "(", "node", ":", "Node", ",", "attr", ":", "XmlAttr", ")", ":", "setter", "=", "get_setter", "(", "attr", ")", "stripped_value", "=", "attr", ".", "value", ".", "strip", "(", ")", "if", "attr", ".", "value", "else", "''", "if", "is_expression", "(", "stripped_value", ")", ":", "(", "binding_type", ",", "expr_body", ")", "=", "parse_expression", "(", "stripped_value", ")", "binder", "(", ")", ".", "apply", "(", "binding_type", ",", "node", "=", "node", ",", "attr", "=", "attr", ",", "modifier", "=", "setter", ",", "expr_body", "=", "expr_body", ")", "else", ":", "setter", "(", "node", ",", "attr", ".", "name", ",", "attr", ".", "value", ")" ]
51.555556
17.777778
def check_auth(user): ''' Check if the user should or shouldn't be inside the system: - If the user is staff or superuser: LOGIN GRANTED - If the user has a Person and it is not "disabled": LOGIN GRANTED - Elsewhere: LOGIN DENIED ''' # Initialize authentication auth = None person = None # Check if there is an user if user: # It means that Django accepted the user and it is active if user.is_staff or user.is_superuser: # This is an administrator, let it in auth = user else: # It is a normal user, check if there is a person behind person = getattr(user, "person", None) if not person: # Check if there is related one person_related = getattr(user, "people", None) if person_related: # Must be only one if person_related.count() == 1: person = person_related.get() if person and ((person.disabled is None) or (person.disabled > timezone.now())): # There is a person, no disabled found or the found one is fine to log in auth = user # Return back the final decision return auth
[ "def", "check_auth", "(", "user", ")", ":", "# Initialize authentication", "auth", "=", "None", "person", "=", "None", "# Check if there is an user", "if", "user", ":", "# It means that Django accepted the user and it is active", "if", "user", ".", "is_staff", "or", "user", ".", "is_superuser", ":", "# This is an administrator, let it in", "auth", "=", "user", "else", ":", "# It is a normal user, check if there is a person behind", "person", "=", "getattr", "(", "user", ",", "\"person\"", ",", "None", ")", "if", "not", "person", ":", "# Check if there is related one", "person_related", "=", "getattr", "(", "user", ",", "\"people\"", ",", "None", ")", "if", "person_related", ":", "# Must be only one", "if", "person_related", ".", "count", "(", ")", "==", "1", ":", "person", "=", "person_related", ".", "get", "(", ")", "if", "person", "and", "(", "(", "person", ".", "disabled", "is", "None", ")", "or", "(", "person", ".", "disabled", ">", "timezone", ".", "now", "(", ")", ")", ")", ":", "# There is a person, no disabled found or the found one is fine to log in", "auth", "=", "user", "# Return back the final decision", "return", "auth" ]
34.472222
22.138889
def _create_array(self, arr: np.ndarray) -> int: """Returns the handle of a RawArray created from the given numpy array. Args: arr: A numpy ndarray. Returns: The handle (int) of the array. Raises: ValueError: if arr is not a ndarray or of an unsupported dtype. If the array is of an unsupported type, using a view of the array to another dtype and then converting on get is often a work around. """ if not isinstance(arr, np.ndarray): raise ValueError('Array is not a numpy ndarray.') try: c_arr = np.ctypeslib.as_ctypes(arr) except (KeyError, NotImplementedError): raise ValueError( 'Array has unsupported dtype {}.'.format(arr.dtype)) # pylint: disable=protected-access raw_arr = RawArray(c_arr._type_, c_arr) with self._lock: if self._count >= len(self._arrays): self._arrays += len(self._arrays) * [None] self._get_next_free() # Note storing the shape is a workaround for an issue encountered # when upgrading to numpy 1.15. # See https://github.com/numpy/numpy/issues/11636 self._arrays[self._current] = (raw_arr, arr.shape) self._count += 1 return self._current
[ "def", "_create_array", "(", "self", ",", "arr", ":", "np", ".", "ndarray", ")", "->", "int", ":", "if", "not", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", ":", "raise", "ValueError", "(", "'Array is not a numpy ndarray.'", ")", "try", ":", "c_arr", "=", "np", ".", "ctypeslib", ".", "as_ctypes", "(", "arr", ")", "except", "(", "KeyError", ",", "NotImplementedError", ")", ":", "raise", "ValueError", "(", "'Array has unsupported dtype {}.'", ".", "format", "(", "arr", ".", "dtype", ")", ")", "# pylint: disable=protected-access", "raw_arr", "=", "RawArray", "(", "c_arr", ".", "_type_", ",", "c_arr", ")", "with", "self", ".", "_lock", ":", "if", "self", ".", "_count", ">=", "len", "(", "self", ".", "_arrays", ")", ":", "self", ".", "_arrays", "+=", "len", "(", "self", ".", "_arrays", ")", "*", "[", "None", "]", "self", ".", "_get_next_free", "(", ")", "# Note storing the shape is a workaround for an issue encountered", "# when upgrading to numpy 1.15.", "# See https://github.com/numpy/numpy/issues/11636", "self", ".", "_arrays", "[", "self", ".", "_current", "]", "=", "(", "raw_arr", ",", "arr", ".", "shape", ")", "self", ".", "_count", "+=", "1", "return", "self", ".", "_current" ]
34.358974
21.333333
def collapse_focussed(self): """ Collapse currently focussed position; works only if the underlying tree allows it. """ if implementsCollapseAPI(self._tree): w, focuspos = self.get_focus() self._tree.collapse(focuspos) self._walker.clear_cache() self.refresh()
[ "def", "collapse_focussed", "(", "self", ")", ":", "if", "implementsCollapseAPI", "(", "self", ".", "_tree", ")", ":", "w", ",", "focuspos", "=", "self", ".", "get_focus", "(", ")", "self", ".", "_tree", ".", "collapse", "(", "focuspos", ")", "self", ".", "_walker", ".", "clear_cache", "(", ")", "self", ".", "refresh", "(", ")" ]
33.9
8.7
def _send_bootstrap_request(self, request): """Make a request using an ephemeral broker connection This routine is used to make broker-unaware requests to get the initial cluster metadata. It cycles through the configured hosts, trying to connect and send the request to each in turn. This temporary connection is closed once a response is received. Note that most Kafka APIs require requests be sent to a specific broker. This method will only function for broker-agnostic requests like: * `Metadata <https://kafka.apache.org/protocol.html#The_Messages_Metadata>`_ * `FindCoordinator <https://kafka.apache.org/protocol.html#The_Messages_FindCoordinator>`_ :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: - `KafkaUnavailableError` when making the request of all known hosts has failed. - `twisted.internet.defer.TimeoutError` when connecting or making a request exceeds the timeout. """ hostports = list(self._bootstrap_hosts) random.shuffle(hostports) for host, port in hostports: ep = self._endpoint_factory(self.reactor, host, port) try: protocol = yield ep.connect(_bootstrapFactory) except Exception as e: log.debug("%s: bootstrap connect to %s:%s -> %s", self, host, port, e) continue try: response = yield protocol.request(request).addTimeout(self.timeout, self.reactor) except Exception: log.debug("%s: bootstrap request to %s:%s failed", self, host, port, exc_info=True) else: returnValue(response) finally: protocol.transport.loseConnection() raise KafkaUnavailableError("Failed to bootstrap from hosts {}".format(hostports))
[ "def", "_send_bootstrap_request", "(", "self", ",", "request", ")", ":", "hostports", "=", "list", "(", "self", ".", "_bootstrap_hosts", ")", "random", ".", "shuffle", "(", "hostports", ")", "for", "host", ",", "port", "in", "hostports", ":", "ep", "=", "self", ".", "_endpoint_factory", "(", "self", ".", "reactor", ",", "host", ",", "port", ")", "try", ":", "protocol", "=", "yield", "ep", ".", "connect", "(", "_bootstrapFactory", ")", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "\"%s: bootstrap connect to %s:%s -> %s\"", ",", "self", ",", "host", ",", "port", ",", "e", ")", "continue", "try", ":", "response", "=", "yield", "protocol", ".", "request", "(", "request", ")", ".", "addTimeout", "(", "self", ".", "timeout", ",", "self", ".", "reactor", ")", "except", "Exception", ":", "log", ".", "debug", "(", "\"%s: bootstrap request to %s:%s failed\"", ",", "self", ",", "host", ",", "port", ",", "exc_info", "=", "True", ")", "else", ":", "returnValue", "(", "response", ")", "finally", ":", "protocol", ".", "transport", ".", "loseConnection", "(", ")", "raise", "KafkaUnavailableError", "(", "\"Failed to bootstrap from hosts {}\"", ".", "format", "(", "hostports", ")", ")" ]
43.479167
26.25
def filter(self, dict_name, priority_min='-inf', priority_max='+inf', start=0, limit=None): '''Get a subset of a dictionary. This retrieves only keys with priority scores greater than or equal to `priority_min` and less than or equal to `priority_max`. Of those keys, it skips the first `start` ones, and then returns at most `limit` keys. With default parameters, this retrieves the entire dictionary, making it a more expensive version of :meth:`pull`. This can be used to limit the dictionary by priority score, for instance using the score as a time stamp and only retrieving values before or after a specific time; or it can be used to get slices of the dictionary if there are too many items to use :meth:`pull`. This is a read-only operation and does not require a session lock, but if this is run in a session context, the lock will be honored. :param str dict_name: name of the dictionary to retrieve :param float priority_min: lowest score to retrieve :param float priority_max: highest score to retrieve :param int start: number of items to skip :param int limit: number of items to retrieve :return: corresponding (partial) Python dictionary :raise rejester.LockError: if the session lock timed out ''' conn = redis.Redis(connection_pool=self.pool) script = conn.register_script(''' if (ARGV[1] == "") or (redis.call("get", KEYS[1]) == ARGV[1]) then -- find all the keys and priorities within range local next_keys = redis.call("zrangebyscore", KEYS[3], ARGV[2], ARGV[3], "limit", ARGV[4], ARGV[5]) if not next_keys[1] then return {} end local t = {} for i = 1, #next_keys do local next_val = redis.call("hget", KEYS[2], next_keys[i]) table.insert(t, next_keys[i]) table.insert(t, next_val) end return t else -- ERROR: No longer own the lock return -1 end ''') if limit is None: limit = -1 res = script(keys=[self._lock_name, self._namespace(dict_name), self._namespace(dict_name) + 'keys'], args=[self._session_lock_identifier or '', priority_min, priority_max, start, limit]) if res == -1: raise LockError() split_res = dict([(self._decode(res[i]), self._decode(res[i+1])) for i in xrange(0, len(res)-1, 2)]) return split_res
[ "def", "filter", "(", "self", ",", "dict_name", ",", "priority_min", "=", "'-inf'", ",", "priority_max", "=", "'+inf'", ",", "start", "=", "0", ",", "limit", "=", "None", ")", ":", "conn", "=", "redis", ".", "Redis", "(", "connection_pool", "=", "self", ".", "pool", ")", "script", "=", "conn", ".", "register_script", "(", "'''\n if (ARGV[1] == \"\") or (redis.call(\"get\", KEYS[1]) == ARGV[1])\n then\n -- find all the keys and priorities within range\n local next_keys = redis.call(\"zrangebyscore\", KEYS[3],\n ARGV[2], ARGV[3],\n \"limit\", ARGV[4], ARGV[5])\n \n if not next_keys[1] then\n return {}\n end\n\n local t = {}\n for i = 1, #next_keys do\n local next_val = redis.call(\"hget\", KEYS[2], next_keys[i])\n table.insert(t, next_keys[i])\n table.insert(t, next_val)\n end\n\n return t\n else\n -- ERROR: No longer own the lock\n return -1\n end\n '''", ")", "if", "limit", "is", "None", ":", "limit", "=", "-", "1", "res", "=", "script", "(", "keys", "=", "[", "self", ".", "_lock_name", ",", "self", ".", "_namespace", "(", "dict_name", ")", ",", "self", ".", "_namespace", "(", "dict_name", ")", "+", "'keys'", "]", ",", "args", "=", "[", "self", ".", "_session_lock_identifier", "or", "''", ",", "priority_min", ",", "priority_max", ",", "start", ",", "limit", "]", ")", "if", "res", "==", "-", "1", ":", "raise", "LockError", "(", ")", "split_res", "=", "dict", "(", "[", "(", "self", ".", "_decode", "(", "res", "[", "i", "]", ")", ",", "self", ".", "_decode", "(", "res", "[", "i", "+", "1", "]", ")", ")", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "res", ")", "-", "1", ",", "2", ")", "]", ")", "return", "split_res" ]
41.5
21.558824
def clip(dataset, normal='x', origin=None, invert=True): """ Clip a dataset by a plane by specifying the origin and normal. If no parameters are given the clip will occur in the center of that dataset Parameters ---------- normal : tuple(float) or str Length 3 tuple for the normal vector direction. Can also be specified as a string conventional direction such as ``'x'`` for ``(1,0,0)`` or ``'-x'`` for ``(-1,0,0)``, etc. origin : tuple(float) The center ``(x,y,z)`` coordinate of the plane on which the clip occurs invert : bool Flag on whether to flip/invert the clip """ if isinstance(normal, str): normal = NORMALS[normal.lower()] # find center of data if origin not specified if origin is None: origin = dataset.center # create the plane for clipping plane = _generate_plane(normal, origin) # run the clip alg = vtk.vtkClipDataSet() alg.SetInputDataObject(dataset) # Use the grid as the data we desire to cut alg.SetClipFunction(plane) # the the cutter to use the plane we made alg.SetInsideOut(invert) # invert the clip if needed alg.Update() # Perfrom the Cut return _get_output(alg)
[ "def", "clip", "(", "dataset", ",", "normal", "=", "'x'", ",", "origin", "=", "None", ",", "invert", "=", "True", ")", ":", "if", "isinstance", "(", "normal", ",", "str", ")", ":", "normal", "=", "NORMALS", "[", "normal", ".", "lower", "(", ")", "]", "# find center of data if origin not specified", "if", "origin", "is", "None", ":", "origin", "=", "dataset", ".", "center", "# create the plane for clipping", "plane", "=", "_generate_plane", "(", "normal", ",", "origin", ")", "# run the clip", "alg", "=", "vtk", ".", "vtkClipDataSet", "(", ")", "alg", ".", "SetInputDataObject", "(", "dataset", ")", "# Use the grid as the data we desire to cut", "alg", ".", "SetClipFunction", "(", "plane", ")", "# the the cutter to use the plane we made", "alg", ".", "SetInsideOut", "(", "invert", ")", "# invert the clip if needed", "alg", ".", "Update", "(", ")", "# Perfrom the Cut", "return", "_get_output", "(", "alg", ")" ]
39.029412
19.558824
def has_space(self, length=1, offset=0): """Returns boolean if self.pos + length < working string length.""" return self.pos + (length + offset) - 1 < self.length
[ "def", "has_space", "(", "self", ",", "length", "=", "1", ",", "offset", "=", "0", ")", ":", "return", "self", ".", "pos", "+", "(", "length", "+", "offset", ")", "-", "1", "<", "self", ".", "length" ]
58.666667
7
def script_post_save(model, os_path, contents_manager, **kwargs): """convert notebooks to Python script after save with nbconvert replaces `ipython notebook --script` """ from nbconvert.exporters.script import ScriptExporter if model['type'] != 'notebook': return global _script_exporter if _script_exporter is None: _script_exporter = ScriptExporter(parent=contents_manager) log = contents_manager.log base, ext = os.path.splitext(os_path) # py_fname = base + '.py' script, resources = _script_exporter.from_filename(os_path) script_fname = base + resources.get('output_extension', '.txt') log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir)) with io.open(script_fname, 'w', encoding='utf-8') as f: f.write(script)
[ "def", "script_post_save", "(", "model", ",", "os_path", ",", "contents_manager", ",", "*", "*", "kwargs", ")", ":", "from", "nbconvert", ".", "exporters", ".", "script", "import", "ScriptExporter", "if", "model", "[", "'type'", "]", "!=", "'notebook'", ":", "return", "global", "_script_exporter", "if", "_script_exporter", "is", "None", ":", "_script_exporter", "=", "ScriptExporter", "(", "parent", "=", "contents_manager", ")", "log", "=", "contents_manager", ".", "log", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "os_path", ")", "# py_fname = base + '.py'", "script", ",", "resources", "=", "_script_exporter", ".", "from_filename", "(", "os_path", ")", "script_fname", "=", "base", "+", "resources", ".", "get", "(", "'output_extension'", ",", "'.txt'", ")", "log", ".", "info", "(", "\"Saving script /%s\"", ",", "to_api_path", "(", "script_fname", ",", "contents_manager", ".", "root_dir", ")", ")", "with", "io", ".", "open", "(", "script_fname", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "script", ")" ]
32.36
22.2
def decorate(func, caller, extras=()): """ decorate(func, caller) decorates a function using a caller. """ evaldict = dict(_call_=caller, _func_=func) es = '' for i, extra in enumerate(extras): ex = '_e%d_' % i evaldict[ex] = extra es += ex + ', ' fun = FunctionMaker.create( func, "return _call_(_func_, %s%%(shortsignature)s)" % es, evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun
[ "def", "decorate", "(", "func", ",", "caller", ",", "extras", "=", "(", ")", ")", ":", "evaldict", "=", "dict", "(", "_call_", "=", "caller", ",", "_func_", "=", "func", ")", "es", "=", "''", "for", "i", ",", "extra", "in", "enumerate", "(", "extras", ")", ":", "ex", "=", "'_e%d_'", "%", "i", "evaldict", "[", "ex", "]", "=", "extra", "es", "+=", "ex", "+", "', '", "fun", "=", "FunctionMaker", ".", "create", "(", "func", ",", "\"return _call_(_func_, %s%%(shortsignature)s)\"", "%", "es", ",", "evaldict", ",", "__wrapped__", "=", "func", ")", "if", "hasattr", "(", "func", ",", "'__qualname__'", ")", ":", "fun", ".", "__qualname__", "=", "func", ".", "__qualname__", "return", "fun" ]
32.0625
11.3125
def update_access_key(self, access_key_id, status, user_name=None): """ Changes the status of the specified access key from Active to Inactive or vice versa. This action can be used to disable a user's key as part of a key rotation workflow. If the user_name is not specified, the user_name is determined implicitly based on the AWS Access Key ID used to sign the request. :type access_key_id: string :param access_key_id: The ID of the access key. :type status: string :param status: Either Active or Inactive. :type user_name: string :param user_name: The username of user (optional). """ params = {'AccessKeyId' : access_key_id, 'Status' : status} if user_name: params['UserName'] = user_name return self.get_response('UpdateAccessKey', params)
[ "def", "update_access_key", "(", "self", ",", "access_key_id", ",", "status", ",", "user_name", "=", "None", ")", ":", "params", "=", "{", "'AccessKeyId'", ":", "access_key_id", ",", "'Status'", ":", "status", "}", "if", "user_name", ":", "params", "[", "'UserName'", "]", "=", "user_name", "return", "self", ".", "get_response", "(", "'UpdateAccessKey'", ",", "params", ")" ]
37
20.166667
def _fix_call_activities_signavio(self, bpmn, filename): """ Signavio produces slightly invalid BPMN for call activity nodes... It is supposed to put a reference to the id of the called process in to the calledElement attribute. Instead it stores a string (which is the name of the process - not its ID, in our interpretation) in an extension tag. This code gets the name of the 'subprocess reference', finds a process with a matching name, and sets the calledElement attribute to the id of the process. """ for node in xpath_eval(bpmn)(".//bpmn:callActivity"): calledElement = node.get('calledElement', None) if not calledElement: signavioMetaData = xpath_eval(node, extra_ns={ 'signavio': SIGNAVIO_NS})( './/signavio:signavioMetaData[@metaKey="entry"]') if not signavioMetaData: raise ValidationException( 'No Signavio "Subprocess reference" specified.', node=node, filename=filename) subprocess_reference = one(signavioMetaData).get('metaValue') matches = [] for b in list(self.bpmn.values()): for p in xpath_eval(b)(".//bpmn:process"): if (p.get('name', p.get('id', None)) == subprocess_reference): matches.append(p) if not matches: raise ValidationException( "No matching process definition found for '%s'." % subprocess_reference, node=node, filename=filename) if len(matches) != 1: raise ValidationException( "More than one matching process definition " " found for '%s'." % subprocess_reference, node=node, filename=filename) node.set('calledElement', matches[0].get('id'))
[ "def", "_fix_call_activities_signavio", "(", "self", ",", "bpmn", ",", "filename", ")", ":", "for", "node", "in", "xpath_eval", "(", "bpmn", ")", "(", "\".//bpmn:callActivity\"", ")", ":", "calledElement", "=", "node", ".", "get", "(", "'calledElement'", ",", "None", ")", "if", "not", "calledElement", ":", "signavioMetaData", "=", "xpath_eval", "(", "node", ",", "extra_ns", "=", "{", "'signavio'", ":", "SIGNAVIO_NS", "}", ")", "(", "'.//signavio:signavioMetaData[@metaKey=\"entry\"]'", ")", "if", "not", "signavioMetaData", ":", "raise", "ValidationException", "(", "'No Signavio \"Subprocess reference\" specified.'", ",", "node", "=", "node", ",", "filename", "=", "filename", ")", "subprocess_reference", "=", "one", "(", "signavioMetaData", ")", ".", "get", "(", "'metaValue'", ")", "matches", "=", "[", "]", "for", "b", "in", "list", "(", "self", ".", "bpmn", ".", "values", "(", ")", ")", ":", "for", "p", "in", "xpath_eval", "(", "b", ")", "(", "\".//bpmn:process\"", ")", ":", "if", "(", "p", ".", "get", "(", "'name'", ",", "p", ".", "get", "(", "'id'", ",", "None", ")", ")", "==", "subprocess_reference", ")", ":", "matches", ".", "append", "(", "p", ")", "if", "not", "matches", ":", "raise", "ValidationException", "(", "\"No matching process definition found for '%s'.\"", "%", "subprocess_reference", ",", "node", "=", "node", ",", "filename", "=", "filename", ")", "if", "len", "(", "matches", ")", "!=", "1", ":", "raise", "ValidationException", "(", "\"More than one matching process definition \"", "\" found for '%s'.\"", "%", "subprocess_reference", ",", "node", "=", "node", ",", "filename", "=", "filename", ")", "node", ".", "set", "(", "'calledElement'", ",", "matches", "[", "0", "]", ".", "get", "(", "'id'", ")", ")" ]
51.4
20.3
def _calc_hash_da(self, rs): """Compute hash of D and A timestamps for single-step D+A case. """ self.hash_d = hash_(rs.get_state())[:6] self.hash_a = self.hash_d
[ "def", "_calc_hash_da", "(", "self", ",", "rs", ")", ":", "self", ".", "hash_d", "=", "hash_", "(", "rs", ".", "get_state", "(", ")", ")", "[", ":", "6", "]", "self", ".", "hash_a", "=", "self", ".", "hash_d" ]
38
5.2
def host_domains(self, ip=None, limit=None, **kwargs): """Pass in an IP address.""" return self._results('reverse-ip', '/v1/{0}/host-domains'.format(ip), limit=limit, **kwargs)
[ "def", "host_domains", "(", "self", ",", "ip", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_results", "(", "'reverse-ip'", ",", "'/v1/{0}/host-domains'", ".", "format", "(", "ip", ")", ",", "limit", "=", "limit", ",", "*", "*", "kwargs", ")" ]
63.333333
24.666667
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None): """pretty print for confusion matrixes""" columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length empty_cell = " " * columnwidth # Print header print(" " + empty_cell, end=" ") for label in labels: print("%{0}s".format(columnwidth) % label, end=" ") print() # Print rows for i, label1 in enumerate(labels): print(" %{0}s".format(columnwidth) % label1, end=" ") for j in range(len(labels)): cell = "%{0}.1f".format(columnwidth) % cm[i, j] if hide_zeroes: cell = cell if float(cm[i, j]) != 0 else empty_cell if hide_diagonal: cell = cell if i != j else empty_cell if hide_threshold: cell = cell if cm[i, j] > hide_threshold else empty_cell print(cell, end=" ") print()
[ "def", "print_cm", "(", "cm", ",", "labels", ",", "hide_zeroes", "=", "False", ",", "hide_diagonal", "=", "False", ",", "hide_threshold", "=", "None", ")", ":", "columnwidth", "=", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", "labels", "]", "+", "[", "5", "]", ")", "# 5 is value length", "empty_cell", "=", "\" \"", "*", "columnwidth", "# Print header", "print", "(", "\" \"", "+", "empty_cell", ",", "end", "=", "\" \"", ")", "for", "label", "in", "labels", ":", "print", "(", "\"%{0}s\"", ".", "format", "(", "columnwidth", ")", "%", "label", ",", "end", "=", "\" \"", ")", "print", "(", ")", "# Print rows", "for", "i", ",", "label1", "in", "enumerate", "(", "labels", ")", ":", "print", "(", "\" %{0}s\"", ".", "format", "(", "columnwidth", ")", "%", "label1", ",", "end", "=", "\" \"", ")", "for", "j", "in", "range", "(", "len", "(", "labels", ")", ")", ":", "cell", "=", "\"%{0}.1f\"", ".", "format", "(", "columnwidth", ")", "%", "cm", "[", "i", ",", "j", "]", "if", "hide_zeroes", ":", "cell", "=", "cell", "if", "float", "(", "cm", "[", "i", ",", "j", "]", ")", "!=", "0", "else", "empty_cell", "if", "hide_diagonal", ":", "cell", "=", "cell", "if", "i", "!=", "j", "else", "empty_cell", "if", "hide_threshold", ":", "cell", "=", "cell", "if", "cm", "[", "i", ",", "j", "]", ">", "hide_threshold", "else", "empty_cell", "print", "(", "cell", ",", "end", "=", "\" \"", ")", "print", "(", ")" ]
42.227273
17.454545
def segmentlistdict(self): """ A segmentlistdict object describing the instruments and time spanned by this CacheEntry. A new object is constructed each time this attribute is accessed (segments are immutable so there is no reason to try to share a reference to the CacheEntry's internal segment; modifications of one would not be reflected in the other anyway). Example: >>> c = CacheEntry(u"H1 S5 815901601 576.5 file://localhost/home/kipp/tmp/1/H1-815901601-576.xml") >>> c.segmentlistdict {u'H1': [segment(LIGOTimeGPS(815901601, 0), LIGOTimeGPS(815902177, 500000000))]} The \"observatory\" column of the cache entry, which is frequently used to store instrument names, is parsed into instrument names for the dictionary keys using the same rules as pycbc_glue.ligolw.lsctables.instrument_set_from_ifos(). Example: >>> c = CacheEntry(u"H1H2, S5 815901601 576.5 file://localhost/home/kipp/tmp/1/H1H2-815901601-576.xml") >>> c.segmentlistdict {u'H1H2': [segment(LIGOTimeGPS(815901601, 0), LIGOTimeGPS(815902177, 500000000))]} """ # the import has to be done here to break the cyclic # dependancy from pycbc_glue.ligolw.lsctables import instrument_set_from_ifos instruments = instrument_set_from_ifos(self.observatory) or (None,) return segments.segmentlistdict((instrument, segments.segmentlist(self.segment is not None and [self.segment] or [])) for instrument in instruments)
[ "def", "segmentlistdict", "(", "self", ")", ":", "# the import has to be done here to break the cyclic", "# dependancy", "from", "pycbc_glue", ".", "ligolw", ".", "lsctables", "import", "instrument_set_from_ifos", "instruments", "=", "instrument_set_from_ifos", "(", "self", ".", "observatory", ")", "or", "(", "None", ",", ")", "return", "segments", ".", "segmentlistdict", "(", "(", "instrument", ",", "segments", ".", "segmentlist", "(", "self", ".", "segment", "is", "not", "None", "and", "[", "self", ".", "segment", "]", "or", "[", "]", ")", ")", "for", "instrument", "in", "instruments", ")" ]
43.90625
28.84375
def file_url(self, entity_id, filename, channel=None): '''Generate a URL for a file in an archive without requesting it. @param entity_id The ID of the entity to look up. @param filename The name of the file in the archive. @param channel Optional channel name. ''' url = '{}/{}/archive/{}'.format(self.url, _get_path(entity_id), filename) return _add_channel(url, channel)
[ "def", "file_url", "(", "self", ",", "entity_id", ",", "filename", ",", "channel", "=", "None", ")", ":", "url", "=", "'{}/{}/archive/{}'", ".", "format", "(", "self", ".", "url", ",", "_get_path", "(", "entity_id", ")", ",", "filename", ")", "return", "_add_channel", "(", "url", ",", "channel", ")" ]
46.1
19.9
def transform_value(self, value): """Convert the value to be stored. This does nothing by default but subclasses can change this. Then the index will be able to filter on the transformed value. For example if the transform capitalizes some text, the filter would be ``myfield__capitalized__eq='FOO'`` """ if not self.transform: return value try: # we store a staticmethod but we accept a method taking `self` and `value` return self.transform(self, value) except TypeError as e: if 'argument' in str(e): # try to limit only to arguments error return self.transform(value)
[ "def", "transform_value", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "transform", ":", "return", "value", "try", ":", "# we store a staticmethod but we accept a method taking `self` and `value`", "return", "self", ".", "transform", "(", "self", ",", "value", ")", "except", "TypeError", "as", "e", ":", "if", "'argument'", "in", "str", "(", "e", ")", ":", "# try to limit only to arguments error", "return", "self", ".", "transform", "(", "value", ")" ]
38.555556
21.277778
async def get_match(self, m_id, force_update=False) -> Match: """ get a single match by id |methcoro| Args: m_id: match id force_update (default=False): True to force an update to the Challonge API Returns: Match Raises: APIException """ found_m = self._find_match(m_id) if force_update or found_m is None: await self.get_matches() found_m = self._find_match(m_id) return found_m
[ "async", "def", "get_match", "(", "self", ",", "m_id", ",", "force_update", "=", "False", ")", "->", "Match", ":", "found_m", "=", "self", ".", "_find_match", "(", "m_id", ")", "if", "force_update", "or", "found_m", "is", "None", ":", "await", "self", ".", "get_matches", "(", ")", "found_m", "=", "self", ".", "_find_match", "(", "m_id", ")", "return", "found_m" ]
24.190476
21.285714
def smeft_evolve_leadinglog(C_in, scale_in, scale_out, newphys=True): """Solve the SMEFT RGEs in the leading log approximation. Input C_in and output C_out are dictionaries of arrays.""" C_out = deepcopy(C_in) b = beta.beta(C_out, newphys=newphys) for k, C in C_out.items(): C_out[k] = C + b[k] / (16 * pi**2) * log(scale_out / scale_in) return C_out
[ "def", "smeft_evolve_leadinglog", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "True", ")", ":", "C_out", "=", "deepcopy", "(", "C_in", ")", "b", "=", "beta", ".", "beta", "(", "C_out", ",", "newphys", "=", "newphys", ")", "for", "k", ",", "C", "in", "C_out", ".", "items", "(", ")", ":", "C_out", "[", "k", "]", "=", "C", "+", "b", "[", "k", "]", "/", "(", "16", "*", "pi", "**", "2", ")", "*", "log", "(", "scale_out", "/", "scale_in", ")", "return", "C_out" ]
41.666667
16.444444
def _findrange(parlist,roots=['JUMP','DMXR1_','DMXR2_','DMX_','efac','log10_efac']): """Rewrite a list of parameters name by detecting ranges (e.g., JUMP1, JUMP2, ...) and compressing them.""" rootdict = {root: [] for root in roots} res = [] for par in parlist: found = False for root in roots: if len(par) > len(root) and par[:len(root)] == root: rootdict[root].append(int(par[len(root):])) found = True if not found: res.append(par) for root in roots: if rootdict[root]: if len(rootdict[root]) > 1: rmin, rmax = min(rootdict[root]), max(rootdict[root]) res.append('{0}{{{1}-{2}}}{3}'.format(root,rmin,rmax, '(incomplete)' if rmax - rmin != len(rootdict[root]) - 1 else '')) else: res.append('{0}{1}'.format(root,rootdict[root][0])) return res
[ "def", "_findrange", "(", "parlist", ",", "roots", "=", "[", "'JUMP'", ",", "'DMXR1_'", ",", "'DMXR2_'", ",", "'DMX_'", ",", "'efac'", ",", "'log10_efac'", "]", ")", ":", "rootdict", "=", "{", "root", ":", "[", "]", "for", "root", "in", "roots", "}", "res", "=", "[", "]", "for", "par", "in", "parlist", ":", "found", "=", "False", "for", "root", "in", "roots", ":", "if", "len", "(", "par", ")", ">", "len", "(", "root", ")", "and", "par", "[", ":", "len", "(", "root", ")", "]", "==", "root", ":", "rootdict", "[", "root", "]", ".", "append", "(", "int", "(", "par", "[", "len", "(", "root", ")", ":", "]", ")", ")", "found", "=", "True", "if", "not", "found", ":", "res", ".", "append", "(", "par", ")", "for", "root", "in", "roots", ":", "if", "rootdict", "[", "root", "]", ":", "if", "len", "(", "rootdict", "[", "root", "]", ")", ">", "1", ":", "rmin", ",", "rmax", "=", "min", "(", "rootdict", "[", "root", "]", ")", ",", "max", "(", "rootdict", "[", "root", "]", ")", "res", ".", "append", "(", "'{0}{{{1}-{2}}}{3}'", ".", "format", "(", "root", ",", "rmin", ",", "rmax", ",", "'(incomplete)'", "if", "rmax", "-", "rmin", "!=", "len", "(", "rootdict", "[", "root", "]", ")", "-", "1", "else", "''", ")", ")", "else", ":", "res", ".", "append", "(", "'{0}{1}'", ".", "format", "(", "root", ",", "rootdict", "[", "root", "]", "[", "0", "]", ")", ")", "return", "res" ]
38.44
23
def com_google_fonts_check_metadata_valid_filename_values(font, family_metadata): """METADATA.pb font.filename field contains font name in right format?""" expected = os.path.basename(font) failed = True for font_metadata in family_metadata.fonts: if font_metadata.filename == expected: failed = False yield PASS, ("METADATA.pb filename field contains" " font name in right format.") break if failed: yield FAIL, ("None of the METADATA.pb filename fields match" f" correct font name format (\"{expected}\").")
[ "def", "com_google_fonts_check_metadata_valid_filename_values", "(", "font", ",", "family_metadata", ")", ":", "expected", "=", "os", ".", "path", ".", "basename", "(", "font", ")", "failed", "=", "True", "for", "font_metadata", "in", "family_metadata", ".", "fonts", ":", "if", "font_metadata", ".", "filename", "==", "expected", ":", "failed", "=", "False", "yield", "PASS", ",", "(", "\"METADATA.pb filename field contains\"", "\" font name in right format.\"", ")", "break", "if", "failed", ":", "yield", "FAIL", ",", "(", "\"None of the METADATA.pb filename fields match\"", "f\" correct font name format (\\\"{expected}\\\").\"", ")" ]
44.642857
17.428571
def _init_map(self): """stub""" super(edXAssetContentFormRecord, self)._init_map() AssetContentTextFormRecord._init_map(self) FilesFormRecord._init_map(self) ProvenanceFormRecord._init_map(self)
[ "def", "_init_map", "(", "self", ")", ":", "super", "(", "edXAssetContentFormRecord", ",", "self", ")", ".", "_init_map", "(", ")", "AssetContentTextFormRecord", ".", "_init_map", "(", "self", ")", "FilesFormRecord", ".", "_init_map", "(", "self", ")", "ProvenanceFormRecord", ".", "_init_map", "(", "self", ")" ]
38.166667
8.833333
def single_request_timeout(self, value): """The timeout (seconds) for a single HTTP REST API request.""" check_type(value, int) assert value is None or value > 0 self._single_request_timeout = value
[ "def", "single_request_timeout", "(", "self", ",", "value", ")", ":", "check_type", "(", "value", ",", "int", ")", "assert", "value", "is", "None", "or", "value", ">", "0", "self", ".", "_single_request_timeout", "=", "value" ]
45.2
3
def logical_name(self): """The logical name of the seat. This is an identifier to group sets of devices within the compositor. Returns: str: The logical name of this seat. """ pchar = self._libinput.libinput_seat_get_logical_name(self._handle) return string_at(pchar).decode()
[ "def", "logical_name", "(", "self", ")", ":", "pchar", "=", "self", ".", "_libinput", ".", "libinput_seat_get_logical_name", "(", "self", ".", "_handle", ")", "return", "string_at", "(", "pchar", ")", ".", "decode", "(", ")" ]
25.818182
21.363636
def _data(self, asa=None): """ Returns the contents of the node: a dataframe or a file path, or passes the node and its contents to a callable. """ if asa is not None: if self._store is None or self._hashes is None: msg = ( "Can only use asa functions with built dataframes." " Build this package and try again." ) raise ValueError(msg) return asa(self, [self._store.object_path(obj) for obj in self._hashes]) else: if self.__cached_data is None: # TODO(dima): Temporary code. if self._target() == TargetType.PANDAS: self.__cached_data = self._store.load_dataframe(self._hashes) elif self._target() == TargetType.NUMPY: self.__cached_data = self._store.load_numpy(self._hashes) else: self.__cached_data = self._store.get_file(self._hashes) return self.__cached_data
[ "def", "_data", "(", "self", ",", "asa", "=", "None", ")", ":", "if", "asa", "is", "not", "None", ":", "if", "self", ".", "_store", "is", "None", "or", "self", ".", "_hashes", "is", "None", ":", "msg", "=", "(", "\"Can only use asa functions with built dataframes.\"", "\" Build this package and try again.\"", ")", "raise", "ValueError", "(", "msg", ")", "return", "asa", "(", "self", ",", "[", "self", ".", "_store", ".", "object_path", "(", "obj", ")", "for", "obj", "in", "self", ".", "_hashes", "]", ")", "else", ":", "if", "self", ".", "__cached_data", "is", "None", ":", "# TODO(dima): Temporary code.", "if", "self", ".", "_target", "(", ")", "==", "TargetType", ".", "PANDAS", ":", "self", ".", "__cached_data", "=", "self", ".", "_store", ".", "load_dataframe", "(", "self", ".", "_hashes", ")", "elif", "self", ".", "_target", "(", ")", "==", "TargetType", ".", "NUMPY", ":", "self", ".", "__cached_data", "=", "self", ".", "_store", ".", "load_numpy", "(", "self", ".", "_hashes", ")", "else", ":", "self", ".", "__cached_data", "=", "self", ".", "_store", ".", "get_file", "(", "self", ".", "_hashes", ")", "return", "self", ".", "__cached_data" ]
45.695652
18.565217
def _compute_grover_oracle_matrix(bitstring_map: Dict[str, int]) -> np.ndarray: """ Computes the unitary matrix that encodes the oracle function for Grover's algorithm :param bitstring_map: dict with string keys corresponding to bitstrings, and integer values corresponding to the desired phase on the output state. :return: a numpy array corresponding to the unitary matrix for oracle for the given bitstring_map """ n_bits = len(list(bitstring_map.keys())[0]) oracle_matrix = np.zeros(shape=(2 ** n_bits, 2 ** n_bits)) for b in range(2 ** n_bits): pad_str = np.binary_repr(b, n_bits) phase_factor = bitstring_map[pad_str] oracle_matrix[b, b] = phase_factor return oracle_matrix
[ "def", "_compute_grover_oracle_matrix", "(", "bitstring_map", ":", "Dict", "[", "str", ",", "int", "]", ")", "->", "np", ".", "ndarray", ":", "n_bits", "=", "len", "(", "list", "(", "bitstring_map", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "oracle_matrix", "=", "np", ".", "zeros", "(", "shape", "=", "(", "2", "**", "n_bits", ",", "2", "**", "n_bits", ")", ")", "for", "b", "in", "range", "(", "2", "**", "n_bits", ")", ":", "pad_str", "=", "np", ".", "binary_repr", "(", "b", ",", "n_bits", ")", "phase_factor", "=", "bitstring_map", "[", "pad_str", "]", "oracle_matrix", "[", "b", ",", "b", "]", "=", "phase_factor", "return", "oracle_matrix" ]
49.4375
22.3125
def _build_body_schema(serializer, body_parameters): """ body is built differently, since it's a single argument no matter what. """ description = "" if isinstance(body_parameters, Param): schema = serializer.to_json_schema(body_parameters.arginfo.type) description = body_parameters.description required = True else: if len(body_parameters) == 0: return None required = set() body_properties = {} for name, param in body_parameters.items(): arginfo = param.arginfo body_properties[name] = serializer.to_json_schema(arginfo.type) body_properties[name]["description"] = param.description if arginfo.default is NoDefault: required.add(name) schema = { "type": "object", "required": list(required), "properties": body_properties, } required = len(required) > 0 return BodyParameter( { "name": "body", "description": description, "required": required, "schema": schema, } )
[ "def", "_build_body_schema", "(", "serializer", ",", "body_parameters", ")", ":", "description", "=", "\"\"", "if", "isinstance", "(", "body_parameters", ",", "Param", ")", ":", "schema", "=", "serializer", ".", "to_json_schema", "(", "body_parameters", ".", "arginfo", ".", "type", ")", "description", "=", "body_parameters", ".", "description", "required", "=", "True", "else", ":", "if", "len", "(", "body_parameters", ")", "==", "0", ":", "return", "None", "required", "=", "set", "(", ")", "body_properties", "=", "{", "}", "for", "name", ",", "param", "in", "body_parameters", ".", "items", "(", ")", ":", "arginfo", "=", "param", ".", "arginfo", "body_properties", "[", "name", "]", "=", "serializer", ".", "to_json_schema", "(", "arginfo", ".", "type", ")", "body_properties", "[", "name", "]", "[", "\"description\"", "]", "=", "param", ".", "description", "if", "arginfo", ".", "default", "is", "NoDefault", ":", "required", ".", "add", "(", "name", ")", "schema", "=", "{", "\"type\"", ":", "\"object\"", ",", "\"required\"", ":", "list", "(", "required", ")", ",", "\"properties\"", ":", "body_properties", ",", "}", "required", "=", "len", "(", "required", ")", ">", "0", "return", "BodyParameter", "(", "{", "\"name\"", ":", "\"body\"", ",", "\"description\"", ":", "description", ",", "\"required\"", ":", "required", ",", "\"schema\"", ":", "schema", ",", "}", ")" ]
34.9375
14.84375
def phase_diff(self, phase): '''Compute the phase differential along a given axis Parameters ---------- phase : np.ndarray Input phase (in radians) Returns ------- dphase : np.ndarray like `phase` The phase differential. ''' if self.conv is None: axis = 0 elif self.conv in ('channels_last', 'tf'): axis = 0 elif self.conv in ('channels_first', 'th'): axis = 1 # Compute the phase differential dphase = np.empty(phase.shape, dtype=phase.dtype) zero_idx = [slice(None)] * phase.ndim zero_idx[axis] = slice(1) else_idx = [slice(None)] * phase.ndim else_idx[axis] = slice(1, None) zero_idx = tuple(zero_idx) else_idx = tuple(else_idx) dphase[zero_idx] = phase[zero_idx] dphase[else_idx] = np.diff(np.unwrap(phase, axis=axis), axis=axis) return dphase
[ "def", "phase_diff", "(", "self", ",", "phase", ")", ":", "if", "self", ".", "conv", "is", "None", ":", "axis", "=", "0", "elif", "self", ".", "conv", "in", "(", "'channels_last'", ",", "'tf'", ")", ":", "axis", "=", "0", "elif", "self", ".", "conv", "in", "(", "'channels_first'", ",", "'th'", ")", ":", "axis", "=", "1", "# Compute the phase differential", "dphase", "=", "np", ".", "empty", "(", "phase", ".", "shape", ",", "dtype", "=", "phase", ".", "dtype", ")", "zero_idx", "=", "[", "slice", "(", "None", ")", "]", "*", "phase", ".", "ndim", "zero_idx", "[", "axis", "]", "=", "slice", "(", "1", ")", "else_idx", "=", "[", "slice", "(", "None", ")", "]", "*", "phase", ".", "ndim", "else_idx", "[", "axis", "]", "=", "slice", "(", "1", ",", "None", ")", "zero_idx", "=", "tuple", "(", "zero_idx", ")", "else_idx", "=", "tuple", "(", "else_idx", ")", "dphase", "[", "zero_idx", "]", "=", "phase", "[", "zero_idx", "]", "dphase", "[", "else_idx", "]", "=", "np", ".", "diff", "(", "np", ".", "unwrap", "(", "phase", ",", "axis", "=", "axis", ")", ",", "axis", "=", "axis", ")", "return", "dphase" ]
29.875
16.625
def insort_event_right(self, event, lo=0, hi=None): """Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(self.queue) while lo < hi: mid = (lo + hi) // 2 if event[0] < self.queue[mid][0]: hi = mid else: lo = mid + 1 self.queue.insert(lo, event)
[ "def", "insort_event_right", "(", "self", ",", "event", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "lo", "<", "0", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "hi", "is", "None", ":", "hi", "=", "len", "(", "self", ".", "queue", ")", "while", "lo", "<", "hi", ":", "mid", "=", "(", "lo", "+", "hi", ")", "//", "2", "if", "event", "[", "0", "]", "<", "self", ".", "queue", "[", "mid", "]", "[", "0", "]", ":", "hi", "=", "mid", "else", ":", "lo", "=", "mid", "+", "1", "self", ".", "queue", ".", "insert", "(", "lo", ",", "event", ")" ]
28.5
20.958333
def wait(self, *args, **kwargs): """Wait for the completion event to be set.""" if _debug: IOCB._debug("wait(%d) %r %r", self.ioID, args, kwargs) # waiting from a non-daemon thread could be trouble return self.ioComplete.wait(*args, **kwargs)
[ "def", "wait", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "_debug", ":", "IOCB", ".", "_debug", "(", "\"wait(%d) %r %r\"", ",", "self", ".", "ioID", ",", "args", ",", "kwargs", ")", "# waiting from a non-daemon thread could be trouble", "return", "self", ".", "ioComplete", ".", "wait", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
45
18.666667
def top_path(sources, sinks, net_flux): """ Use the Dijkstra algorithm for finding the shortest path connecting a set of source states from a set of sink states. Parameters ---------- sources : array_like, int One-dimensional list of nodes to define the source states. sinks : array_like, int One-dimensional list of nodes to define the sink states. net_flux : np.ndarray, shape = [n_states, n_states] Net flux of the MSM Returns ------- top_path : np.ndarray Array corresponding to the top path between sources and sinks. It is an array of states visited along the path. flux : float Flux traveling through this path -- this is equal to the minimum flux over edges in the path. See Also -------- msmbuilder.tpt.paths : function for calculating many high flux paths through a network. References ---------- .. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of transition paths. J. Stat. Phys. 123, 503-523 (2006). .. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory for Markov jump processes. Multiscale Model. Simul. 7, 1192-1219 (2009). .. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding pathways in network models of coarse-grained protein dynamics. J. Chem. Phys. 130, 205102 (2009). .. [4] Dijkstra, E. W. A Note on Two Problems in Connexion with Graphs. Numeriche Mathematik 1, 269-271 (1959). .. [5] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations." PNAS 106.45 (2009): 19011-19016. """ sources = np.array(sources, dtype=np.int).reshape((-1,)) sinks = np.array(sinks, dtype=np.int).reshape((-1,)) n_states = net_flux.shape[0] queue = list(sources) # nodes to check (the "queue") # going to use list.pop method so I can't keep it as an array visited = np.zeros(n_states).astype(np.bool) # have we already checked this node? previous_node = np.ones(n_states).astype(np.int) * -1 # what node was found before finding this one min_fluxes = np.ones(n_states) * -1 * np.inf # what is the flux of the highest flux path # from this node to the source set. min_fluxes[sources] = np.inf # source states are connected to the source # so this distance is zero which means the flux is infinite while len(queue) > 0: # iterate until there's nothing to check anymore test_node = queue.pop(min_fluxes[queue].argmax()) # find the node in the queue that has the # highest flux path to it from the source set visited[test_node] = True if np.all(visited[sinks]): # if we've visited all of the sink states, then we just have to choose # the path that goes to the sink state that is closest to the source break # if test_node in sinks: # I *think* we want to break ... or are there paths we still # need to check? 
# continue # I think if sinks is more than one state we have to check everything # now update the distances for each neighbor of the test_node: neighbors = np.where(net_flux[test_node, :] > 0)[0] if len(neighbors) == 0: continue new_fluxes = net_flux[test_node, neighbors].flatten() # flux from test_node to each neighbor new_fluxes[np.where(new_fluxes > min_fluxes[test_node])] = min_fluxes[test_node] # previous step to get to test_node was lower flux, so that is still the path flux ind = np.where((1 - visited[neighbors]) & (new_fluxes > min_fluxes[neighbors])) min_fluxes[neighbors[ind]] = new_fluxes[ind] previous_node[neighbors[ind]] = test_node # each of these neighbors came from this test_node # we don't want to update the nodes that have already been visited queue.extend(neighbors[ind]) top_path = [] # populate the path in reverse top_path.append(int(sinks[min_fluxes[sinks].argmax()])) # find the closest sink state while previous_node[top_path[-1]] != -1: top_path.append(previous_node[top_path[-1]]) return np.array(top_path[::-1]), min_fluxes[top_path[0]]
[ "def", "top_path", "(", "sources", ",", "sinks", ",", "net_flux", ")", ":", "sources", "=", "np", ".", "array", "(", "sources", ",", "dtype", "=", "np", ".", "int", ")", ".", "reshape", "(", "(", "-", "1", ",", ")", ")", "sinks", "=", "np", ".", "array", "(", "sinks", ",", "dtype", "=", "np", ".", "int", ")", ".", "reshape", "(", "(", "-", "1", ",", ")", ")", "n_states", "=", "net_flux", ".", "shape", "[", "0", "]", "queue", "=", "list", "(", "sources", ")", "# nodes to check (the \"queue\")", "# going to use list.pop method so I can't keep it as an array", "visited", "=", "np", ".", "zeros", "(", "n_states", ")", ".", "astype", "(", "np", ".", "bool", ")", "# have we already checked this node?", "previous_node", "=", "np", ".", "ones", "(", "n_states", ")", ".", "astype", "(", "np", ".", "int", ")", "*", "-", "1", "# what node was found before finding this one", "min_fluxes", "=", "np", ".", "ones", "(", "n_states", ")", "*", "-", "1", "*", "np", ".", "inf", "# what is the flux of the highest flux path", "# from this node to the source set.", "min_fluxes", "[", "sources", "]", "=", "np", ".", "inf", "# source states are connected to the source", "# so this distance is zero which means the flux is infinite", "while", "len", "(", "queue", ")", ">", "0", ":", "# iterate until there's nothing to check anymore", "test_node", "=", "queue", ".", "pop", "(", "min_fluxes", "[", "queue", "]", ".", "argmax", "(", ")", ")", "# find the node in the queue that has the", "# highest flux path to it from the source set", "visited", "[", "test_node", "]", "=", "True", "if", "np", ".", "all", "(", "visited", "[", "sinks", "]", ")", ":", "# if we've visited all of the sink states, then we just have to choose", "# the path that goes to the sink state that is closest to the source", "break", "# if test_node in sinks: # I *think* we want to break ... 
or are there paths we still", "# need to check?", "# continue", "# I think if sinks is more than one state we have to check everything", "# now update the distances for each neighbor of the test_node:", "neighbors", "=", "np", ".", "where", "(", "net_flux", "[", "test_node", ",", ":", "]", ">", "0", ")", "[", "0", "]", "if", "len", "(", "neighbors", ")", "==", "0", ":", "continue", "new_fluxes", "=", "net_flux", "[", "test_node", ",", "neighbors", "]", ".", "flatten", "(", ")", "# flux from test_node to each neighbor", "new_fluxes", "[", "np", ".", "where", "(", "new_fluxes", ">", "min_fluxes", "[", "test_node", "]", ")", "]", "=", "min_fluxes", "[", "test_node", "]", "# previous step to get to test_node was lower flux, so that is still the path flux", "ind", "=", "np", ".", "where", "(", "(", "1", "-", "visited", "[", "neighbors", "]", ")", "&", "(", "new_fluxes", ">", "min_fluxes", "[", "neighbors", "]", ")", ")", "min_fluxes", "[", "neighbors", "[", "ind", "]", "]", "=", "new_fluxes", "[", "ind", "]", "previous_node", "[", "neighbors", "[", "ind", "]", "]", "=", "test_node", "# each of these neighbors came from this test_node", "# we don't want to update the nodes that have already been visited", "queue", ".", "extend", "(", "neighbors", "[", "ind", "]", ")", "top_path", "=", "[", "]", "# populate the path in reverse", "top_path", ".", "append", "(", "int", "(", "sinks", "[", "min_fluxes", "[", "sinks", "]", ".", "argmax", "(", ")", "]", ")", ")", "# find the closest sink state", "while", "previous_node", "[", "top_path", "[", "-", "1", "]", "]", "!=", "-", "1", ":", "top_path", ".", "append", "(", "previous_node", "[", "top_path", "[", "-", "1", "]", "]", ")", "return", "np", ".", "array", "(", "top_path", "[", ":", ":", "-", "1", "]", ")", ",", "min_fluxes", "[", "top_path", "[", "0", "]", "]" ]
37.269565
23.33913
def peak_interval(data, alpha=_alpha, npoints=_npoints): """ Identify interval using Gaussian kernel density estimator. """ peak = kde_peak(data,npoints) x = np.sort(data.flat); n = len(x) # The number of entries in the interval window = int(np.rint((1.0-alpha)*n)) # The start, stop, and width of all possible intervals starts = x[:n-window]; ends = x[window:] widths = ends - starts # Just the intervals containing the peak select = (peak >= starts) & (peak <= ends) widths = widths[select] if len(widths) == 0: raise ValueError('Too few elements for interval calculation') min_idx = np.argmin(widths) lo = x[min_idx] hi = x[min_idx+window] return interval(peak,lo,hi)
[ "def", "peak_interval", "(", "data", ",", "alpha", "=", "_alpha", ",", "npoints", "=", "_npoints", ")", ":", "peak", "=", "kde_peak", "(", "data", ",", "npoints", ")", "x", "=", "np", ".", "sort", "(", "data", ".", "flat", ")", "n", "=", "len", "(", "x", ")", "# The number of entries in the interval", "window", "=", "int", "(", "np", ".", "rint", "(", "(", "1.0", "-", "alpha", ")", "*", "n", ")", ")", "# The start, stop, and width of all possible intervals", "starts", "=", "x", "[", ":", "n", "-", "window", "]", "ends", "=", "x", "[", "window", ":", "]", "widths", "=", "ends", "-", "starts", "# Just the intervals containing the peak", "select", "=", "(", "peak", ">=", "starts", ")", "&", "(", "peak", "<=", "ends", ")", "widths", "=", "widths", "[", "select", "]", "if", "len", "(", "widths", ")", "==", "0", ":", "raise", "ValueError", "(", "'Too few elements for interval calculation'", ")", "min_idx", "=", "np", ".", "argmin", "(", "widths", ")", "lo", "=", "x", "[", "min_idx", "]", "hi", "=", "x", "[", "min_idx", "+", "window", "]", "return", "interval", "(", "peak", ",", "lo", ",", "hi", ")" ]
36.55
10.35
def deflections_from_grid(self, grid, **kwargs): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ eta = np.multiply(1. / self.scale_radius, self.grid_to_grid_radii(grid)) deflection_grid = np.multiply((4. * self.kappa_s * self.scale_radius / eta), self.deflection_func_sph(eta)) return self.grid_to_grid_cartesian(grid, deflection_grid)
[ "def", "deflections_from_grid", "(", "self", ",", "grid", ",", "*", "*", "kwargs", ")", ":", "eta", "=", "np", ".", "multiply", "(", "1.", "/", "self", ".", "scale_radius", ",", "self", ".", "grid_to_grid_radii", "(", "grid", ")", ")", "deflection_grid", "=", "np", ".", "multiply", "(", "(", "4.", "*", "self", ".", "kappa_s", "*", "self", ".", "scale_radius", "/", "eta", ")", ",", "self", ".", "deflection_func_sph", "(", "eta", ")", ")", "return", "self", ".", "grid_to_grid_cartesian", "(", "grid", ",", "deflection_grid", ")" ]
38.533333
30.666667
def _replaces(self): """tge""" return {concat(a, c, b[1:]) for a, b in self.slices[:-1] for c in ALPHABET}
[ "def", "_replaces", "(", "self", ")", ":", "return", "{", "concat", "(", "a", ",", "c", ",", "b", "[", "1", ":", "]", ")", "for", "a", ",", "b", "in", "self", ".", "slices", "[", ":", "-", "1", "]", "for", "c", "in", "ALPHABET", "}" ]
30
7
def is_encodable(self, typ: TypeStr, arg: Any) -> bool: """ Determines if the python value ``arg`` is encodable as a value of the ABI type ``typ``. :param typ: A string representation for the ABI type against which the python value ``arg`` will be checked e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc. :param arg: The python value whose encodability should be checked. :returns: ``True`` if ``arg`` is encodable as a value of the ABI type ``typ``. Otherwise, ``False``. """ encoder = self._registry.get_encoder(typ) try: encoder.validate_value(arg) except EncodingError: return False except AttributeError: try: encoder(arg) except EncodingError: return False return True
[ "def", "is_encodable", "(", "self", ",", "typ", ":", "TypeStr", ",", "arg", ":", "Any", ")", "->", "bool", ":", "encoder", "=", "self", ".", "_registry", ".", "get_encoder", "(", "typ", ")", "try", ":", "encoder", ".", "validate_value", "(", "arg", ")", "except", "EncodingError", ":", "return", "False", "except", "AttributeError", ":", "try", ":", "encoder", "(", "arg", ")", "except", "EncodingError", ":", "return", "False", "return", "True" ]
33.615385
20.230769
def map_psmnrcol_to_quantcol(quantcols, psmcols, tablefn_map): """This function yields tuples of table filename, isobaric quant column and if necessary number-of-PSM column""" if not psmcols: for fn in quantcols: for qcol in quantcols[fn]: yield (tablefn_map[fn], qcol) else: for fn in quantcols: for qcol, psmcol in zip(quantcols[fn], psmcols[fn]): yield (tablefn_map[fn], qcol, psmcol)
[ "def", "map_psmnrcol_to_quantcol", "(", "quantcols", ",", "psmcols", ",", "tablefn_map", ")", ":", "if", "not", "psmcols", ":", "for", "fn", "in", "quantcols", ":", "for", "qcol", "in", "quantcols", "[", "fn", "]", ":", "yield", "(", "tablefn_map", "[", "fn", "]", ",", "qcol", ")", "else", ":", "for", "fn", "in", "quantcols", ":", "for", "qcol", ",", "psmcol", "in", "zip", "(", "quantcols", "[", "fn", "]", ",", "psmcols", "[", "fn", "]", ")", ":", "yield", "(", "tablefn_map", "[", "fn", "]", ",", "qcol", ",", "psmcol", ")" ]
42.272727
12.909091
def serialize_to_bundle(self, transformer, path, model_name, serialize_node=True): """ :type transformer: sklearn.tree.tree.BaseDecisionTree :type path: str :type model_name: str :type serialize_node: bool :param transformer: :param path: :param model_name: :return: """ # Define attributes attributes = list() attributes.append(('num_features', transformer.n_features_)) if isinstance(transformer, DecisionTreeClassifier): attributes.append(('num_classes', int(transformer.n_classes_))) inputs = [] outputs = [] if serialize_node: # define node inputs and outputs inputs = [{ "name": transformer.input_features, "port": "features" }] outputs = [{ "name": transformer.prediction_column, "port": "prediction" }] self.serialize(transformer, path, model_name, attributes, inputs, outputs, node=serialize_node) # Serialize tree.json tree_path = "{}/{}.node/tree.json".format(path, model_name) if not serialize_node: tree_path = "{}/{}/tree.json".format(path, model_name) with open(tree_path, 'w') as outfile: self.serialize_tree(transformer, transformer.feature_names, outfile)
[ "def", "serialize_to_bundle", "(", "self", ",", "transformer", ",", "path", ",", "model_name", ",", "serialize_node", "=", "True", ")", ":", "# Define attributes", "attributes", "=", "list", "(", ")", "attributes", ".", "append", "(", "(", "'num_features'", ",", "transformer", ".", "n_features_", ")", ")", "if", "isinstance", "(", "transformer", ",", "DecisionTreeClassifier", ")", ":", "attributes", ".", "append", "(", "(", "'num_classes'", ",", "int", "(", "transformer", ".", "n_classes_", ")", ")", ")", "inputs", "=", "[", "]", "outputs", "=", "[", "]", "if", "serialize_node", ":", "# define node inputs and outputs", "inputs", "=", "[", "{", "\"name\"", ":", "transformer", ".", "input_features", ",", "\"port\"", ":", "\"features\"", "}", "]", "outputs", "=", "[", "{", "\"name\"", ":", "transformer", ".", "prediction_column", ",", "\"port\"", ":", "\"prediction\"", "}", "]", "self", ".", "serialize", "(", "transformer", ",", "path", ",", "model_name", ",", "attributes", ",", "inputs", ",", "outputs", ",", "node", "=", "serialize_node", ")", "# Serialize tree.json", "tree_path", "=", "\"{}/{}.node/tree.json\"", ".", "format", "(", "path", ",", "model_name", ")", "if", "not", "serialize_node", ":", "tree_path", "=", "\"{}/{}/tree.json\"", ".", "format", "(", "path", ",", "model_name", ")", "with", "open", "(", "tree_path", ",", "'w'", ")", "as", "outfile", ":", "self", ".", "serialize_tree", "(", "transformer", ",", "transformer", ".", "feature_names", ",", "outfile", ")" ]
35.375
20.625
def center_plot(self, ax): """ Centers and keep the aspect ratio in a 3D representation. Created to help higher classes to manage cascade representation of multiple lower objects. :param ax: Axes to apply the method. :type ax: mplot3d.Axes3D :returns: None """ # Domain domain = self.get_domain() bound = np.max(domain[1]-domain[0]) centroid = self.get_centroid() pos = np.vstack((centroid-bound/2, centroid+bound/2)) # Axis limits ax.set_xlim3d(left=pos[0,0], right=pos[1,0]) ax.set_ylim3d(bottom=pos[0,1], top=pos[1,1]) ax.set_zlim3d(bottom=pos[0,2], top=pos[1,2])
[ "def", "center_plot", "(", "self", ",", "ax", ")", ":", "# Domain\r", "domain", "=", "self", ".", "get_domain", "(", ")", "bound", "=", "np", ".", "max", "(", "domain", "[", "1", "]", "-", "domain", "[", "0", "]", ")", "centroid", "=", "self", ".", "get_centroid", "(", ")", "pos", "=", "np", ".", "vstack", "(", "(", "centroid", "-", "bound", "/", "2", ",", "centroid", "+", "bound", "/", "2", ")", ")", "# Axis limits\r", "ax", ".", "set_xlim3d", "(", "left", "=", "pos", "[", "0", ",", "0", "]", ",", "right", "=", "pos", "[", "1", ",", "0", "]", ")", "ax", ".", "set_ylim3d", "(", "bottom", "=", "pos", "[", "0", ",", "1", "]", ",", "top", "=", "pos", "[", "1", ",", "1", "]", ")", "ax", ".", "set_zlim3d", "(", "bottom", "=", "pos", "[", "0", ",", "2", "]", ",", "top", "=", "pos", "[", "1", ",", "2", "]", ")" ]
33.181818
15.818182
def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset): """Wrapper for mmap2""" return self.sys_mmap2(address, size, prot, flags, fd, offset)
[ "def", "sys_mmap_pgoff", "(", "self", ",", "address", ",", "size", ",", "prot", ",", "flags", ",", "fd", ",", "offset", ")", ":", "return", "self", ".", "sys_mmap2", "(", "address", ",", "size", ",", "prot", ",", "flags", ",", "fd", ",", "offset", ")" ]
55
18
def setParamValue(self, row, **kwargs): """Sets the arguments as field=val for parameter indexed by *row* :param row: the ith parameter number :type row: int """ param = self._parameters[row] for key, val in kwargs.items(): param[key] = val
[ "def", "setParamValue", "(", "self", ",", "row", ",", "*", "*", "kwargs", ")", ":", "param", "=", "self", ".", "_parameters", "[", "row", "]", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "param", "[", "key", "]", "=", "val" ]
30
9.5
