Dataset columns (with observed value ranges):

    text           stringlengths   75 to 104k
    code_tokens    sequence
    avg_line_len   float64         7.91 to 980
    score          float64         0 to 0.18
def indx_table(node_dict, tbl_mode=False):
    """Print a table for the node dict; conditionally include index numbers."""
    nt = PrettyTable()
    nt.header = False
    nt.padding_width = 2
    nt.border = False
    clr_num = C_TI + "NUM"
    clr_name = C_TI + "NAME"
    clr_state = "STATE" + C_NORM
    t_lu = {True: [clr_num, "NAME", "REGION", "CLOUD", "SIZE",
                   "PUBLIC IP", clr_state],
            False: [clr_name, "REGION", "CLOUD", "SIZE",
                    "PUBLIC IP", clr_state]}
    nt.add_row(t_lu[tbl_mode])
    for i, node in node_dict.items():
        state = C_STAT[node.state] + node.state + C_NORM
        inum = C_WARN + str(i) + C_NORM
        if node.public_ips:
            n_ip = node.public_ips
        else:
            n_ip = "-"
        r_lu = {True: [inum, node.name, node.zone, node.cloud,
                       node.size, n_ip, state],
                False: [node.name, node.zone, node.cloud,
                        node.size, n_ip, state]}
        nt.add_row(r_lu[tbl_mode])
    if not tbl_mode:
        print(nt)
    else:
        idx_tbl = nt.get_string()
        return idx_tbl

avg_line_len: 35.612903, score: 0.000882
def _is_national_number_suffix_of_other(numobj1, numobj2):
    """Returns true when one national number is the suffix of the other
    or both are the same.
    """
    nn1 = str(numobj1.national_number)
    nn2 = str(numobj2.national_number)
    # Note that endswith returns True if the numbers are equal.
    return nn1.endswith(nn2) or nn2.endswith(nn1)

avg_line_len: 43.625, score: 0.002809
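
A minimal sketch of the suffix check in action, using a hypothetical namedtuple in place of the real phone-number objects (which this record does not define):

    from collections import namedtuple

    # Hypothetical stand-in: only the national_number attribute is needed.
    NumObj = namedtuple('NumObj', 'national_number')

    assert _is_national_number_suffix_of_other(NumObj(1234567), NumObj(4567))      # suffix
    assert _is_national_number_suffix_of_other(NumObj(4567), NumObj(4567))         # equal numbers also match
    assert not _is_national_number_suffix_of_other(NumObj(1234567), NumObj(999))   # no suffix relation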
def echo_html_fenye_str(rec_num, fenye_num):
    """Generate the HTML for pagination navigation."""
    pagination_num = int(math.ceil(rec_num * 1.0 / 10))
    if pagination_num == 1 or pagination_num == 0:
        fenye_str = ''
    elif pagination_num > 1:
        pager_mid, pager_pre, pager_next, pager_last, pager_home = '', '', '', '', ''
        fenye_str = '<ul class="pagination">'
        if fenye_num > 1:
            pager_home = '''<li class="{0}" name='fenye' onclick='change(this);'
                value='{1}'><a>First Page</a></li>'''.format('', 1)
            pager_pre = '''<li class="{0}" name='fenye' onclick='change(this);'
                value='{1}'><a>Previous Page</a></li>'''.format('', fenye_num - 1)
        if fenye_num > 5:
            cur_num = fenye_num - 4
        else:
            cur_num = 1
        if pagination_num > 10 and cur_num < pagination_num - 10:
            show_num = cur_num + 10
        else:
            show_num = pagination_num + 1
        for num in range(cur_num, show_num):
            if num == fenye_num:
                checkstr = 'active'
            else:
                checkstr = ''
            tmp_str_df = '''<li class="{0}" name='fenye' onclick='change(this);'
                value='{1}'><a>{1}</a></li>'''.format(checkstr, num)
            pager_mid += tmp_str_df
        if fenye_num < pagination_num:
            pager_next = '''<li class="{0}" name='fenye' onclick='change(this);'
                value='{1}'><a>Next Page</a></li>'''.format('', fenye_num + 1)
            pager_last = '''<li class="{0}" name='fenye' onclick='change(this);'
                value='{1}'><a>End Page</a></li>'''.format('', pagination_num)
        fenye_str += pager_home + pager_pre + pager_mid + pager_next + pager_last
        fenye_str += '</ul>'
    else:
        return ''
    return fenye_str

avg_line_len: 32.907407, score: 0.004918
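
A short worked example of the paging window, assuming `math` is imported and the function above is in scope: 95 records give ceil(95 / 10) = 10 pages; on page 7 the window starts at cur_num = 7 - 4 = 3 and, since 10 pages is not more than 10, ends at show_num = 11.

    import math

    html = echo_html_fenye_str(95, 7)
    # -> '<ul class="pagination">...' with <li> entries for pages 3-10,
    #    'active' on page 7, plus First/Previous/Next/End controls.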
def import_ecdsakey_from_public_pem(pem, scheme='ecdsa-sha2-nistp256'):
    """
    <Purpose>
      Generate an ECDSA key object from 'pem'. In addition, a keyid identifier
      for the ECDSA key is generated. The object returned conforms to
      'securesystemslib.formats.ECDSAKEY_SCHEMA' and has the form:

      {'keytype': 'ecdsa-sha2-nistp256',
       'scheme': 'ecdsa-sha2-nistp256',
       'keyid': keyid,
       'keyval': {'public': '-----BEGIN PUBLIC KEY----- ...',
                  'private': ''}}

      The public portion of the ECDSA key is a string in PEM format.

      >>> ecdsa_key = generate_ecdsa_key()
      >>> public = ecdsa_key['keyval']['public']
      >>> ecdsa_key['keyval']['private'] = ''
      >>> scheme = ecdsa_key['scheme']
      >>> ecdsa_key2 = import_ecdsakey_from_public_pem(public, scheme)
      >>> securesystemslib.formats.ECDSAKEY_SCHEMA.matches(ecdsa_key)
      True
      >>> securesystemslib.formats.ECDSAKEY_SCHEMA.matches(ecdsa_key2)
      True

    <Arguments>
      pem:
        A string in PEM format (it should contain a public ECDSA key).

      scheme:
        The signature scheme used by the imported key.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'pem' is improperly
      formatted.

    <Side Effects>
      Only the public portion of the PEM is extracted. Leading or trailing
      whitespace is not included in the PEM string stored in the rsakey
      object returned.

    <Returns>
      A dictionary containing the ECDSA keys and other identifying
      information. Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
    """
    # Does 'pem' have the correct format?
    # This check will ensure the arguments have the appropriate number
    # of objects and object types, and that all dict keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)

    # Is 'scheme' properly formatted?
    securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)

    # Ensure the PEM string has a public header and footer. Although a simple
    # validation of 'pem' is performed here, a fully valid PEM string is
    # needed later to successfully verify signatures. Performing stricter
    # validation of PEMs is left to the external libraries that use 'pem'.
    if is_pem_public(pem):
        public_pem = extract_pem(pem, private_pem=False)
    else:
        raise securesystemslib.exceptions.FormatError('Invalid public'
            ' pem: ' + repr(pem))

    # Begin building the ECDSA key dictionary.
    ecdsakey_dict = {}
    keytype = 'ecdsa-sha2-nistp256'

    # Generate the keyid of the ECDSA key. 'key_value' corresponds to the
    # 'keyval' entry of the 'ECDSAKEY_SCHEMA' dictionary. The private key
    # information is not included in the generation of the 'keyid' identifier.
    # Convert any '\r\n' (e.g., Windows) newline characters to '\n' so that a
    # consistent keyid is generated.
    key_value = {'public': public_pem.replace('\r\n', '\n'),
                 'private': ''}
    keyid = _get_keyid(keytype, scheme, key_value)

    ecdsakey_dict['keytype'] = keytype
    ecdsakey_dict['scheme'] = scheme
    ecdsakey_dict['keyid'] = keyid
    ecdsakey_dict['keyval'] = key_value

    # Add "keyid_hash_algorithms" so that equal ECDSA keys with different
    # keyids can be associated using supported keyid_hash_algorithms.
    ecdsakey_dict['keyid_hash_algorithms'] = \
        securesystemslib.settings.HASH_ALGORITHMS

    return ecdsakey_dict

avg_line_len: 37.033333, score: 0.009936
async def send_activities(self, context: TurnContext, activities: List[Activity]):
    """
    Logs a series of activities to the console.
    :param context:
    :param activities:
    :return:
    """
    if context is None:
        raise TypeError('ConsoleAdapter.send_activities(): `context` argument cannot be None.')
    if type(activities) != list:
        raise TypeError('ConsoleAdapter.send_activities(): `activities` argument must be a list.')
    if len(activities) == 0:
        raise ValueError('ConsoleAdapter.send_activities(): `activities` argument cannot have a length of 0.')

    async def next_activity(i: int):
        responses = []
        if i < len(activities):
            responses.append(ResourceResponse())
            a = activities[i]
            if a.type == 'delay':
                await asyncio.sleep(a.delay)
                await next_activity(i + 1)
            elif a.type == ActivityTypes.message:
                if a.attachments is not None and len(a.attachments) > 0:
                    append = '(1 attachment)' if len(a.attachments) == 1 else f'({len(a.attachments)} attachments)'
                    print(f'{a.text} {append}')
                else:
                    print(a.text)
                await next_activity(i + 1)
            else:
                print(f'[{a.type}]')
                await next_activity(i + 1)
        else:
            return responses

    await next_activity(0)

avg_line_len: 40.526316, score: 0.004439
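
A hypothetical call site, assuming an async context and that `adapter` and `context` are a ConsoleAdapter and TurnContext from the botbuilder libraries this record references; constructing Activity with keyword arguments is also an assumption here:

    # Prints both messages to the console in order; the validation at the
    # top of send_activities raises TypeError/ValueError on bad arguments.
    await adapter.send_activities(context, [
        Activity(type=ActivityTypes.message, text='hello'),
        Activity(type=ActivityTypes.message, text='world'),
    ])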
def next(self):
    """Return the next page.

    The page label is defined in ``settings.NEXT_LABEL``.
    Return an empty string if current page is the last.
    """
    if self._page.has_next():
        return self._endless_page(
            self._page.next_page_number(),
            label=settings.NEXT_LABEL)
    return ''

avg_line_len: 32.181818, score: 0.005495
def unpack_xml(text) -> ET.ElementTree:
    """Unpack an XML string from AniDB API."""
    etree: ET.ElementTree = ET.parse(io.StringIO(text))
    _check_for_errors(etree)
    return etree

avg_line_len: 36.8, score: 0.005319
def RemoveMultiLineComments(filename, lines, error):
    """Removes multiline (c-style) comments from lines."""
    lineix = 0
    while lineix < len(lines):
        lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
        if lineix_begin >= len(lines):
            return
        lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
        if lineix_end >= len(lines):
            error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
                  'Could not find end of multi-line comment')
            return
        RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
        lineix = lineix_end + 1

avg_line_len: 42.571429, score: 0.011494
def get_module_name(self, path_args):
    """returns the module_name and remaining path args.

    return -- tuple -- (module_name, path_args)"""
    controller_prefix = self.controller_prefix
    cset = self.module_names
    module_name = controller_prefix
    mod_name = module_name
    while path_args:
        mod_name += "." + path_args[0]
        if mod_name in cset:
            module_name = mod_name
            path_args.pop(0)
        else:
            break
    return module_name, path_args

avg_line_len: 32, score: 0.003571
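
A worked example, assuming the method above is callable as a plain function and using a hypothetical minimal host class for the two attributes it reads:

    class _Router:
        # Hypothetical attributes; in the source these live on the real router.
        controller_prefix = "controllers"
        module_names = {"controllers.api", "controllers.api.v1"}

    # Consumes path segments while they extend a known module name, then stops.
    print(get_module_name(_Router(), ["api", "v1", "users", "42"]))
    # -> ('controllers.api.v1', ['users', '42'])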
def _add_parser_arguments_analyze(self, subparsers):
    """Create a parser for the 'analyze' subcommand."""
    lyze_pars = subparsers.add_parser(
        "analyze",
        help="Perform basic analysis on this catalog.")
    lyze_pars.add_argument(
        '--count', '-c',
        dest='count', default=False, action='store_true',
        help='Determine counts of entries, files, etc.')
    return lyze_pars

avg_line_len: 34.307692, score: 0.004367
def wr_dat_files(self, expanded=False, write_dir=''):
    """
    Write each of the specified dat files.
    """
    # Get the set of dat files to be written, and
    # the channels to be written to each file.
    file_names, dat_channels = describe_list_indices(self.file_name)

    # Get the fmt and byte offset corresponding to each dat file
    DAT_FMTS = {}
    dat_offsets = {}
    for fn in file_names:
        DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]]
        # byte_offset may not be present
        if self.byte_offset is None:
            dat_offsets[fn] = 0
        else:
            dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]]

    # Write the dat files
    if expanded:
        for fn in file_names:
            wr_dat_file(fn, DAT_FMTS[fn], None, dat_offsets[fn], True,
                        [self.e_d_signal[ch] for ch in dat_channels[fn]],
                        self.samps_per_frame, write_dir=write_dir)
    else:
        # Create a copy to prevent overwrite
        dsig = self.d_signal.copy()
        for fn in file_names:
            wr_dat_file(fn, DAT_FMTS[fn],
                        dsig[:, dat_channels[fn][0]:dat_channels[fn][-1] + 1],
                        dat_offsets[fn], write_dir=write_dir)

avg_line_len: 39.029412, score: 0.002941
def set_staff_url(parser, token):
    """
    Assign a URL to be the "admin link" of this page.

    Example::

        {% set_staff_url %}{% url 'admin:fluent_pages_page_change' page.id %}{% end_set_staff_url %}
    """
    nodelist = parser.parse(('end_set_staff_url',))
    parser.delete_first_token()
    return AdminUrlNode(nodelist)

avg_line_len: 32.9, score: 0.005917
def trace_api(self):
    """
    Helper for trace-related API calls.

    See
    https://cloud.google.com/trace/docs/reference/v2/rpc/google.devtools.cloudtrace.v2
    """
    if self._trace_api is None:
        self._trace_api = make_trace_api(self)
    return self._trace_api

avg_line_len: 28.090909, score: 0.00627
def FilterItems(self, filterFn, key=None):
    """Filter items within a Reservoir, using a filtering function.

    Args:
      filterFn: A function that returns True for the items to be kept.
      key: An optional bucket key to filter. If not specified, will filter
        all buckets.

    Returns:
      The number of items removed.
    """
    with self._mutex:
        if key:
            if key in self._buckets:
                return self._buckets[key].FilterItems(filterFn)
            else:
                return 0
        else:
            return sum(bucket.FilterItems(filterFn)
                       for bucket in self._buckets.values())

avg_line_len: 30.35, score: 0.007987
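
A small hypothetical usage, assuming `reservoir` is an instance of the Reservoir class this method belongs to and that its items expose a `step` attribute (an assumption for illustration):

    # Keep only items below a step cutoff; with no key, every bucket is
    # filtered and the total number of removed items is returned.
    removed = reservoir.FilterItems(lambda item: item.step < 1000)

    # Restrict the filtering to a single bucket key.
    removed_one = reservoir.FilterItems(lambda item: item.step < 1000, key='loss')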
def _validate_namespaces(namespaces):
    """Validate wildcards and renaming in namespaces.

    Target namespaces should have the same number of wildcards as the source.
    No target namespaces overlap exactly with each other. Logs a warning when
    wildcard namespaces have a chance of overlapping.
    """
    for source, namespace in namespaces.items():
        target = namespace.dest_name
        _validate_namespace(source)
        _validate_namespace(target)
        if source.count("*") > 1 or target.count("*") > 1:
            raise errors.InvalidConfiguration(
                "The namespace mapping from '%s' to '%s' cannot contain more "
                "than one '*' character." % (source, target)
            )
        if source.count("*") != target.count("*"):
            raise errors.InvalidConfiguration(
                "The namespace mapping from '%s' to '%s' must contain the "
                "same number of '*' characters." % (source, target)
            )
        if "*" not in source:
            continue
        # Make sure that wildcards are not moved from database name to
        # collection name or vice versa, eg "db*.foo" => "db.foo_*"
        if (
            wildcard_in_db(source)
            and not wildcard_in_db(target)
            or (not wildcard_in_db(source) and wildcard_in_db(target))
        ):
            raise errors.InvalidConfiguration(
                "The namespace mapping from '%s' to '%s' is invalid. A '*' "
                "that appears in the source database name must also appear "
                "in the target database name. A '*' that appears in the "
                "source collection name must also appear in the target "
                "collection name." % (source, target)
            )
    for source1, source2 in combinations(namespaces.keys(), 2):
        if wildcards_overlap(source1, source2):
            LOG.warning(
                'Namespaces "%s" and "%s" may match the '
                "same source namespace.",
                source1,
                source2,
            )
        target1 = namespaces[source1].dest_name
        target2 = namespaces[source2].dest_name
        if target1 == target2:
            raise errors.InvalidConfiguration(
                "Multiple namespaces cannot be combined into one target "
                "namespace. Trying to map '%s' to '%s' but '%s' already "
                "corresponds to '%s' in the target system."
                % (source2, target2, source1, target1)
            )
        if wildcards_overlap(target1, target2):
            LOG.warning(
                "Multiple namespaces cannot be combined into one target "
                "namespace. Mapping from '%s' to '%s' might overlap "
                "with mapping from '%s' to '%s'."
                % (source2, target2, source1, target1)
            )

avg_line_len: 45.983333, score: 0.001065
def check_job_status(self, key=JobDetails.topkey, fail_running=False,
                     fail_pending=False, force_check=False):
    """Check the status of a particular job.

    By default this checks the status of the top-level job, but can be made
    to drill into the sub-jobs.

    Parameters
    ----------
    key : str
        Key associated to the job in question

    fail_running : `bool`
        If True, consider running jobs as failed

    fail_pending : `bool`
        If True, consider pending jobs as failed

    force_check : `bool`
        Drill into status of individual jobs instead of using the top-level
        job only

    Returns
    -------
    status : `JobStatus`
        Job status flag
    """
    if key in self.jobs:
        status = self.jobs[key].status
        if status in [JobStatus.unknown, JobStatus.ready,
                      JobStatus.pending, JobStatus.running] or force_check:
            status = self._interface.check_job(self.jobs[key])
            if status == JobStatus.running and fail_running:
                status = JobStatus.failed
            if status == JobStatus.pending and fail_pending:
                status = JobStatus.failed
            self.jobs[key].status = status
            if self._job_archive:
                self._job_archive.register_job(self.jobs[key])
    else:
        status = JobStatus.no_job
    return status

avg_line_len: 33.355556, score: 0.003883
def hil_rc_inputs_raw_send(self, time_usec, chan1_raw, chan2_raw, chan3_raw,
                           chan4_raw, chan5_raw, chan6_raw, chan7_raw,
                           chan8_raw, chan9_raw, chan10_raw, chan11_raw,
                           chan12_raw, rssi, force_mavlink1=False):
    '''
    Sent from simulation to autopilot. The RAW values of the RC channels
    received. The standard PPM modulation is as follows:
    1000 microseconds: 0%, 2000 microseconds: 100%. Individual
    receivers/transmitters might violate this specification.

    time_usec  : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    chan1_raw  : RC channel 1 value, in microseconds (uint16_t)
    chan2_raw  : RC channel 2 value, in microseconds (uint16_t)
    chan3_raw  : RC channel 3 value, in microseconds (uint16_t)
    chan4_raw  : RC channel 4 value, in microseconds (uint16_t)
    chan5_raw  : RC channel 5 value, in microseconds (uint16_t)
    chan6_raw  : RC channel 6 value, in microseconds (uint16_t)
    chan7_raw  : RC channel 7 value, in microseconds (uint16_t)
    chan8_raw  : RC channel 8 value, in microseconds (uint16_t)
    chan9_raw  : RC channel 9 value, in microseconds (uint16_t)
    chan10_raw : RC channel 10 value, in microseconds (uint16_t)
    chan11_raw : RC channel 11 value, in microseconds (uint16_t)
    chan12_raw : RC channel 12 value, in microseconds (uint16_t)
    rssi       : Receive signal strength indicator, 0: 0%, 255: 100% (uint8_t)
    '''
    return self.send(
        self.hil_rc_inputs_raw_encode(
            time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw,
            chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw,
            chan11_raw, chan12_raw, rssi),
        force_mavlink1=force_mavlink1)

avg_line_len: 84.88, score: 0.008854
def rangeChange(self, pw, ranges):
    """Adjusts the stimulus signal to keep it at the top of a plot,
    after any adjustment to the axes ranges takes place.

    This is a slot for the undocumented pyqtgraph signal sigRangeChanged.
    From what I can tell the arguments are:

    :param pw: reference to the emitting object (plot widget in my case)
    :type pw: object
    :param ranges: I am only interested when this turns out to be a nested
        list of axis bounds
    :type ranges: object
    """
    if hasattr(ranges, '__iter__'):
        # adjust the stim signal so that it falls in the correct range
        yrange_size = ranges[1][1] - ranges[1][0]
        stim_x, stim_y = self.stimPlot.getData()
        if stim_y is not None:
            stim_height = yrange_size * STIM_HEIGHT
            # take it to 0
            stim_y = stim_y - np.amin(stim_y)
            # normalize
            if np.amax(stim_y) != 0:
                stim_y = stim_y / np.amax(stim_y)
            # scale for new size
            stim_y = stim_y * stim_height
            # raise to right place in plot
            stim_y = stim_y + (ranges[1][1] - (stim_height * 1.1 + (stim_height * 0.2)))
            self.stimPlot.setData(stim_x, stim_y)
        # rmax = self.rasterTop*yrange_size + ranges[1][0]
        # rmin = self.rasterBottom*yrange_size + ranges[1][0]
        self.updateRasterBounds()

avg_line_len: 47, score: 0.00269
def _fix_valid_indices(cls, valid_indices, insertion_index, dim):
    """Add indices for H&S inserted elements."""
    # TODO: make this accept an immutable sequence for valid_indices
    # (a tuple) and return an immutable sequence rather than mutating an
    # argument.
    indices = np.array(sorted(valid_indices[dim]))
    slice_index = np.sum(indices <= insertion_index)
    indices[slice_index:] += 1
    indices = np.insert(indices, slice_index, insertion_index + 1)
    valid_indices[dim] = indices.tolist()
    return valid_indices

avg_line_len: 51.909091, score: 0.003442
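
A worked trace under assumed inputs (in the source this is a classmethod, so it would be reached through its class):

    # valid_indices = {0: [0, 1, 2, 3]}, insertion_index = 1, dim = 0
    #   indices                     -> [0, 1, 2, 3]
    #   slice_index = sum(ix <= 1)  -> 2
    #   shift the tail by one       -> [0, 1, 3, 4]
    #   insert 1 + 1 at position 2  -> [0, 1, 2, 3, 4]
    # valid_indices becomes {0: [0, 1, 2, 3, 4]}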
def fromPandas(cls, df):
    """
    Create a :class:`~amplpy.DataFrame` from a pandas DataFrame.
    """
    assert pd is not None
    if isinstance(df, pd.Series):
        df = pd.DataFrame(df)
    else:
        assert isinstance(df, pd.DataFrame)
    keys = [
        key if isinstance(key, tuple) else (key,)
        for key in df.index.tolist()
    ]
    index = [
        ('index{}'.format(i), cindex)
        for i, cindex in enumerate(zip(*keys))
    ]
    columns = [
        (str(cname), df[cname].tolist())
        for cname in df.columns.tolist()
    ]
    return cls(index=index, columns=columns)

avg_line_len: 30.545455, score: 0.002886
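
A hypothetical round trip, assuming `DataFrame` is the amplpy class that owns this classmethod and pandas is available:

    import pandas as pd

    pdf = pd.DataFrame({'cost': [1.5, 2.0]}, index=['a', 'b'])
    adf = DataFrame.fromPandas(pdf)
    # Internally: keys    -> [('a',), ('b',)]
    #             index   -> [('index0', ('a', 'b'))]
    #             columns -> [('cost', [1.5, 2.0])]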
def from_times(cls, times, delta_t=DEFAULT_OBSERVATION_TIME):
    """
    Create a TimeMOC from an `astropy.time.Time`.

    Parameters
    ----------
    times : `astropy.time.Time`
        astropy observation times
    delta_t : `astropy.time.TimeDelta`, optional
        the duration of one observation. It is set to 30 min by default.
        This data is used to compute the most efficient TimeMOC order to
        represent the observations (best order = the least precise order
        which is able to discriminate two observations separated by
        ``delta_t``).

    Returns
    -------
    time_moc : `~mocpy.tmoc.TimeMOC`
    """
    times_arr = np.asarray(times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
    intervals_arr = np.vstack((times_arr, times_arr + 1)).T

    # degrade the TimeMOC to the order computed from ``delta_t``
    order = TimeMOC.time_resolution_to_order(delta_t)
    return TimeMOC(IntervalSet(intervals_arr)).degrade_to_order(order)

avg_line_len: 43.565217, score: 0.003906
def swipe(self, x1: int, y1: int, x2: int, y2: int, duration: int = 100) -> None:
    '''Simulate a finger swipe. (1000 ms = 1 s)'''
    self._execute('-s', self.device_sn, 'shell', 'input', 'swipe',
                  str(x1), str(y1), str(x2), str(y2), str(duration))

avg_line_len: 68.25, score: 0.014493
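
A hypothetical call, assuming `device` is an instance of the adb wrapper class this method belongs to:

    # Swipe upward from (540, 1600) to (540, 400) over 300 ms; the arguments
    # are forwarded to `adb -s <serial> shell input swipe`.
    device.swipe(540, 1600, 540, 400, duration=300)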
def call(cls, method, *args, **kwargs):
    """ Call a remote api method and return the result."""
    api = None
    empty_key = kwargs.pop('empty_key', False)
    try:
        api = cls.get_api_connector()
        apikey = cls.get('api.key')
        if not apikey and not empty_key:
            cls.echo("No apikey found, please use 'gandi setup' "
                     "command")
            sys.exit(1)
    except MissingConfiguration:
        if api and empty_key:
            apikey = ''
        elif not kwargs.get('safe'):
            cls.echo("No configuration found, please use 'gandi setup' "
                     "command")
            sys.exit(1)
        else:
            return []

    # make the call
    cls.debug('calling method: %s' % method)
    for arg in args:
        cls.debug('with params: %r' % arg)
    try:
        resp = api.request(method, apikey, *args,
                           **{'dry_run': kwargs.get('dry_run', False),
                              'return_dry_run': kwargs.get('return_dry_run', False)})
        cls.dump('responded: %r' % resp)
        return resp
    except APICallFailed as err:
        if kwargs.get('safe'):
            return []
        if err.code == 530040:
            cls.echo("Error: It appears you haven't purchased any credits "
                     "yet.\n"
                     "Please visit https://www.gandi.net/credit/buy to "
                     "learn more and buy credits.")
            sys.exit(1)
        if err.code == 510150:
            cls.echo("Invalid API key, please use 'gandi setup' command.")
            sys.exit(1)
        if isinstance(err, DryRunException):
            if kwargs.get('return_dry_run', False):
                return err.dry_run
            else:
                for msg in err.dry_run:
                    # TODO use trads with %s
                    cls.echo(msg['reason'])
                    cls.echo('\t' + ' '.join(msg['attr']))
                sys.exit(1)
        error = UsageError(err.errors)
        setattr(error, 'code', err.code)
        raise error

avg_line_len: 40.375, score: 0.000864
def unescape(s):
    """
    Unescapes a string that may contain commas, tabs, newlines and
    backslashes. Commas are decoded from tabs.

    :param s: (string) to unescape
    :returns: (string) unescaped string
    """
    assert isinstance(s, basestring)
    s = s.replace('\t', ',')
    s = s.replace('\\,', ',')
    s = s.replace('\\n', '\n')
    s = s.replace('\\\\', '\\')
    return s

avg_line_len: 21.9375, score: 0.021858
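
A few illustrative round trips (note the `basestring` check makes this Python 2 code):

    assert unescape('a\tb') == 'a,b'                  # tabs decode to commas
    assert unescape('line\\nbreak') == 'line\nbreak'  # escaped newline
    assert unescape('back\\\\slash') == 'back\\slash' # escaped backslash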
def run(self):
    """Run the whole impact function.

    :return: A tuple with the status of the IF and an error message if
        needed.
        The status is ANALYSIS_SUCCESS if everything was fine.
        The status is ANALYSIS_FAILED_BAD_INPUT if the client should fix
        something.
        The status is ANALYSIS_FAILED_BAD_CODE if something went wrong
        from the code.
    :rtype: (int, m.Message)
    """
    self._start_datetime = datetime.now()

    if not self._is_ready:
        message = generate_input_error_message(
            tr('You need to run `prepare` first.'),
            m.Paragraph(tr(
                'In order to run the analysis, you need to call '
                '"prepare" before this function.')))
        return ANALYSIS_FAILED_BAD_INPUT, message

    try:
        self.reset_state()
        clear_prof_data()
        self._run()

        # Get the profiling log
        self._performance_log = profiling_log()
        self.callback(8, 8, analysis_steps['profiling'])
        self._profiling_table = create_profile_layer(
            self.performance_log_message())
        result, name = self.datastore.add_layer(
            self._profiling_table, self._profiling_table.keywords['title'])
        if not result:
            raise Exception(
                'Something went wrong with the datastore : {error_message}'
                .format(error_message=name))
        self._profiling_table = self.datastore.layer(name)
        self.profiling.keywords['provenance_data'] = self.provenance
        write_iso19115_metadata(
            self.profiling.source(), self.profiling.keywords)

        # Style all output layers.
        self.style()

        # End of the impact function. We need to set this IF not ready.
        self._is_ready = False

        # Set back input layers.
        self.hazard = load_layer(
            get_provenance(self.provenance, provenance_hazard_layer))[0]
        self.exposure = load_layer(
            get_provenance(self.provenance, provenance_exposure_layer))[0]
        aggregation_path = get_provenance(
            self.provenance, provenance_aggregation_layer)
        LOGGER.debug('Aggregation %s' % aggregation_path)
        if aggregation_path:
            self.aggregation = load_layer(aggregation_path)[0]
        else:
            self.aggregation = None

    except NoFeaturesInExtentError:
        warning_heading = m.Heading(
            tr('No features in the extent'), **WARNING_STYLE)
        warning_message = tr(
            'There are no features in the analysis extent.')
        suggestion_heading = m.Heading(
            tr('Suggestion'), **SUGGESTION_STYLE)
        suggestion = tr(
            'Try zooming in to a bigger area or check your features ('
            'geometry and attribute table). For instance, an empty '
            'geometry or a hazard without value are removed during the '
            'process.')
        message = m.Message()
        message.add(warning_heading)
        message.add(warning_message)
        message.add(suggestion_heading)
        message.add(suggestion)
        return ANALYSIS_FAILED_BAD_INPUT, message

    except SpatialIndexCreationError:
        warning_heading = m.Heading(
            tr('Layer geometry issue'), **WARNING_STYLE)
        warning_message = tr(
            'There is a problem while creating the spatial index. '
            'Unfortunately, there is nothing you can do. Maybe try '
            'another area or another aggregation layer.')
        message = m.Message()
        message.add(warning_heading)
        message.add(warning_message)
        return ANALYSIS_FAILED_BAD_INPUT, message

    except ProcessingInstallationError:
        warning_heading = m.Heading(
            tr('Configuration issue'), **WARNING_STYLE)
        warning_message = tr(
            'There is a problem with the Processing plugin.')
        suggestion_heading = m.Heading(
            tr('Suggestion'), **SUGGESTION_STYLE)
        suggestion = tr(
            'InaSAFE depends on the QGIS Processing plugin. This is a '
            'core plugin that ships with QGIS. It used to be possible to '
            'install the processing plugin from the QGIS Plugin Manager, '
            'however we advise you not to use these versions since the '
            'Plugin Manager version may be incompatible with the '
            'version needed by InaSAFE. To resolve this issue, check in '
            'your (QGIS profile path)/python/plugins directory if you '
            'have a processing folder. If you do, remove the processing '
            'folder and then restart QGIS. If this issue persists, please '
            'report the problem to the InaSAFE team.')
        message = m.Message()
        message.add(warning_heading)
        message.add(warning_message)
        message.add(suggestion_heading)
        message.add(suggestion)
        return ANALYSIS_FAILED_BAD_INPUT, message

    except InaSAFEError as e:
        message = get_error_message(e)
        return ANALYSIS_FAILED_BAD_CODE, message

    except MemoryError:
        warning_heading = m.Heading(tr('Memory issue'), **WARNING_STYLE)
        warning_message = tr(
            'There is not enough free memory to run this analysis.')
        suggestion_heading = m.Heading(
            tr('Suggestion'), **SUGGESTION_STYLE)
        suggestion = tr(
            'Try zooming in to a smaller area or using a raster layer '
            'with a coarser resolution to speed up execution and reduce '
            'memory requirements. You could also try adding more RAM to '
            'your computer.')
        message = m.Message()
        message.add(warning_heading)
        message.add(warning_message)
        message.add(suggestion_heading)
        message.add(suggestion)
        return ANALYSIS_FAILED_BAD_INPUT, message

    except Exception as e:
        if self.debug_mode:
            # We run in debug mode, we do not want to catch the exception.
            # You should download the First Aid plugin for instance.
            raise
        else:
            message = get_error_message(e)
            return ANALYSIS_FAILED_BAD_CODE, message

    else:
        return ANALYSIS_SUCCESS, None
43.155844
0.000294
def read_playlists(self): self.playlists = [] self.selected_playlist = -1 files = glob.glob(path.join(self.stations_dir, '*.csv')) if len(files) == 0: return 0, -1 else: for a_file in files: a_file_name = ''.join(path.basename(a_file).split('.')[:-1]) a_file_size = self._bytes_to_human(path.getsize(a_file)) a_file_time = ctime(path.getmtime(a_file)) self.playlists.append([a_file_name, a_file_time, a_file_size, a_file]) self.playlists.sort() """ get already loaded playlist id """ for i, a_playlist in enumerate(self.playlists): if a_playlist[-1] == self.stations_file: self.selected_playlist = i break return len(self.playlists), self.selected_playlist
[ "def", "read_playlists", "(", "self", ")", ":", "self", ".", "playlists", "=", "[", "]", "self", ".", "selected_playlist", "=", "-", "1", "files", "=", "glob", ".", "glob", "(", "path", ".", "join", "(", "self", ".", "stations_dir", ",", "'*.csv'", ")", ")", "if", "len", "(", "files", ")", "==", "0", ":", "return", "0", ",", "-", "1", "else", ":", "for", "a_file", "in", "files", ":", "a_file_name", "=", "''", ".", "join", "(", "path", ".", "basename", "(", "a_file", ")", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "a_file_size", "=", "self", ".", "_bytes_to_human", "(", "path", ".", "getsize", "(", "a_file", ")", ")", "a_file_time", "=", "ctime", "(", "path", ".", "getmtime", "(", "a_file", ")", ")", "self", ".", "playlists", ".", "append", "(", "[", "a_file_name", ",", "a_file_time", ",", "a_file_size", ",", "a_file", "]", ")", "self", ".", "playlists", ".", "sort", "(", ")", "for", "i", ",", "a_playlist", "in", "enumerate", "(", "self", ".", "playlists", ")", ":", "if", "a_playlist", "[", "-", "1", "]", "==", "self", ".", "stations_file", ":", "self", ".", "selected_playlist", "=", "i", "break", "return", "len", "(", "self", ".", "playlists", ")", ",", "self", ".", "selected_playlist" ]
44.315789
0.003488
def _volume_command(ramp, volume): """ Set the value if a volume level is provided, else print the current volume level. """ if volume is not None: ramp.set_volume(float(volume)) else: print(ramp.volume)
[ "def", "_volume_command", "(", "ramp", ",", "volume", ")", ":", "if", "volume", "is", "not", "None", ":", "ramp", ".", "set_volume", "(", "float", "(", "volume", ")", ")", "else", ":", "print", "(", "ramp", ".", "volume", ")" ]
32.571429
0.004274
def log_variable_sizes(var_list, tag, verbose=True, mesh_to_impl=None): """Log the sizes and shapes of variables, and the total size. Args: var_list: a list of variables; defaults to trainable_variables tag: a string; defaults to "Trainable Variables" verbose: bool, if True, log every weight; otherwise, log total size only. mesh_to_impl: an optional map from Mesh to MeshImpl """ if not var_list: return name_to_var = {v.name: v for v in var_list} total_size = 0 total_slice_size = 0 for v_name in sorted(list(name_to_var)): v = name_to_var[v_name] v_size = v.shape.size if mesh_to_impl is not None: slice_size = mesh_to_impl[v.mesh].slice_size(v.shape) else: slice_size = 0 total_slice_size += slice_size if verbose: tf.logging.info( "Variable %s size %s slice_size %s %s", v.name.ljust(60), str(v_size).ljust(12), str(slice_size).ljust(12), str(v.shape).ljust(60)) if isinstance(v, StackedVariable): for n in v.original_names: tf.logging.info(" " + n) total_size += v_size tf.logging.info("%s count: %s Total size: %s Total slice_size: %s", tag.ljust(30), str(len(var_list)).ljust(6), str(total_size).ljust(15), str(total_slice_size).ljust(15))
[ "def", "log_variable_sizes", "(", "var_list", ",", "tag", ",", "verbose", "=", "True", ",", "mesh_to_impl", "=", "None", ")", ":", "if", "not", "var_list", ":", "return", "name_to_var", "=", "{", "v", ".", "name", ":", "v", "for", "v", "in", "var_list", "}", "total_size", "=", "0", "total_slice_size", "=", "0", "for", "v_name", "in", "sorted", "(", "list", "(", "name_to_var", ")", ")", ":", "v", "=", "name_to_var", "[", "v_name", "]", "v_size", "=", "v", ".", "shape", ".", "size", "if", "mesh_to_impl", "is", "not", "None", ":", "slice_size", "=", "mesh_to_impl", "[", "v", ".", "mesh", "]", ".", "slice_size", "(", "v", ".", "shape", ")", "else", ":", "slice_size", "=", "0", "total_slice_size", "+=", "slice_size", "if", "verbose", ":", "tf", ".", "logging", ".", "info", "(", "\"Variable %s size %s slice_size %s %s\"", ",", "v", ".", "name", ".", "ljust", "(", "60", ")", ",", "str", "(", "v_size", ")", ".", "ljust", "(", "12", ")", ",", "str", "(", "slice_size", ")", ".", "ljust", "(", "12", ")", ",", "str", "(", "v", ".", "shape", ")", ".", "ljust", "(", "60", ")", ")", "if", "isinstance", "(", "v", ",", "StackedVariable", ")", ":", "for", "n", "in", "v", ".", "original_names", ":", "tf", ".", "logging", ".", "info", "(", "\" \"", "+", "n", ")", "total_size", "+=", "v_size", "tf", ".", "logging", ".", "info", "(", "\"%s count: %s Total size: %s Total slice_size: %s\"", ",", "tag", ".", "ljust", "(", "30", ")", ",", "str", "(", "len", "(", "var_list", ")", ")", ".", "ljust", "(", "6", ")", ",", "str", "(", "total_size", ")", ".", "ljust", "(", "15", ")", ",", "str", "(", "total_slice_size", ")", ".", "ljust", "(", "15", ")", ")" ]
35
0.00951
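A hedged usage sketch for the helper above — it comes from a Mesh TensorFlow context, so var_list is assumed to hold mtf variables exposing .name, .shape.size and .mesh (plain tf.Variable objects would not fit):

# var_list is hypothetical; e.g. graph.trainable_variables on an mtf.Graph
log_variable_sizes(var_list, "Trainable Variables", verbose=True, mesh_to_impl=None)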
def _update_pending_document_state(cursor, document_id, is_license_accepted, are_roles_accepted): """Update the state of the document's state values.""" args = (bool(is_license_accepted), bool(are_roles_accepted), document_id,) cursor.execute("""\ UPDATE pending_documents SET (license_accepted, roles_accepted) = (%s, %s) WHERE id = %s""", args)
[ "def", "_update_pending_document_state", "(", "cursor", ",", "document_id", ",", "is_license_accepted", ",", "are_roles_accepted", ")", ":", "args", "=", "(", "bool", "(", "is_license_accepted", ")", ",", "bool", "(", "are_roles_accepted", ")", ",", "document_id", ",", ")", "cursor", ".", "execute", "(", "\"\"\"\\\nUPDATE pending_documents\nSET (license_accepted, roles_accepted) = (%s, %s)\nWHERE id = %s\"\"\"", ",", "args", ")" ]
41.5
0.002358
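A hedged usage sketch with a DB-API cursor; the psycopg2 connection and document id below are illustrative assumptions (the %s placeholders suggest psycopg2):

import psycopg2
conn = psycopg2.connect("dbname=example")  # hypothetical DSN
with conn.cursor() as cursor:
    _update_pending_document_state(cursor, 42, True, True)  # document id 42 is illustrative
conn.commit()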
def population_variant_regions(items, merged=False): """Retrieve the variant region BED file from a population of items. If tumor/normal, return the tumor BED file. If a population, return the BED file covering the most bases. """ def _get_variant_regions(data): out = dd.get_variant_regions(data) or dd.get_sample_callable(data) # Only need to merge for variant region inputs, not callable BED regions which don't overlap if merged and dd.get_variant_regions(data): merged_out = dd.get_variant_regions_merged(data) if merged_out: out = merged_out else: out = merge_overlaps(out, data) return out import pybedtools if len(items) == 1: return _get_variant_regions(items[0]) else: paired = vcfutils.get_paired(items) if paired: return _get_variant_regions(paired.tumor_data) else: vrs = [] for data in items: vr_bed = _get_variant_regions(data) if vr_bed: vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed)) vrs.sort(reverse=True) if vrs: return vrs[0][1]
[ "def", "population_variant_regions", "(", "items", ",", "merged", "=", "False", ")", ":", "def", "_get_variant_regions", "(", "data", ")", ":", "out", "=", "dd", ".", "get_variant_regions", "(", "data", ")", "or", "dd", ".", "get_sample_callable", "(", "data", ")", "# Only need to merge for variant region inputs, not callable BED regions which don't overlap", "if", "merged", "and", "dd", ".", "get_variant_regions", "(", "data", ")", ":", "merged_out", "=", "dd", ".", "get_variant_regions_merged", "(", "data", ")", "if", "merged_out", ":", "out", "=", "merged_out", "else", ":", "out", "=", "merge_overlaps", "(", "out", ",", "data", ")", "return", "out", "import", "pybedtools", "if", "len", "(", "items", ")", "==", "1", ":", "return", "_get_variant_regions", "(", "items", "[", "0", "]", ")", "else", ":", "paired", "=", "vcfutils", ".", "get_paired", "(", "items", ")", "if", "paired", ":", "return", "_get_variant_regions", "(", "paired", ".", "tumor_data", ")", "else", ":", "vrs", "=", "[", "]", "for", "data", "in", "items", ":", "vr_bed", "=", "_get_variant_regions", "(", "data", ")", "if", "vr_bed", ":", "vrs", ".", "append", "(", "(", "pybedtools", ".", "BedTool", "(", "vr_bed", ")", ".", "total_coverage", "(", ")", ",", "vr_bed", ")", ")", "vrs", ".", "sort", "(", "reverse", "=", "True", ")", "if", "vrs", ":", "return", "vrs", "[", "0", "]", "[", "1", "]" ]
38.40625
0.002381
def populate(self, priority, address, rtr, data): """ :return: None """ assert isinstance(data, bytes) self.needs_high_priority(priority) self.needs_no_rtr(rtr) self.needs_data(data, 3) self.set_attributes(priority, address, rtr) self.closed = self.byte_to_channels(data[0]) self.opened = self.byte_to_channels(data[1]) self.closed_long = self.byte_to_channels(data[2])
[ "def", "populate", "(", "self", ",", "priority", ",", "address", ",", "rtr", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "bytes", ")", "self", ".", "needs_high_priority", "(", "priority", ")", "self", ".", "needs_no_rtr", "(", "rtr", ")", "self", ".", "needs_data", "(", "data", ",", "3", ")", "self", ".", "set_attributes", "(", "priority", ",", "address", ",", "rtr", ")", "self", ".", "closed", "=", "self", ".", "byte_to_channels", "(", "data", "[", "0", "]", ")", "self", ".", "opened", "=", "self", ".", "byte_to_channels", "(", "data", "[", "1", "]", ")", "self", ".", "closed_long", "=", "self", ".", "byte_to_channels", "(", "data", "[", "2", "]", ")" ]
37.166667
0.004376
def length(self): """The number of elements changed. :rtype: int .. code:: python assert change.length == len(change.indices) assert change.length == len(change.elements) """ span = self._stop - self._start length, modulo = divmod(span, self._step) if length < 0: return 0 if modulo != 0: return length + 1 return length
[ "def", "length", "(", "self", ")", ":", "span", "=", "self", ".", "_stop", "-", "self", ".", "_start", "length", ",", "modulo", "=", "divmod", "(", "span", ",", "self", ".", "_step", ")", "if", "length", "<", "0", ":", "return", "0", "if", "modulo", "!=", "0", ":", "return", "length", "+", "1", "return", "length" ]
25.117647
0.004515
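A worked sketch of the divmod arithmetic in the property above, assuming _start=0, _stop=10, _step=3:

span = 10 - 0                     # 10
length, modulo = divmod(span, 3)  # length == 3, modulo == 1
# modulo != 0, so the property returns length + 1 == 4,
# matching the four changed indices 0, 3, 6 and 9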
def tracebacks_from_file(fileobj, reverse=False): """Generator that yields tracebacks found in a file object With reverse=True, searches backwards from the end of the file. """ if reverse: lines = deque() for line in BackwardsReader(fileobj): lines.appendleft(line) if tb_head in line: yield next(tracebacks_from_lines(lines)) lines.clear() else: for traceback in tracebacks_from_lines(fileobj): yield traceback
[ "def", "tracebacks_from_file", "(", "fileobj", ",", "reverse", "=", "False", ")", ":", "if", "reverse", ":", "lines", "=", "deque", "(", ")", "for", "line", "in", "BackwardsReader", "(", "fileobj", ")", ":", "lines", ".", "appendleft", "(", "line", ")", "if", "tb_head", "in", "line", ":", "yield", "next", "(", "tracebacks_from_lines", "(", "lines", ")", ")", "lines", ".", "clear", "(", ")", "else", ":", "for", "traceback", "in", "tracebacks_from_lines", "(", "fileobj", ")", ":", "yield", "traceback" ]
30
0.001901
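A hedged usage sketch; "app.log" is a hypothetical file, and tb_head, BackwardsReader and tracebacks_from_lines are assumed to be defined alongside the generator above:

with open("app.log") as fileobj:
    for tb in tracebacks_from_file(fileobj, reverse=True):
        print(tb)  # most recent traceback first when reverse=True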
def _if(ctx, logical_test, value_if_true=0, value_if_false=False): """ Returns one value if the condition evaluates to TRUE, and another value if it evaluates to FALSE """ return value_if_true if conversions.to_boolean(logical_test, ctx) else value_if_false
[ "def", "_if", "(", "ctx", ",", "logical_test", ",", "value_if_true", "=", "0", ",", "value_if_false", "=", "False", ")", ":", "return", "value_if_true", "if", "conversions", ".", "to_boolean", "(", "logical_test", ",", "ctx", ")", "else", "value_if_false" ]
53.8
0.010989
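A hedged call sketch; ctx stands for the expression engine's evaluation context (not shown here), and conversions.to_boolean is assumed to accept a Python bool:

result = _if(ctx, 2 > 1, "yes", "no")  # -> "yes"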
def to_drake_step(inputs, output): """ Args: inputs: collection of input Steps output: output Step Returns: a string of the drake step for the given inputs and output """ i = [output._yaml_filename] i.extend(map(lambda i: i._target_filename, list(inputs))) i.extend(output.dependencies) # add source file of output and its non-target inputs # if they're not in the drain library objects = get_inputs(output, target=False) objects.add(output) sources = set([os.path.abspath(inspect.getsourcefile(o.__class__)) for o in objects]) i.extend([s for s in sources if not s.startswith(os.path.dirname(__file__))]) output_str = '%' + output.__class__.__name__ if output.name: output_str += ', %' + output.name if output.target: output_str += ', ' + os.path.join(output._target_filename) return '{output} <- {inputs} [method:drain]\n\n'.format( output=output_str, inputs=str.join(', ', i))
[ "def", "to_drake_step", "(", "inputs", ",", "output", ")", ":", "i", "=", "[", "output", ".", "_yaml_filename", "]", "i", ".", "extend", "(", "map", "(", "lambda", "i", ":", "i", ".", "_target_filename", ",", "list", "(", "inputs", ")", ")", ")", "i", ".", "extend", "(", "output", ".", "dependencies", ")", "# add source file of output and its non-target inputs", "# if they're not in the drain library", "objects", "=", "get_inputs", "(", "output", ",", "target", "=", "False", ")", "objects", ".", "add", "(", "output", ")", "sources", "=", "set", "(", "[", "os", ".", "path", ".", "abspath", "(", "inspect", ".", "getsourcefile", "(", "o", ".", "__class__", ")", ")", "for", "o", "in", "objects", "]", ")", "i", ".", "extend", "(", "[", "s", "for", "s", "in", "sources", "if", "not", "s", ".", "startswith", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "]", ")", "output_str", "=", "'%'", "+", "output", ".", "__class__", ".", "__name__", "if", "output", ".", "name", ":", "output_str", "+=", "', %'", "+", "output", ".", "name", "if", "output", ".", "target", ":", "output_str", "+=", "', '", "+", "os", ".", "path", ".", "join", "(", "output", ".", "_target_filename", ")", "return", "'{output} <- {inputs} [method:drain]\\n\\n'", ".", "format", "(", "output", "=", "output_str", ",", "inputs", "=", "str", ".", "join", "(", "', '", ",", "i", ")", ")" ]
37.269231
0.003018
def left_outer(self): """ Performs Left Outer Join :return left_outer: dict """ self.get_collections_data() left_outer_join = self.merge_join_docs( set(self.collections_data['left'].keys())) return left_outer_join
[ "def", "left_outer", "(", "self", ")", ":", "self", ".", "get_collections_data", "(", ")", "left_outer_join", "=", "self", ".", "merge_join_docs", "(", "set", "(", "self", ".", "collections_data", "[", "'left'", "]", ".", "keys", "(", ")", ")", ")", "return", "left_outer_join" ]
31.222222
0.00692
def is_typing_handler(stream): """ Show message to opponent if user is typing message """ while True: packet = yield from stream.get() session_id = packet.get('session_key') user_opponent = packet.get('username') typing = packet.get('typing') if session_id and user_opponent and typing is not None: user_owner = get_user_from_session(session_id) if user_owner: opponent_socket = ws_connections.get((user_opponent, user_owner.username)) if typing and opponent_socket: yield from target_message(opponent_socket, {'type': 'opponent-typing', 'username': user_opponent}) else: pass # invalid session id else: pass
[ "def", "is_typing_handler", "(", "stream", ")", ":", "while", "True", ":", "packet", "=", "yield", "from", "stream", ".", "get", "(", ")", "session_id", "=", "packet", ".", "get", "(", "'session_key'", ")", "user_opponent", "=", "packet", ".", "get", "(", "'username'", ")", "typing", "=", "packet", ".", "get", "(", "'typing'", ")", "if", "session_id", "and", "user_opponent", "and", "typing", "is", "not", "None", ":", "user_owner", "=", "get_user_from_session", "(", "session_id", ")", "if", "user_owner", ":", "opponent_socket", "=", "ws_connections", ".", "get", "(", "(", "user_opponent", ",", "user_owner", ".", "username", ")", ")", "if", "typing", "and", "opponent_socket", ":", "yield", "from", "target_message", "(", "opponent_socket", ",", "{", "'type'", ":", "'opponent-typing'", ",", "'username'", ":", "user_opponent", "}", ")", "else", ":", "pass", "# invalid session id\r", "else", ":", "pass" ]
41.75
0.003513
def __collapse_stranded(s, proc_strands, names=False, verbose=False): """ Get the union of a set of genomic intervals. Given a list of genomic intervals with chromosome, start, end and strand fields, collapse those intervals with strand in the set <proc_strands> into a set of non-overlapping intervals. Other intervals are ignored. Intervals must be sorted by chromosome and then start coordinate. :note: O(n) time, O(n) space :return: list of intervals that define the collapsed regions. Note that these are all new objects, no existing object from s is returned or altered. Returned regions will all have name "X" and score 0 :param s: list of genomic regions to collapse :param proc_strands: set of acceptable strands; ignore input intervals with strand not found in this set. :param names: if True, accumulate region names. If False, all output regions have name "X" :param verbose: if True, output progress message to stderr. :raise GenomicIntervalError: if the input regions are not correctly sorted (chromosome then start) """ def get_first_matching_index(s, proc_strands): for i in range(0, len(s)): if s[i].strand in proc_strands: return i return None if proc_strands not in [set("+"), set("-"), set(["+", "-"])]: raise GenomicIntervalError("failed collapsing intervals on strands '" + ",".join(proc_strands) + "''; unrecognised " + "strand symbols") first_index = get_first_matching_index(s, proc_strands) if first_index is None: return [] res = [] current = copy.copy(s[first_index]) current.strand = '+' if (proc_strands == set("+") or proc_strands == set(["+", "-"])) else '-' current.score = 0 current.name = "X" if not names else set(s[first_index].name) for i in range(first_index + 1, len(s)): if s[i].strand not in proc_strands: continue # make sure things are sorted.. if (s[i].chrom < s[i - 1].chrom) or \ (s[i].chrom == s[i - 1].chrom and s[i].start < s[i - 1].start): raise GenomicIntervalError("collapsing regions failed. saw this " + "region: " + str(s[i - 1]) + " before this " + "one: " + str(s[i])) # because of sorting order, we know that nothing else exists with # start less than s[i] which we haven't already seen. if s[i].start > current.end or s[i].chrom != current.chrom: if names: current.name = ";".join(current.name) res.append(current) current = copy.copy(s[i]) current.strand = '+' if (proc_strands == set("+") or proc_strands == set(["+", "-"])) else '-' current.score = 0 current.name = "X" if not names else set(s[i].name) else: current.end = max(s[i].end, current.end) if names: current.name.add(s[i].name) # don't forget the last one... if names: current.name = ";".join(current.name) res.append(current) return res
[ "def", "__collapse_stranded", "(", "s", ",", "proc_strands", ",", "names", "=", "False", ",", "verbose", "=", "False", ")", ":", "def", "get_first_matching_index", "(", "s", ",", "proc_strands", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "s", ")", ")", ":", "if", "s", "[", "i", "]", ".", "strand", "in", "proc_strands", ":", "return", "i", "return", "None", "if", "proc_strands", "not", "in", "[", "set", "(", "\"+\"", ")", ",", "set", "(", "\"-\"", ")", ",", "set", "(", "[", "\"+\"", ",", "\"-\"", "]", ")", "]", ":", "raise", "GenomicIntervalError", "(", "\"failed collapsing intervals on strands '\"", "+", "\",\"", ".", "join", "(", "proc_strands", ")", "+", "\"''; unrecognised \"", "+", "\"strand symbols\"", ")", "first_index", "=", "get_first_matching_index", "(", "s", ",", "proc_strands", ")", "if", "first_index", "is", "None", ":", "return", "[", "]", "res", "=", "[", "]", "current", "=", "copy", ".", "copy", "(", "s", "[", "first_index", "]", ")", "current", ".", "strand", "=", "'+'", "if", "(", "proc_strands", "==", "set", "(", "\"+\"", ")", "or", "proc_strands", "==", "set", "(", "[", "\"+\"", ",", "\"-\"", "]", ")", ")", "else", "'-'", "current", ".", "score", "=", "0", "current", ".", "name", "=", "\"X\"", "if", "not", "names", "else", "set", "(", "s", "[", "first_index", "]", ".", "name", ")", "for", "i", "in", "range", "(", "first_index", "+", "1", ",", "len", "(", "s", ")", ")", ":", "if", "s", "[", "i", "]", ".", "strand", "not", "in", "proc_strands", ":", "continue", "# make sure things are sorted..", "if", "(", "s", "[", "i", "]", ".", "chrom", "<", "s", "[", "i", "-", "1", "]", ".", "chrom", ")", "or", "(", "s", "[", "i", "]", ".", "chrom", "==", "s", "[", "i", "-", "1", "]", ".", "chrom", "and", "s", "[", "i", "]", ".", "start", "<", "s", "[", "i", "-", "1", "]", ".", "start", ")", ":", "raise", "GenomicIntervalError", "(", "\"collapsing regions failed. saw this \"", "+", "\"region: \"", "+", "str", "(", "s", "[", "i", "-", "1", "]", ")", "+", "\" before this \"", "+", "\"one: \"", "+", "str", "(", "s", "[", "i", "]", ")", ")", "# because of sorting order, we know that nothing else exists with", "# start less than s[i] which we haven't already seen.", "if", "s", "[", "i", "]", ".", "start", ">", "current", ".", "end", "or", "s", "[", "i", "]", ".", "chrom", "!=", "current", ".", "chrom", ":", "if", "names", ":", "current", ".", "name", "=", "\";\"", ".", "join", "(", "current", ".", "name", ")", "res", ".", "append", "(", "current", ")", "current", "=", "copy", ".", "copy", "(", "s", "[", "i", "]", ")", "current", ".", "strand", "=", "'+'", "if", "(", "proc_strands", "==", "set", "(", "\"+\"", ")", "or", "proc_strands", "==", "set", "(", "[", "\"+\"", ",", "\"-\"", "]", ")", ")", "else", "'-'", "current", ".", "score", "=", "0", "current", ".", "name", "=", "\"X\"", "if", "not", "names", "else", "set", "(", "s", "[", "i", "]", ".", "name", ")", "else", ":", "current", ".", "end", "=", "max", "(", "s", "[", "i", "]", ".", "end", ",", "current", ".", "end", ")", "if", "names", ":", "current", ".", "name", ".", "add", "(", "s", "[", "i", "]", ".", "name", ")", "# don't forget the last one...", "if", "names", ":", "current", ".", "name", "=", "\";\"", ".", "join", "(", "current", ".", "name", ")", "res", ".", "append", "(", "current", ")", "return", "res" ]
40.868421
0.008488
def visit_index(self, node, parent): """visit a Index node by returning a fresh instance of it""" newnode = nodes.Index(parent=parent) newnode.postinit(self.visit(node.value, newnode)) return newnode
[ "def", "visit_index", "(", "self", ",", "node", ",", "parent", ")", ":", "newnode", "=", "nodes", ".", "Index", "(", "parent", "=", "parent", ")", "newnode", ".", "postinit", "(", "self", ".", "visit", "(", "node", ".", "value", ",", "newnode", ")", ")", "return", "newnode" ]
45.4
0.008658
def power_off(self): """Power the device off.""" status = self.status() if status['power']: # Setting power off when it is already off can cause hangs self._send(self.CMD_POWERSAVE + self.CMD_OFF)
[ "def", "power_off", "(", "self", ")", ":", "status", "=", "self", ".", "status", "(", ")", "if", "status", "[", "'power'", "]", ":", "# Setting power off when it is already off can cause hangs", "self", ".", "_send", "(", "self", ".", "CMD_POWERSAVE", "+", "self", ".", "CMD_OFF", ")" ]
45.8
0.012876
def get_assign_groups(line, ops=ops): """ Split a line into groups by assignment (including augmented assignment) """ group = [] for item in line: group.append(item) if item in ops: yield group group = [] yield group
[ "def", "get_assign_groups", "(", "line", ",", "ops", "=", "ops", ")", ":", "group", "=", "[", "]", "for", "item", "in", "line", ":", "group", ".", "append", "(", "item", ")", "if", "item", "in", "ops", ":", "yield", "group", "group", "=", "[", "]", "yield", "group" ]
24.909091
0.003521
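A worked sketch of the generator above; the ops set is illustrative (the real module-level default is not shown):

ops = {"=", "+="}
line = ["a", "=", "b", "=", "1"]
print(list(get_assign_groups(line, ops)))
# -> [['a', '='], ['b', '='], ['1']]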
def _connect(self): """ Use the appropriate connection class; optionally with security. """ timeout = None if self._options is not None and 'timeout' in self._options: timeout = self._options['timeout'] if self._client._credentials: self._connection = self._connection_class( host=self._node.host, port=self._node.http_port, credentials=self._client._credentials, timeout=timeout) else: self._connection = self._connection_class( host=self._node.host, port=self._node.http_port, timeout=timeout) # Forces the population of stats and resources before any # other requests are made. self.server_version
[ "def", "_connect", "(", "self", ")", ":", "timeout", "=", "None", "if", "self", ".", "_options", "is", "not", "None", "and", "'timeout'", "in", "self", ".", "_options", ":", "timeout", "=", "self", ".", "_options", "[", "'timeout'", "]", "if", "self", ".", "_client", ".", "_credentials", ":", "self", ".", "_connection", "=", "self", ".", "_connection_class", "(", "host", "=", "self", ".", "_node", ".", "host", ",", "port", "=", "self", ".", "_node", ".", "http_port", ",", "credentials", "=", "self", ".", "_client", ".", "_credentials", ",", "timeout", "=", "timeout", ")", "else", ":", "self", ".", "_connection", "=", "self", ".", "_connection_class", "(", "host", "=", "self", ".", "_node", ".", "host", ",", "port", "=", "self", ".", "_node", ".", "http_port", ",", "timeout", "=", "timeout", ")", "# Forces the population of stats and resources before any", "# other requests are made.", "self", ".", "server_version" ]
37.272727
0.002378
def give_str_indented(self, tags=False): """ Give indented string representation of the callable. This is used in :ref:`automate-webui`. """ args = self._args[:] kwargs = self._kwargs rv = self._give_str_indented(args, kwargs, tags) if not tags: rv = self.strip_color_tags(rv) return rv
[ "def", "give_str_indented", "(", "self", ",", "tags", "=", "False", ")", ":", "args", "=", "self", ".", "_args", "[", ":", "]", "kwargs", "=", "self", ".", "_kwargs", "rv", "=", "self", ".", "_give_str_indented", "(", "args", ",", "kwargs", ",", "tags", ")", "if", "not", "tags", ":", "rv", "=", "self", ".", "strip_color_tags", "(", "rv", ")", "return", "rv" ]
33.454545
0.005291
def moran_cultural(network): """Generalized cultural Moran process. At each time step, an individual is chosen to receive information from another individual. Nobody dies, but perhaps their ideas do. """ if not network.transmissions(): # first step, replacer is a source replacer = random.choice(network.nodes(type=Source)) replacer.transmit() else: replacer = random.choice(network.nodes(type=Agent)) replaced = random.choice( replacer.neighbors(direction="to", type=Agent)) from operator import attrgetter replacer.transmit( what=max(replacer.infos(), key=attrgetter('creation_time')), to_whom=replaced)
[ "def", "moran_cultural", "(", "network", ")", ":", "if", "not", "network", ".", "transmissions", "(", ")", ":", "# first step, replacer is a source", "replacer", "=", "random", ".", "choice", "(", "network", ".", "nodes", "(", "type", "=", "Source", ")", ")", "replacer", ".", "transmit", "(", ")", "else", ":", "replacer", "=", "random", ".", "choice", "(", "network", ".", "nodes", "(", "type", "=", "Agent", ")", ")", "replaced", "=", "random", ".", "choice", "(", "replacer", ".", "neighbors", "(", "direction", "=", "\"to\"", ",", "type", "=", "Agent", ")", ")", "from", "operator", "import", "attrgetter", "replacer", ".", "transmit", "(", "what", "=", "max", "(", "replacer", ".", "infos", "(", ")", ",", "key", "=", "attrgetter", "(", "'creation_time'", ")", ")", ",", "to_whom", "=", "replaced", ")" ]
36.736842
0.001397
def init_app(self, app=None, blueprint=None, additional_blueprints=None): """Update flask application with our api :param Application app: a flask application """ if app is not None: self.app = app if blueprint is not None: self.blueprint = blueprint for resource in self.resources: self.route(resource['resource'], resource['view'], *resource['urls'], url_rule_options=resource['url_rule_options']) if self.blueprint is not None: self.app.register_blueprint(self.blueprint) if additional_blueprints is not None: for blueprint in additional_blueprints: self.app.register_blueprint(blueprint) self.app.config.setdefault('PAGE_SIZE', 30)
[ "def", "init_app", "(", "self", ",", "app", "=", "None", ",", "blueprint", "=", "None", ",", "additional_blueprints", "=", "None", ")", ":", "if", "app", "is", "not", "None", ":", "self", ".", "app", "=", "app", "if", "blueprint", "is", "not", "None", ":", "self", ".", "blueprint", "=", "blueprint", "for", "resource", "in", "self", ".", "resources", ":", "self", ".", "route", "(", "resource", "[", "'resource'", "]", ",", "resource", "[", "'view'", "]", ",", "*", "resource", "[", "'urls'", "]", ",", "url_rule_options", "=", "resource", "[", "'url_rule_options'", "]", ")", "if", "self", ".", "blueprint", "is", "not", "None", ":", "self", ".", "app", ".", "register_blueprint", "(", "self", ".", "blueprint", ")", "if", "additional_blueprints", "is", "not", "None", ":", "for", "blueprint", "in", "additional_blueprints", ":", "self", ".", "app", ".", "register_blueprint", "(", "blueprint", ")", "self", ".", "app", ".", "config", ".", "setdefault", "(", "'PAGE_SIZE'", ",", "30", ")" ]
33.36
0.002331
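A hedged usage sketch in the usual Flask extension pattern; Api is assumed to be the class that defines init_app above, with resources registered beforehand:

from flask import Flask
app = Flask(__name__)
api = Api()        # hypothetical construction
api.init_app(app)  # routes the registered resources and defaults PAGE_SIZE to 30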
def _get_keycache(self, parentity, branch, turn, tick, *, forward): """Get a frozenset of keys that exist in the entity at the moment. With ``forward=True``, enable an optimization that copies old key sets forward and updates them. """ lru_append(self.keycache, self._kc_lru, (parentity+(branch,), turn, tick), KEYCACHE_MAXSIZE) return self._get_keycachelike( self.keycache, self.keys, self._get_adds_dels, parentity, branch, turn, tick, forward=forward )
[ "def", "_get_keycache", "(", "self", ",", "parentity", ",", "branch", ",", "turn", ",", "tick", ",", "*", ",", "forward", ")", ":", "lru_append", "(", "self", ".", "keycache", ",", "self", ".", "_kc_lru", ",", "(", "parentity", "+", "(", "branch", ",", ")", ",", "turn", ",", "tick", ")", ",", "KEYCACHE_MAXSIZE", ")", "return", "self", ".", "_get_keycachelike", "(", "self", ".", "keycache", ",", "self", ".", "keys", ",", "self", ".", "_get_adds_dels", ",", "parentity", ",", "branch", ",", "turn", ",", "tick", ",", "forward", "=", "forward", ")" ]
43.833333
0.005587
def getVC(self): """ Variance components """ _Cr = decompose_GxE(self.full['Cr']) RV = {} for key in list(_Cr.keys()): RV['var_%s' % key] = sp.array([var_CoXX(_Cr[key], self.Xr)]) RV['var_c'] = self.full['var_c'] RV['var_n'] = self.full['var_n'] return RV
[ "def", "getVC", "(", "self", ")", ":", "_Cr", "=", "decompose_GxE", "(", "self", ".", "full", "[", "'Cr'", "]", ")", "RV", "=", "{", "}", "for", "key", "in", "list", "(", "_Cr", ".", "keys", "(", ")", ")", ":", "RV", "[", "'var_%s'", "%", "key", "]", "=", "sp", ".", "array", "(", "[", "var_CoXX", "(", "_Cr", "[", "key", "]", ",", "self", ".", "Xr", ")", "]", ")", "RV", "[", "'var_c'", "]", "=", "self", ".", "full", "[", "'var_c'", "]", "RV", "[", "'var_n'", "]", "=", "self", ".", "full", "[", "'var_n'", "]", "return", "RV" ]
30.090909
0.01173
def load(self, content): """Parse yaml content.""" # Try parsing the YAML with global tags try: config = yaml.load(content, Loader=self._loader(self._global_tags)) except yaml.YAMLError: raise InvalidConfigError(_("Config is not valid yaml.")) # Try extracting just the tool portion try: config = config[self.tool] except (TypeError, KeyError): return None # If no scopes, just apply global default if not isinstance(config, dict): config = self._apply_default(config, self._global_default) else: # Figure out what scopes exist scoped_keys = set(key for key in self._scopes) # For every scope for key in config: # If scope has custom tags, apply if key in scoped_keys: # local tags, and local default tags, default = self._scopes[key] # Inherit global default if no local default if not default: default = self._global_default config[key] = self._apply_default(config[key], default) self._apply_scope(config[key], tags) # Otherwise just apply global default else: config[key] = self._apply_default(config[key], self._global_default) self._validate(config) return config
[ "def", "load", "(", "self", ",", "content", ")", ":", "# Try parsing the YAML with global tags", "try", ":", "config", "=", "yaml", ".", "load", "(", "content", ",", "Loader", "=", "self", ".", "_loader", "(", "self", ".", "_global_tags", ")", ")", "except", "yaml", ".", "YAMLError", ":", "raise", "InvalidConfigError", "(", "_", "(", "\"Config is not valid yaml.\"", ")", ")", "# Try extracting just the tool portion", "try", ":", "config", "=", "config", "[", "self", ".", "tool", "]", "except", "(", "TypeError", ",", "KeyError", ")", ":", "return", "None", "# If no scopes, just apply global default", "if", "not", "isinstance", "(", "config", ",", "dict", ")", ":", "config", "=", "self", ".", "_apply_default", "(", "config", ",", "self", ".", "_global_default", ")", "else", ":", "# Figure out what scopes exist", "scoped_keys", "=", "set", "(", "key", "for", "key", "in", "self", ".", "_scopes", ")", "# For every scope", "for", "key", "in", "config", ":", "# If scope has custom tags, apply", "if", "key", "in", "scoped_keys", ":", "# local tags, and local default", "tags", ",", "default", "=", "self", ".", "_scopes", "[", "key", "]", "# Inherit global default if no local default", "if", "not", "default", ":", "default", "=", "self", ".", "_global_default", "config", "[", "key", "]", "=", "self", ".", "_apply_default", "(", "config", "[", "key", "]", ",", "default", ")", "self", ".", "_apply_scope", "(", "config", "[", "key", "]", ",", "tags", ")", "# Otherwise just apply global default", "else", ":", "config", "[", "key", "]", "=", "self", ".", "_apply_default", "(", "config", "[", "key", "]", ",", "self", ".", "_global_default", ")", "self", ".", "_validate", "(", "config", ")", "return", "config" ]
35.804878
0.001989
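A hedged usage sketch; the loader construction and the "check50" tool key are illustrative assumptions:

content = "check50:\n  files:\n    - '*.py'"
config = loader.load(content)  # validated config for the tool, or None if the tool key is absent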
def unsubscribe(self, client): """Unsubscribe a client from the channel.""" if client in self.clients: self.clients.remove(client) log("Unsubscribed client {} from channel {}".format(client, self.name))
[ "def", "unsubscribe", "(", "self", ",", "client", ")", ":", "if", "client", "in", "self", ".", "clients", ":", "self", ".", "clients", ".", "remove", "(", "client", ")", "log", "(", "\"Unsubscribed client {} from channel {}\"", ".", "format", "(", "client", ",", "self", ".", "name", ")", ")" ]
47.6
0.012397
def create_Container(client, id): """ Execute the most basic Create of collection. This will create a collection with 400 RUs throughput and default indexing policy """ print("\n2.1 Create Collection - Basic") try: client.CreateContainer(database_link, {"id": id}) print('Collection with id \'{0}\' created'.format(id)) except errors.HTTPFailure as e: if e.status_code == 409: print('A collection with id \'{0}\' already exists'.format(id)) else: raise errors.HTTPFailure(e.status_code) print("\n2.2 Create Collection - With custom index policy") try: coll = { "id": "collection_custom_index_policy", "indexingPolicy": { "indexingMode": "lazy", "automatic": False } } collection = client.CreateContainer(database_link, coll) print('Collection with id \'{0}\' created'.format(collection['id'])) print('IndexPolicy Mode - \'{0}\''.format(collection['indexingPolicy']['indexingMode'])) print('IndexPolicy Automatic - \'{0}\''.format(collection['indexingPolicy']['automatic'])) except errors.CosmosError as e: if e.status_code == 409: print('A collection with id \'{0}\' already exists'.format(collection['id'])) else: raise errors.HTTPFailure(e.status_code) print("\n2.3 Create Collection - With custom offer throughput") try: coll = {"id": "collection_custom_throughput"} collection_options = { 'offerThroughput': 400 } collection = client.CreateContainer(database_link, coll, collection_options ) print('Collection with id \'{0}\' created'.format(collection['id'])) except errors.HTTPFailure as e: if e.status_code == 409: print('A collection with id \'{0}\' already exists'.format(collection['id'])) else: raise errors.HTTPFailure(e.status_code) print("\n2.4 Create Collection - With Unique keys") try: coll = {"id": "collection_unique_keys", 'uniqueKeyPolicy': {'uniqueKeys': [{'paths': ['/field1/field2', '/field3']}]}} collection_options = { 'offerThroughput': 400 } collection = client.CreateContainer(database_link, coll, collection_options ) unique_key_paths = collection['uniqueKeyPolicy']['uniqueKeys'][0]['paths'] print('Collection with id \'{0}\' created'.format(collection['id'])) print('Unique Key Paths - \'{0}\', \'{1}\''.format(unique_key_paths[0], unique_key_paths[1])) except errors.HTTPFailure as e: if e.status_code == 409: print('A collection with id \'{0}\' already exists'.format(collection['id'])) else: raise errors.HTTPFailure(e.status_code) print("\n2.5 Create Collection - With Partition key") try: coll = { "id": "collection_partition_key", "partitionKey": { "paths": [ "/field1" ], "kind": "Hash" } } collection = client.CreateContainer(database_link, coll) print('Collection with id \'{0}\' created'.format(collection['id'])) except errors.CosmosError as e: if e.status_code == 409: print('A collection with id \'{0}\' already exists'.format(collection['id'])) else: raise errors.HTTPFailure(e.status_code)
[ "def", "create_Container", "(", "client", ",", "id", ")", ":", "print", "(", "\"\\n2.1 Create Collection - Basic\"", ")", "try", ":", "client", ".", "CreateContainer", "(", "database_link", ",", "{", "\"id\"", ":", "id", "}", ")", "print", "(", "'Collection with id \\'{0}\\' created'", ".", "format", "(", "id", ")", ")", "except", "errors", ".", "HTTPFailure", "as", "e", ":", "if", "e", ".", "status_code", "==", "409", ":", "print", "(", "'A collection with id \\'{0}\\' already exists'", ".", "format", "(", "id", ")", ")", "else", ":", "raise", "errors", ".", "HTTPFailure", "(", "e", ".", "status_code", ")", "print", "(", "\"\\n2.2 Create Collection - With custom index policy\"", ")", "try", ":", "coll", "=", "{", "\"id\"", ":", "\"collection_custom_index_policy\"", ",", "\"indexingPolicy\"", ":", "{", "\"indexingMode\"", ":", "\"lazy\"", ",", "\"automatic\"", ":", "False", "}", "}", "collection", "=", "client", ".", "CreateContainer", "(", "database_link", ",", "coll", ")", "print", "(", "'Collection with id \\'{0}\\' created'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "print", "(", "'IndexPolicy Mode - \\'{0}\\''", ".", "format", "(", "collection", "[", "'indexingPolicy'", "]", "[", "'indexingMode'", "]", ")", ")", "print", "(", "'IndexPolicy Automatic - \\'{0}\\''", ".", "format", "(", "collection", "[", "'indexingPolicy'", "]", "[", "'automatic'", "]", ")", ")", "except", "errors", ".", "CosmosError", "as", "e", ":", "if", "e", ".", "status_code", "==", "409", ":", "print", "(", "'A collection with id \\'{0}\\' already exists'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "else", ":", "raise", "errors", ".", "HTTPFailure", "(", "e", ".", "status_code", ")", "print", "(", "\"\\n2.3 Create Collection - With custom offer throughput\"", ")", "try", ":", "coll", "=", "{", "\"id\"", ":", "\"collection_custom_throughput\"", "}", "collection_options", "=", "{", "'offerThroughput'", ":", "400", "}", "collection", "=", "client", ".", "CreateContainer", "(", "database_link", ",", "coll", ",", "collection_options", ")", "print", "(", "'Collection with id \\'{0}\\' created'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "except", "errors", ".", "HTTPFailure", "as", "e", ":", "if", "e", ".", "status_code", "==", "409", ":", "print", "(", "'A collection with id \\'{0}\\' already exists'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "else", ":", "raise", "errors", ".", "HTTPFailure", "(", "e", ".", "status_code", ")", "print", "(", "\"\\n2.4 Create Collection - With Unique keys\"", ")", "try", ":", "coll", "=", "{", "\"id\"", ":", "\"collection_unique_keys\"", ",", "'uniqueKeyPolicy'", ":", "{", "'uniqueKeys'", ":", "[", "{", "'paths'", ":", "[", "'/field1/field2'", ",", "'/field3'", "]", "}", "]", "}", "}", "collection_options", "=", "{", "'offerThroughput'", ":", "400", "}", "collection", "=", "client", ".", "CreateContainer", "(", "database_link", ",", "coll", ",", "collection_options", ")", "unique_key_paths", "=", "collection", "[", "'uniqueKeyPolicy'", "]", "[", "'uniqueKeys'", "]", "[", "0", "]", "[", "'paths'", "]", "print", "(", "'Collection with id \\'{0}\\' created'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "print", "(", "'Unique Key Paths - \\'{0}\\', \\'{1}\\''", ".", "format", "(", "unique_key_paths", "[", "0", "]", ",", "unique_key_paths", "[", "1", "]", ")", ")", "except", "errors", ".", "HTTPFailure", "as", "e", ":", "if", "e", ".", "status_code", "==", "409", ":", "print", "(", "'A 
collection with id \\'{0}\\' already exists'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "else", ":", "raise", "errors", ".", "HTTPFailure", "(", "e", ".", "status_code", ")", "print", "(", "\"\\n2.5 Create Collection - With Partition key\"", ")", "try", ":", "coll", "=", "{", "\"id\"", ":", "\"collection_partition_key\"", ",", "\"partitionKey\"", ":", "{", "\"paths\"", ":", "[", "\"/field1\"", "]", ",", "\"kind\"", ":", "\"Hash\"", "}", "}", "collection", "=", "client", ".", "CreateContainer", "(", "database_link", ",", "coll", ")", "print", "(", "'Collection with id \\'{0}\\' created'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "except", "errors", ".", "CosmosError", "as", "e", ":", "if", "e", ".", "status_code", "==", "409", ":", "print", "(", "'A collection with id \\'{0}\\' already exists'", ".", "format", "(", "collection", "[", "'id'", "]", ")", ")", "else", ":", "raise", "errors", ".", "HTTPFailure", "(", "e", ".", "status_code", ")" ]
42.157303
0.011458
def make_gromacs(simulation, directory, clean=False): """Create gromacs directory structure""" if clean is False and os.path.exists(directory): raise ValueError( 'Cannot override {}, use option clean=True'.format(directory)) else: shutil.rmtree(directory, ignore_errors=True) os.mkdir(directory) # Check custom simulation potential if simulation.potential.intermolecular.type == 'custom': for pair in simulation.potential.intermolecular.special_pairs: table = to_table(simulation.potential.intermolecular.pair_interaction(*pair), simulation.cutoff) fname1 = os.path.join(directory, 'table_{}_{}.xvg'.format(pair[0], pair[1])) fname2 = os.path.join(directory, 'table_{}_{}.xvg'.format(pair[1], pair[0])) with open(fname1, 'w') as fd: fd.write(table) with open(fname2, 'w') as fd: fd.write(table) ndx = {'System' : np.arange(simulation.system.n_atoms, dtype='int')} for particle in simulation.potential.intermolecular.particles: idx = simulation.system.where(atom_name=particle)['atom'].nonzero()[0] ndx[particle] = idx with open(os.path.join(directory, 'index.ndx'), 'w') as fd: fd.write(to_ndx(ndx)) # Parameter file mdpfile = to_mdp(simulation) with open(os.path.join(directory, 'grompp.mdp'), 'w') as fd: fd.write(mdpfile) # Topology file topfile = to_top(simulation.system, simulation.potential) with open(os.path.join(directory, 'topol.top'), 'w') as fd: fd.write(topfile) # Simulation file datafile(os.path.join(directory, 'conf.gro'), 'w').write('system', simulation.system) return directory
[ "def", "make_gromacs", "(", "simulation", ",", "directory", ",", "clean", "=", "False", ")", ":", "if", "clean", "is", "False", "and", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "raise", "ValueError", "(", "'Cannot override {}, use option clean=True'", ".", "format", "(", "directory", ")", ")", "else", ":", "shutil", ".", "rmtree", "(", "directory", ",", "ignore_errors", "=", "True", ")", "os", ".", "mkdir", "(", "directory", ")", "# Check custom simulation potential", "if", "simulation", ".", "potential", ".", "intermolecular", ".", "type", "==", "'custom'", ":", "for", "pair", "in", "simulation", ".", "potential", ".", "intermolecular", ".", "special_pairs", ":", "table", "=", "to_table", "(", "simulation", ".", "potential", ".", "intermolecular", ".", "pair_interaction", "(", "*", "pair", ")", ",", "simulation", ".", "cutoff", ")", "fname1", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'table_{}_{}.xvg'", ".", "format", "(", "pair", "[", "0", "]", ",", "pair", "[", "1", "]", ")", ")", "fname2", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'table_{}_{}.xvg'", ".", "format", "(", "pair", "[", "1", "]", ",", "pair", "[", "0", "]", ")", ")", "with", "open", "(", "fname1", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "table", ")", "with", "open", "(", "fname2", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "table", ")", "ndx", "=", "{", "'System'", ":", "np", ".", "arange", "(", "simulation", ".", "system", ".", "n_atoms", ",", "dtype", "=", "'int'", ")", "}", "for", "particle", "in", "simulation", ".", "potential", ".", "intermolecular", ".", "particles", ":", "idx", "=", "simulation", ".", "system", ".", "where", "(", "atom_name", "=", "particle", ")", "[", "'atom'", "]", ".", "nonzero", "(", ")", "[", "0", "]", "ndx", "[", "particle", "]", "=", "idx", "with", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'index.ndx'", ")", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "to_ndx", "(", "ndx", ")", ")", "# Parameter file", "mdpfile", "=", "to_mdp", "(", "simulation", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'grompp.mdp'", ")", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "mdpfile", ")", "# Topology file", "topfile", "=", "to_top", "(", "simulation", ".", "system", ",", "simulation", ".", "potential", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'topol.top'", ")", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "topfile", ")", "# Simulation file", "datafile", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "'conf.gro'", ")", ",", "'w'", ")", ".", "write", "(", "'system'", ",", "simulation", ".", "system", ")", "return", "directory" ]
37.176471
0.007194
def from_xarray(cls, arr: "xarray.Dataset") -> "Histogram1D": """Convert from xarray.Dataset Parameters ---------- arr: The data in xarray representation """ kwargs = {'frequencies': arr["frequencies"], 'binning': arr["bins"], 'errors2': arr["errors2"], 'overflow': arr.attrs["overflow"], 'underflow': arr.attrs["underflow"], 'keep_missed': arr.attrs["keep_missed"]} # TODO: Add stats return cls(**kwargs)
[ "def", "from_xarray", "(", "cls", ",", "arr", ":", "\"xarray.Dataset\"", ")", "->", "\"Histogram1D\"", ":", "kwargs", "=", "{", "'frequencies'", ":", "arr", "[", "\"frequencies\"", "]", ",", "'binning'", ":", "arr", "[", "\"bins\"", "]", ",", "'errors2'", ":", "arr", "[", "\"errors2\"", "]", ",", "'overflow'", ":", "arr", ".", "attrs", "[", "\"overflow\"", "]", ",", "'underflow'", ":", "arr", ".", "attrs", "[", "\"underflow\"", "]", ",", "'keep_missed'", ":", "arr", ".", "attrs", "[", "\"keep_missed\"", "]", "}", "# TODO: Add stats", "return", "cls", "(", "*", "*", "kwargs", ")" ]
36.4
0.003571
def normalized_mean_square_error(output, target, name="normalized_mean_squared_error_loss"): """Return the TensorFlow expression of normalized mean-square-error of two distributions. Parameters ---------- output : Tensor 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. target : Tensor The target distribution, format the same with `output`. name : str An optional name to attach to this function. """ # with tf.name_scope("normalized_mean_squared_error_loss"): if output.get_shape().ndims == 2: # [batch_size, n_feature] nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1)) nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1)) elif output.get_shape().ndims == 3: # [batch_size, w, h] nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2])) nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2])) elif output.get_shape().ndims == 4: # [batch_size, w, h, c] nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3])) nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3])) nmse = tf.reduce_mean(nmse_a / nmse_b, name=name) return nmse
[ "def", "normalized_mean_square_error", "(", "output", ",", "target", ",", "name", "=", "\"normalized_mean_squared_error_loss\"", ")", ":", "# with tf.name_scope(\"normalized_mean_squared_error_loss\"):", "if", "output", ".", "get_shape", "(", ")", ".", "ndims", "==", "2", ":", "# [batch_size, n_feature]", "nmse_a", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "squared_difference", "(", "output", ",", "target", ")", ",", "axis", "=", "1", ")", ")", "nmse_b", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "target", ")", ",", "axis", "=", "1", ")", ")", "elif", "output", ".", "get_shape", "(", ")", ".", "ndims", "==", "3", ":", "# [batch_size, w, h]", "nmse_a", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "squared_difference", "(", "output", ",", "target", ")", ",", "axis", "=", "[", "1", ",", "2", "]", ")", ")", "nmse_b", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "target", ")", ",", "axis", "=", "[", "1", ",", "2", "]", ")", ")", "elif", "output", ".", "get_shape", "(", ")", ".", "ndims", "==", "4", ":", "# [batch_size, w, h, c]", "nmse_a", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "squared_difference", "(", "output", ",", "target", ")", ",", "axis", "=", "[", "1", ",", "2", ",", "3", "]", ")", ")", "nmse_b", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "target", ")", ",", "axis", "=", "[", "1", ",", "2", ",", "3", "]", ")", ")", "nmse", "=", "tf", ".", "reduce_mean", "(", "nmse_a", "/", "nmse_b", ",", "name", "=", "name", ")", "return", "nmse" ]
52.6
0.005228
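A minimal TF1-style sketch (graph mode assumed); the tensors are illustrative:

import tensorflow as tf
out = tf.constant([[1.0, 2.0], [3.0, 4.0]])
tgt = tf.constant([[1.0, 2.0], [3.0, 5.0]])
loss = normalized_mean_square_error(out, tgt)
with tf.Session() as sess:
    print(sess.run(loss))  # mean of per-row ratios: (0 + 1/sqrt(34)) / 2 ≈ 0.0857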
def _increment_flaky_attribute(cls, test_item, flaky_attribute): """ Increments the value of an attribute on a flaky test. :param test_item: The test callable on which to set the attribute :type test_item: `callable` or :class:`nose.case.Test` or :class:`Function` :param flaky_attribute: The name of the attribute to set :type flaky_attribute: `unicode` """ cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
[ "def", "_increment_flaky_attribute", "(", "cls", ",", "test_item", ",", "flaky_attribute", ")", ":", "cls", ".", "_set_flaky_attribute", "(", "test_item", ",", "flaky_attribute", ",", "cls", ".", "_get_flaky_attribute", "(", "test_item", ",", "flaky_attribute", ")", "+", "1", ")" ]
40.642857
0.005155
def set_names(self, names): """ Change names of all columns in the frame. :param List[str] names: The list of new names for every column in the frame. """ assert_is_type(names, [str]) assert_satisfies(names, len(names) == self.ncol) self._ex = ExprNode("colnames=", self, range(self.ncol), names) # Update-in-place, but still lazy return self
[ "def", "set_names", "(", "self", ",", "names", ")", ":", "assert_is_type", "(", "names", ",", "[", "str", "]", ")", "assert_satisfies", "(", "names", ",", "len", "(", "names", ")", "==", "self", ".", "ncol", ")", "self", ".", "_ex", "=", "ExprNode", "(", "\"colnames=\"", ",", "self", ",", "range", "(", "self", ".", "ncol", ")", ",", "names", ")", "# Update-in-place, but still lazy", "return", "self" ]
39.9
0.009804
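A hedged H2O usage sketch (requires a reachable H2O cluster; the frame contents are illustrative):

import h2o
h2o.init()
fr = h2o.H2OFrame([[1, 2], [3, 4]])
fr.set_names(["a", "b"])
print(fr.names)  # ['a', 'b']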
def get_cds_time(days, msecs): """Get the datetime object of the time since epoch given in days and milliseconds of day """ return datetime(1958, 1, 1) + timedelta(days=float(days), milliseconds=float(msecs))
[ "def", "get_cds_time", "(", "days", ",", "msecs", ")", ":", "return", "datetime", "(", "1958", ",", "1", ",", "1", ")", "+", "timedelta", "(", "days", "=", "float", "(", "days", ")", ",", "milliseconds", "=", "float", "(", "msecs", ")", ")" ]
43.833333
0.003731
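A self-contained worked sketch of the epoch arithmetic above:

from datetime import datetime, timedelta
print(get_cds_time(0, 0))        # 1958-01-01 00:00:00
print(get_cds_time(1, 3600000))  # 1958-01-02 01:00:00 (one day plus one hour)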
def update_score_summary(sender, **kwargs): """ Listen for new Scores and update the relevant ScoreSummary. Args: sender: not used Kwargs: instance (Score): The score model whose save triggered this receiver. """ score = kwargs['instance'] try: score_summary = ScoreSummary.objects.get( student_item=score.student_item ) score_summary.latest = score # A score with the "reset" flag set will always replace the current highest score if score.reset: score_summary.highest = score # The conversion to a float may return None if points possible is zero # In Python, None is always less than an integer, so any score # with non-null points possible will take precedence. elif score.to_float() > score_summary.highest.to_float(): score_summary.highest = score score_summary.save() except ScoreSummary.DoesNotExist: ScoreSummary.objects.create( student_item=score.student_item, highest=score, latest=score, ) except DatabaseError as err: logger.exception( u"Error while updating score summary for student item {}" .format(score.student_item) )
[ "def", "update_score_summary", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "score", "=", "kwargs", "[", "'instance'", "]", "try", ":", "score_summary", "=", "ScoreSummary", ".", "objects", ".", "get", "(", "student_item", "=", "score", ".", "student_item", ")", "score_summary", ".", "latest", "=", "score", "# A score with the \"reset\" flag set will always replace the current highest score", "if", "score", ".", "reset", ":", "score_summary", ".", "highest", "=", "score", "# The conversion to a float may return None if points possible is zero", "# In Python, None is always less than an integer, so any score", "# with non-null points possible will take precedence.", "elif", "score", ".", "to_float", "(", ")", ">", "score_summary", ".", "highest", ".", "to_float", "(", ")", ":", "score_summary", ".", "highest", "=", "score", "score_summary", ".", "save", "(", ")", "except", "ScoreSummary", ".", "DoesNotExist", ":", "ScoreSummary", ".", "objects", ".", "create", "(", "student_item", "=", "score", ".", "student_item", ",", "highest", "=", "score", ",", "latest", "=", "score", ",", ")", "except", "DatabaseError", "as", "err", ":", "logger", ".", "exception", "(", "u\"Error while updating score summary for student item {}\"", ".", "format", "(", "score", ".", "student_item", ")", ")" ]
36.710526
0.003492
def scene_remove(frames): """parse a scene.rm message""" # "scene.rm" <scene_id> reader = MessageReader(frames) results = reader.string("command").uint32("scene_id").assert_end().get() if results.command != "scene.rm": raise MessageParserError("Command is not 'scene.rm'") return (results.scene_id,)
[ "def", "scene_remove", "(", "frames", ")", ":", "# \"scene.rm\" <scene_id>", "reader", "=", "MessageReader", "(", "frames", ")", "results", "=", "reader", ".", "string", "(", "\"command\"", ")", ".", "uint32", "(", "\"scene_id\"", ")", ".", "assert_end", "(", ")", ".", "get", "(", ")", "if", "results", ".", "command", "!=", "\"scene.rm\"", ":", "raise", "MessageParserError", "(", "\"Command is not 'scene.rm'\"", ")", "return", "(", "results", ".", "scene_id", ",", ")" ]
44
0.008357
def check_edge(self, name1, name2): ''' API: check_edge(self, name1, name2) Description: Return True if edge exists, False otherwise. Input: name1: name of the source node. name2: name of the sink node. Return: Returns True if edge exists, False otherwise. ''' if self.graph_type is DIRECTED_GRAPH: return (name1, name2) in self.edge_attr else: return ((name1, name2) in self.edge_attr or (name2, name1) in self.edge_attr)
[ "def", "check_edge", "(", "self", ",", "name1", ",", "name2", ")", ":", "if", "self", ".", "graph_type", "is", "DIRECTED_GRAPH", ":", "return", "(", "name1", ",", "name2", ")", "in", "self", ".", "edge_attr", "else", ":", "return", "(", "(", "name1", ",", "name2", ")", "in", "self", ".", "edge_attr", "or", "(", "name2", ",", "name1", ")", "in", "self", ".", "edge_attr", ")" ]
34.9375
0.003484
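A hedged usage sketch, assuming the enclosing Graph class exposes an add_edge method and the DIRECTED_GRAPH constant:

g = Graph(type=DIRECTED_GRAPH)  # hypothetical constructor
g.add_edge("a", "b")
g.check_edge("a", "b")  # True
g.check_edge("b", "a")  # False, since the graph is directed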
def plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York', ax=None, **kwargs): """ Plots a histogram of transaction times, binning the times into buckets of a given duration. Parameters ---------- transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in tears.create_full_tear_sheet. bin_minutes : float, optional Sizes of the bins in minutes, defaults to 5 minutes. tz : str, optional Time zone to plot against. Note that if the specified zone does not apply daylight savings, the distribution may be partially offset. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on. """ if ax is None: ax = plt.gca() txn_time = transactions.copy() txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz)) txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute) txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs() txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961)) txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes txn_time = txn_time.groupby(level=0).sum() txn_time['time_str'] = txn_time.index.map(lambda x: str(datetime.time(int(x / 60), x % 60))[:-3]) trade_value_sum = txn_time.trade_value.sum() txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs) ax.set_xlim(570, 960) ax.set_xticks(txn_time.index[::int(30 / bin_minutes)]) ax.set_xticklabels(txn_time.time_str[::int(30 / bin_minutes)]) ax.set_title('Transaction time distribution') ax.set_ylabel('Proportion') ax.set_xlabel('') return ax
[ "def", "plot_txn_time_hist", "(", "transactions", ",", "bin_minutes", "=", "5", ",", "tz", "=", "'America/New_York'", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "txn_time", "=", "transactions", ".", "copy", "(", ")", "txn_time", ".", "index", "=", "txn_time", ".", "index", ".", "tz_convert", "(", "pytz", ".", "timezone", "(", "tz", ")", ")", "txn_time", ".", "index", "=", "txn_time", ".", "index", ".", "map", "(", "lambda", "x", ":", "x", ".", "hour", "*", "60", "+", "x", ".", "minute", ")", "txn_time", "[", "'trade_value'", "]", "=", "(", "txn_time", ".", "amount", "*", "txn_time", ".", "price", ")", ".", "abs", "(", ")", "txn_time", "=", "txn_time", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", ".", "reindex", "(", "index", "=", "range", "(", "570", ",", "961", ")", ")", "txn_time", ".", "index", "=", "(", "txn_time", ".", "index", "/", "bin_minutes", ")", ".", "astype", "(", "int", ")", "*", "bin_minutes", "txn_time", "=", "txn_time", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", "txn_time", "[", "'time_str'", "]", "=", "txn_time", ".", "index", ".", "map", "(", "lambda", "x", ":", "str", "(", "datetime", ".", "time", "(", "int", "(", "x", "/", "60", ")", ",", "x", "%", "60", ")", ")", "[", ":", "-", "3", "]", ")", "trade_value_sum", "=", "txn_time", ".", "trade_value", ".", "sum", "(", ")", "txn_time", ".", "trade_value", "=", "txn_time", ".", "trade_value", ".", "fillna", "(", "0", ")", "/", "trade_value_sum", "ax", ".", "bar", "(", "txn_time", ".", "index", ",", "txn_time", ".", "trade_value", ",", "width", "=", "bin_minutes", ",", "*", "*", "kwargs", ")", "ax", ".", "set_xlim", "(", "570", ",", "960", ")", "ax", ".", "set_xticks", "(", "txn_time", ".", "index", "[", ":", ":", "int", "(", "30", "/", "bin_minutes", ")", "]", ")", "ax", ".", "set_xticklabels", "(", "txn_time", ".", "time_str", "[", ":", ":", "int", "(", "30", "/", "bin_minutes", ")", "]", ")", "ax", ".", "set_title", "(", "'Transaction time distribution'", ")", "ax", ".", "set_ylabel", "(", "'Proportion'", ")", "ax", ".", "set_xlabel", "(", "''", ")", "return", "ax" ]
36.571429
0.000476
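The binning step above maps each timestamp to its minute of day (570 = 9:30, 960 = 16:00) and floors it to the start of its bucket; the helper below reproduces just that arithmetic, with illustrative names:

def minute_bucket(hour, minute, bin_minutes=5):
    # minute-of-day floored to the start of its bin, e.g. 9:33 -> 570
    minute_of_day = hour * 60 + minute
    return (minute_of_day // bin_minutes) * bin_minutes

assert minute_bucket(9, 33) == 570
assert minute_bucket(16, 0, bin_minutes=30) == 960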
def _wrap(self, line: str) -> str: """ Returns an import wrapped to the specified line-length, if possible. """ wrap_mode = self.config['multi_line_output'] if len(line) > self.config['line_length'] and wrap_mode != WrapModes.NOQA: line_without_comment = line comment = None if '#' in line: line_without_comment, comment = line.split('#', 1) for splitter in ("import ", ".", "as "): exp = r"\b" + re.escape(splitter) + r"\b" if re.search(exp, line_without_comment) and not line_without_comment.strip().startswith(splitter): line_parts = re.split(exp, line_without_comment) if comment: line_parts[-1] = '{0}#{1}'.format(line_parts[-1], comment) next_line = [] while (len(line) + 2) > (self.config['wrap_length'] or self.config['line_length']) and line_parts: next_line.append(line_parts.pop()) line = splitter.join(line_parts) if not line: line = next_line.pop() cont_line = self._wrap(self.config['indent'] + splitter.join(next_line).lstrip()) if self.config['use_parentheses']: output = "{0}{1}({2}{3}{4}{5})".format( line, splitter, self.line_separator, cont_line, "," if self.config['include_trailing_comma'] else "", self.line_separator if wrap_mode in {WrapModes.VERTICAL_HANGING_INDENT, WrapModes.VERTICAL_GRID_GROUPED} else "") lines = output.split(self.line_separator) if self.config['comment_prefix'] in lines[-1] and lines[-1].endswith(')'): line, comment = lines[-1].split(self.config['comment_prefix'], 1) lines[-1] = line + ')' + self.config['comment_prefix'] + comment[:-1] return self.line_separator.join(lines) return "{0}{1}\\{2}{3}".format(line, splitter, self.line_separator, cont_line) elif len(line) > self.config['line_length'] and wrap_mode == settings.WrapModes.NOQA: if "# NOQA" not in line: return "{0}{1} NOQA".format(line, self.config['comment_prefix']) return line
[ "def", "_wrap", "(", "self", ",", "line", ":", "str", ")", "->", "str", ":", "wrap_mode", "=", "self", ".", "config", "[", "'multi_line_output'", "]", "if", "len", "(", "line", ")", ">", "self", ".", "config", "[", "'line_length'", "]", "and", "wrap_mode", "!=", "WrapModes", ".", "NOQA", ":", "line_without_comment", "=", "line", "comment", "=", "None", "if", "'#'", "in", "line", ":", "line_without_comment", ",", "comment", "=", "line", ".", "split", "(", "'#'", ",", "1", ")", "for", "splitter", "in", "(", "\"import \"", ",", "\".\"", ",", "\"as \"", ")", ":", "exp", "=", "r\"\\b\"", "+", "re", ".", "escape", "(", "splitter", ")", "+", "r\"\\b\"", "if", "re", ".", "search", "(", "exp", ",", "line_without_comment", ")", "and", "not", "line_without_comment", ".", "strip", "(", ")", ".", "startswith", "(", "splitter", ")", ":", "line_parts", "=", "re", ".", "split", "(", "exp", ",", "line_without_comment", ")", "if", "comment", ":", "line_parts", "[", "-", "1", "]", "=", "'{0}#{1}'", ".", "format", "(", "line_parts", "[", "-", "1", "]", ",", "comment", ")", "next_line", "=", "[", "]", "while", "(", "len", "(", "line", ")", "+", "2", ")", ">", "(", "self", ".", "config", "[", "'wrap_length'", "]", "or", "self", ".", "config", "[", "'line_length'", "]", ")", "and", "line_parts", ":", "next_line", ".", "append", "(", "line_parts", ".", "pop", "(", ")", ")", "line", "=", "splitter", ".", "join", "(", "line_parts", ")", "if", "not", "line", ":", "line", "=", "next_line", ".", "pop", "(", ")", "cont_line", "=", "self", ".", "_wrap", "(", "self", ".", "config", "[", "'indent'", "]", "+", "splitter", ".", "join", "(", "next_line", ")", ".", "lstrip", "(", ")", ")", "if", "self", ".", "config", "[", "'use_parentheses'", "]", ":", "output", "=", "\"{0}{1}({2}{3}{4}{5})\"", ".", "format", "(", "line", ",", "splitter", ",", "self", ".", "line_separator", ",", "cont_line", ",", "\",\"", "if", "self", ".", "config", "[", "'include_trailing_comma'", "]", "else", "\"\"", ",", "self", ".", "line_separator", "if", "wrap_mode", "in", "{", "WrapModes", ".", "VERTICAL_HANGING_INDENT", ",", "WrapModes", ".", "VERTICAL_GRID_GROUPED", "}", "else", "\"\"", ")", "lines", "=", "output", ".", "split", "(", "self", ".", "line_separator", ")", "if", "self", ".", "config", "[", "'comment_prefix'", "]", "in", "lines", "[", "-", "1", "]", "and", "lines", "[", "-", "1", "]", ".", "endswith", "(", "')'", ")", ":", "line", ",", "comment", "=", "lines", "[", "-", "1", "]", ".", "split", "(", "self", ".", "config", "[", "'comment_prefix'", "]", ",", "1", ")", "lines", "[", "-", "1", "]", "=", "line", "+", "')'", "+", "self", ".", "config", "[", "'comment_prefix'", "]", "+", "comment", "[", ":", "-", "1", "]", "return", "self", ".", "line_separator", ".", "join", "(", "lines", ")", "return", "\"{0}{1}\\\\{2}{3}\"", ".", "format", "(", "line", ",", "splitter", ",", "self", ".", "line_separator", ",", "cont_line", ")", "elif", "len", "(", "line", ")", ">", "self", ".", "config", "[", "'line_length'", "]", "and", "wrap_mode", "==", "settings", ".", "WrapModes", ".", "NOQA", ":", "if", "\"# NOQA\"", "not", "in", "line", ":", "return", "\"{0}{1} NOQA\"", ".", "format", "(", "line", ",", "self", ".", "config", "[", "'comment_prefix'", "]", ")", "return", "line" ]
60.071429
0.00663
def discard(self, key):
    """Remove an item from the set if it is a member.

    Usage::

        >>> s = OrderedSet([1, 2, 3])
        >>> s.discard(2)
        >>> s
        OrderedSet([1, 3])

    Remove an element from the ordered set while keeping the set ordered.
    """
    if key in self.map:
        key, prev, next_item = self.map.pop(key)
        prev[2] = next_item
        next_item[1] = prev
[ "def", "discard", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "map", ":", "key", ",", "prev", ",", "next_item", "=", "self", ".", "map", ".", "pop", "(", "key", ")", "prev", "[", "2", "]", "=", "next_item", "next_item", "[", "1", "]", "=", "prev" ]
25
0.012848
def connect(self, f, t): """Connect two existing vertices. Nothing happens if the vertices are already connected. """ if t not in self._vertices: raise KeyError(t) self._forwards[f].add(t) self._backwards[t].add(f)
[ "def", "connect", "(", "self", ",", "f", ",", "t", ")", ":", "if", "t", "not", "in", "self", ".", "_vertices", ":", "raise", "KeyError", "(", "t", ")", "self", ".", "_forwards", "[", "f", "]", ".", "add", "(", "t", ")", "self", ".", "_backwards", "[", "t", "]", ".", "add", "(", "f", ")" ]
29.666667
0.007273
def predict(self, test_data, custom_metric = None, custom_metric_func = None): """ Predict on a dataset. :param H2OFrame test_data: Data on which to make predictions. :param custom_metric: custom evaluation function defined as class reference, the class get uploaded into cluster :param custom_metric_func: custom evaluation function reference, e.g, result of upload_custom_metric :returns: A new H2OFrame of predictions. """ # Upload evaluation function into DKV if custom_metric: assert_satisfies(custom_metric_func, custom_metric_func is None, "The argument 'eval_func_ref' cannot be specified when eval_func is specified, ") eval_func_ref = h2o.upload_custom_metric(custom_metric) if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame") j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id), data = {'custom_metric_func': custom_metric_func}), self._model_json["algo"] + " prediction") j.poll() return h2o.get_frame(j.dest_key)
[ "def", "predict", "(", "self", ",", "test_data", ",", "custom_metric", "=", "None", ",", "custom_metric_func", "=", "None", ")", ":", "# Upload evaluation function into DKV", "if", "custom_metric", ":", "assert_satisfies", "(", "custom_metric_func", ",", "custom_metric_func", "is", "None", ",", "\"The argument 'eval_func_ref' cannot be specified when eval_func is specified, \"", ")", "eval_func_ref", "=", "h2o", ".", "upload_custom_metric", "(", "custom_metric", ")", "if", "not", "isinstance", "(", "test_data", ",", "h2o", ".", "H2OFrame", ")", ":", "raise", "ValueError", "(", "\"test_data must be an instance of H2OFrame\"", ")", "j", "=", "H2OJob", "(", "h2o", ".", "api", "(", "\"POST /4/Predictions/models/%s/frames/%s\"", "%", "(", "self", ".", "model_id", ",", "test_data", ".", "frame_id", ")", ",", "data", "=", "{", "'custom_metric_func'", ":", "custom_metric_func", "}", ")", ",", "self", ".", "_model_json", "[", "\"algo\"", "]", "+", "\" prediction\"", ")", "j", ".", "poll", "(", ")", "return", "h2o", ".", "get_frame", "(", "j", ".", "dest_key", ")" ]
56.809524
0.011542
def formfield_for_manytomany(self, db_field, request, **kwargs): """ Filter the disposable authors. """ if db_field.name == 'authors': kwargs['queryset'] = Author.objects.filter( Q(is_staff=True) | Q(entries__isnull=False) ).distinct() return super(EntryAdmin, self).formfield_for_manytomany( db_field, request, **kwargs)
[ "def", "formfield_for_manytomany", "(", "self", ",", "db_field", ",", "request", ",", "*", "*", "kwargs", ")", ":", "if", "db_field", ".", "name", "==", "'authors'", ":", "kwargs", "[", "'queryset'", "]", "=", "Author", ".", "objects", ".", "filter", "(", "Q", "(", "is_staff", "=", "True", ")", "|", "Q", "(", "entries__isnull", "=", "False", ")", ")", ".", "distinct", "(", ")", "return", "super", "(", "EntryAdmin", ",", "self", ")", ".", "formfield_for_manytomany", "(", "db_field", ",", "request", ",", "*", "*", "kwargs", ")" ]
37.090909
0.004785
def patch(*module_names):
    """apply monkey-patches to stdlib modules in-place

    imports the relevant modules and simply overwrites attributes on the
    module objects themselves. those attributes may be functions, classes or
    other attributes.

    valid arguments are:

    - __builtin__
    - Queue
    - fcntl
    - os
    - select
    - signal
    - socket
    - ssl
    - sys
    - thread
    - threading
    - time
    - zmq

    with no arguments, patches everything it can in all of the above modules

    :raises: ``ValueError`` if an unknown module name is provided

    .. note::
        lots more standard library modules can be made non-blocking by virtue
        of patching some combination of the above (because they only block by
        using blocking functions defined elsewhere). a few examples:

        - ``subprocess`` works cooperatively with ``os`` and ``select`` patched
        - ``httplib``, ``urllib`` and ``urllib2`` will all operate
          cooperatively with ``socket`` and ``ssl`` patched
    """
    if not module_names:
        module_names = _patchers.keys()

    log.info("monkey-patching in-place (%d modules)" % len(module_names))

    for module_name in module_names:
        if module_name not in _patchers:
            raise ValueError("'%s' is not greenhouse-patchable" % module_name)

    for module_name in module_names:
        if module_name in sys.modules:
            module = sys.modules[module_name]
        else:
            module = __import__(
                    module_name, {}, {}, module_name.rsplit(".", 1)[0])

        for attr, patch in _patchers[module_name].items():
            setattr(module, attr, patch)
[ "def", "patch", "(", "*", "module_names", ")", ":", "if", "not", "module_names", ":", "module_names", "=", "_patchers", ".", "keys", "(", ")", "log", ".", "info", "(", "\"monkey-patching in-place (%d modules)\"", "%", "len", "(", "module_names", ")", ")", "for", "module_name", "in", "module_names", ":", "if", "module_name", "not", "in", "_patchers", ":", "raise", "ValueError", "(", "\"'%s' is not greenhouse-patchable\"", "%", "module_name", ")", "for", "module_name", "in", "module_names", ":", "if", "module_name", "in", "sys", ".", "modules", ":", "module", "=", "sys", ".", "modules", "[", "module_name", "]", "else", ":", "module", "=", "__import__", "(", "module_name", ",", "{", "}", ",", "{", "}", ",", "module_name", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", ")", "for", "attr", ",", "patch", "in", "_patchers", "[", "module_name", "]", ".", "items", "(", ")", ":", "setattr", "(", "module", ",", "attr", ",", "patch", ")" ]
30.735849
0.000595
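A self-contained illustration of the same setattr-based patching loop, run against a fabricated module so nothing real is modified:

import sys
import types

target = types.ModuleType("fake_target")
target.sleep = lambda seconds: seconds
sys.modules["fake_target"] = target

# a patch table shaped like the _patchers mapping above
patch_table = {"fake_target": {"sleep": lambda seconds: 0}}
for module_name, attrs in patch_table.items():
    module = sys.modules[module_name]
    for attr, patched in attrs.items():
        setattr(module, attr, patched)

assert sys.modules["fake_target"].sleep(5) == 0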
def istextfile(fname, blocksize=512): """ Uses heuristics to guess whether the given file is text or binary, by reading a single block of bytes from the file. If more than 30% of the chars in the block are non-text, or there are NUL ('\x00') bytes in the block, assume this is a binary file. """ with open(fname, "rb") as fobj: block = fobj.read(blocksize) if not block: # An empty file is considered a valid text file return True if b"\x00" in block: # Files with null bytes are binary return False # Use translate's 'deletechars' argument to efficiently remove all # occurrences of TEXT_CHARS from the block nontext = block.translate(None, TEXT_CHARS) return float(len(nontext)) / len(block) <= 0.30
[ "def", "istextfile", "(", "fname", ",", "blocksize", "=", "512", ")", ":", "with", "open", "(", "fname", ",", "\"rb\"", ")", "as", "fobj", ":", "block", "=", "fobj", ".", "read", "(", "blocksize", ")", "if", "not", "block", ":", "# An empty file is considered a valid text file", "return", "True", "if", "b\"\\x00\"", "in", "block", ":", "# Files with null bytes are binary", "return", "False", "# Use translate's 'deletechars' argument to efficiently remove all", "# occurrences of TEXT_CHARS from the block", "nontext", "=", "block", ".", "translate", "(", "None", ",", "TEXT_CHARS", ")", "return", "float", "(", "len", "(", "nontext", ")", ")", "/", "len", "(", "block", ")", "<=", "0.30" ]
37.333333
0.001244
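TEXT_CHARS is defined outside the row above; with a plausible definition (printable ASCII plus common control bytes, an assumption here) the heuristic runs on its own:

TEXT_CHARS = bytes(range(32, 127)) + b"\n\r\t\f\b"  # assumed definition

def looks_like_text(block):
    if not block:
        return True
    if b"\x00" in block:
        return False
    # bytes.translate(None, delete) strips every byte listed in TEXT_CHARS
    nontext = block.translate(None, TEXT_CHARS)
    return float(len(nontext)) / len(block) <= 0.30

assert looks_like_text(b"plain old text\n")
assert not looks_like_text(b"\x00\x01\x02\x03")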
def command_help_long(self): """ Return command help for use in global parser usage string @TODO update to support self.current_indent from formatter """ indent = " " * 2 # replace with current_indent help = "Command must be one of:\n" for action_name in self.parser.valid_commands: help += "%s%-10s %-70s\n" % (indent, action_name, self.parser.commands[action_name].desc_short.capitalize()) help += '\nSee \'%s help COMMAND\' for help and information on a command' % self.parser.prog return help
[ "def", "command_help_long", "(", "self", ")", ":", "indent", "=", "\" \"", "*", "2", "# replace with current_indent", "help", "=", "\"Command must be one of:\\n\"", "for", "action_name", "in", "self", ".", "parser", ".", "valid_commands", ":", "help", "+=", "\"%s%-10s %-70s\\n\"", "%", "(", "indent", ",", "action_name", ",", "self", ".", "parser", ".", "commands", "[", "action_name", "]", ".", "desc_short", ".", "capitalize", "(", ")", ")", "help", "+=", "'\\nSee \\'%s help COMMAND\\' for help and information on a command'", "%", "self", ".", "parser", ".", "prog", "return", "help" ]
44.833333
0.010929
def lint_fileset(*dirnames, **kwargs): """Lints a group of files using a given rcfile. Keyword arguments are * ``rc_filename`` (``str``): The name of the Pylint config RC file. * ``description`` (``str``): A description of the files and configuration currently being run. Args: dirnames (tuple): Directories to run Pylint in. kwargs: The keyword arguments. The only keyword arguments are ``rc_filename`` and ``description`` and both are required. Raises: KeyError: If the wrong keyword arguments are used. """ try: rc_filename = kwargs['rc_filename'] description = kwargs['description'] if len(kwargs) != 2: raise KeyError except KeyError: raise KeyError(_LINT_FILESET_MSG) pylint_shell_command = ['pylint', '--rcfile', rc_filename] pylint_shell_command.extend(dirnames) status_code = subprocess.call(pylint_shell_command) if status_code != 0: error_message = _ERROR_TEMPLATE.format(description, status_code) print(error_message, file=sys.stderr) sys.exit(status_code)
[ "def", "lint_fileset", "(", "*", "dirnames", ",", "*", "*", "kwargs", ")", ":", "try", ":", "rc_filename", "=", "kwargs", "[", "'rc_filename'", "]", "description", "=", "kwargs", "[", "'description'", "]", "if", "len", "(", "kwargs", ")", "!=", "2", ":", "raise", "KeyError", "except", "KeyError", ":", "raise", "KeyError", "(", "_LINT_FILESET_MSG", ")", "pylint_shell_command", "=", "[", "'pylint'", ",", "'--rcfile'", ",", "rc_filename", "]", "pylint_shell_command", ".", "extend", "(", "dirnames", ")", "status_code", "=", "subprocess", ".", "call", "(", "pylint_shell_command", ")", "if", "status_code", "!=", "0", ":", "error_message", "=", "_ERROR_TEMPLATE", ".", "format", "(", "description", ",", "status_code", ")", "print", "(", "error_message", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "status_code", ")" ]
34.606061
0.000852
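The try/except around the kwargs lookup is a Python 2 compatible way to enforce required keyword-only arguments; the same shape in isolation (names are illustrative):

def run_linter(*dirnames, **kwargs):
    # both keywords are mandatory and no extras are tolerated
    try:
        rc_filename = kwargs["rc_filename"]
        description = kwargs["description"]
        if len(kwargs) != 2:
            raise KeyError
    except KeyError:
        raise KeyError("rc_filename and description are required")
    return ["pylint", "--rcfile", rc_filename] + list(dirnames)

expected = ["pylint", "--rcfile", ".pylintrc", "src"]
assert run_linter("src", rc_filename=".pylintrc", description="demo") == expected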
def getAsWkt(self, session): """ Retrieve the geometry in Well Known Text format. This method is a veneer for an SQL query that calls the ``ST_AsText()`` function on the geometry column. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: str: Well Known Text string representation of geometry. """ statement = """ SELECT ST_AsText({0}) AS wkt FROM {1} WHERE id={2}; """.format(self.geometryColumnName, self.tableName, self.id) result = session.execute(statement) for row in result: return row.wkt
[ "def", "getAsWkt", "(", "self", ",", "session", ")", ":", "statement", "=", "\"\"\"\n SELECT ST_AsText({0}) AS wkt\n FROM {1}\n WHERE id={2};\n \"\"\"", ".", "format", "(", "self", ".", "geometryColumnName", ",", "self", ".", "tableName", ",", "self", ".", "id", ")", "result", "=", "session", ".", "execute", "(", "statement", ")", "for", "row", "in", "result", ":", "return", "row", ".", "wkt" ]
33.416667
0.004848
def clone(self, choices): """ Make a copy of this parameter, supply different choices. @param choices: A sequence of L{Option} instances. @type choices: C{list} @rtype: L{ChoiceParameter} """ return self.__class__( self.name, choices, self.label, self.description, self.multiple, self.viewFactory)
[ "def", "clone", "(", "self", ",", "choices", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "name", ",", "choices", ",", "self", ".", "label", ",", "self", ".", "description", ",", "self", ".", "multiple", ",", "self", ".", "viewFactory", ")" ]
25.75
0.004684
def kernel_matrix_xX(svm_model, original_x, original_X): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel() ''' K = np.zeros((svm_model.data_num, svm_model.data_num)) for i in range(svm_model.data_num): for j in range(svm_model.data_num): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j]) elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j]) ''' return K
[ "def", "kernel_matrix_xX", "(", "svm_model", ",", "original_x", ",", "original_X", ")", ":", "if", "(", "svm_model", ".", "svm_kernel", "==", "'polynomial_kernel'", "or", "svm_model", ".", "svm_kernel", "==", "'soft_polynomial_kernel'", ")", ":", "K", "=", "(", "svm_model", ".", "zeta", "+", "svm_model", ".", "gamma", "*", "np", ".", "dot", "(", "original_x", ",", "original_X", ".", "T", ")", ")", "**", "svm_model", ".", "Q", "elif", "(", "svm_model", ".", "svm_kernel", "==", "'gaussian_kernel'", "or", "svm_model", ".", "svm_kernel", "==", "'soft_gaussian_kernel'", ")", ":", "K", "=", "np", ".", "exp", "(", "-", "svm_model", ".", "gamma", "*", "(", "cdist", "(", "original_X", ",", "np", ".", "atleast_2d", "(", "original_x", ")", ",", "'euclidean'", ")", ".", "T", "**", "2", ")", ")", ".", "ravel", "(", ")", "return", "K" ]
57.526316
0.009001
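Both kernel branches in isolation, assuming NumPy and SciPy are available; the default hyperparameters are illustrative:

import numpy as np
from scipy.spatial.distance import cdist

def polynomial_kernel(x, X, zeta=1.0, gamma=1.0, Q=2):
    return (zeta + gamma * np.dot(x, X.T)) ** Q

def gaussian_kernel(x, X, gamma=1.0):
    return np.exp(-gamma * (cdist(X, np.atleast_2d(x), 'euclidean').T ** 2)).ravel()

X = np.array([[0.0, 0.0], [1.0, 1.0]])
x = np.array([1.0, 1.0])
assert np.isclose(gaussian_kernel(x, X)[1], 1.0)  # zero distance to itself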
def long_banner(self):
    """Banner for IPython widgets with pylab message"""
    # Default banner
    try:
        from IPython.core.usage import quick_guide
    except Exception:
        quick_guide = ''
    banner_parts = [
        'Python %s\n' % self.interpreter_versions['python_version'],
        'Type "copyright", "credits" or "license" for more information.\n\n',
        'IPython %s -- An enhanced Interactive Python.\n' % \
        self.interpreter_versions['ipython_version'],
        quick_guide
    ]
    banner = ''.join(banner_parts)

    # Pylab additions
    pylab_o = self.additional_options['pylab']
    autoload_pylab_o = self.additional_options['autoload_pylab']
    mpl_installed = programs.is_module_installed('matplotlib')
    if mpl_installed and (pylab_o and autoload_pylab_o):
        pylab_message = ("\nPopulating the interactive namespace from "
                         "numpy and matplotlib\n")
        banner = banner + pylab_message

    # Sympy additions
    sympy_o = self.additional_options['sympy']
    if sympy_o:
        lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
        banner = banner + lines
    if (pylab_o and sympy_o):
        lines = """
Warning: pylab (numpy and matplotlib) and symbolic math (sympy) are both 
enabled at the same time. Some pylab functions are going to be overridden by 
the sympy module (e.g. plot)
"""
        banner = banner + lines
    return banner
[ "def", "long_banner", "(", "self", ")", ":", "# Default banner", "try", ":", "from", "IPython", ".", "core", ".", "usage", "import", "quick_guide", "except", "Exception", ":", "quick_guide", "=", "''", "banner_parts", "=", "[", "'Python %s\\n'", "%", "self", ".", "interpreter_versions", "[", "'python_version'", "]", ",", "'Type \"copyright\", \"credits\" or \"license\" for more information.\\n\\n'", ",", "'IPython %s -- An enhanced Interactive Python.\\n'", "%", "self", ".", "interpreter_versions", "[", "'ipython_version'", "]", ",", "quick_guide", "]", "banner", "=", "''", ".", "join", "(", "banner_parts", ")", "# Pylab additions", "pylab_o", "=", "self", ".", "additional_options", "[", "'pylab'", "]", "autoload_pylab_o", "=", "self", ".", "additional_options", "[", "'autoload_pylab'", "]", "mpl_installed", "=", "programs", ".", "is_module_installed", "(", "'matplotlib'", ")", "if", "mpl_installed", "and", "(", "pylab_o", "and", "autoload_pylab_o", ")", ":", "pylab_message", "=", "(", "\"\\nPopulating the interactive namespace from \"", "\"numpy and matplotlib\\n\"", ")", "banner", "=", "banner", "+", "pylab_message", "# Sympy additions", "sympy_o", "=", "self", ".", "additional_options", "[", "'sympy'", "]", "if", "sympy_o", ":", "lines", "=", "\"\"\"\nThese commands were executed:\n>>> from __future__ import division\n>>> from sympy import *\n>>> x, y, z, t = symbols('x y z t')\n>>> k, m, n = symbols('k m n', integer=True)\n>>> f, g, h = symbols('f g h', cls=Function)\n\"\"\"", "banner", "=", "banner", "+", "lines", "if", "(", "pylab_o", "and", "sympy_o", ")", ":", "lines", "=", "\"\"\"\nWarning: pylab (numpy and matplotlib) and symbolic math (sympy) are both \nenabled at the same time. Some pylab functions are going to be overrided by \nthe sympy module (e.g. plot)\n\"\"\"", "banner", "=", "banner", "+", "lines", "return", "banner" ]
37.577778
0.003458
def add_scalebar(ax, matchx=True, matchy=True, hidex=True, hidey=True, unitsx='', unitsy='', scalex=1, scaley=1, **kwargs): """ Add scalebars to axes Adds a set of scale bars to *ax*, matching the size to the ticks of the plot and optionally hiding the x and y axes - ax : the axis to attach ticks to - matchx,matchy : if True, set size of scale bars to spacing between ticks if False, size should be set using sizex and sizey params - hidex,hidey : if True, hide x-axis and y-axis of parent - **kwargs : additional arguments passed to AnchoredScaleBars Returns created scalebar object """ def f(axis): l = axis.get_majorticklocs() return len(l)>1 and (l[1] - l[0]) if matchx: kwargs['sizex'] = f(ax.xaxis) if matchy: kwargs['sizey'] = f(ax.yaxis) if 'labelx' not in kwargs or kwargs['labelx'] is None: kwargs['labelx'] = '%.3g %s'%(kwargs['sizex']*scalex,unitsx) if 'labely' not in kwargs or kwargs['labely'] is None: kwargs['labely'] = '%.3g %s'%(kwargs['sizey']*scaley,unitsy) sb = AnchoredScaleBar(ax.transData, **kwargs) ax.add_artist(sb) if hidex : ax.xaxis.set_visible(False) if hidey : ax.yaxis.set_visible(False) if hidex and hidey: ax.set_frame_on(False) return sb
[ "def", "add_scalebar", "(", "ax", ",", "matchx", "=", "True", ",", "matchy", "=", "True", ",", "hidex", "=", "True", ",", "hidey", "=", "True", ",", "unitsx", "=", "''", ",", "unitsy", "=", "''", ",", "scalex", "=", "1", ",", "scaley", "=", "1", ",", "*", "*", "kwargs", ")", ":", "def", "f", "(", "axis", ")", ":", "l", "=", "axis", ".", "get_majorticklocs", "(", ")", "return", "len", "(", "l", ")", ">", "1", "and", "(", "l", "[", "1", "]", "-", "l", "[", "0", "]", ")", "if", "matchx", ":", "kwargs", "[", "'sizex'", "]", "=", "f", "(", "ax", ".", "xaxis", ")", "if", "matchy", ":", "kwargs", "[", "'sizey'", "]", "=", "f", "(", "ax", ".", "yaxis", ")", "if", "'labelx'", "not", "in", "kwargs", "or", "kwargs", "[", "'labelx'", "]", "is", "None", ":", "kwargs", "[", "'labelx'", "]", "=", "'%.3g %s'", "%", "(", "kwargs", "[", "'sizex'", "]", "*", "scalex", ",", "unitsx", ")", "if", "'labely'", "not", "in", "kwargs", "or", "kwargs", "[", "'labely'", "]", "is", "None", ":", "kwargs", "[", "'labely'", "]", "=", "'%.3g %s'", "%", "(", "kwargs", "[", "'sizey'", "]", "*", "scaley", ",", "unitsy", ")", "sb", "=", "AnchoredScaleBar", "(", "ax", ".", "transData", ",", "*", "*", "kwargs", ")", "ax", ".", "add_artist", "(", "sb", ")", "if", "hidex", ":", "ax", ".", "xaxis", ".", "set_visible", "(", "False", ")", "if", "hidey", ":", "ax", ".", "yaxis", ".", "set_visible", "(", "False", ")", "if", "hidex", "and", "hidey", ":", "ax", ".", "set_frame_on", "(", "False", ")", "return", "sb" ]
41.645161
0.013626
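The inner helper f above leans on Python's short-circuit `and`, returning False when there are fewer than two ticks and the tick spacing otherwise; distilled:

def tick_step(locs):
    # False when there is no spacing to measure, otherwise the first gap
    return len(locs) > 1 and (locs[1] - locs[0])

assert tick_step([0, 10, 20]) == 10
assert tick_step([5]) is False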
def get_option_type(self, key, subkey): """Get the type of a particular option. :param str key: First identifier of the option. :param str subkey: Second identifier of the option. :return: :class:`str` - description of the type. :raise: :NotRegisteredError: If ``key`` or ``subkey`` do not define any option. """ key, subkey = _lower_keys(key, subkey) _entry_must_exist(self.gc, key, subkey) return self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]["type"].values[0]
[ "def", "get_option_type", "(", "self", ",", "key", ",", "subkey", ")", ":", "key", ",", "subkey", "=", "_lower_keys", "(", "key", ",", "subkey", ")", "_entry_must_exist", "(", "self", ".", "gc", ",", "key", ",", "subkey", ")", "return", "self", ".", "gc", "[", "(", "self", ".", "gc", "[", "\"k1\"", "]", "==", "key", ")", "&", "(", "self", ".", "gc", "[", "\"k2\"", "]", "==", "subkey", ")", "]", "[", "\"type\"", "]", ".", "values", "[", "0", "]" ]
34.529412
0.003317
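A runnable miniature of the same two-key boolean-mask lookup against a tiny stand-in frame (the column contents are invented for the demo):

import pandas as pd

gc = pd.DataFrame({"k1": ["io", "io"],
                   "k2": ["path", "mode"],
                   "type": ["str", "int"]})
value = gc[(gc["k1"] == "io") & (gc["k2"] == "mode")]["type"].values[0]
assert value == "int"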
def convert(self, value, param, ctx): """ Convert value to int. """ self.gandi = ctx.obj value = click.Choice.convert(self, value, param, ctx) return int(value)
[ "def", "convert", "(", "self", ",", "value", ",", "param", ",", "ctx", ")", ":", "self", ".", "gandi", "=", "ctx", ".", "obj", "value", "=", "click", ".", "Choice", ".", "convert", "(", "self", ",", "value", ",", "param", ",", "ctx", ")", "return", "int", "(", "value", ")" ]
37.6
0.010417
def derivative_factory(name): """Create derivative function for some ufuncs.""" if name == 'sin': def derivative(self, point): """Return the derivative operator.""" return MultiplyOperator(cos(self.domain)(point)) elif name == 'cos': def derivative(self, point): """Return the derivative operator.""" point = self.domain.element(point) return MultiplyOperator(-sin(self.domain)(point)) elif name == 'tan': def derivative(self, point): """Return the derivative operator.""" return MultiplyOperator(1 + self(point) ** 2) elif name == 'sqrt': def derivative(self, point): """Return the derivative operator.""" return MultiplyOperator(0.5 / self(point)) elif name == 'square': def derivative(self, point): """Return the derivative operator.""" point = self.domain.element(point) return MultiplyOperator(2.0 * point) elif name == 'log': def derivative(self, point): """Return the derivative operator.""" point = self.domain.element(point) return MultiplyOperator(1.0 / point) elif name == 'exp': def derivative(self, point): """Return the derivative operator.""" return MultiplyOperator(self(point)) elif name == 'reciprocal': def derivative(self, point): """Return the derivative operator.""" point = self.domain.element(point) return MultiplyOperator(-self(point) ** 2) elif name == 'sinh': def derivative(self, point): """Return the derivative operator.""" point = self.domain.element(point) return MultiplyOperator(cosh(self.domain)(point)) elif name == 'cosh': def derivative(self, point): """Return the derivative operator.""" return MultiplyOperator(sinh(self.domain)(point)) else: # Fallback to default derivative = Operator.derivative return derivative
[ "def", "derivative_factory", "(", "name", ")", ":", "if", "name", "==", "'sin'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "return", "MultiplyOperator", "(", "cos", "(", "self", ".", "domain", ")", "(", "point", ")", ")", "elif", "name", "==", "'cos'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "point", "=", "self", ".", "domain", ".", "element", "(", "point", ")", "return", "MultiplyOperator", "(", "-", "sin", "(", "self", ".", "domain", ")", "(", "point", ")", ")", "elif", "name", "==", "'tan'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "return", "MultiplyOperator", "(", "1", "+", "self", "(", "point", ")", "**", "2", ")", "elif", "name", "==", "'sqrt'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "return", "MultiplyOperator", "(", "0.5", "/", "self", "(", "point", ")", ")", "elif", "name", "==", "'square'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "point", "=", "self", ".", "domain", ".", "element", "(", "point", ")", "return", "MultiplyOperator", "(", "2.0", "*", "point", ")", "elif", "name", "==", "'log'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "point", "=", "self", ".", "domain", ".", "element", "(", "point", ")", "return", "MultiplyOperator", "(", "1.0", "/", "point", ")", "elif", "name", "==", "'exp'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "return", "MultiplyOperator", "(", "self", "(", "point", ")", ")", "elif", "name", "==", "'reciprocal'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "point", "=", "self", ".", "domain", ".", "element", "(", "point", ")", "return", "MultiplyOperator", "(", "-", "self", "(", "point", ")", "**", "2", ")", "elif", "name", "==", "'sinh'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "point", "=", "self", ".", "domain", ".", "element", "(", "point", ")", "return", "MultiplyOperator", "(", "cosh", "(", "self", ".", "domain", ")", "(", "point", ")", ")", "elif", "name", "==", "'cosh'", ":", "def", "derivative", "(", "self", ",", "point", ")", ":", "\"\"\"Return the derivative operator.\"\"\"", "return", "MultiplyOperator", "(", "sinh", "(", "self", ".", "domain", ")", "(", "point", ")", ")", "else", ":", "# Fallback to default", "derivative", "=", "Operator", ".", "derivative", "return", "derivative" ]
38.754717
0.000475
def _ensure_annotations(dashboard): '''Explode annotation_tags into annotations.''' if 'annotation_tags' not in dashboard: return tags = dashboard['annotation_tags'] annotations = { 'enable': True, 'list': [], } for tag in tags: annotations['list'].append({ 'datasource': "graphite", 'enable': False, 'iconColor': "#C0C6BE", 'iconSize': 13, 'lineColor': "rgba(255, 96, 96, 0.592157)", 'name': tag, 'showLine': True, 'tags': tag, }) del dashboard['annotation_tags'] dashboard['annotations'] = annotations
[ "def", "_ensure_annotations", "(", "dashboard", ")", ":", "if", "'annotation_tags'", "not", "in", "dashboard", ":", "return", "tags", "=", "dashboard", "[", "'annotation_tags'", "]", "annotations", "=", "{", "'enable'", ":", "True", ",", "'list'", ":", "[", "]", ",", "}", "for", "tag", "in", "tags", ":", "annotations", "[", "'list'", "]", ".", "append", "(", "{", "'datasource'", ":", "\"graphite\"", ",", "'enable'", ":", "False", ",", "'iconColor'", ":", "\"#C0C6BE\"", ",", "'iconSize'", ":", "13", ",", "'lineColor'", ":", "\"rgba(255, 96, 96, 0.592157)\"", ",", "'name'", ":", "tag", ",", "'showLine'", ":", "True", ",", "'tags'", ":", "tag", ",", "}", ")", "del", "dashboard", "[", "'annotation_tags'", "]", "dashboard", "[", "'annotations'", "]", "=", "annotations" ]
29.545455
0.00149
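The explode step in miniature; only a subset of the per-annotation keys from the row above is kept here:

dashboard = {"title": "demo", "annotation_tags": ["deploys", "alerts"]}
if "annotation_tags" in dashboard:
    tags = dashboard.pop("annotation_tags")
    dashboard["annotations"] = {
        "enable": True,
        "list": [{"name": t, "tags": t, "enable": False} for t in tags],
    }
names = [a["name"] for a in dashboard["annotations"]["list"]]
assert names == ["deploys", "alerts"]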
def find_file_match(folder_path, regex=''): """ Returns absolute paths of files that match the regex within folder_path and all its children folders. Note: The regex matching is done using the match function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings. """ outlist = [] for root, dirs, files in os.walk(folder_path): outlist.extend([os.path.join(root, f) for f in files if re.match(regex, f)]) return outlist
[ "def", "find_file_match", "(", "folder_path", ",", "regex", "=", "''", ")", ":", "outlist", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "outlist", ".", "extend", "(", "[", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "for", "f", "in", "files", "if", "re", ".", "match", "(", "regex", ",", "f", ")", "]", ")", "return", "outlist" ]
22
0.001742
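Because the row above filters with re.match, patterns are anchored at the start of the filename (and the default regex='' matches every file). A throwaway-directory smoke test of the same walk-and-match loop:

import os
import re
import tempfile

root = tempfile.mkdtemp()
open(os.path.join(root, "data_01.csv"), "w").close()
open(os.path.join(root, "notes.txt"), "w").close()

matches = [os.path.join(r, f)
           for r, _dirs, files in os.walk(root)
           for f in files if re.match(r"data_\d+", f)]
assert [os.path.basename(p) for p in matches] == ["data_01.csv"]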
def split_grads_by_size(threshold_size, device_grads):
  """Break gradients into two sets according to tensor size.

  Args:
    threshold_size: int size cutoff for small vs large tensor.
    device_grads: List of lists of (gradient, variable) tuples.  The outer
        list is over devices. The inner list is over individual gradients.
  Returns:
    small_grads: Subset of device_grads where shape is <= threshold_size
       elements.
    large_grads: Subset of device_grads where shape is > threshold_size
       elements.
  """
  small_grads = []
  large_grads = []
  for dl in device_grads:
    small_dl = []
    large_dl = []
    for (g, v) in dl:
      tensor_size = g.get_shape().num_elements()
      if tensor_size <= threshold_size:
        small_dl.append([g, v])
      else:
        large_dl.append([g, v])
    if small_dl:
      small_grads.append(small_dl)
    if large_dl:
      large_grads.append(large_dl)
  return small_grads, large_grads
[ "def", "split_grads_by_size", "(", "threshold_size", ",", "device_grads", ")", ":", "small_grads", "=", "[", "]", "large_grads", "=", "[", "]", "for", "dl", "in", "device_grads", ":", "small_dl", "=", "[", "]", "large_dl", "=", "[", "]", "for", "(", "g", ",", "v", ")", "in", "dl", ":", "tensor_size", "=", "g", ".", "get_shape", "(", ")", ".", "num_elements", "(", ")", "if", "tensor_size", "<=", "threshold_size", ":", "small_dl", ".", "append", "(", "[", "g", ",", "v", "]", ")", "else", ":", "large_dl", ".", "append", "(", "[", "g", ",", "v", "]", ")", "if", "small_dl", ":", "small_grads", ".", "append", "(", "small_dl", ")", "if", "large_dl", ":", "large_grads", ".", "append", "(", "large_dl", ")", "return", "small_grads", ",", "large_grads" ]
33.5
0.000967
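The size-based partition with the TensorFlow specifics stripped out; the `size` callable stands in for `g.get_shape().num_elements()`:

def split_by_size(threshold, device_grads, size):
    small_grads, large_grads = [], []
    for dl in device_grads:
        small_dl = [gv for gv in dl if size(gv[0]) <= threshold]
        large_dl = [gv for gv in dl if size(gv[0]) > threshold]
        if small_dl:
            small_grads.append(small_dl)
        if large_dl:
            large_grads.append(large_dl)
    return small_grads, large_grads

small, large = split_by_size(2, [[("g", "v"), ("ggg", "w")]], size=len)
assert small == [[("g", "v")]] and large == [[("ggg", "w")]]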
def mod_run_check(cmd_kwargs, onlyif, unless, creates): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) else return True ''' # never use VT for onlyif/unless executions because this will lead # to quote problems cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs['use_vt'] = False cmd_kwargs['bg'] = False if onlyif is not None: if isinstance(onlyif, six.string_types): cmd = __salt__['cmd.retcode'](onlyif, ignore_retcode=True, python_shell=True, **cmd_kwargs) log.debug('Last command return code: %s', cmd) if cmd != 0: return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} elif isinstance(onlyif, list): for entry in onlyif: cmd = __salt__['cmd.retcode'](entry, ignore_retcode=True, python_shell=True, **cmd_kwargs) log.debug('Last command \'%s\' return code: %s', entry, cmd) if cmd != 0: return {'comment': 'onlyif condition is false: {0}'.format(entry), 'skip_watch': True, 'result': True} elif not isinstance(onlyif, six.string_types): if not onlyif: log.debug('Command not run: onlyif did not evaluate to string_type') return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if isinstance(unless, six.string_types): cmd = __salt__['cmd.retcode'](unless, ignore_retcode=True, python_shell=True, **cmd_kwargs) log.debug('Last command return code: %s', cmd) if cmd == 0: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} elif isinstance(unless, list): cmd = [] for entry in unless: cmd.append(__salt__['cmd.retcode'](entry, ignore_retcode=True, python_shell=True, **cmd_kwargs)) log.debug('Last command return code: %s', cmd) if all([c == 0 for c in cmd]): return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} elif not isinstance(unless, six.string_types): if unless: log.debug('Command not run: unless did not evaluate to string_type') return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} if isinstance(creates, six.string_types) and os.path.exists(creates): return {'comment': '{0} exists'.format(creates), 'result': True} elif isinstance(creates, list) and all([ os.path.exists(path) for path in creates ]): return {'comment': 'All files in creates exist', 'result': True} # No reason to stop, return True return True
[ "def", "mod_run_check", "(", "cmd_kwargs", ",", "onlyif", ",", "unless", ",", "creates", ")", ":", "# never use VT for onlyif/unless executions because this will lead", "# to quote problems", "cmd_kwargs", "=", "copy", ".", "deepcopy", "(", "cmd_kwargs", ")", "cmd_kwargs", "[", "'use_vt'", "]", "=", "False", "cmd_kwargs", "[", "'bg'", "]", "=", "False", "if", "onlyif", "is", "not", "None", ":", "if", "isinstance", "(", "onlyif", ",", "six", ".", "string_types", ")", ":", "cmd", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "onlyif", ",", "ignore_retcode", "=", "True", ",", "python_shell", "=", "True", ",", "*", "*", "cmd_kwargs", ")", "log", ".", "debug", "(", "'Last command return code: %s'", ",", "cmd", ")", "if", "cmd", "!=", "0", ":", "return", "{", "'comment'", ":", "'onlyif condition is false'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "elif", "isinstance", "(", "onlyif", ",", "list", ")", ":", "for", "entry", "in", "onlyif", ":", "cmd", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "entry", ",", "ignore_retcode", "=", "True", ",", "python_shell", "=", "True", ",", "*", "*", "cmd_kwargs", ")", "log", ".", "debug", "(", "'Last command \\'%s\\' return code: %s'", ",", "entry", ",", "cmd", ")", "if", "cmd", "!=", "0", ":", "return", "{", "'comment'", ":", "'onlyif condition is false: {0}'", ".", "format", "(", "entry", ")", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "elif", "not", "isinstance", "(", "onlyif", ",", "six", ".", "string_types", ")", ":", "if", "not", "onlyif", ":", "log", ".", "debug", "(", "'Command not run: onlyif did not evaluate to string_type'", ")", "return", "{", "'comment'", ":", "'onlyif condition is false'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "if", "unless", "is", "not", "None", ":", "if", "isinstance", "(", "unless", ",", "six", ".", "string_types", ")", ":", "cmd", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "unless", ",", "ignore_retcode", "=", "True", ",", "python_shell", "=", "True", ",", "*", "*", "cmd_kwargs", ")", "log", ".", "debug", "(", "'Last command return code: %s'", ",", "cmd", ")", "if", "cmd", "==", "0", ":", "return", "{", "'comment'", ":", "'unless condition is true'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "elif", "isinstance", "(", "unless", ",", "list", ")", ":", "cmd", "=", "[", "]", "for", "entry", "in", "unless", ":", "cmd", ".", "append", "(", "__salt__", "[", "'cmd.retcode'", "]", "(", "entry", ",", "ignore_retcode", "=", "True", ",", "python_shell", "=", "True", ",", "*", "*", "cmd_kwargs", ")", ")", "log", ".", "debug", "(", "'Last command return code: %s'", ",", "cmd", ")", "if", "all", "(", "[", "c", "==", "0", "for", "c", "in", "cmd", "]", ")", ":", "return", "{", "'comment'", ":", "'unless condition is true'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "elif", "not", "isinstance", "(", "unless", ",", "six", ".", "string_types", ")", ":", "if", "unless", ":", "log", ".", "debug", "(", "'Command not run: unless did not evaluate to string_type'", ")", "return", "{", "'comment'", ":", "'unless condition is true'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "if", "isinstance", "(", "creates", ",", "six", ".", "string_types", ")", "and", "os", ".", "path", ".", "exists", "(", "creates", ")", ":", "return", "{", "'comment'", ":", "'{0} exists'", ".", "format", "(", "creates", ")", ",", "'result'", ":", "True", "}", "elif", "isinstance", "(", "creates", ",", 
"list", ")", "and", "all", "(", "[", "os", ".", "path", ".", "exists", "(", "path", ")", "for", "path", "in", "creates", "]", ")", ":", "return", "{", "'comment'", ":", "'All files in creates exist'", ",", "'result'", ":", "True", "}", "# No reason to stop, return True", "return", "True" ]
43.666667
0.002488
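The gating above reduces to two return-code rules: onlyif must exit 0 for the state to run, and unless exiting 0 skips it. A distilled sketch of just that decision:

def should_run(onlyif_rc=None, unless_rc=None):
    # onlyif: any nonzero exit means the condition is false -> skip
    if onlyif_rc is not None and onlyif_rc != 0:
        return False
    # unless: a zero exit means the condition is true -> skip
    if unless_rc is not None and unless_rc == 0:
        return False
    return True

assert should_run(onlyif_rc=0, unless_rc=1)
assert not should_run(onlyif_rc=1)
assert not should_run(unless_rc=0)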
def compute_grouped_sigma(ungrouped_sigma, group_matrix):
    '''
    Returns sigma for the groups of parameter values in the argument
    ungrouped_sigma where the group consists of no more than one parameter
    '''
    group_matrix = np.array(group_matrix, dtype=np.bool)

    sigma_masked = np.ma.masked_array(ungrouped_sigma * group_matrix.T,
                                      mask=(group_matrix ^ 1).T)
    sigma_agg = np.ma.mean(sigma_masked, axis=1)
    sigma = np.zeros(group_matrix.shape[1], dtype=np.float)
    np.copyto(sigma, sigma_agg, where=group_matrix.sum(axis=0) == 1)
    np.copyto(sigma, np.NAN, where=group_matrix.sum(axis=0) != 1)

    return sigma
[ "def", "compute_grouped_sigma", "(", "ungrouped_sigma", ",", "group_matrix", ")", ":", "group_matrix", "=", "np", ".", "array", "(", "group_matrix", ",", "dtype", "=", "np", ".", "bool", ")", "sigma_masked", "=", "np", ".", "ma", ".", "masked_array", "(", "ungrouped_sigma", "*", "group_matrix", ".", "T", ",", "mask", "=", "(", "group_matrix", "^", "1", ")", ".", "T", ")", "sigma_agg", "=", "np", ".", "ma", ".", "mean", "(", "sigma_masked", ",", "axis", "=", "1", ")", "sigma", "=", "np", ".", "zeros", "(", "group_matrix", ".", "shape", "[", "1", "]", ",", "dtype", "=", "np", ".", "float", ")", "np", ".", "copyto", "(", "sigma", ",", "sigma_agg", ",", "where", "=", "group_matrix", ".", "sum", "(", "axis", "=", "0", ")", "==", "1", ")", "np", ".", "copyto", "(", "sigma", ",", "np", ".", "NAN", ",", "where", "=", "group_matrix", ".", "sum", "(", "axis", "=", "0", ")", "!=", "1", ")", "return", "sigma" ]
39.058824
0.001471
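A small NumPy walk-through of the masked-mean step; the group matrix below is invented, and ~G.T plays the role of (group_matrix ^ 1).T:

import numpy as np

G = np.array([[1, 0], [0, 1], [0, 1]], dtype=bool)  # 3 parameters, 2 groups
sigma = np.array([0.2, 0.4, 0.6])
masked = np.ma.masked_array(sigma * G.T, mask=~G.T)
agg = np.ma.mean(masked, axis=1)   # per-group mean over member sigmas
assert np.isclose(agg[0], 0.2)     # singleton group keeps its sigma
# group 1 has two members, so the function above replaces it with NaN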
def parse_cli_args_into():
    """
    Creates the cli argparser for application specifics and AWS credentials.

    :return: A TemplateCommand namespace populated from the cli arguments
    :rtype: TemplateCommand
    """
    cli_arg_parser = argparse.ArgumentParser(parents=[
        AWSArgumentParser(default_role_session_name='aws-autodiscovery-templater')
    ])

    main_parser = cli_arg_parser.add_argument_group('AWS Autodiscovery Templater')

    # template_location = main_parser.add_mutually_exclusive_group(required=True)
    main_parser.add_argument('--template-path', help='Path to the template to fill variables into.', required=True)
    # template_location.add_argument('--template-s3-uri', help='S3 URI to the template to fill variables into.')

    # output = main_parser.add_mutually_exclusive_group(required=True)
    # output.add_argument('--destination-path',
    #                     help='Destination for the source once the template has been rendered.')
    main_parser.add_argument('--stdout', help='Prints a json object containing the retrieved resources',
                             action='store_true', default=False, required=True)

    main_parser.add_argument('--vpc-ids', help=('Optionally restrict the filtering to a particular list of IPs. '
                                                'Comma separated list of vpc-ids.'),
                             action='store_true', default=None)
    main_parser.add_argument('--filter', help=('Filter for ec2 instances as defined in http://boto3.readthedocs.org/en/latest/'
                                               'reference/services/ec2.html#EC2.Client.describe_instances'),
                             default=None, nargs='+')
    main_parser.add_argument('--filter-empty', help=('By default, missing values are returned as null to keep private/public ip/hostname '
                                                     'sets of equal length. This removes null values from the filter'),
                             action='store_true', default=False)

    return cli_arg_parser.parse_args(namespace=TemplateCommand())
[ "def", "parse_cli_args_into", "(", ")", ":", "cli_arg_parser", "=", "argparse", ".", "ArgumentParser", "(", "parents", "=", "[", "AWSArgumentParser", "(", "default_role_session_name", "=", "'aws-autodiscovery-templater'", ")", "]", ")", "main_parser", "=", "cli_arg_parser", ".", "add_argument_group", "(", "'AWS Autodiscovery Templater'", ")", "# template_location = main_parser.add_mutually_exclusive_group(required=True)", "main_parser", ".", "add_argument", "(", "'--template-path'", ",", "help", "=", "'Path to the template to fill variables into.'", ",", "required", "=", "True", ")", "# template_location.add_argument('--template-s3-uri', help='S3 URI to the template to fill variables into.')", "# output = main_parser.add_mutually_exclusive_group(required=True)", "# output.add_argument('--destination-path',", "# help='Destination for the source once the template has been rendered.')", "main_parser", ".", "add_argument", "(", "'--stdout'", ",", "help", "=", "'Prints a json object containing the retrieves resources'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "True", ")", "main_parser", ".", "add_argument", "(", "'--vpc-ids'", ",", "help", "=", "(", "'Optionally restrict the filtering to a particular list of IPs. '", "'Comma seperated list of vpc-ids.'", ")", ",", "action", "=", "'store_true'", ",", "default", "=", "None", ")", "main_parser", ".", "add_argument", "(", "'--filter'", ",", "help", "=", "(", "'Filter for ec2 instances as defined in http://boto3.readthedocs.org/en/latest/'", "'reference/services/ec2.html#EC2.Client.describe_instances'", ")", ",", "default", "=", "None", ",", "nargs", "=", "'+'", ")", "main_parser", ".", "add_argument", "(", "'--filter-empty'", ",", "help", "=", "(", "'By default, missing values are returned as null to keep private/public ip/hostname'", "'sets of equal length. This removes null values from the filter'", ")", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "return", "cli_arg_parser", ".", "parse_args", "(", "namespace", "=", "TemplateCommand", "(", ")", ")" ]
56.710526
0.005931
def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
           dangles='some', sodium=1.0, magnesium=0.0):
    '''Calculate the free energy of a given sequence structure. Runs the
    \'energy\' command.

    :param strand: Strand on which to run energy. Strands must be either
                   coral.DNA or coral.RNA.
    :type strand: coral.DNA or coral.RNA
    :param dotparens: The structure in dotparens notation.
    :type dotparens: str
    :param temp: Temperature setting for the computation. Negative values
                 are not allowed.
    :type temp: float
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :param material: The material setting to use in the computation. If set
                     to None (the default), the material type is inferred
                     from the strands. Other settings available: 'dna' for
                     DNA parameters, 'rna' for RNA (1995) parameters, and
                     'rna1999' for the RNA 1999 parameters.
    :type material: str
    :param dangles: How to treat dangles in the computation. From the
                    user guide: For \'none\': Dangle energies are ignored.
                    For \'some\': \'A dangle energy is incorporated for
                    each unpaired base flanking a duplex\'. For 'all': all
                    dangle energy is considered.
    :type dangles: str
    :param sodium: Sodium concentration in solution (molar), only applies
                   to DNA.
    :type sodium: float
    :param magnesium: Magnesium concentration in solution (molar), only
                      applies to DNA.
    :type magnesium: float
    :returns: The free energy of the sequence with the specified secondary
              structure.
    :rtype: float

    '''
    # Set the material (will be used to set command material flag)
    material = self._set_material(strand, material)

    # Set up command flags
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                   magnesium, multi=False)

    # Set up the input file and run the command. Note: no STDOUT
    lines = [str(strand), dotparens]
    stdout = self._run('energy', cmd_args, lines).split('\n')

    # Return the energy
    return float(stdout[-2])
[ "def", "energy", "(", "self", ",", "strand", ",", "dotparens", ",", "temp", "=", "37.0", ",", "pseudo", "=", "False", ",", "material", "=", "None", ",", "dangles", "=", "'some'", ",", "sodium", "=", "1.0", ",", "magnesium", "=", "0.0", ")", ":", "# Set the material (will be used to set command material flag)", "material", "=", "self", ".", "_set_material", "(", "strand", ",", "material", ")", "# Set up command flags", "cmd_args", "=", "self", ".", "_prep_cmd_args", "(", "temp", ",", "dangles", ",", "material", ",", "pseudo", ",", "sodium", ",", "magnesium", ",", "multi", "=", "False", ")", "# Set up the input file and run the command. Note: no STDOUT", "lines", "=", "[", "str", "(", "strand", ")", ",", "dotparens", "]", "stdout", "=", "self", ".", "_run", "(", "'energy'", ",", "cmd_args", ",", "lines", ")", ".", "split", "(", "'\\n'", ")", "# Return the energy", "return", "float", "(", "stdout", "[", "-", "2", "]", ")" ]
47.627451
0.00121
def finalize(self, **kwargs):
    """
    Finalize the drawing by adding a title and legend, and removing the
    axes objects that do not convey information about TSNE.
    """
    self.set_title(
        "TSNE Projection of {} Documents".format(self.n_instances_)
    )

    # Remove the ticks
    self.ax.set_yticks([])
    self.ax.set_xticks([])

    # Add the legend outside of the figure box.
    if not all(self.classes_ == np.array([self.NULL_CLASS])):
        box = self.ax.get_position()
        self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        manual_legend(
            self, self.classes_, self.color_values_,
            loc='center left', bbox_to_anchor=(1, 0.5)
        )
[ "def", "finalize", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "set_title", "(", "\"TSNE Projection of {} Documents\"", ".", "format", "(", "self", ".", "n_instances_", ")", ")", "# Remove the ticks", "self", ".", "ax", ".", "set_yticks", "(", "[", "]", ")", "self", ".", "ax", ".", "set_xticks", "(", "[", "]", ")", "# Add the legend outside of the figure box.", "if", "not", "all", "(", "self", ".", "classes_", "==", "np", ".", "array", "(", "[", "self", ".", "NULL_CLASS", "]", ")", ")", ":", "box", "=", "self", ".", "ax", ".", "get_position", "(", ")", "self", ".", "ax", ".", "set_position", "(", "[", "box", ".", "x0", ",", "box", ".", "y0", ",", "box", ".", "width", "*", "0.8", ",", "box", ".", "height", "]", ")", "manual_legend", "(", "self", ",", "self", ".", "classes_", ",", "self", ".", "color_values_", ",", "loc", "=", "'center left'", ",", "bbox_to_anchor", "=", "(", "1", ",", "0.5", ")", ")" ]
36.47619
0.002545
def H_n(self, n, x):
    """
    Constructs the Hermite polynomial of order n at position x (dimensionless)
    :param n: The n-th basis function.
    :type n: int.
    :param x: 1-dim position (dimensionless)
    :type x: float or numpy array.
    :returns:  array--  H_n(x).
    :raises: AttributeError, KeyError
    """
    if not self._interpolation:
        n_array = np.zeros(n+1)
        n_array[n] = 1
        return self.hermval(x, n_array, tensor=False)  # attention, this routine calculates every single hermite polynomial and multiplies it with zero (except the right one)
    else:
        return np.interp(x, self.x_grid, self.H_interp[n])
[ "def", "H_n", "(", "self", ",", "n", ",", "x", ")", ":", "if", "not", "self", ".", "_interpolation", ":", "n_array", "=", "np", ".", "zeros", "(", "n", "+", "1", ")", "n_array", "[", "n", "]", "=", "1", "return", "self", ".", "hermval", "(", "x", ",", "n_array", ",", "tensor", "=", "False", ")", "# attention, this routine calculates every single hermite polynomial and multiplies it with zero (exept the right one)", "else", ":", "return", "np", ".", "interp", "(", "x", ",", "self", ".", "x_grid", ",", "self", ".", "H_interp", "[", "n", "]", ")" ]
41.411765
0.005556
def pre_release(version): """Generates new docs, release announcements and creates a local tag.""" announce(version) regen() changelog(version, write_out=True) fix_formatting() msg = "Preparing release version {}".format(version) check_call(["git", "commit", "-a", "-m", msg]) print() print(f"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!") print() print(f"Please push your branch and open a PR.")
[ "def", "pre_release", "(", "version", ")", ":", "announce", "(", "version", ")", "regen", "(", ")", "changelog", "(", "version", ",", "write_out", "=", "True", ")", "fix_formatting", "(", ")", "msg", "=", "\"Preparing release version {}\"", ".", "format", "(", "version", ")", "check_call", "(", "[", "\"git\"", ",", "\"commit\"", ",", "\"-a\"", ",", "\"-m\"", ",", "msg", "]", ")", "print", "(", ")", "print", "(", "f\"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!\"", ")", "print", "(", ")", "print", "(", "f\"Please push your branch and open a PR.\"", ")" ]
31.428571
0.002208
def parse_rst_params(doc): """ Parse a reStructuredText docstring and return a dictionary with parameter names and descriptions. >>> doc = ''' ... :param foo: foo parameter ... foo parameter ... ... :param bar: bar parameter ... :param baz: baz parameter ... baz parameter ... baz parameter ... Some text. ... ''' >>> params = parse_rst_params(doc) >>> params['foo'] 'foo parameter foo parameter' >>> params['bar'] 'bar parameter' >>> params['baz'] 'baz parameter baz parameter baz parameter' """ param_re = re.compile(r"""^([ \t]*):param\ (?P<param>\w+):\ (?P<body>.*\n(\1[ \t]+\w.*\n)*)""", re.MULTILINE|re.VERBOSE) params = {} for match in param_re.finditer(doc): parts = match.groupdict() body_lines = parts['body'].strip().split('\n') params[parts['param']] = ' '.join(s.strip() for s in body_lines) return params
[ "def", "parse_rst_params", "(", "doc", ")", ":", "param_re", "=", "re", ".", "compile", "(", "r\"\"\"^([ \\t]*):param\\ \n (?P<param>\\w+):\\ \n (?P<body>.*\\n(\\1[ \\t]+\\w.*\\n)*)\"\"\"", ",", "re", ".", "MULTILINE", "|", "re", ".", "VERBOSE", ")", "params", "=", "{", "}", "for", "match", "in", "param_re", ".", "finditer", "(", "doc", ")", ":", "parts", "=", "match", ".", "groupdict", "(", ")", "body_lines", "=", "parts", "[", "'body'", "]", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "params", "[", "parts", "[", "'param'", "]", "]", "=", "' '", ".", "join", "(", "s", ".", "strip", "(", ")", "for", "s", "in", "body_lines", ")", "return", "params" ]
30.235294
0.00754
def render_flow(self, data): """render the OpenDocument with the user data @param data: the input stream of user data. This should be a dictionary mapping, keys being the values accessible to your report. @type data: dictionary """ self.render_tree(data) # then reconstruct a new ODT document with the generated content for status in self.__save_output(): yield status
[ "def", "render_flow", "(", "self", ",", "data", ")", ":", "self", ".", "render_tree", "(", "data", ")", "# then reconstruct a new ODT document with the generated content", "for", "status", "in", "self", ".", "__save_output", "(", ")", ":", "yield", "status" ]
33.461538
0.004474
def dgrep(pat, *opts):
    """Return grep() on dir()+dir(__builtins__).

    A very common use of grep() when working interactively."""

    return grep(pat, dir(__main__) + dir(__main__.__builtins__), *opts)
[ "def", "dgrep", "(", "pat", ",", "*", "opts", ")", ":", "return", "grep", "(", "pat", ",", "dir", "(", "__main__", ")", "+", "dir", "(", "__main__", ".", "__builtins__", ")", ",", "*", "opts", ")" ]
33
0.019704
def filter_savitzky_golay(y, window_size=5, order=2, deriv=0, rate=1):
    """Smooth (and optionally differentiate) with a Savitzky-Golay filter."""
    try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError('window_size and order must be integers')
    if window_size % 2 != 1 or window_size < 1:
        raise ValueError('window_size must be a positive odd number')
    if window_size < order + 2:
        raise ValueError('window_size is too small for the polynomial order')
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2

    # precompute limits
    minimum = np.min(y)
    maximum = np.max(y)
    # precompute coefficients
    b = np.mat([
        [k ** i for i in order_range]
        for k in range(-half_window, half_window + 1)
    ])
    m = np.linalg.pinv(b).A[deriv] * rate ** deriv * math.factorial(deriv)
    # pad the signal at the extremes with values taken from the original signal
    firstvals = y[0] - np.abs(y[1:half_window+1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.clip(
        np.convolve(m[::-1], y, mode='valid'),
        minimum,
        maximum,
    )
[ "def", "filter_savitzky_golay", "(", "y", ",", "window_size", "=", "5", ",", "order", "=", "2", ",", "deriv", "=", "0", ",", "rate", "=", "1", ")", ":", "try", ":", "window_size", "=", "np", ".", "abs", "(", "np", ".", "int", "(", "window_size", ")", ")", "order", "=", "np", ".", "abs", "(", "np", ".", "int", "(", "order", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'window_size and order must be integers'", ")", "if", "window_size", "%", "2", "!=", "1", "or", "window_size", "<", "1", ":", "raise", "ValueError", "(", "'window_size size must be a positive odd number'", ")", "if", "window_size", "<", "order", "+", "2", ":", "raise", "ValueError", "(", "'window_size is too small for the polynomials order'", ")", "order_range", "=", "range", "(", "order", "+", "1", ")", "half_window", "=", "(", "window_size", "-", "1", ")", "//", "2", "# precompute limits", "minimum", "=", "np", ".", "min", "(", "y", ")", "maximum", "=", "np", ".", "max", "(", "y", ")", "# precompute coefficients", "b", "=", "np", ".", "mat", "(", "[", "[", "k", "**", "i", "for", "i", "in", "order_range", "]", "for", "k", "in", "range", "(", "-", "half_window", ",", "half_window", "+", "1", ")", "]", ")", "m", "=", "np", ".", "linalg", ".", "pinv", "(", "b", ")", ".", "A", "[", "deriv", "]", "*", "rate", "**", "deriv", "*", "math", ".", "factorial", "(", "deriv", ")", "# pad the signal at the extremes with values taken from the original signal", "firstvals", "=", "y", "[", "0", "]", "-", "np", ".", "abs", "(", "y", "[", "1", ":", "half_window", "+", "1", "]", "[", ":", ":", "-", "1", "]", "-", "y", "[", "0", "]", ")", "lastvals", "=", "y", "[", "-", "1", "]", "+", "np", ".", "abs", "(", "y", "[", "-", "half_window", "-", "1", ":", "-", "1", "]", "[", ":", ":", "-", "1", "]", "-", "y", "[", "-", "1", "]", ")", "y", "=", "np", ".", "concatenate", "(", "(", "firstvals", ",", "y", ",", "lastvals", ")", ")", "return", "np", ".", "clip", "(", "np", ".", "convolve", "(", "m", "[", ":", ":", "-", "1", "]", ",", "y", ",", "mode", "=", "'valid'", ")", ",", "minimum", ",", "maximum", ",", ")" ]
37.441176
0.000766
def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride): """Bottleneck block with identity short-cut for ResNet v1. Args: cnn: the network to append bottleneck blocks. depth: the number of output filters for this bottleneck block. depth_bottleneck: the number of bottleneck filters for this block. stride: Stride used in the first layer of the bottleneck block. """ input_layer = cnn.top_layer in_size = cnn.top_size name_key = "resnet_v1" name = name_key + str(cnn.counts[name_key]) cnn.counts[name_key] += 1 with tf.variable_scope(name): if depth == in_size: if stride == 1: shortcut = input_layer else: shortcut = cnn.apool( 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size) else: shortcut = cnn.conv( depth, 1, 1, stride, stride, activation=None, use_batch_norm=True, input_layer=input_layer, num_channels_in=in_size, bias=None) cnn.conv( depth_bottleneck, 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size, use_batch_norm=True, bias=None) cnn.conv( depth_bottleneck, 3, 3, 1, 1, mode="SAME_RESNET", use_batch_norm=True, bias=None) res = cnn.conv( depth, 1, 1, 1, 1, activation=None, use_batch_norm=True, bias=None) output = tf.nn.relu(shortcut + res) cnn.top_layer = output cnn.top_size = depth
[ "def", "bottleneck_block_v1", "(", "cnn", ",", "depth", ",", "depth_bottleneck", ",", "stride", ")", ":", "input_layer", "=", "cnn", ".", "top_layer", "in_size", "=", "cnn", ".", "top_size", "name_key", "=", "\"resnet_v1\"", "name", "=", "name_key", "+", "str", "(", "cnn", ".", "counts", "[", "name_key", "]", ")", "cnn", ".", "counts", "[", "name_key", "]", "+=", "1", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "if", "depth", "==", "in_size", ":", "if", "stride", "==", "1", ":", "shortcut", "=", "input_layer", "else", ":", "shortcut", "=", "cnn", ".", "apool", "(", "1", ",", "1", ",", "stride", ",", "stride", ",", "input_layer", "=", "input_layer", ",", "num_channels_in", "=", "in_size", ")", "else", ":", "shortcut", "=", "cnn", ".", "conv", "(", "depth", ",", "1", ",", "1", ",", "stride", ",", "stride", ",", "activation", "=", "None", ",", "use_batch_norm", "=", "True", ",", "input_layer", "=", "input_layer", ",", "num_channels_in", "=", "in_size", ",", "bias", "=", "None", ")", "cnn", ".", "conv", "(", "depth_bottleneck", ",", "1", ",", "1", ",", "stride", ",", "stride", ",", "input_layer", "=", "input_layer", ",", "num_channels_in", "=", "in_size", ",", "use_batch_norm", "=", "True", ",", "bias", "=", "None", ")", "cnn", ".", "conv", "(", "depth_bottleneck", ",", "3", ",", "3", ",", "1", ",", "1", ",", "mode", "=", "\"SAME_RESNET\"", ",", "use_batch_norm", "=", "True", ",", "bias", "=", "None", ")", "res", "=", "cnn", ".", "conv", "(", "depth", ",", "1", ",", "1", ",", "1", ",", "1", ",", "activation", "=", "None", ",", "use_batch_norm", "=", "True", ",", "bias", "=", "None", ")", "output", "=", "tf", ".", "nn", ".", "relu", "(", "shortcut", "+", "res", ")", "cnn", ".", "top_layer", "=", "output", "cnn", ".", "top_size", "=", "depth" ]
29.587302
0.000519
def add_info_widget(self, widget):
    '''
    Add a widget to the right panel of the screen, bumping its index
    past any already-occupied slot
    '''
    index = widget.get_index()
    while index in self.info_widgets:
        index += 1
    self.info_widgets[index] = widget
[ "def", "add_info_widget", "(", "self", ",", "widget", ")", ":", "index", "=", "widget", ".", "get_index", "(", ")", "while", "index", "in", "self", ".", "info_widgets", ".", "keys", "(", ")", ":", "index", "+=", "1", "self", ".", "info_widgets", "[", "widget", ".", "get_index", "(", ")", "]", "=", "widget" ]
33.5
0.007273
def set(self, key, val):
    """
    Return a new PMap with key and val inserted.

    >>> m1 = m(a=1, b=2)
    >>> m2 = m1.set('a', 3)
    >>> m3 = m1.set('c', 4)
    >>> m1
    pmap({'a': 1, 'b': 2})
    >>> m2
    pmap({'a': 3, 'b': 2})
    >>> m3
    pmap({'a': 1, 'c': 4, 'b': 2})
    """
    return self.evolver().set(key, val).persistent()
[ "def", "set", "(", "self", ",", "key", ",", "val", ")", ":", "return", "self", ".", "evolver", "(", ")", ".", "set", "(", "key", ",", "val", ")", ".", "persistent", "(", ")" ]
25.6
0.005025
def on_unexpected_error(e): # pragma: no cover """Catch-all error handler Unexpected errors will be handled by this function. """ sys.stderr.write('Unexpected error: {} ({})\n'.format( str(e), e.__class__.__name__)) sys.stderr.write('See file slam_error.log for additional details.\n') sys.exit(1)
[ "def", "on_unexpected_error", "(", "e", ")", ":", "# pragma: no cover", "sys", ".", "stderr", ".", "write", "(", "'Unexpected error: {} ({})\\n'", ".", "format", "(", "str", "(", "e", ")", ",", "e", ".", "__class__", ".", "__name__", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'See file slam_error.log for additional details.\\n'", ")", "sys", ".", "exit", "(", "1", ")" ]
35.888889
0.003021
def make_node(cls, id_, arglist, lineno): """ Creates an array access. A(x1, x2, ..., xn) """ assert isinstance(arglist, SymbolARGLIST) variable = gl.SYMBOL_TABLE.access_array(id_, lineno) if variable is None: return None if len(variable.bounds) != len(arglist): syntax_error(lineno, "Array '%s' has %i dimensions, not %i" % (variable.name, len(variable.bounds), len(arglist))) return None # Checks for array subscript range if the subscript is constant # e.g. A(1) is a constant subscript access for i, b in zip(arglist, variable.bounds): btype = gl.SYMBOL_TABLE.basic_types[gl.BOUND_TYPE] lower_bound = NUMBER(b.lower, type_=btype, lineno=lineno) i.value = BINARY.make_node('MINUS', TYPECAST.make_node(btype, i.value, lineno), lower_bound, lineno, func=lambda x, y: x - y, type_=btype) if is_number(i.value) or is_const(i.value): val = i.value.value if val < 0 or val > b.count: warning(lineno, "Array '%s' subscript out of range" % id_) # Returns the variable entry and the node return cls(variable, arglist, lineno)
[ "def", "make_node", "(", "cls", ",", "id_", ",", "arglist", ",", "lineno", ")", ":", "assert", "isinstance", "(", "arglist", ",", "SymbolARGLIST", ")", "variable", "=", "gl", ".", "SYMBOL_TABLE", ".", "access_array", "(", "id_", ",", "lineno", ")", "if", "variable", "is", "None", ":", "return", "None", "if", "len", "(", "variable", ".", "bounds", ")", "!=", "len", "(", "arglist", ")", ":", "syntax_error", "(", "lineno", ",", "\"Array '%s' has %i dimensions, not %i\"", "%", "(", "variable", ".", "name", ",", "len", "(", "variable", ".", "bounds", ")", ",", "len", "(", "arglist", ")", ")", ")", "return", "None", "# Checks for array subscript range if the subscript is constant", "# e.g. A(1) is a constant subscript access", "for", "i", ",", "b", "in", "zip", "(", "arglist", ",", "variable", ".", "bounds", ")", ":", "btype", "=", "gl", ".", "SYMBOL_TABLE", ".", "basic_types", "[", "gl", ".", "BOUND_TYPE", "]", "lower_bound", "=", "NUMBER", "(", "b", ".", "lower", ",", "type_", "=", "btype", ",", "lineno", "=", "lineno", ")", "i", ".", "value", "=", "BINARY", ".", "make_node", "(", "'MINUS'", ",", "TYPECAST", ".", "make_node", "(", "btype", ",", "i", ".", "value", ",", "lineno", ")", ",", "lower_bound", ",", "lineno", ",", "func", "=", "lambda", "x", ",", "y", ":", "x", "-", "y", ",", "type_", "=", "btype", ")", "if", "is_number", "(", "i", ".", "value", ")", "or", "is_const", "(", "i", ".", "value", ")", ":", "val", "=", "i", ".", "value", ".", "value", "if", "val", "<", "0", "or", "val", ">", "b", ".", "count", ":", "warning", "(", "lineno", ",", "\"Array '%s' subscript out of range\"", "%", "id_", ")", "# Returns the variable entry and the node", "return", "cls", "(", "variable", ",", "arglist", ",", "lineno", ")" ]
46.896552
0.002882
def delete(cls, group, admin): """Delete admin from group. :param group: Group object. :param admin: Admin object. """ with db.session.begin_nested(): obj = cls.query.filter( cls.admin == admin, cls.group == group).one() db.session.delete(obj)
[ "def", "delete", "(", "cls", ",", "group", ",", "admin", ")", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "obj", "=", "cls", ".", "query", ".", "filter", "(", "cls", ".", "admin", "==", "admin", ",", "cls", ".", "group", "==", "group", ")", ".", "one", "(", ")", "db", ".", "session", ".", "delete", "(", "obj", ")" ]
31.5
0.006173
def trace_incoming_web_request( self, webapp_info, url, method, headers=None, remote_address=None, str_tag=None, byte_tag=None): '''Create a tracer for an incoming webrequest. :param WebapplicationInfoHandle webapp_info: Web application information (see :meth:`create_web_application_info`). :param str url: The requested URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :param str remote_address: The remote (client) IP address (of the peer of the socket connection via which the request was received). The remote address is useful to gain information about load balancers, proxies and ultimately the end user that is sending the request. For the other parameters, see :ref:`tagging`. :rtype: tracers.IncomingWebRequestTracer ''' assert isinstance(webapp_info, WebapplicationInfoHandle) result = tracers.IncomingWebRequestTracer( self._nsdk, self._nsdk.incomingwebrequesttracer_create( webapp_info.handle, url, method)) if not result: return result try: if headers: self._nsdk.incomingwebrequesttracer_add_request_headers( result.handle, *_get_kvc(headers)) if remote_address: self._nsdk.incomingwebrequesttracer_set_remote_address( result.handle, remote_address) self._applytag(result, str_tag, byte_tag) except: result.end() raise return result
[ "def", "trace_incoming_web_request", "(", "self", ",", "webapp_info", ",", "url", ",", "method", ",", "headers", "=", "None", ",", "remote_address", "=", "None", ",", "str_tag", "=", "None", ",", "byte_tag", "=", "None", ")", ":", "assert", "isinstance", "(", "webapp_info", ",", "WebapplicationInfoHandle", ")", "result", "=", "tracers", ".", "IncomingWebRequestTracer", "(", "self", ".", "_nsdk", ",", "self", ".", "_nsdk", ".", "incomingwebrequesttracer_create", "(", "webapp_info", ".", "handle", ",", "url", ",", "method", ")", ")", "if", "not", "result", ":", "return", "result", "try", ":", "if", "headers", ":", "self", ".", "_nsdk", ".", "incomingwebrequesttracer_add_request_headers", "(", "result", ".", "handle", ",", "*", "_get_kvc", "(", "headers", ")", ")", "if", "remote_address", ":", "self", ".", "_nsdk", ".", "incomingwebrequesttracer_set_remote_address", "(", "result", ".", "handle", ",", "remote_address", ")", "self", ".", "_applytag", "(", "result", ",", "str_tag", ",", "byte_tag", ")", "except", ":", "result", ".", "end", "(", ")", "raise", "return", "result" ]
42.565217
0.000998
def difference(self, other):
    """
    Return a new DiscreteSet with the difference of the two sets, i.e.
    all elements that are in self but not in other.

    :param DiscreteSet other: Set to subtract
    :rtype: DiscreteSet
    :raises ValueError: if self is a set of everything
    """
    if self.everything:
        raise ValueError("Can not remove from everything")
    elif other.everything:
        return DiscreteSet([])
    else:
        return DiscreteSet(self.elements.difference(other.elements))
[ "def", "difference", "(", "self", ",", "other", ")", ":", "if", "self", ".", "everything", ":", "raise", "ValueError", "(", "\"Can not remove from everything\"", ")", "elif", "other", ".", "everything", ":", "return", "DiscreteSet", "(", "[", "]", ")", "else", ":", "return", "DiscreteSet", "(", "self", ".", "elements", ".", "difference", "(", "other", ".", "elements", ")", ")" ]
36.666667
0.005319
def _extract_table_root(d, current, pc): """ Extract data from the root level of a paleoData table. :param dict d: paleoData table :param dict current: Current root data :param str pc: paleoData or chronData :return dict current: Current root data """ logger_ts.info("enter extract_table_root") try: for k, v in d.items(): if isinstance(v, str): current[pc + '_' + k] = v except Exception as e: logger_ts.error("extract_table_root: {}".format(e)) return current
[ "def", "_extract_table_root", "(", "d", ",", "current", ",", "pc", ")", ":", "logger_ts", ".", "info", "(", "\"enter extract_table_root\"", ")", "try", ":", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "str", ")", ":", "current", "[", "pc", "+", "'_'", "+", "k", "]", "=", "v", "except", "Exception", "as", "e", ":", "logger_ts", ".", "error", "(", "\"extract_table_root: {}\"", ".", "format", "(", "e", ")", ")", "return", "current" ]
33.375
0.001821