Dataset columns (one record = text, code_tokens, avg_line_len, score):

  text           string     lengths 75 .. 104k
  code_tokens    sequence   of string tokens
  avg_line_len   float64    7.91 .. 980
  score          float64    0 .. 0.18

Each record below lists the function source (text), its token sequence
(code_tokens), and the two per-record statistics.
def transition(value, maximum, start, end):
    """
    Transition between two values.

    :param value: Current iteration.
    :param maximum: Maximum number of iterations.
    :param start: Start value.
    :param end: End value.
    :returns: Transitional value.
    """
    return round(start + (end - start) * value / maximum, 2)
[ "def", "transition", "(", "value", ",", "maximum", ",", "start", ",", "end", ")", ":", "return", "round", "(", "start", "+", "(", "end", "-", "start", ")", "*", "value", "/", "maximum", ",", "2", ")" ]
avg_line_len: 32.2
score: 0.003021
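A minimal usage sketch for the transition function above (illustrative, not part of the record):

# 25% of the way from 0.0 to 10.0, rounded to 2 decimals
print(transition(25, 100, 0.0, 10.0))   # 2.5
print(transition(100, 100, 0.0, 10.0))  # 10.0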
def simxSetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    matrix = (ct.c_float*12)(*matrix)
    return c_SetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode)
[ "def", "simxSetSphericalJointMatrix", "(", "clientID", ",", "jointHandle", ",", "matrix", ",", "operationMode", ")", ":", "matrix", "=", "(", "ct", ".", "c_float", "*", "12", ")", "(", "*", "matrix", ")", "return", "c_SetSphericalJointMatrix", "(", "clientID", ",", "jointHandle", ",", "matrix", ",", "operationMode", ")" ]
avg_line_len: 50
score: 0.009836
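The (ct.c_float * 12)(*matrix) idiom in the record above packs a Python sequence into a fixed-size C float array for the foreign call. A standalone sketch of the idiom, using only the standard library:

import ctypes as ct

values = [0.0, 0.1, 0.2, 0.3, 1.0, 1.1, 1.2, 1.3, 2.0, 2.1, 2.2, 2.3]
c_array = (ct.c_float * 12)(*values)  # C array of 12 single-precision floats
print(len(c_array), c_array[4])       # 12 1.100000023841858 (float32 rounding)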
def readline(self, echo=None, prompt='', use_history=True):
    """Return a line of text, including the terminating LF

    If echo is true always echo, if echo is false never echo
    If echo is None follow the negotiated setting.
    prompt is the current prompt to write (and rewrite if needed)
    use_history controls if this current line uses (and adds to) the command history.
    """
    line = []
    insptr = 0
    ansi = 0
    histptr = len(self.history)
    if self.DOECHO:
        self.write(prompt)
        self._current_prompt = prompt
    else:
        self._current_prompt = ''
    self._current_line = ''
    while True:
        c = self.getc(block=True)
        c = self.ansi_to_curses(c)
        if c == theNULL:
            continue
        elif c == curses.KEY_LEFT:
            if insptr > 0:
                insptr = insptr - 1
                self._readline_echo(self.CODES['CSRLEFT'], echo)
            else:
                self._readline_echo(BELL, echo)
            continue
        elif c == curses.KEY_RIGHT:
            if insptr < len(line):
                insptr = insptr + 1
                self._readline_echo(self.CODES['CSRRIGHT'], echo)
            else:
                self._readline_echo(BELL, echo)
            continue
        elif c == curses.KEY_UP or c == curses.KEY_DOWN:
            if not use_history:
                self._readline_echo(BELL, echo)
                continue
            if c == curses.KEY_UP:
                if histptr > 0:
                    histptr = histptr - 1
                else:
                    self._readline_echo(BELL, echo)
                    continue
            elif c == curses.KEY_DOWN:
                if histptr < len(self.history):
                    histptr = histptr + 1
                else:
                    self._readline_echo(BELL, echo)
                    continue
            line = []
            if histptr < len(self.history):
                line.extend(self.history[histptr])
            for char in range(insptr):
                self._readline_echo(self.CODES['CSRLEFT'], echo)
            self._readline_echo(self.CODES['DEOL'], echo)
            self._readline_echo(''.join(line), echo)
            insptr = len(line)
            continue
        elif c == chr(3):
            self._readline_echo('\n' + curses.ascii.unctrl(c) + ' ABORT\n', echo)
            return ''
        elif c == chr(4):
            if len(line) > 0:
                self._readline_echo('\n' + curses.ascii.unctrl(c) + ' ABORT (QUIT)\n', echo)
                return ''
            self._readline_echo('\n' + curses.ascii.unctrl(c) + ' QUIT\n', echo)
            return 'QUIT'
        elif c == chr(10):
            self._readline_echo(c, echo)
            result = ''.join(line)
            if use_history:
                self.history.append(result)
            if echo is False:
                if prompt:
                    self.write(chr(10))
                log.debug('readline: %s(hidden text)', prompt)
            else:
                log.debug('readline: %s%r', prompt, result)
            return result
        elif c == curses.KEY_BACKSPACE or c == chr(127) or c == chr(8):
            if insptr > 0:
                self._readline_echo(self.CODES['CSRLEFT'] + self.CODES['DEL'], echo)
                insptr = insptr - 1
                del line[insptr]
            else:
                self._readline_echo(BELL, echo)
                continue
        elif c == curses.KEY_DC:
            if insptr < len(line):
                self._readline_echo(self.CODES['DEL'], echo)
                del line[insptr]
            else:
                self._readline_echo(BELL, echo)
                continue
        else:
            if ord(c) < 32:
                c = curses.ascii.unctrl(c)
            if len(line) > insptr:
                self._readline_insert(c, echo, insptr, line)
            else:
                self._readline_echo(c, echo)
            line[insptr:insptr] = c
            insptr = insptr + len(c)
        if self._readline_do_echo(echo):
            self._current_line = line
[ "def", "readline", "(", "self", ",", "echo", "=", "None", ",", "prompt", "=", "''", ",", "use_history", "=", "True", ")", ":", "line", "=", "[", "]", "insptr", "=", "0", "ansi", "=", "0", "histptr", "=", "len", "(", "self", ".", "history", ")", "if", "self", ".", "DOECHO", ":", "self", ".", "write", "(", "prompt", ")", "self", ".", "_current_prompt", "=", "prompt", "else", ":", "self", ".", "_current_prompt", "=", "''", "self", ".", "_current_line", "=", "''", "while", "True", ":", "c", "=", "self", ".", "getc", "(", "block", "=", "True", ")", "c", "=", "self", ".", "ansi_to_curses", "(", "c", ")", "if", "c", "==", "theNULL", ":", "continue", "elif", "c", "==", "curses", ".", "KEY_LEFT", ":", "if", "insptr", ">", "0", ":", "insptr", "=", "insptr", "-", "1", "self", ".", "_readline_echo", "(", "self", ".", "CODES", "[", "'CSRLEFT'", "]", ",", "echo", ")", "else", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "elif", "c", "==", "curses", ".", "KEY_RIGHT", ":", "if", "insptr", "<", "len", "(", "line", ")", ":", "insptr", "=", "insptr", "+", "1", "self", ".", "_readline_echo", "(", "self", ".", "CODES", "[", "'CSRRIGHT'", "]", ",", "echo", ")", "else", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "elif", "c", "==", "curses", ".", "KEY_UP", "or", "c", "==", "curses", ".", "KEY_DOWN", ":", "if", "not", "use_history", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "if", "c", "==", "curses", ".", "KEY_UP", ":", "if", "histptr", ">", "0", ":", "histptr", "=", "histptr", "-", "1", "else", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "elif", "c", "==", "curses", ".", "KEY_DOWN", ":", "if", "histptr", "<", "len", "(", "self", ".", "history", ")", ":", "histptr", "=", "histptr", "+", "1", "else", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "line", "=", "[", "]", "if", "histptr", "<", "len", "(", "self", ".", "history", ")", ":", "line", ".", "extend", "(", "self", ".", "history", "[", "histptr", "]", ")", "for", "char", "in", "range", "(", "insptr", ")", ":", "self", ".", "_readline_echo", "(", "self", ".", "CODES", "[", "'CSRLEFT'", "]", ",", "echo", ")", "self", ".", "_readline_echo", "(", "self", ".", "CODES", "[", "'DEOL'", "]", ",", "echo", ")", "self", ".", "_readline_echo", "(", "''", ".", "join", "(", "line", ")", ",", "echo", ")", "insptr", "=", "len", "(", "line", ")", "continue", "elif", "c", "==", "chr", "(", "3", ")", ":", "self", ".", "_readline_echo", "(", "'\\n'", "+", "curses", ".", "ascii", ".", "unctrl", "(", "c", ")", "+", "' ABORT\\n'", ",", "echo", ")", "return", "''", "elif", "c", "==", "chr", "(", "4", ")", ":", "if", "len", "(", "line", ")", ">", "0", ":", "self", ".", "_readline_echo", "(", "'\\n'", "+", "curses", ".", "ascii", ".", "unctrl", "(", "c", ")", "+", "' ABORT (QUIT)\\n'", ",", "echo", ")", "return", "''", "self", ".", "_readline_echo", "(", "'\\n'", "+", "curses", ".", "ascii", ".", "unctrl", "(", "c", ")", "+", "' QUIT\\n'", ",", "echo", ")", "return", "'QUIT'", "elif", "c", "==", "chr", "(", "10", ")", ":", "self", ".", "_readline_echo", "(", "c", ",", "echo", ")", "result", "=", "''", ".", "join", "(", "line", ")", "if", "use_history", ":", "self", ".", "history", ".", "append", "(", "result", ")", "if", "echo", "is", "False", ":", "if", "prompt", ":", "self", ".", "write", "(", "chr", "(", "10", ")", ")", "log", ".", "debug", "(", "'readline: %s(hidden text)'", ",", "prompt", ")", "else", ":", 
"log", ".", "debug", "(", "'readline: %s%r'", ",", "prompt", ",", "result", ")", "return", "result", "elif", "c", "==", "curses", ".", "KEY_BACKSPACE", "or", "c", "==", "chr", "(", "127", ")", "or", "c", "==", "chr", "(", "8", ")", ":", "if", "insptr", ">", "0", ":", "self", ".", "_readline_echo", "(", "self", ".", "CODES", "[", "'CSRLEFT'", "]", "+", "self", ".", "CODES", "[", "'DEL'", "]", ",", "echo", ")", "insptr", "=", "insptr", "-", "1", "del", "line", "[", "insptr", "]", "else", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "elif", "c", "==", "curses", ".", "KEY_DC", ":", "if", "insptr", "<", "len", "(", "line", ")", ":", "self", ".", "_readline_echo", "(", "self", ".", "CODES", "[", "'DEL'", "]", ",", "echo", ")", "del", "line", "[", "insptr", "]", "else", ":", "self", ".", "_readline_echo", "(", "BELL", ",", "echo", ")", "continue", "else", ":", "if", "ord", "(", "c", ")", "<", "32", ":", "c", "=", "curses", ".", "ascii", ".", "unctrl", "(", "c", ")", "if", "len", "(", "line", ")", ">", "insptr", ":", "self", ".", "_readline_insert", "(", "c", ",", "echo", ",", "insptr", ",", "line", ")", "else", ":", "self", ".", "_readline_echo", "(", "c", ",", "echo", ")", "line", "[", "insptr", ":", "insptr", "]", "=", "c", "insptr", "=", "insptr", "+", "len", "(", "c", ")", "if", "self", ".", "_readline_do_echo", "(", "echo", ")", ":", "self", ".", "_current_line", "=", "line" ]
avg_line_len: 39.495575
score: 0.00306
def yield_from_handle(self, tokens):
    """Process Python 3.3 yield from."""
    internal_assert(len(tokens) == 1, "invalid yield from tokens", tokens)
    if self.target_info < (3, 3):
        return (
            yield_from_var + " = " + tokens[0]
            + "\nfor " + yield_item_var + " in " + yield_from_var + ":\n"
            + openindent + "yield " + yield_item_var + "\n" + closeindent
        )
    else:
        return "yield from " + tokens[0]
[ "def", "yield_from_handle", "(", "self", ",", "tokens", ")", ":", "internal_assert", "(", "len", "(", "tokens", ")", "==", "1", ",", "\"invalid yield from tokens\"", ",", "tokens", ")", "if", "self", ".", "target_info", "<", "(", "3", ",", "3", ")", ":", "return", "(", "yield_from_var", "+", "\" = \"", "+", "tokens", "[", "0", "]", "+", "\"\\nfor \"", "+", "yield_item_var", "+", "\" in \"", "+", "yield_from_var", "+", "\":\\n\"", "+", "openindent", "+", "\"yield \"", "+", "yield_item_var", "+", "\"\\n\"", "+", "closeindent", ")", "else", ":", "return", "\"yield from \"", "+", "tokens", "[", "0", "]" ]
avg_line_len: 44.454545
score: 0.004008
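For targets older than (3, 3), the handler above emits source text equivalent to a plain loop. Roughly (variable names illustrative; a full yield from also forwards send/throw and the generator's return value, which this rewrite does not):

# what "yield from expr" expands to, approximately:
_yield_from = expr
for _yield_item in _yield_from:
    yield _yield_item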
def parse_html_urls(file_name, html_data):
    ''' Returns a list of tuples in the form (url, file_name, line_number) '''
    try:
        html = lxml.html.fromstring(html_data)
        anchor_tags = html.cssselect('a')
        for a in anchor_tags:
            # A link was started but not finished, href with nothing set!
            if not 'href' in a.attrib or a.attrib['href'] == '':
                BROKEN_URLS.append(('None', file_name, a.sourceline))
            url = clean_url(a.attrib['href'])
            if is_valid_url(url):
                if url not in URL_CACHE:
                    URL_CACHE.add(url)
                    yield (url, file_name, a.sourceline)
    except SyntaxError:
        pass
[ "def", "parse_html_urls", "(", "file_name", ",", "html_data", ")", ":", "try", ":", "html", "=", "lxml", ".", "html", ".", "fromstring", "(", "html_data", ")", "anchor_tags", "=", "html", ".", "cssselect", "(", "'a'", ")", "for", "a", "in", "anchor_tags", ":", "# A link was started but not finished, href with nothing set!", "if", "not", "'href'", "in", "a", ".", "attrib", "or", "a", ".", "attrib", "[", "'href'", "]", "==", "''", ":", "BROKEN_URLS", ".", "append", "(", "(", "'None'", ",", "file_name", ",", "a", ".", "sourceline", ")", ")", "url", "=", "clean_url", "(", "a", ".", "attrib", "[", "'href'", "]", ")", "if", "is_valid_url", "(", "url", ")", ":", "if", "url", "not", "in", "URL_CACHE", ":", "URL_CACHE", ".", "add", "(", "url", ")", "yield", "(", "url", ",", "file_name", ",", "a", ".", "sourceline", ")", "except", "SyntaxError", ":", "pass" ]
avg_line_len: 30.565217
score: 0.002759
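A hypothetical driver for the generator above; it assumes lxml is installed and that the module-level helpers (clean_url, is_valid_url) and globals (BROKEN_URLS, URL_CACHE) exist as the function expects:

html = '<html><body><a href="http://example.com/">ok</a></body></html>'
for url, fname, line_no in parse_html_urls('page.html', html):
    print(url, fname, line_no)  # e.g. http://example.com/ page.html 1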
def is_valid_channel(self, channel,
                     conda_url='https://conda.anaconda.org',
                     non_blocking=True):
    """Check if a conda channel is valid."""
    logger.debug(str((channel, conda_url)))
    if non_blocking:
        method = self._is_valid_channel
        return self._create_worker(method, channel, conda_url)
    else:
        return self._is_valid_channel(channel, conda_url=conda_url)
[ "def", "is_valid_channel", "(", "self", ",", "channel", ",", "conda_url", "=", "'https://conda.anaconda.org'", ",", "non_blocking", "=", "True", ")", ":", "logger", ".", "debug", "(", "str", "(", "(", "channel", ",", "conda_url", ")", ")", ")", "if", "non_blocking", ":", "method", "=", "self", ".", "_is_valid_channel", "return", "self", ".", "_create_worker", "(", "method", ",", "channel", ",", "conda_url", ")", "else", ":", "return", "self", ".", "_is_valid_channel", "(", "channel", ",", "conda_url", "=", "conda_url", ")" ]
avg_line_len: 43.545455
score: 0.010225
def authenticate(url, account, key, by='name', expires=0, timestamp=None,
                 timeout=None, request_type="xml", admin_auth=False,
                 use_password=False, raise_on_error=False):
    """ Authenticate to the Zimbra server

    :param url: URL of Zimbra SOAP service
    :param account: The account to be authenticated against
    :param key: The preauth key of the domain of the account or a password
      (if admin_auth or use_password is True)
    :param by: If the account is specified as a name, an ID or a
      ForeignPrincipal
    :param expires: When the token expires (or 0 for default expiration)
    :param timestamp: When the token was requested (None for "now")
    :param timeout: Timeout for the communication with the server. Defaults
      to the urllib2-default
    :param request_type: Which type of request to use ("xml" (default) or
      "json")
    :param admin_auth: This request should authenticate and generate an
      admin token. The "key"-parameter therefore holds the admin password
      (implies use_password)
    :param use_password: The "key"-parameter holds a password. Do a
      password-based user authentication.
    :param raise_on_error: Should I raise an exception when an
      authentication error occurs or just return None?
    :return: The authentication token or None
    :rtype: str or None or unicode
    """
    if timestamp is None:
        timestamp = int(time.time()) * 1000

    pak = ""
    if not admin_auth:
        pak = preauth.create_preauth(account, key, by, expires, timestamp)

    if request_type == 'xml':
        auth_request = RequestXml()
    else:
        auth_request = RequestJson()

    request_data = {
        'account': {
            'by': by,
            '_content': account
        }
    }

    ns = "urn:zimbraAccount"

    if admin_auth:
        ns = "urn:zimbraAdmin"
        request_data['password'] = key
    elif use_password:
        request_data['password'] = {
            "_content": key
        }
    else:
        request_data['preauth'] = {
            'timestamp': timestamp,
            'expires': expires,
            '_content': pak
        }

    auth_request.add_request(
        'AuthRequest',
        request_data,
        ns
    )

    server = Communication(url, timeout)

    if request_type == 'xml':
        response = ResponseXml()
    else:
        response = ResponseJson()

    server.send_request(auth_request, response)

    if response.is_fault():
        if raise_on_error:
            raise AuthenticationFailed(
                "Cannot authenticate user: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
        return None

    return response.get_response()['AuthResponse']['authToken']
[ "def", "authenticate", "(", "url", ",", "account", ",", "key", ",", "by", "=", "'name'", ",", "expires", "=", "0", ",", "timestamp", "=", "None", ",", "timeout", "=", "None", ",", "request_type", "=", "\"xml\"", ",", "admin_auth", "=", "False", ",", "use_password", "=", "False", ",", "raise_on_error", "=", "False", ")", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "int", "(", "time", ".", "time", "(", ")", ")", "*", "1000", "pak", "=", "\"\"", "if", "not", "admin_auth", ":", "pak", "=", "preauth", ".", "create_preauth", "(", "account", ",", "key", ",", "by", ",", "expires", ",", "timestamp", ")", "if", "request_type", "==", "'xml'", ":", "auth_request", "=", "RequestXml", "(", ")", "else", ":", "auth_request", "=", "RequestJson", "(", ")", "request_data", "=", "{", "'account'", ":", "{", "'by'", ":", "by", ",", "'_content'", ":", "account", "}", "}", "ns", "=", "\"urn:zimbraAccount\"", "if", "admin_auth", ":", "ns", "=", "\"urn:zimbraAdmin\"", "request_data", "[", "'password'", "]", "=", "key", "elif", "use_password", ":", "request_data", "[", "'password'", "]", "=", "{", "\"_content\"", ":", "key", "}", "else", ":", "request_data", "[", "'preauth'", "]", "=", "{", "'timestamp'", ":", "timestamp", ",", "'expires'", ":", "expires", ",", "'_content'", ":", "pak", "}", "auth_request", ".", "add_request", "(", "'AuthRequest'", ",", "request_data", ",", "ns", ")", "server", "=", "Communication", "(", "url", ",", "timeout", ")", "if", "request_type", "==", "'xml'", ":", "response", "=", "ResponseXml", "(", ")", "else", ":", "response", "=", "ResponseJson", "(", ")", "server", ".", "send_request", "(", "auth_request", ",", "response", ")", "if", "response", ".", "is_fault", "(", ")", ":", "if", "raise_on_error", ":", "raise", "AuthenticationFailed", "(", "\"Cannot authenticate user: (%s) %s\"", "%", "(", "response", ".", "get_fault_code", "(", ")", ",", "response", ".", "get_fault_message", "(", ")", ")", ")", "return", "None", "return", "response", ".", "get_response", "(", ")", "[", "'AuthResponse'", "]", "[", "'authToken'", "]" ]
avg_line_len: 26.028571
score: 0.000352
def is_complete(self):
    """
    Checks whether all descriptor fields are set.
    If descriptor has at least one "*" or null field it is considered
    "incomplete"

    :return: true if all descriptor fields are defined and false otherwise.
    """
    return self._group != None and self._type != None \
        and self._kind != None and self._name != None and self._version != None
[ "def", "is_complete", "(", "self", ")", ":", "return", "self", ".", "_group", "!=", "None", "and", "self", ".", "_type", "!=", "None", "and", "self", ".", "_kind", "!=", "None", "and", "self", ".", "_name", "!=", "None", "and", "self", ".", "_version", "!=", "None" ]
avg_line_len: 44.888889
score: 0.021845
def subsequent_calling_points(self):
    """
    A list of CallingPoint objects.

    This is the list of all subsequent calling points for the service,
    including all associated services if the service splits into
    multiple services.
    """
    calling_points = list()
    for cpl in self._subsequent_calling_point_lists:
        calling_points += cpl.calling_points
    return calling_points
[ "def", "subsequent_calling_points", "(", "self", ")", ":", "calling_points", "=", "list", "(", ")", "for", "cpl", "in", "self", ".", "_subsequent_calling_point_lists", ":", "calling_points", "+=", "cpl", ".", "calling_points", "return", "calling_points" ]
avg_line_len: 35.75
score: 0.004545
def prioritized_iter(cls, flag_val, env_val, config_val,
                     config_default_val, hardcoded_val, default):
    """Yield the non-None values from highest-ranked to lowest, wrapped in
    RankedValue instances."""
    if flag_val is not None:
        yield RankedValue(cls.FLAG, flag_val)
    if env_val is not None:
        yield RankedValue(cls.ENVIRONMENT, env_val)
    if config_val is not None:
        yield RankedValue(cls.CONFIG, config_val)
    if config_default_val is not None:
        yield RankedValue(cls.CONFIG_DEFAULT, config_default_val)
    if hardcoded_val is not None:
        yield RankedValue(cls.HARDCODED, hardcoded_val)
    yield RankedValue(cls.NONE, default)
[ "def", "prioritized_iter", "(", "cls", ",", "flag_val", ",", "env_val", ",", "config_val", ",", "config_default_val", ",", "hardcoded_val", ",", "default", ")", ":", "if", "flag_val", "is", "not", "None", ":", "yield", "RankedValue", "(", "cls", ".", "FLAG", ",", "flag_val", ")", "if", "env_val", "is", "not", "None", ":", "yield", "RankedValue", "(", "cls", ".", "ENVIRONMENT", ",", "env_val", ")", "if", "config_val", "is", "not", "None", ":", "yield", "RankedValue", "(", "cls", ".", "CONFIG", ",", "config_val", ")", "if", "config_default_val", "is", "not", "None", ":", "yield", "RankedValue", "(", "cls", ".", "CONFIG_DEFAULT", ",", "config_default_val", ")", "if", "hardcoded_val", "is", "not", "None", ":", "yield", "RankedValue", "(", "cls", ".", "HARDCODED", ",", "hardcoded_val", ")", "yield", "RankedValue", "(", "cls", ".", "NONE", ",", "default", ")" ]
avg_line_len: 48.142857
score: 0.011645
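The first RankedValue yielded above is the one that takes effect, so a caller resolves a setting with next(...). A self-contained sketch of the same first-non-None pattern, with a stand-in RankedValue (names assumed, not the real library classes):

from collections import namedtuple

RankedValue = namedtuple('RankedValue', 'rank value')

def resolve(*ranked_candidates, default=None):
    # first (rank, value) pair whose value is not None wins
    for rank, value in ranked_candidates:
        if value is not None:
            return RankedValue(rank, value)
    return RankedValue('NONE', default)

print(resolve(('FLAG', None), ('ENVIRONMENT', '8080'), ('CONFIG', '9090')))
# RankedValue(rank='ENVIRONMENT', value='8080')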
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
    """
    This is the main method that takes code from the Jupyter cell
    and submits it to the SAS server.

    :param code: code from the cell
    :param silent:
    :return: str with either the log or list
    """
    if not code.strip():
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
    if self.mva is None:
        self._allow_stdin = True
        self._start_sas()
    if self.lst_len < 0:
        self._get_lst_len()
    if code.startswith('Obfuscated SAS Code'):
        logger.debug("decoding string")
        tmp1 = code.split()
        decode = base64.b64decode(tmp1[-1])
        code = decode.decode('utf-8')
    if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
        logger.debug("code type: " + str(type(code)))
        logger.debug("code length: " + str(len(code)))
        logger.debug("code string: " + code)
        if code.startswith("/*SASKernelTest*/"):
            res = self.mva.submit(code, "text")
        else:
            res = self.mva.submit(code, prompt=self.promptDict)
            self.promptDict = {}
        if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
            print(res['LOG'], '\n' "Restarting SAS session on your behalf")
            self.do_shutdown(True)
            return res['LOG']
        output = res['LST']
        log = res['LOG']
        return self._which_display(log, output)
    elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
        full_log = highlight(self.mva.saslog(), SASLogLexer(),
                             HtmlFormatter(full=True, style=SASLogStyle,
                                           lineseparator="<br>",
                                           title="Full SAS Log"))
        return full_log.replace('\n', ' ')
    else:
        return self.cachedlog.replace('\n', ' ')
[ "def", "do_execute_direct", "(", "self", ",", "code", ":", "str", ",", "silent", ":", "bool", "=", "False", ")", "->", "[", "str", ",", "dict", "]", ":", "if", "not", "code", ".", "strip", "(", ")", ":", "return", "{", "'status'", ":", "'ok'", ",", "'execution_count'", ":", "self", ".", "execution_count", ",", "'payload'", ":", "[", "]", ",", "'user_expressions'", ":", "{", "}", "}", "if", "self", ".", "mva", "is", "None", ":", "self", ".", "_allow_stdin", "=", "True", "self", ".", "_start_sas", "(", ")", "if", "self", ".", "lst_len", "<", "0", ":", "self", ".", "_get_lst_len", "(", ")", "if", "code", ".", "startswith", "(", "'Obfuscated SAS Code'", ")", ":", "logger", ".", "debug", "(", "\"decoding string\"", ")", "tmp1", "=", "code", ".", "split", "(", ")", "decode", "=", "base64", ".", "b64decode", "(", "tmp1", "[", "-", "1", "]", ")", "code", "=", "decode", ".", "decode", "(", "'utf-8'", ")", "if", "code", ".", "startswith", "(", "'showSASLog_11092015'", ")", "==", "False", "and", "code", ".", "startswith", "(", "\"CompleteshowSASLog_11092015\"", ")", "==", "False", ":", "logger", ".", "debug", "(", "\"code type: \"", "+", "str", "(", "type", "(", "code", ")", ")", ")", "logger", ".", "debug", "(", "\"code length: \"", "+", "str", "(", "len", "(", "code", ")", ")", ")", "logger", ".", "debug", "(", "\"code string: \"", "+", "code", ")", "if", "code", ".", "startswith", "(", "\"/*SASKernelTest*/\"", ")", ":", "res", "=", "self", ".", "mva", ".", "submit", "(", "code", ",", "\"text\"", ")", "else", ":", "res", "=", "self", ".", "mva", ".", "submit", "(", "code", ",", "prompt", "=", "self", ".", "promptDict", ")", "self", ".", "promptDict", "=", "{", "}", "if", "res", "[", "'LOG'", "]", ".", "find", "(", "\"SAS process has terminated unexpectedly\"", ")", ">", "-", "1", ":", "print", "(", "res", "[", "'LOG'", "]", ",", "'\\n'", "\"Restarting SAS session on your behalf\"", ")", "self", ".", "do_shutdown", "(", "True", ")", "return", "res", "[", "'LOG'", "]", "output", "=", "res", "[", "'LST'", "]", "log", "=", "res", "[", "'LOG'", "]", "return", "self", ".", "_which_display", "(", "log", ",", "output", ")", "elif", "code", ".", "startswith", "(", "\"CompleteshowSASLog_11092015\"", ")", "==", "True", "and", "code", ".", "startswith", "(", "'showSASLog_11092015'", ")", "==", "False", ":", "full_log", "=", "highlight", "(", "self", ".", "mva", ".", "saslog", "(", ")", ",", "SASLogLexer", "(", ")", ",", "HtmlFormatter", "(", "full", "=", "True", ",", "style", "=", "SASLogStyle", ",", "lineseparator", "=", "\"<br>\"", ",", "title", "=", "\"Full SAS Log\"", ")", ")", "return", "full_log", ".", "replace", "(", "'\\n'", ",", "' '", ")", "else", ":", "return", "self", ".", "cachedlog", ".", "replace", "(", "'\\n'", ",", "' '", ")" ]
avg_line_len: 43.734694
score: 0.004564
def cutout(im, n_holes, length):
    """
    Cut out n_holes number of square holes of size length in image at
    random locations. Holes may overlap.
    """
    r, c, *_ = im.shape
    mask = np.ones((r, c), np.int32)
    for n in range(n_holes):
        y = np.random.randint(0, r)
        x = np.random.randint(0, c)
        y1 = int(np.clip(y - length / 2, 0, r))
        y2 = int(np.clip(y + length / 2, 0, r))
        x1 = int(np.clip(x - length / 2, 0, c))
        x2 = int(np.clip(x + length / 2, 0, c))
        mask[y1: y2, x1: x2] = 0.
    mask = mask[:, :, None]
    im = im * mask
    return im
[ "def", "cutout", "(", "im", ",", "n_holes", ",", "length", ")", ":", "r", ",", "c", ",", "", "*", "_", "=", "im", ".", "shape", "mask", "=", "np", ".", "ones", "(", "(", "r", ",", "c", ")", ",", "np", ".", "int32", ")", "for", "n", "in", "range", "(", "n_holes", ")", ":", "y", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "r", ")", "x", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "c", ")", "y1", "=", "int", "(", "np", ".", "clip", "(", "y", "-", "length", "/", "2", ",", "0", ",", "r", ")", ")", "y2", "=", "int", "(", "np", ".", "clip", "(", "y", "+", "length", "/", "2", ",", "0", ",", "r", ")", ")", "x1", "=", "int", "(", "np", ".", "clip", "(", "x", "-", "length", "/", "2", ",", "0", ",", "c", ")", ")", "x2", "=", "int", "(", "np", ".", "clip", "(", "x", "+", "length", "/", "2", ",", "0", ",", "c", ")", ")", "mask", "[", "y1", ":", "y2", ",", "x1", ":", "x2", "]", "=", "0.", "mask", "=", "mask", "[", ":", ",", ":", ",", "None", "]", "im", "=", "im", "*", "mask", "return", "im" ]
avg_line_len: 34.235294
score: 0.011706
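A usage sketch for cutout, assuming a NumPy H x W x C array (the mask is broadcast over a trailing channel axis, so a 2-D input would not broadcast as intended):

import numpy as np

np.random.seed(0)
img = np.ones((8, 8, 3))
out = cutout(img, n_holes=1, length=4)
print(out.shape, int(out.sum()))  # (8, 8, 3) and a sum below the original 192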
def dump_counts(self, out=sys.stdout, count_fn=len, colwidth=10):
    """Dump out the summary counts of entries in this pivot table as a
    tabular listing.

    @param out: output stream to write to
    @param count_fn: (default=len) function for computing value for each
      pivot cell
    @param colwidth: (default=10)
    """
    if len(self._pivot_attrs) == 1:
        out.write("Pivot: %s\n" % ','.join(self._pivot_attrs))
        maxkeylen = max(len(str(k)) for k in self.keys())
        maxvallen = colwidth
        keytally = {}
        for k, sub in self.items():
            sub_v = count_fn(sub)
            maxvallen = max(maxvallen, len(str(sub_v)))
            keytally[k] = sub_v
        for k, sub in self.items():
            out.write("%-*.*s " % (maxkeylen, maxkeylen, k))
            out.write("%*s\n" % (maxvallen, keytally[k]))
    elif len(self._pivot_attrs) == 2:
        out.write("Pivot: %s\n" % ','.join(self._pivot_attrs))
        maxkeylen = max(max(len(str(k)) for k in self.keys()), 5)
        maxvallen = max(max(len(str(k)) for k in self.subtables[0].keys()), colwidth)
        keytally = dict((k, 0) for k in self.subtables[0].keys())
        out.write("%*s " % (maxkeylen, ''))
        out.write(' '.join("%*.*s" % (maxvallen, maxvallen, k)
                           for k in self.subtables[0].keys()))
        out.write(' %*s\n' % (maxvallen, 'Total'))
        for k, sub in self.items():
            out.write("%-*.*s " % (maxkeylen, maxkeylen, k))
            for kk, ssub in sub.items():
                ssub_v = count_fn(ssub)
                out.write("%*d " % (maxvallen, ssub_v))
                keytally[kk] += ssub_v
                maxvallen = max(maxvallen, len(str(ssub_v)))
            sub_v = count_fn(sub)
            maxvallen = max(maxvallen, len(str(sub_v)))
            out.write("%*d\n" % (maxvallen, sub_v))
        out.write('%-*.*s ' % (maxkeylen, maxkeylen, "Total"))
        out.write(' '.join("%*d" % (maxvallen, tally)
                           for k, tally in sorted(keytally.items())))
        out.write(" %*d\n" % (maxvallen, sum(tally for k, tally in keytally.items())))
    else:
        raise ValueError("can only dump summary counts for 1 or 2-attribute pivots")
[ "def", "dump_counts", "(", "self", ",", "out", "=", "sys", ".", "stdout", ",", "count_fn", "=", "len", ",", "colwidth", "=", "10", ")", ":", "if", "len", "(", "self", ".", "_pivot_attrs", ")", "==", "1", ":", "out", ".", "write", "(", "\"Pivot: %s\\n\"", "%", "','", ".", "join", "(", "self", ".", "_pivot_attrs", ")", ")", "maxkeylen", "=", "max", "(", "len", "(", "str", "(", "k", ")", ")", "for", "k", "in", "self", ".", "keys", "(", ")", ")", "maxvallen", "=", "colwidth", "keytally", "=", "{", "}", "for", "k", ",", "sub", "in", "self", ".", "items", "(", ")", ":", "sub_v", "=", "count_fn", "(", "sub", ")", "maxvallen", "=", "max", "(", "maxvallen", ",", "len", "(", "str", "(", "sub_v", ")", ")", ")", "keytally", "[", "k", "]", "=", "sub_v", "for", "k", ",", "sub", "in", "self", ".", "items", "(", ")", ":", "out", ".", "write", "(", "\"%-*.*s \"", "%", "(", "maxkeylen", ",", "maxkeylen", ",", "k", ")", ")", "out", ".", "write", "(", "\"%*s\\n\"", "%", "(", "maxvallen", ",", "keytally", "[", "k", "]", ")", ")", "elif", "len", "(", "self", ".", "_pivot_attrs", ")", "==", "2", ":", "out", ".", "write", "(", "\"Pivot: %s\\n\"", "%", "','", ".", "join", "(", "self", ".", "_pivot_attrs", ")", ")", "maxkeylen", "=", "max", "(", "max", "(", "len", "(", "str", "(", "k", ")", ")", "for", "k", "in", "self", ".", "keys", "(", ")", ")", ",", "5", ")", "maxvallen", "=", "max", "(", "max", "(", "len", "(", "str", "(", "k", ")", ")", "for", "k", "in", "self", ".", "subtables", "[", "0", "]", ".", "keys", "(", ")", ")", ",", "colwidth", ")", "keytally", "=", "dict", "(", "(", "k", ",", "0", ")", "for", "k", "in", "self", ".", "subtables", "[", "0", "]", ".", "keys", "(", ")", ")", "out", ".", "write", "(", "\"%*s \"", "%", "(", "maxkeylen", ",", "''", ")", ")", "out", ".", "write", "(", "' '", ".", "join", "(", "\"%*.*s\"", "%", "(", "maxvallen", ",", "maxvallen", ",", "k", ")", "for", "k", "in", "self", ".", "subtables", "[", "0", "]", ".", "keys", "(", ")", ")", ")", "out", ".", "write", "(", "' %*s\\n'", "%", "(", "maxvallen", ",", "'Total'", ")", ")", "for", "k", ",", "sub", "in", "self", ".", "items", "(", ")", ":", "out", ".", "write", "(", "\"%-*.*s \"", "%", "(", "maxkeylen", ",", "maxkeylen", ",", "k", ")", ")", "for", "kk", ",", "ssub", "in", "sub", ".", "items", "(", ")", ":", "ssub_v", "=", "count_fn", "(", "ssub", ")", "out", ".", "write", "(", "\"%*d \"", "%", "(", "maxvallen", ",", "ssub_v", ")", ")", "keytally", "[", "kk", "]", "+=", "ssub_v", "maxvallen", "=", "max", "(", "maxvallen", ",", "len", "(", "str", "(", "ssub_v", ")", ")", ")", "sub_v", "=", "count_fn", "(", "sub", ")", "maxvallen", "=", "max", "(", "maxvallen", ",", "len", "(", "str", "(", "sub_v", ")", ")", ")", "out", ".", "write", "(", "\"%*d\\n\"", "%", "(", "maxvallen", ",", "sub_v", ")", ")", "out", ".", "write", "(", "'%-*.*s '", "%", "(", "maxkeylen", ",", "maxkeylen", ",", "\"Total\"", ")", ")", "out", ".", "write", "(", "' '", ".", "join", "(", "\"%*d\"", "%", "(", "maxvallen", ",", "tally", ")", "for", "k", ",", "tally", "in", "sorted", "(", "keytally", ".", "items", "(", ")", ")", ")", ")", "out", ".", "write", "(", "\" %*d\\n\"", "%", "(", "maxvallen", ",", "sum", "(", "tally", "for", "k", ",", "tally", "in", "keytally", ".", "items", "(", ")", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"can only dump summary counts for 1 or 2-attribute pivots\"", ")" ]
avg_line_len: 56.146341
score: 0.003843
def add_child_gradebook(self, gradebook_id, child_id):
    """Adds a child to a gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of a gradebook
    arg:    child_id (osid.id.Id): the ``Id`` of the new child
    raise:  AlreadyExists - ``gradebook_id`` is already a parent of
            ``child_id``
    raise:  NotFound - ``gradebook_id`` or ``child_id`` not found
    raise:  NullArgument - ``gradebook_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.add_child_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.add_child_catalog(catalog_id=gradebook_id, child_id=child_id)
    return self._hierarchy_session.add_child(id_=gradebook_id, child_id=child_id)
[ "def", "add_child_gradebook", "(", "self", ",", "gradebook_id", ",", "child_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchyDesignSession.add_child_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "add_child_catalog", "(", "catalog_id", "=", "gradebook_id", ",", "child_id", "=", "child_id", ")", "return", "self", ".", "_hierarchy_session", ".", "add_child", "(", "id_", "=", "gradebook_id", ",", "child_id", "=", "child_id", ")" ]
avg_line_len: 51.1
score: 0.003842
def union(self, other):
    """Returns a new IntervalSet which represents the union of each of the
    intervals in this IntervalSet with each of the intervals in the other
    IntervalSet

    :param other: An IntervalSet to union with this one.
    """
    result = IntervalSet()
    for el in self:
        result.add(el)
    for el in other:
        result.add(el)
    return result
[ "def", "union", "(", "self", ",", "other", ")", ":", "result", "=", "IntervalSet", "(", ")", "for", "el", "in", "self", ":", "result", ".", "add", "(", "el", ")", "for", "el", "in", "other", ":", "result", ".", "add", "(", "el", ")", "return", "result" ]
avg_line_len: 37.363636
score: 0.007126
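A toy scaffold to exercise union above; the real IntervalSet presumably merges overlapping intervals inside add(), which this stand-in does not:

class IntervalSet(list):
    def add(self, el):
        if el not in self:          # toy de-duplication only
            self.append(el)

    def union(self, other):         # same body as the record above
        result = IntervalSet()
        for el in self:
            result.add(el)
        for el in other:
            result.add(el)
        return result

a = IntervalSet([(0, 2), (5, 7)])
b = IntervalSet([(1, 3), (0, 2)])
print(sorted(a.union(b)))           # [(0, 2), (1, 3), (5, 7)]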
def add_network_to_dhcp_agent(self, dhcp_agent, body=None):
    """Adds a network to dhcp agent."""
    return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent,
                     body=body)
[ "def", "add_network_to_dhcp_agent", "(", "self", ",", "dhcp_agent", ",", "body", "=", "None", ")", ":", "return", "self", ".", "post", "(", "(", "self", ".", "agent_path", "+", "self", ".", "DHCP_NETS", ")", "%", "dhcp_agent", ",", "body", "=", "body", ")" ]
avg_line_len: 52.5
score: 0.00939
def set_connection_logging(self, loadbalancer, val):
    """
    Sets the connection logging for the given load balancer.
    """
    uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
    val = str(val).lower()
    req_body = {"connectionLogging": {
        "enabled": val,
    }}
    resp, body = self.api.method_put(uri, body=req_body)
    return body
[ "def", "set_connection_logging", "(", "self", ",", "loadbalancer", ",", "val", ")", ":", "uri", "=", "\"/loadbalancers/%s/connectionlogging\"", "%", "utils", ".", "get_id", "(", "loadbalancer", ")", "val", "=", "str", "(", "val", ")", ".", "lower", "(", ")", "req_body", "=", "{", "\"connectionLogging\"", ":", "{", "\"enabled\"", ":", "val", ",", "}", "}", "resp", ",", "body", "=", "self", ".", "api", ".", "method_put", "(", "uri", ",", "body", "=", "req_body", ")", "return", "body" ]
avg_line_len: 38
score: 0.007009
def triads_inv(reference_labels, estimated_labels):
    """Score chords along triad (root, quality to #5, & bass) relationships.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...      ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.triads_inv(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    reference_labels : list, len=n
        Reference chord labels to score against.
    estimated_labels : list, len=n
        Estimated chord labels to score against.

    Returns
    -------
    scores : np.ndarray, shape=(n,), dtype=float
        Comparison scores, in [0.0, 1.0]
    """
    validate(reference_labels, estimated_labels)

    ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
    est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)

    eq_roots = ref_roots == est_roots
    eq_basses = ref_bass == est_bass
    eq_semitones = np.all(
        np.equal(ref_semitones[:, :8], est_semitones[:, :8]), axis=1)
    comparison_scores = (eq_roots * eq_semitones * eq_basses).astype(np.float)

    # Ignore 'X' chords
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
[ "def", "triads_inv", "(", "reference_labels", ",", "estimated_labels", ")", ":", "validate", "(", "reference_labels", ",", "estimated_labels", ")", "ref_roots", ",", "ref_semitones", ",", "ref_bass", "=", "encode_many", "(", "reference_labels", ",", "False", ")", "est_roots", ",", "est_semitones", ",", "est_bass", "=", "encode_many", "(", "estimated_labels", ",", "False", ")", "eq_roots", "=", "ref_roots", "==", "est_roots", "eq_basses", "=", "ref_bass", "==", "est_bass", "eq_semitones", "=", "np", ".", "all", "(", "np", ".", "equal", "(", "ref_semitones", "[", ":", ",", ":", "8", "]", ",", "est_semitones", "[", ":", ",", ":", "8", "]", ")", ",", "axis", "=", "1", ")", "comparison_scores", "=", "(", "eq_roots", "*", "eq_semitones", "*", "eq_basses", ")", ".", "astype", "(", "np", ".", "float", ")", "# Ignore 'X' chords", "comparison_scores", "[", "np", ".", "any", "(", "ref_semitones", "<", "0", ",", "axis", "=", "1", ")", "]", "=", "-", "1.0", "return", "comparison_scores" ]
avg_line_len: 38.276596
score: 0.000542
def add_decrypt_chunk(
        self, final_path, internal_fdstart, offsets, symkey, iv,
        hmac_datafile):
    # type: (CryptoOffload, str, int, blobxfer.models.download.Offsets,
    #        bytes, bytes, str) -> None
    """Add a chunk to decrypt
    :param CryptoOffload self: this
    :param str final_path: final path
    :param int internal_fdstart: internal fd offset start
    :param blobxfer.models.download.Offsets offsets: offsets
    :param bytes symkey: symmetric key
    :param bytes iv: initialization vector
    :param str hmac_datafile: encrypted data file
    """
    self._task_queue.put(
        (CryptoAction.Decrypt, final_path, internal_fdstart, offsets,
         symkey, iv, hmac_datafile)
    )
[ "def", "add_decrypt_chunk", "(", "self", ",", "final_path", ",", "internal_fdstart", ",", "offsets", ",", "symkey", ",", "iv", ",", "hmac_datafile", ")", ":", "# type: (CryptoOffload, str, int, blobxfer.models.download.Offsets,", "# bytes, bytes, str) -> None", "self", ".", "_task_queue", ".", "put", "(", "(", "CryptoAction", ".", "Decrypt", ",", "final_path", ",", "internal_fdstart", ",", "offsets", ",", "symkey", ",", "iv", ",", "hmac_datafile", ")", ")" ]
avg_line_len: 43.055556
score: 0.005051
def make_figure_source_geom(extractors, what):
    """
    Extract the geometry of a given sources
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    [ex] = extractors
    sitecol = ex.get('sitecol')
    geom_by_src = vars(ex.get(what))
    ax = fig.add_subplot(1, 1, 1)
    ax.grid(True)
    ax.set_xlabel('Source')
    bmap = basemap('cyl', sitecol)
    for src, geom in geom_by_src.items():
        if src != 'array':
            bmap.plot(geom['lon'], geom['lat'], label=src)
    bmap.plot(sitecol['lon'], sitecol['lat'], 'x')
    ax.legend()
    return plt
[ "def", "make_figure_source_geom", "(", "extractors", ",", "what", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "fig", "=", "plt", ".", "figure", "(", ")", "[", "ex", "]", "=", "extractors", "sitecol", "=", "ex", ".", "get", "(", "'sitecol'", ")", "geom_by_src", "=", "vars", "(", "ex", ".", "get", "(", "what", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "ax", ".", "grid", "(", "True", ")", "ax", ".", "set_xlabel", "(", "'Source'", ")", "bmap", "=", "basemap", "(", "'cyl'", ",", "sitecol", ")", "for", "src", ",", "geom", "in", "geom_by_src", ".", "items", "(", ")", ":", "if", "src", "!=", "'array'", ":", "bmap", ".", "plot", "(", "geom", "[", "'lon'", "]", ",", "geom", "[", "'lat'", "]", ",", "label", "=", "src", ")", "bmap", ".", "plot", "(", "sitecol", "[", "'lon'", "]", ",", "sitecol", "[", "'lat'", "]", ",", "'x'", ")", "ax", ".", "legend", "(", ")", "return", "plt" ]
avg_line_len: 30.333333
score: 0.001522
def right(self, f, n=1):
    """return the nearest n features strictly to the right of a Feature f.
    Overlapping features are not considered as to the right.

    f: a Feature object
    n: the number of features to return
    """
    intervals = self.intervals[f.chrom]
    ilen = len(intervals)
    iright = binsearch_right_end(intervals, f.end, 0, ilen)
    results = []
    while iright < ilen:
        i = len(results)
        if i > n:
            if distance(f, results[i - 1]) != distance(f, results[i - 2]):
                return results[:i - 1]
        other = intervals[iright]
        iright += 1
        if distance(other, f) == 0:
            continue
        results.append(other)
    return results
[ "def", "right", "(", "self", ",", "f", ",", "n", "=", "1", ")", ":", "intervals", "=", "self", ".", "intervals", "[", "f", ".", "chrom", "]", "ilen", "=", "len", "(", "intervals", ")", "iright", "=", "binsearch_right_end", "(", "intervals", ",", "f", ".", "end", ",", "0", ",", "ilen", ")", "results", "=", "[", "]", "while", "iright", "<", "ilen", ":", "i", "=", "len", "(", "results", ")", "if", "i", ">", "n", ":", "if", "distance", "(", "f", ",", "results", "[", "i", "-", "1", "]", ")", "!=", "distance", "(", "f", ",", "results", "[", "i", "-", "2", "]", ")", ":", "return", "results", "[", ":", "i", "-", "1", "]", "other", "=", "intervals", "[", "iright", "]", "iright", "+=", "1", "if", "distance", "(", "other", ",", "f", ")", "==", "0", ":", "continue", "results", ".", "append", "(", "other", ")", "return", "results" ]
avg_line_len: 34.636364
score: 0.003831
def _find_append_zero_crossings(x, y):
    r"""
    Find and interpolate zero crossings.

    Estimate the zero crossings of an x,y series and add estimated
    crossings to series, returning a sorted array with no duplicate values.

    Parameters
    ----------
    x : `pint.Quantity`
        x values of data
    y : `pint.Quantity`
        y values of data

    Returns
    -------
    x : `pint.Quantity`
        x values of data
    y : `pint.Quantity`
        y values of data
    """
    # Find and append crossings to the data
    crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units)
    x = concatenate((x, crossings[0]))
    y = concatenate((y, crossings[1]))

    # Resort so that data are in order
    sort_idx = np.argsort(x)
    x = x[sort_idx]
    y = y[sort_idx]

    # Remove duplicate data points if there are any
    keep_idx = np.ediff1d(x, to_end=[1]) > 0
    x = x[keep_idx]
    y = y[keep_idx]
    return x, y
[ "def", "_find_append_zero_crossings", "(", "x", ",", "y", ")", ":", "# Find and append crossings to the data", "crossings", "=", "find_intersections", "(", "x", "[", "1", ":", "]", ",", "y", "[", "1", ":", "]", ",", "np", ".", "zeros_like", "(", "y", "[", "1", ":", "]", ")", "*", "y", ".", "units", ")", "x", "=", "concatenate", "(", "(", "x", ",", "crossings", "[", "0", "]", ")", ")", "y", "=", "concatenate", "(", "(", "y", ",", "crossings", "[", "1", "]", ")", ")", "# Resort so that data are in order", "sort_idx", "=", "np", ".", "argsort", "(", "x", ")", "x", "=", "x", "[", "sort_idx", "]", "y", "=", "y", "[", "sort_idx", "]", "# Remove duplicate data points if there are any", "keep_idx", "=", "np", ".", "ediff1d", "(", "x", ",", "to_end", "=", "[", "1", "]", ")", ">", "0", "x", "=", "x", "[", "keep_idx", "]", "y", "=", "y", "[", "keep_idx", "]", "return", "x", ",", "y" ]
avg_line_len: 24.945946
score: 0.003128
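The heavy lifting above is done by metpy's find_intersections against a zero baseline. A plain-NumPy sketch of the same linear interpolation of sign changes (units and the metpy helpers omitted):

import numpy as np

def interp_zero_crossings(x, y):
    # indices i where y changes sign between i and i+1 (exact zeros excluded)
    idx = np.nonzero(np.sign(y[:-1]) * np.sign(y[1:]) < 0)[0]
    # the line through (x_i, y_i) and (x_{i+1}, y_{i+1}) hits zero at
    # x_i - y_i * (x_{i+1} - x_i) / (y_{i+1} - y_i)
    return x[idx] - y[idx] * (x[idx + 1] - x[idx]) / (y[idx + 1] - y[idx])

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([-1.0, 1.0, 1.0, -3.0])
print(interp_zero_crossings(x, y))  # [0.5  2.25]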
def update(self):
    """Update |KInz| based on |HInz| and |LAI|.

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> nhru(2)
    >>> hinz(0.2)
    >>> lai.acker_jun = 1.0
    >>> lai.vers_dec = 2.0
    >>> derived.kinz.update()
    >>> from hydpy import round_
    >>> round_(derived.kinz.acker_jun)
    0.2
    >>> round_(derived.kinz.vers_dec)
    0.4
    """
    con = self.subpars.pars.control
    self(con.hinz*con.lai)
[ "def", "update", "(", "self", ")", ":", "con", "=", "self", ".", "subpars", ".", "pars", ".", "control", "self", "(", "con", ".", "hinz", "*", "con", ".", "lai", ")" ]
avg_line_len: 27.666667
score: 0.003883
def main():
    """ Entry point """
    client_1 = MessageBot("verne", "Jules Verne")
    client_1.start()
    client_1.connect("127.0.0.1")
    client_2 = MessageBot("adams", "Douglas Adams")
    client_2.start()
    client_2.connect("127.0.0.1")
    herald_1 = Herald(client_1)
    herald_1.start()
    herald_2 = Herald(client_2)
    herald_2.start()
    handler = LogHandler()
    herald_1.register('/toto/*', handler)
    herald_2.register('/toto/*', handler)
    cmd = HeraldBot("bot", "Robotnik", herald_1)
    cmd.connect("127.0.0.1")
    cmd.wait_stop()
    for closable in (client_1, client_2, herald_1, herald_2):
        closable.close()
    logging.info("Bye !")
[ "def", "main", "(", ")", ":", "client_1", "=", "MessageBot", "(", "\"verne\"", ",", "\"Jules Verne\"", ")", "client_1", ".", "start", "(", ")", "client_1", ".", "connect", "(", "\"127.0.0.1\"", ")", "client_2", "=", "MessageBot", "(", "\"adams\"", ",", "\"Douglas Adams\"", ")", "client_2", ".", "start", "(", ")", "client_2", ".", "connect", "(", "\"127.0.0.1\"", ")", "herald_1", "=", "Herald", "(", "client_1", ")", "herald_1", ".", "start", "(", ")", "herald_2", "=", "Herald", "(", "client_2", ")", "herald_2", ".", "start", "(", ")", "handler", "=", "LogHandler", "(", ")", "herald_1", ".", "register", "(", "'/toto/*'", ",", "handler", ")", "herald_2", ".", "register", "(", "'/toto/*'", ",", "handler", ")", "cmd", "=", "HeraldBot", "(", "\"bot\"", ",", "\"Robotnik\"", ",", "herald_1", ")", "cmd", ".", "connect", "(", "\"127.0.0.1\"", ")", "cmd", ".", "wait_stop", "(", ")", "for", "closable", "in", "(", "client_1", ",", "client_2", ",", "herald_1", ",", "herald_2", ")", ":", "closable", ".", "close", "(", ")", "logging", ".", "info", "(", "\"Bye !\"", ")" ]
avg_line_len: 21.322581
score: 0.001447
def get_patient_pharmacies(self, patient_id, patients_favorite_only='N'):
    """
    invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT
    action
    :return: JSON response
    """
    magic = self._magic_json(
        action=TouchWorksMagicConstants.ACTION_GET_PATIENT_PHARAMCIES,
        patient_id=patient_id,
        parameter1=patients_favorite_only)
    response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
    result = self._get_results_or_raise_if_magic_invalid(
        magic,
        response,
        TouchWorksMagicConstants.RESULT_GET_PATIENT_PHARAMCIES)
    return result
[ "def", "get_patient_pharmacies", "(", "self", ",", "patient_id", ",", "patients_favorite_only", "=", "'N'", ")", ":", "magic", "=", "self", ".", "_magic_json", "(", "action", "=", "TouchWorksMagicConstants", ".", "ACTION_GET_PATIENT_PHARAMCIES", ",", "patient_id", "=", "patient_id", ",", "parameter1", "=", "patients_favorite_only", ")", "response", "=", "self", ".", "_http_request", "(", "TouchWorksEndPoints", ".", "MAGIC_JSON", ",", "data", "=", "magic", ")", "result", "=", "self", ".", "_get_results_or_raise_if_magic_invalid", "(", "magic", ",", "response", ",", "TouchWorksMagicConstants", ".", "RESULT_GET_PATIENT_PHARAMCIES", ")", "return", "result" ]
avg_line_len: 43.5
score: 0.007032
def format_runlog(runlog: List[Mapping[str, Any]]) -> str:
    """
    Format a run log (return value of :py:meth:`simulate``) into a
    human-readable string

    :param runlog: The output of a call to :py:func:`simulate`
    """
    to_ret = []
    for command in runlog:
        to_ret.append(
            '\t' * command['level']
            + command['payload'].get('text', '').format(**command['payload']))
        if command['logs']:
            to_ret.append('\t' * command['level'] + 'Logs from this command:')
            to_ret.extend(
                ['\t' * command['level'] + f'{l.levelname} ({l.module}): {l.msg}' % l.args
                 for l in command['logs']])
    return '\n'.join(to_ret)
[ "def", "format_runlog", "(", "runlog", ":", "List", "[", "Mapping", "[", "str", ",", "Any", "]", "]", ")", "->", "str", ":", "to_ret", "=", "[", "]", "for", "command", "in", "runlog", ":", "to_ret", ".", "append", "(", "'\\t'", "*", "command", "[", "'level'", "]", "+", "command", "[", "'payload'", "]", ".", "get", "(", "'text'", ",", "''", ")", ".", "format", "(", "*", "*", "command", "[", "'payload'", "]", ")", ")", "if", "command", "[", "'logs'", "]", ":", "to_ret", ".", "append", "(", "'\\t'", "*", "command", "[", "'level'", "]", "+", "'Logs from this command:'", ")", "to_ret", ".", "extend", "(", "[", "'\\t'", "*", "command", "[", "'level'", "]", "+", "f'{l.levelname} ({l.module}): {l.msg}'", "%", "l", ".", "args", "for", "l", "in", "command", "[", "'logs'", "]", "]", ")", "return", "'\\n'", ".", "join", "(", "to_ret", ")" ]
avg_line_len: 37.315789
score: 0.002751
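A hypothetical minimal runlog entry, its shape inferred from the accesses in the function above (level, payload with a 'text' template, logs):

runlog = [{'level': 0,
           'payload': {'text': 'Aspirating {volume} uL', 'volume': 50},
           'logs': []}]
print(format_runlog(runlog))  # Aspirating 50 uL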
def to_JSON(self):
    """Dumps object fields into a JSON formatted string

    :returns: the JSON string
    """
    return json.dumps({"reference_time": self._reference_time,
                       "location": json.loads(self._location.to_JSON()),
                       "interval": self._interval,
                       "co_samples": self._co_samples,
                       "reception_time": self._reception_time,
                       })
[ "def", "to_JSON", "(", "self", ")", ":", "return", "json", ".", "dumps", "(", "{", "\"reference_time\"", ":", "self", ".", "_reference_time", ",", "\"location\"", ":", "json", ".", "loads", "(", "self", ".", "_location", ".", "to_JSON", "(", ")", ")", ",", "\"interval\"", ":", "self", ".", "_interval", ",", "\"co_samples\"", ":", "self", ".", "_co_samples", ",", "\"reception_time\"", ":", "self", ".", "_reception_time", ",", "}", ")" ]
avg_line_len: 39.25
score: 0.004149
def tiny_integer(self, column, auto_increment=False, unsigned=False):
    """
    Create a new tiny integer column on the table.

    :param column: The column
    :type column: str

    :type auto_increment: bool

    :type unsigned: bool

    :rtype: Fluent
    """
    return self._add_column('tiny_integer', column,
                            auto_increment=auto_increment, unsigned=unsigned)
[ "def", "tiny_integer", "(", "self", ",", "column", ",", "auto_increment", "=", "False", ",", "unsigned", "=", "False", ")", ":", "return", "self", ".", "_add_column", "(", "'tiny_integer'", ",", "column", ",", "auto_increment", "=", "auto_increment", ",", "unsigned", "=", "unsigned", ")" ]
avg_line_len: 28.375
score: 0.004264
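Assumed fluent usage for tiny_integer, in the schema-builder style the :rtype: Fluent hints at (names unverified against the library, so kept as a commented sketch):

# with schema.create('users') as table:
#     table.increments('id')
#     table.tiny_integer('age', unsigned=True)   # e.g. TINYINT UNSIGNED on MySQL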
def _init_context(self, gene_seq):
    """Initializes attributes defining mutation contexts and their position.

    The self.context2pos and self.pos2context dictionaries map from
    sequence context to sequence position and sequence position to
    sequence context, respectively. These attributes allow for randomly
    sampling of mutation positions while respecting sequence context in
    the randomization-based test.

    Parameters
    ----------
    gene_seq : GeneSequence
        GeneSequence object from the gene_sequence module
    """
    self.context2pos, self.pos2context = {}, {}
    gene_len = len(gene_seq.exon_seq)  # get length of CDS
    five_ss_len = 2*len(gene_seq.five_prime_seq)  # total length of 5' splice sites
    three_ss_len = 2*len(gene_seq.three_prime_seq)  # total length of 3' splice sites
    if gene_seq.nuc_context in [1, 2]:
        # case where context matters
        index_context = int(gene_seq.nuc_context) - 1  # subtract 1 since python is zero-based index
        for i in range(index_context, gene_len):
            nucs = gene_seq.exon_seq[i-index_context:i+1]
            self.context2pos.setdefault(nucs, [])
            self.context2pos[nucs].append(i)
            self.pos2context[i] = nucs

        # sequence context for five prime splice site
        for i, five_ss in enumerate(gene_seq.five_prime_seq):
            first_nucs = five_ss[1-index_context:1+1]
            second_nucs = five_ss[2-index_context:2+1]
            first_pos = 2*i + gene_len
            second_pos = 2*i + gene_len + 1
            self.context2pos.setdefault(first_nucs, [])
            self.context2pos[first_nucs].append(first_pos)
            self.context2pos.setdefault(second_nucs, [])
            self.context2pos[second_nucs].append(second_pos)
            self.pos2context[first_pos] = first_nucs
            self.pos2context[second_pos] = second_nucs
        # sequence context for three prime splice site
        for i, three_ss in enumerate(gene_seq.three_prime_seq):
            first_nucs = three_ss[1-index_context:1+1]
            second_nucs = three_ss[2-index_context:2+1]
            first_pos = 2*i + gene_len + five_ss_len
            second_pos = 2*i + gene_len + five_ss_len + 1
            self.context2pos.setdefault(first_nucs, [])
            self.context2pos[first_nucs].append(first_pos)
            self.context2pos.setdefault(second_nucs, [])
            self.context2pos[second_nucs].append(second_pos)
            self.pos2context[first_pos] = first_nucs
            self.pos2context[second_pos] = second_nucs

        # hack solution for context for first nuc
        if gene_seq.exon_seq and gene_seq.nuc_context > 1:
            self.pos2context[0] = gene_seq.exon_seq[0] * 2
            self.context2pos.setdefault(gene_seq.exon_seq[0]*2, [])
            self.context2pos[gene_seq.exon_seq[0]*2].append(0)
    elif gene_seq.nuc_context in [1.5, 3]:
        # use the nucleotide context from chasm if nuc
        # context is 1.5 otherwise always use a three
        # nucleotide context
        ncontext = gene_seq.nuc_context
        for i in range(1, len(gene_seq.exon_seq)-1):
            nucs = gene_seq.exon_seq[i-1:i+2]
            if ncontext == 1.5:
                context = prob2020.python.mutation_context.get_chasm_context(nucs)
            else:
                context = nucs
            self.context2pos.setdefault(context, [])
            self.context2pos[context].append(i)
            self.pos2context[i] = context

        # sequence context for five prime splice site
        for i, five_ss in enumerate(gene_seq.five_prime_seq):
            first_nucs = five_ss[:3]
            second_nucs = five_ss[1:4]
            first_pos = 2*i + gene_len
            second_pos = 2*i + gene_len + 1
            if ncontext == 1.5:
                first_context = prob2020.python.mutation_context.get_chasm_context(first_nucs)
                second_context = prob2020.python.mutation_context.get_chasm_context(second_nucs)
            else:
                first_context = first_nucs
                second_context = second_nucs
            self.context2pos.setdefault(first_context, [])
            self.context2pos[first_context].append(first_pos)
            self.context2pos.setdefault(second_context, [])
            self.context2pos[second_context].append(second_pos)
            self.pos2context[first_pos] = first_context
            self.pos2context[second_pos] = second_context
        # sequence context for three prime splice site
        for i, three_ss in enumerate(gene_seq.three_prime_seq):
            first_nucs = three_ss[:3]
            second_nucs = three_ss[1:4]
            first_pos = 2*i + gene_len + five_ss_len
            second_pos = 2*i + gene_len + five_ss_len + 1
            if ncontext == 1.5:
                first_context = prob2020.python.mutation_context.get_chasm_context(first_nucs)
                second_context = prob2020.python.mutation_context.get_chasm_context(second_nucs)
            else:
                first_context = first_nucs
                second_context = second_nucs
            self.context2pos.setdefault(first_context, [])
            self.context2pos[first_context].append(first_pos)
            self.context2pos.setdefault(second_context, [])
            self.context2pos[second_context].append(second_pos)
            self.pos2context[first_pos] = first_context
            self.pos2context[second_pos] = second_context

        # hack solution for context for first nuc
        if gene_seq.exon_seq:
            first_nuc = gene_seq.exon_seq[0] + gene_seq.exon_seq[:2]
            if ncontext == 1.5:
                first_context = prob2020.python.mutation_context.get_chasm_context(first_nuc)
            else:
                first_context = first_nuc
            self.pos2context[0] = first_context
            self.context2pos.setdefault(first_context, [])
            self.context2pos[first_context].append(0)
            last_nuc = gene_seq.exon_seq[-2:] + gene_seq.exon_seq[-1]
            if ncontext == 1.5:
                last_context = prob2020.python.mutation_context.get_chasm_context(last_nuc)
            else:
                last_context = last_nuc
            last_pos = len(gene_seq.exon_seq) - 1
            self.pos2context[last_pos] = first_context
            self.context2pos.setdefault(last_context, [])
            self.context2pos[last_context].append(last_pos)
    else:
        # case where there is no context,
        # mutations occur with uniform probability at each
        # position
        for i in range(gene_len + five_ss_len + three_ss_len):
            self.pos2context[i] = 'None'
        self.context2pos['None'] = range(gene_len + five_ss_len + three_ss_len)
[ "def", "_init_context", "(", "self", ",", "gene_seq", ")", ":", "self", ".", "context2pos", ",", "self", ".", "pos2context", "=", "{", "}", ",", "{", "}", "gene_len", "=", "len", "(", "gene_seq", ".", "exon_seq", ")", "# get length of CDS", "five_ss_len", "=", "2", "*", "len", "(", "gene_seq", ".", "five_prime_seq", ")", "# total length of 5' splice sites", "three_ss_len", "=", "2", "*", "len", "(", "gene_seq", ".", "three_prime_seq", ")", "# total length of 3' splice sites", "if", "gene_seq", ".", "nuc_context", "in", "[", "1", ",", "2", "]", ":", "# case where context matters", "index_context", "=", "int", "(", "gene_seq", ".", "nuc_context", ")", "-", "1", "# subtract 1 since python is zero-based index", "for", "i", "in", "range", "(", "index_context", ",", "gene_len", ")", ":", "nucs", "=", "gene_seq", ".", "exon_seq", "[", "i", "-", "index_context", ":", "i", "+", "1", "]", "self", ".", "context2pos", ".", "setdefault", "(", "nucs", ",", "[", "]", ")", "self", ".", "context2pos", "[", "nucs", "]", ".", "append", "(", "i", ")", "self", ".", "pos2context", "[", "i", "]", "=", "nucs", "# sequence context for five prime splice site", "for", "i", ",", "five_ss", "in", "enumerate", "(", "gene_seq", ".", "five_prime_seq", ")", ":", "first_nucs", "=", "five_ss", "[", "1", "-", "index_context", ":", "1", "+", "1", "]", "second_nucs", "=", "five_ss", "[", "2", "-", "index_context", ":", "2", "+", "1", "]", "first_pos", "=", "2", "*", "i", "+", "gene_len", "second_pos", "=", "2", "*", "i", "+", "gene_len", "+", "1", "self", ".", "context2pos", ".", "setdefault", "(", "first_nucs", ",", "[", "]", ")", "self", ".", "context2pos", "[", "first_nucs", "]", ".", "append", "(", "first_pos", ")", "self", ".", "context2pos", ".", "setdefault", "(", "second_nucs", ",", "[", "]", ")", "self", ".", "context2pos", "[", "second_nucs", "]", ".", "append", "(", "second_pos", ")", "self", ".", "pos2context", "[", "first_pos", "]", "=", "first_nucs", "self", ".", "pos2context", "[", "second_pos", "]", "=", "second_nucs", "# sequence context for three prime splice site", "for", "i", ",", "three_ss", "in", "enumerate", "(", "gene_seq", ".", "three_prime_seq", ")", ":", "first_nucs", "=", "three_ss", "[", "1", "-", "index_context", ":", "1", "+", "1", "]", "second_nucs", "=", "three_ss", "[", "2", "-", "index_context", ":", "2", "+", "1", "]", "first_pos", "=", "2", "*", "i", "+", "gene_len", "+", "five_ss_len", "second_pos", "=", "2", "*", "i", "+", "gene_len", "+", "five_ss_len", "+", "1", "self", ".", "context2pos", ".", "setdefault", "(", "first_nucs", ",", "[", "]", ")", "self", ".", "context2pos", "[", "first_nucs", "]", ".", "append", "(", "first_pos", ")", "self", ".", "context2pos", ".", "setdefault", "(", "second_nucs", ",", "[", "]", ")", "self", ".", "context2pos", "[", "second_nucs", "]", ".", "append", "(", "second_pos", ")", "self", ".", "pos2context", "[", "first_pos", "]", "=", "first_nucs", "self", ".", "pos2context", "[", "second_pos", "]", "=", "second_nucs", "# hack solution for context for first nuc", "if", "gene_seq", ".", "exon_seq", "and", "gene_seq", ".", "nuc_context", ">", "1", ":", "self", ".", "pos2context", "[", "0", "]", "=", "gene_seq", ".", "exon_seq", "[", "0", "]", "*", "2", "self", ".", "context2pos", ".", "setdefault", "(", "gene_seq", ".", "exon_seq", "[", "0", "]", "*", "2", ",", "[", "]", ")", "self", ".", "context2pos", "[", "gene_seq", ".", "exon_seq", "[", "0", "]", "*", "2", "]", ".", "append", "(", "0", ")", "elif", "gene_seq", ".", "nuc_context", "in", "[", "1.5", 
",", "3", "]", ":", "# use the nucleotide context from chasm if nuc", "# context is 1.5 otherwise always use a three", "# nucleotide context", "ncontext", "=", "gene_seq", ".", "nuc_context", "for", "i", "in", "range", "(", "1", ",", "len", "(", "gene_seq", ".", "exon_seq", ")", "-", "1", ")", ":", "nucs", "=", "gene_seq", ".", "exon_seq", "[", "i", "-", "1", ":", "i", "+", "2", "]", "if", "ncontext", "==", "1.5", ":", "context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "nucs", ")", "else", ":", "context", "=", "nucs", "self", ".", "context2pos", ".", "setdefault", "(", "context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "context", "]", ".", "append", "(", "i", ")", "self", ".", "pos2context", "[", "i", "]", "=", "context", "# sequence context for five prime splice site", "for", "i", ",", "five_ss", "in", "enumerate", "(", "gene_seq", ".", "five_prime_seq", ")", ":", "first_nucs", "=", "five_ss", "[", ":", "3", "]", "second_nucs", "=", "five_ss", "[", "1", ":", "4", "]", "first_pos", "=", "2", "*", "i", "+", "gene_len", "second_pos", "=", "2", "*", "i", "+", "gene_len", "+", "1", "if", "ncontext", "==", "1.5", ":", "first_context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "first_nucs", ")", "second_context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "second_nucs", ")", "else", ":", "first_context", "=", "first_nucs", "second_context", "=", "second_nucs", "self", ".", "context2pos", ".", "setdefault", "(", "first_context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "first_context", "]", ".", "append", "(", "first_pos", ")", "self", ".", "context2pos", ".", "setdefault", "(", "second_context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "second_context", "]", ".", "append", "(", "second_pos", ")", "self", ".", "pos2context", "[", "first_pos", "]", "=", "first_context", "self", ".", "pos2context", "[", "second_pos", "]", "=", "second_context", "# sequence context for three prime splice site", "for", "i", ",", "three_ss", "in", "enumerate", "(", "gene_seq", ".", "three_prime_seq", ")", ":", "first_nucs", "=", "three_ss", "[", ":", "3", "]", "second_nucs", "=", "three_ss", "[", "1", ":", "4", "]", "first_pos", "=", "2", "*", "i", "+", "gene_len", "+", "five_ss_len", "second_pos", "=", "2", "*", "i", "+", "gene_len", "+", "five_ss_len", "+", "1", "if", "ncontext", "==", "1.5", ":", "first_context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "first_nucs", ")", "second_context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "second_nucs", ")", "else", ":", "first_context", "=", "first_nucs", "second_context", "=", "second_nucs", "self", ".", "context2pos", ".", "setdefault", "(", "first_context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "first_context", "]", ".", "append", "(", "first_pos", ")", "self", ".", "context2pos", ".", "setdefault", "(", "second_context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "second_context", "]", ".", "append", "(", "second_pos", ")", "self", ".", "pos2context", "[", "first_pos", "]", "=", "first_context", "self", ".", "pos2context", "[", "second_pos", "]", "=", "second_context", "# hack solution for context for first nuc", "if", "gene_seq", ".", "exon_seq", ":", "first_nuc", "=", "gene_seq", ".", "exon_seq", "[", "0", "]", "+", "gene_seq", ".", "exon_seq", "[", ":", "2", "]", "if", 
"ncontext", "==", "1.5", ":", "first_context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "first_nuc", ")", "else", ":", "first_context", "=", "first_nuc", "self", ".", "pos2context", "[", "0", "]", "=", "first_context", "self", ".", "context2pos", ".", "setdefault", "(", "first_context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "first_context", "]", ".", "append", "(", "0", ")", "last_nuc", "=", "gene_seq", ".", "exon_seq", "[", "-", "2", ":", "]", "+", "gene_seq", ".", "exon_seq", "[", "-", "1", "]", "if", "ncontext", "==", "1.5", ":", "last_context", "=", "prob2020", ".", "python", ".", "mutation_context", ".", "get_chasm_context", "(", "last_nuc", ")", "else", ":", "last_context", "=", "last_nuc", "last_pos", "=", "len", "(", "gene_seq", ".", "exon_seq", ")", "-", "1", "self", ".", "pos2context", "[", "last_pos", "]", "=", "first_context", "self", ".", "context2pos", ".", "setdefault", "(", "last_context", ",", "[", "]", ")", "self", ".", "context2pos", "[", "last_context", "]", ".", "append", "(", "last_pos", ")", "else", ":", "# case where there is no context,", "# mutations occur with uniform probability at each", "# position", "for", "i", "in", "range", "(", "gene_len", "+", "five_ss_len", "+", "three_ss_len", ")", ":", "self", ".", "pos2context", "[", "i", "]", "=", "'None'", "self", ".", "context2pos", "[", "'None'", "]", "=", "range", "(", "gene_len", "+", "five_ss_len", "+", "three_ss_len", ")" ]
52.426471
0.001927
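The `_init_context` record above maintains a pair of inverted indexes: `context2pos` (nucleotide context -> list of positions) and `pos2context` (position -> context). A minimal standalone sketch of that indexing idea, ignoring the splice-site and CHASM-context branches (the function and variable names here are illustrative, not from the source):

def build_context_maps(seq, ncontext=2):
    # Map each n-mer context to the positions where it ends, and each
    # position back to its context (a mutual inverted-index pair).
    context2pos, pos2context = {}, {}
    index_context = ncontext - 1  # subtract 1 since python is zero-based
    for i in range(index_context, len(seq)):
        nucs = seq[i - index_context:i + 1]
        context2pos.setdefault(nucs, []).append(i)
        pos2context[i] = nucs
    return context2pos, pos2context

c2p, p2c = build_context_maps("ATGCGA", ncontext=2)
print(c2p)      # {'AT': [1], 'TG': [2], 'GC': [3], 'CG': [4], 'GA': [5]}
print(p2c[3])   # 'GC'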
def get_groups_to_remove(self, current_roles, roles_to_remove): """Compute the groups to remove, given the user's current roles and the roles to remove. """ current_roles = set(current_roles) ret = {} roles_to_remove = set(roles_to_remove) tmp = set([]) # get sub roles of the role to remove that the user belongs to # if we remove a role, there is no reason to keep the sub roles for r in roles_to_remove: for sr in self._get_subroles(r): if sr not in roles_to_remove and sr in current_roles: tmp.add(sr) roles_to_remove = roles_to_remove.union(tmp) roles = current_roles.difference(set(roles_to_remove)) groups_roles = self._get_groups(roles) groups_roles_to_remove = self._get_groups(roles_to_remove) # if groups belongs to roles the user keeps, don't remove it for b in groups_roles_to_remove: if b in groups_roles: groups_roles_to_remove[b] = \ groups_roles_to_remove[b].difference(groups_roles[b]) return groups_roles_to_remove
[ "def", "get_groups_to_remove", "(", "self", ",", "current_roles", ",", "roles_to_remove", ")", ":", "current_roles", "=", "set", "(", "current_roles", ")", "ret", "=", "{", "}", "roles_to_remove", "=", "set", "(", "roles_to_remove", ")", "tmp", "=", "set", "(", "[", "]", ")", "# get sub roles of the role to remove that the user belongs to", "# if we remove a role, there is no reason to keep the sub roles", "for", "r", "in", "roles_to_remove", ":", "for", "sr", "in", "self", ".", "_get_subroles", "(", "r", ")", ":", "if", "sr", "not", "in", "roles_to_remove", "and", "sr", "in", "current_roles", ":", "tmp", ".", "add", "(", "sr", ")", "roles_to_remove", "=", "roles_to_remove", ".", "union", "(", "tmp", ")", "roles", "=", "current_roles", ".", "difference", "(", "set", "(", "roles_to_remove", ")", ")", "groups_roles", "=", "self", ".", "_get_groups", "(", "roles", ")", "groups_roles_to_remove", "=", "self", ".", "_get_groups", "(", "roles_to_remove", ")", "# if groups belongs to roles the user keeps, don't remove it", "for", "b", "in", "groups_roles_to_remove", ":", "if", "b", "in", "groups_roles", ":", "groups_roles_to_remove", "[", "b", "]", "=", "groups_roles_to_remove", "[", "b", "]", ".", "difference", "(", "groups_roles", "[", "b", "]", ")", "return", "groups_roles_to_remove" ]
41.62963
0.001739
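The same set arithmetic as `get_groups_to_remove`, restated as a runnable sketch with hard-coded role and group tables standing in for `self._get_subroles` and `self._get_groups`, and flattened to plain sets for brevity (all data here is invented):

SUBROLES = {'admin': {'editor'}, 'editor': set(), 'viewer': set()}
ROLE_GROUPS = {'admin': {'grp-admin'}, 'editor': {'grp-edit'},
               'viewer': {'grp-edit'}}

def groups_to_remove(current_roles, roles_to_remove):
    current, doomed = set(current_roles), set(roles_to_remove)
    # removing a role also removes its sub-roles that the user holds
    for r in list(doomed):
        doomed |= SUBROLES.get(r, set()) & current
    kept = current - doomed
    kept_groups = set().union(*(ROLE_GROUPS[r] for r in kept)) if kept else set()
    doomed_groups = set().union(*(ROLE_GROUPS[r] for r in doomed)) if doomed else set()
    # a group still granted by a kept role must survive
    return doomed_groups - kept_groups

print(groups_to_remove({'admin', 'editor', 'viewer'}, {'admin'}))
# {'grp-admin'} -- 'grp-edit' survives because 'viewer' still grants it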
def port_add(self, dpid, port, mac): """ :returns: the previously learned port if the MAC was already known (this may equal port); None otherwise """ old_port = self.mac_to_port[dpid].get(mac, None) self.mac_to_port[dpid][mac] = port if old_port is not None and old_port != port: LOG.debug('port_add: 0x%016x 0x%04x %s', dpid, port, haddr_to_str(mac)) return old_port
[ "def", "port_add", "(", "self", ",", "dpid", ",", "port", ",", "mac", ")", ":", "old_port", "=", "self", ".", "mac_to_port", "[", "dpid", "]", ".", "get", "(", "mac", ",", "None", ")", "self", ".", "mac_to_port", "[", "dpid", "]", "[", "mac", "]", "=", "port", "if", "old_port", "is", "not", "None", "and", "old_port", "!=", "port", ":", "LOG", ".", "debug", "(", "'port_add: 0x%016x 0x%04x %s'", ",", "dpid", ",", "port", ",", "haddr_to_str", "(", "mac", ")", ")", "return", "old_port" ]
32.846154
0.004556
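A standalone sketch of the MAC-learning behaviour above, with a plain dict standing in for the class's `mac_to_port` table and a string MAC in place of `haddr_to_str`:

from collections import defaultdict

mac_to_port = defaultdict(dict)          # dpid -> {mac: port}

def port_add(dpid, port, mac):
    # Return the previously learned port (may equal `port`), else None.
    old_port = mac_to_port[dpid].get(mac)
    mac_to_port[dpid][mac] = port
    if old_port is not None and old_port != port:
        print('host moved: dpid=0x%016x %s: %d -> %d' % (dpid, mac, old_port, port))
    return old_port

print(port_add(1, 3, 'aa:bb:cc:dd:ee:ff'))   # None -- first sighting
print(port_add(1, 5, 'aa:bb:cc:dd:ee:ff'))   # 3 -- the host moved to port 5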
def macf_period_find( times, mags, errs, fillgaps=0.0, filterwindow=11, forcetimebin=None, maxlags=None, maxacfpeaks=10, smoothacf=21, # set for Kepler-type LCs, see details below smoothfunc=_smooth_acf_savgol, smoothfunckwargs=None, magsarefluxes=False, sigclip=3.0, verbose=True, periodepsilon=0.1, # doesn't do anything, for consistent external API nworkers=None, # doesn't do anything, for consistent external API startp=None, # doesn't do anything, for consistent external API endp=None, # doesn't do anything, for consistent external API autofreq=None, # doesn't do anything, for consistent external API stepsize=None, # doesn't do anything, for consistent external API ): '''This finds periods using the McQuillan+ (2013a, 2014) ACF method. The kwargs from `periodepsilon` to `stepsize` don't do anything but are used to present a consistent API for all periodbase period-finders to an outside driver (e.g. the one in the checkplotserver). Parameters ---------- times,mags,errs : np.array The input magnitude/flux time-series to run the period-finding for. fillgaps : 'noiselevel' or float This sets what to use to fill in gaps in the time series. If this is 'noiselevel', will smooth the light curve using a point window size of `filterwindow` (this should be an odd integer), subtract the smoothed LC from the actual LC and estimate the RMS. This RMS will be used to fill in the gaps. Other useful values here are 0.0, and npnan. filterwindow : int The light curve's smoothing filter window size to use if `fillgaps='noiselevel'`. forcetimebin : None or float This is used to force a particular cadence in the light curve other than the automatically determined cadence. This effectively rebins the light curve to this cadence. This should be in the same time units as `times`. maxlags : None or int This is the maximum number of lags to calculate. If None, will calculate all lags. maxacfpeaks : int This is the maximum number of ACF peaks to use when finding the highest peak and obtaining a fit period. smoothacf : int This is the number of points to use as the window size when smoothing the ACF with the `smoothfunc`. This should be an odd integer value. If this is None, will not smooth the ACF, but this will probably lead to finding spurious peaks in a generally noisy ACF. For Kepler, a value between 21 and 51 seems to work fine. For ground based data, much larger values may be necessary: between 1001 and 2001 seem to work best for the HAT surveys. This is dependent on cadence, RMS of the light curve, the periods of the objects you're looking for, and finally, any correlated noise in the light curve. Make a plot of the smoothed/unsmoothed ACF vs. lag using the result dict of this function and the `plot_acf_results` function above to see the identified ACF peaks and what kind of smoothing might be needed. The value of `smoothacf` will also be used to figure out the interval to use when searching for local peaks in the ACF: this interval is 1/2 of the `smoothacf` value. smoothfunc : Python function This is the function that will be used to smooth the ACF. This should take at least one kwarg: 'windowsize'. Other kwargs can be passed in using a dict provided in `smoothfunckwargs`. By default, this uses a Savitzky-Golay filter, a Gaussian filter is also provided but not used. Another good option would be an actual low-pass filter (generated using scipy.signal?) to remove all high frequency noise from the ACF. smoothfunckwargs : dict or None The dict of optional kwargs to pass in to the `smoothfunc`.
magsarefluxes : bool If your input measurements in `mags` are actually fluxes instead of mags, set this to True. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. verbose : bool If True, will indicate progress and report errors. Returns ------- dict Returns a dict with results. dict['bestperiod'] is the estimated best period and dict['fitperiodrms'] is its estimated error. Other interesting things in the output include: - dict['acfresults']: all results from calculating the ACF. in particular, the unsmoothed ACF might be of interest: dict['acfresults']['acf'] and dict['acfresults']['lags']. - dict['lags'] and dict['acf'] contain the ACF after smoothing was applied. - dict['periods'] and dict['lspvals'] can be used to construct a pseudo-periodogram. - dict['naivebestperiod'] is obtained by multiplying the lag at the highest ACF peak with the cadence. This is usually close to the fit period (dict['fitbestperiod']), which is calculated by doing a fit to the lags vs. peak index relation as in McQuillan+ 2014. ''' # get the ACF acfres = autocorr_magseries( times, mags, errs, maxlags=maxlags, fillgaps=fillgaps, forcetimebin=forcetimebin, sigclip=sigclip, magsarefluxes=magsarefluxes, filterwindow=filterwindow, verbose=verbose ) xlags = acfres['lags'] # smooth the ACF if requested if smoothacf and isinstance(smoothacf, int) and smoothacf > 0: if smoothfunckwargs is None: sfkwargs = {'windowsize':smoothacf} else: sfkwargs = smoothfunckwargs.copy() sfkwargs.update({'windowsize':smoothacf}) xacf = smoothfunc(acfres['acf'], **sfkwargs) else: xacf = acfres['acf'] # get the relative peak heights and fit best lag peakres = _get_acf_peakheights(xlags, xacf, npeaks=maxacfpeaks, searchinterval=int(smoothacf/2)) # this is the best period's best ACF peak height bestlspval = peakres['bestpeakheight'] try: # get the fit best lag from a linear fit to the peak index vs time(peak # lag) function as in McQillian+ (2014) fity = npconcatenate(( [0.0, peakres['bestlag']], peakres['relpeaklags'][peakres['relpeaklags'] > peakres['bestlag']] )) fity = fity*acfres['cadence'] fitx = nparange(fity.size) fitcoeffs, fitcovar = nppolyfit(fitx, fity, 1, cov=True) # fit best period is the gradient of fit fitbestperiod = fitcoeffs[0] bestperiodrms = npsqrt(fitcovar[0,0]) # from the covariance matrix except Exception as e: LOGWARNING('linear fit to time at each peak lag ' 'value vs. 
peak number failed, ' 'naively calculated ACF period may not be accurate') fitcoeffs = nparray([npnan, npnan]) fitcovar = nparray([[npnan, npnan], [npnan, npnan]]) fitbestperiod = npnan bestperiodrms = npnan raise # calculate the naive best period using delta_tau = lag * cadence naivebestperiod = peakres['bestlag']*acfres['cadence'] if fitbestperiod < naivebestperiod: LOGWARNING('fit bestperiod = %.5f may be an alias, ' 'naively calculated bestperiod is = %.5f' % (fitbestperiod, naivebestperiod)) if npisfinite(fitbestperiod): bestperiod = fitbestperiod else: bestperiod = naivebestperiod return {'bestperiod':bestperiod, 'bestlspval':bestlspval, 'nbestpeaks':maxacfpeaks, # for compliance with the common pfmethod API 'nbestperiods':npconcatenate([ [fitbestperiod], peakres['relpeaklags'][1:maxacfpeaks]*acfres['cadence'] ]), 'nbestlspvals':peakres['maxacfs'][:maxacfpeaks], 'lspvals':xacf, 'periods':xlags*acfres['cadence'], 'acf':xacf, 'lags':xlags, 'method':'acf', 'naivebestperiod':naivebestperiod, 'fitbestperiod':fitbestperiod, 'fitperiodrms':bestperiodrms, 'periodfitcoeffs':fitcoeffs, 'periodfitcovar':fitcovar, 'kwargs':{'maxlags':maxlags, 'maxacfpeaks':maxacfpeaks, 'fillgaps':fillgaps, 'filterwindow':filterwindow, 'smoothacf':smoothacf, 'smoothfunckwargs':sfkwargs, 'magsarefluxes':magsarefluxes, 'sigclip':sigclip}, 'acfresults':acfres, 'acfpeaks':peakres}
[ "def", "macf_period_find", "(", "times", ",", "mags", ",", "errs", ",", "fillgaps", "=", "0.0", ",", "filterwindow", "=", "11", ",", "forcetimebin", "=", "None", ",", "maxlags", "=", "None", ",", "maxacfpeaks", "=", "10", ",", "smoothacf", "=", "21", ",", "# set for Kepler-type LCs, see details below", "smoothfunc", "=", "_smooth_acf_savgol", ",", "smoothfunckwargs", "=", "None", ",", "magsarefluxes", "=", "False", ",", "sigclip", "=", "3.0", ",", "verbose", "=", "True", ",", "periodepsilon", "=", "0.1", ",", "# doesn't do anything, for consistent external API", "nworkers", "=", "None", ",", "# doesn't do anything, for consistent external API", "startp", "=", "None", ",", "# doesn't do anything, for consistent external API", "endp", "=", "None", ",", "# doesn't do anything, for consistent external API", "autofreq", "=", "None", ",", "# doesn't do anything, for consistent external API", "stepsize", "=", "None", ",", "# doesn't do anything, for consistent external API", ")", ":", "# get the ACF", "acfres", "=", "autocorr_magseries", "(", "times", ",", "mags", ",", "errs", ",", "maxlags", "=", "maxlags", ",", "fillgaps", "=", "fillgaps", ",", "forcetimebin", "=", "forcetimebin", ",", "sigclip", "=", "sigclip", ",", "magsarefluxes", "=", "magsarefluxes", ",", "filterwindow", "=", "filterwindow", ",", "verbose", "=", "verbose", ")", "xlags", "=", "acfres", "[", "'lags'", "]", "# smooth the ACF if requested", "if", "smoothacf", "and", "isinstance", "(", "smoothacf", ",", "int", ")", "and", "smoothacf", ">", "0", ":", "if", "smoothfunckwargs", "is", "None", ":", "sfkwargs", "=", "{", "'windowsize'", ":", "smoothacf", "}", "else", ":", "sfkwargs", "=", "smoothfunckwargs", ".", "copy", "(", ")", "sfkwargs", ".", "update", "(", "{", "'windowsize'", ":", "smoothacf", "}", ")", "xacf", "=", "smoothfunc", "(", "acfres", "[", "'acf'", "]", ",", "*", "*", "sfkwargs", ")", "else", ":", "xacf", "=", "acfres", "[", "'acf'", "]", "# get the relative peak heights and fit best lag", "peakres", "=", "_get_acf_peakheights", "(", "xlags", ",", "xacf", ",", "npeaks", "=", "maxacfpeaks", ",", "searchinterval", "=", "int", "(", "smoothacf", "/", "2", ")", ")", "# this is the best period's best ACF peak height", "bestlspval", "=", "peakres", "[", "'bestpeakheight'", "]", "try", ":", "# get the fit best lag from a linear fit to the peak index vs time(peak", "# lag) function as in McQillian+ (2014)", "fity", "=", "npconcatenate", "(", "(", "[", "0.0", ",", "peakres", "[", "'bestlag'", "]", "]", ",", "peakres", "[", "'relpeaklags'", "]", "[", "peakres", "[", "'relpeaklags'", "]", ">", "peakres", "[", "'bestlag'", "]", "]", ")", ")", "fity", "=", "fity", "*", "acfres", "[", "'cadence'", "]", "fitx", "=", "nparange", "(", "fity", ".", "size", ")", "fitcoeffs", ",", "fitcovar", "=", "nppolyfit", "(", "fitx", ",", "fity", ",", "1", ",", "cov", "=", "True", ")", "# fit best period is the gradient of fit", "fitbestperiod", "=", "fitcoeffs", "[", "0", "]", "bestperiodrms", "=", "npsqrt", "(", "fitcovar", "[", "0", ",", "0", "]", ")", "# from the covariance matrix", "except", "Exception", "as", "e", ":", "LOGWARNING", "(", "'linear fit to time at each peak lag '", "'value vs. 
peak number failed, '", "'naively calculated ACF period may not be accurate'", ")", "fitcoeffs", "=", "nparray", "(", "[", "npnan", ",", "npnan", "]", ")", "fitcovar", "=", "nparray", "(", "[", "[", "npnan", ",", "npnan", "]", ",", "[", "npnan", ",", "npnan", "]", "]", ")", "fitbestperiod", "=", "npnan", "bestperiodrms", "=", "npnan", "raise", "# calculate the naive best period using delta_tau = lag * cadence", "naivebestperiod", "=", "peakres", "[", "'bestlag'", "]", "*", "acfres", "[", "'cadence'", "]", "if", "fitbestperiod", "<", "naivebestperiod", ":", "LOGWARNING", "(", "'fit bestperiod = %.5f may be an alias, '", "'naively calculated bestperiod is = %.5f'", "%", "(", "fitbestperiod", ",", "naivebestperiod", ")", ")", "if", "npisfinite", "(", "fitbestperiod", ")", ":", "bestperiod", "=", "fitbestperiod", "else", ":", "bestperiod", "=", "naivebestperiod", "return", "{", "'bestperiod'", ":", "bestperiod", ",", "'bestlspval'", ":", "bestlspval", ",", "'nbestpeaks'", ":", "maxacfpeaks", ",", "# for compliance with the common pfmethod API", "'nbestperiods'", ":", "npconcatenate", "(", "[", "[", "fitbestperiod", "]", ",", "peakres", "[", "'relpeaklags'", "]", "[", "1", ":", "maxacfpeaks", "]", "*", "acfres", "[", "'cadence'", "]", "]", ")", ",", "'nbestlspvals'", ":", "peakres", "[", "'maxacfs'", "]", "[", ":", "maxacfpeaks", "]", ",", "'lspvals'", ":", "xacf", ",", "'periods'", ":", "xlags", "*", "acfres", "[", "'cadence'", "]", ",", "'acf'", ":", "xacf", ",", "'lags'", ":", "xlags", ",", "'method'", ":", "'acf'", ",", "'naivebestperiod'", ":", "naivebestperiod", ",", "'fitbestperiod'", ":", "fitbestperiod", ",", "'fitperiodrms'", ":", "bestperiodrms", ",", "'periodfitcoeffs'", ":", "fitcoeffs", ",", "'periodfitcovar'", ":", "fitcovar", ",", "'kwargs'", ":", "{", "'maxlags'", ":", "maxlags", ",", "'maxacfpeaks'", ":", "maxacfpeaks", ",", "'fillgaps'", ":", "fillgaps", ",", "'filterwindow'", ":", "filterwindow", ",", "'smoothacf'", ":", "smoothacf", ",", "'smoothfunckwargs'", ":", "sfkwargs", ",", "'magsarefluxes'", ":", "magsarefluxes", ",", "'sigclip'", ":", "sigclip", "}", ",", "'acfresults'", ":", "acfres", ",", "'acfpeaks'", ":", "peakres", "}" ]
39.217742
0.004011
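The heart of the McQuillan-style period fit above is a straight line through (peak number, time of peak): the slope is the period, and its RMS error comes from the covariance matrix. A numpy-only sketch with invented peak lags and cadence:

import numpy as np

cadence = 0.0204                              # days; illustrative value
peak_lags = np.array([49, 99, 147, 196])      # invented ACF peak lags
fity = np.concatenate(([0.0], peak_lags)) * cadence   # times of the peaks
fitx = np.arange(fity.size)                           # peak numbers
coeffs, cov = np.polyfit(fitx, fity, 1, cov=True)
period, period_rms = coeffs[0], np.sqrt(cov[0, 0])
print('period = %.4f +/- %.4f days' % (period, period_rms))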
def find_equals_true_or_false(node): """Finds equals true or false""" return ( isinstance(node, ast.Compare) and len(node.ops) == 1 and isinstance(node.ops[0], ast.Eq) and any(h.is_boolean(n) for n in node.comparators) )
[ "def", "find_equals_true_or_false", "(", "node", ")", ":", "return", "(", "isinstance", "(", "node", ",", "ast", ".", "Compare", ")", "and", "len", "(", "node", ".", "ops", ")", "==", "1", "and", "isinstance", "(", "node", ".", "ops", "[", "0", "]", ",", "ast", ".", "Eq", ")", "and", "any", "(", "h", ".", "is_boolean", "(", "n", ")", "for", "n", "in", "node", ".", "comparators", ")", ")" ]
32.125
0.003788
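A self-contained demo of the AST check above; `is_boolean` here is a stand-in for the module's `h.is_boolean` (in Python 3.8+ the literals True/False parse to ast.Constant):

import ast

def is_boolean(node):
    return isinstance(node, ast.Constant) and isinstance(node.value, bool)

def find_equals_true_or_false(node):
    """Finds comparisons of the form `x == True` or `x == False`."""
    return (
        isinstance(node, ast.Compare)
        and len(node.ops) == 1
        and isinstance(node.ops[0], ast.Eq)
        and any(is_boolean(n) for n in node.comparators)
    )

print(find_equals_true_or_false(ast.parse("flag == True", mode="eval").body))  # True
print(find_equals_true_or_false(ast.parse("flag == 1", mode="eval").body))     # False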
def add_row(self, data: list): """ Add a row of buttons each with their own callbacks to the current widget. Each element in `data` will consist of a label and a command. :param data: a list of tuples of the form ('label', <callback>) :return: None """ # validation if self.headers and data: if len(self.headers) != len(data): raise ValueError offset = 0 if not self.headers else 1 row = list() for i, e in enumerate(data): if not isinstance(e, tuple): raise ValueError('all elements must be a tuple ' 'consisting of ("label", <command>)') label, command = e button = tk.Button(self, text=str(label), relief=tk.RAISED, command=command, padx=self.padding, pady=self.padding) button.grid(row=len(self._rows) + offset, column=i, sticky='ew') row.append(button) self._rows.append(row)
[ "def", "add_row", "(", "self", ",", "data", ":", "list", ")", ":", "# validation", "if", "self", ".", "headers", "and", "data", ":", "if", "len", "(", "self", ".", "headers", ")", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "offset", "=", "0", "if", "not", "self", ".", "headers", "else", "1", "row", "=", "list", "(", ")", "for", "i", ",", "e", "in", "enumerate", "(", "data", ")", ":", "if", "not", "isinstance", "(", "e", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'all elements must be a tuple '", "'consisting of (\"label\", <command>)'", ")", "label", ",", "command", "=", "e", "button", "=", "tk", ".", "Button", "(", "self", ",", "text", "=", "str", "(", "label", ")", ",", "relief", "=", "tk", ".", "RAISED", ",", "command", "=", "command", ",", "padx", "=", "self", ".", "padding", ",", "pady", "=", "self", ".", "padding", ")", "button", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'ew'", ")", "row", ".", "append", "(", "button", ")", "self", ".", "_rows", ".", "append", "(", "row", ")" ]
34.0625
0.001784
def resource(*args, loop=None, **kwargs): """ Create a resource service client by name using the default session. See :py:meth:`aioboto3.session.Session.resource`. """ return _get_default_session(loop=loop).resource(*args, **kwargs)
[ "def", "resource", "(", "*", "args", ",", "loop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_get_default_session", "(", "loop", "=", "loop", ")", ".", "resource", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
41.166667
0.003968
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False): """Query the LLDP TLVs on the given interface. """ reply = None if is_ncb: reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name, "-g", "ncb"]) elif is_nb: reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name, "-g", "nb"]) else: LOG.error("Both NCB and NB are not selected to " "query LLDP") return reply
[ "def", "get_lldp_tlv", "(", "self", ",", "port_name", ",", "is_ncb", "=", "True", ",", "is_nb", "=", "False", ")", ":", "reply", "=", "None", "if", "is_ncb", ":", "reply", "=", "self", ".", "run_lldptool", "(", "[", "\"get-tlv\"", ",", "\"-n\"", ",", "\"-i\"", ",", "port_name", ",", "\"-g\"", ",", "\"ncb\"", "]", ")", "elif", "is_nb", ":", "reply", "=", "self", ".", "run_lldptool", "(", "[", "\"get-tlv\"", ",", "\"-n\"", ",", "\"-i\"", ",", "port_name", ",", "\"-g\"", ",", "\"nb\"", "]", ")", "else", ":", "LOG", ".", "error", "(", "\"Both NCB and NB are not selected to \"", "\"query LLDP\"", ")", "return", "reply" ]
42.384615
0.003552
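`run_lldptool` is internal to that class; a plausible standalone equivalent shells out to the lldptool CLI with the same argument vector the method builds (assumes lldptool is installed; `eth0` is illustrative):

import subprocess

def run_lldptool(args):
    # Shell out to lldptool and capture its stdout as text.
    return subprocess.run(['lldptool'] + args,
                          capture_output=True, text=True).stdout

# Nearest-customer-bridge query, mirroring the is_ncb branch above:
reply = run_lldptool(['get-tlv', '-n', '-i', 'eth0', '-g', 'ncb'])
print(reply)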
def _overlapOK(self, i, j, overlap=None): """ Return True if the given overlap between bucket indices i and j is acceptable. If overlap is not specified, calculate it from the bucketMap """ if overlap is None: overlap = self._countOverlapIndices(i, j) if abs(i-j) < self.w: if overlap == (self.w - abs(i-j)): return True else: return False else: if overlap <= self._maxOverlap: return True else: return False
[ "def", "_overlapOK", "(", "self", ",", "i", ",", "j", ",", "overlap", "=", "None", ")", ":", "if", "overlap", "is", "None", ":", "overlap", "=", "self", ".", "_countOverlapIndices", "(", "i", ",", "j", ")", "if", "abs", "(", "i", "-", "j", ")", "<", "self", ".", "w", ":", "if", "overlap", "==", "(", "self", ".", "w", "-", "abs", "(", "i", "-", "j", ")", ")", ":", "return", "True", "else", ":", "return", "False", "else", ":", "if", "overlap", "<=", "self", ".", "_maxOverlap", ":", "return", "True", "else", ":", "return", "False" ]
28.411765
0.012024
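The overlap rule above, restated standalone with a worked check: buckets closer than `w` must share exactly `w - |i - j|` indices, while distant buckets may share at most `_maxOverlap` (the parameter values below are invented):

def overlap_ok(i, j, overlap, w, max_overlap):
    if abs(i - j) < w:
        return overlap == (w - abs(i - j))
    return overlap <= max_overlap

print(overlap_ok(3, 5, 19, w=21, max_overlap=2))    # True: 21 - |3-5| == 19
print(overlap_ok(3, 50, 3, w=21, max_overlap=2))    # False: 3 > max_overlap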
def _attributeStrs(self): """ Return name=value, semi-colon-separated string for attributes, including url-style quoting """ return ";".join([self._attributeStr(name) for name in self.attributes.iterkeys()])
[ "def", "_attributeStrs", "(", "self", ")", ":", "return", "\";\"", ".", "join", "(", "[", "self", ".", "_attributeStr", "(", "name", ")", "for", "name", "in", "self", ".", "attributes", ".", "iterkeys", "(", ")", "]", ")" ]
38
0.007353
def _normalize_metadata(metadata): """Normalize metadata to improve match accuracy.""" metadata = str(metadata) metadata = metadata.lower() metadata = re.sub(r'\/\s*\d+', '', metadata) # Remove "/<totaltracks>" from track number. metadata = re.sub(r'^0+([0-9]+)', r'\1', metadata) # Remove leading zero(s) from track number. metadata = re.sub(r'^\d+\.+', '', metadata) # Remove dots from track number. metadata = re.sub(r'[^\w\s]', '', metadata) # Remove any non-words. metadata = re.sub(r'\s+', ' ', metadata) # Reduce multiple spaces to a single space. metadata = re.sub(r'^\s+', '', metadata) # Remove leading space. metadata = re.sub(r'\s+$', '', metadata) # Remove trailing space. metadata = re.sub(r'^the\s+', '', metadata, re.I) # Remove leading "the". return metadata
[ "def", "_normalize_metadata", "(", "metadata", ")", ":", "metadata", "=", "str", "(", "metadata", ")", "metadata", "=", "metadata", ".", "lower", "(", ")", "metadata", "=", "re", ".", "sub", "(", "r'\\/\\s*\\d+'", ",", "''", ",", "metadata", ")", "# Remove \"/<totaltracks>\" from track number.", "metadata", "=", "re", ".", "sub", "(", "r'^0+([0-9]+)'", ",", "r'\\1'", ",", "metadata", ")", "# Remove leading zero(s) from track number.", "metadata", "=", "re", ".", "sub", "(", "r'^\\d+\\.+'", ",", "''", ",", "metadata", ")", "# Remove dots from track number.", "metadata", "=", "re", ".", "sub", "(", "r'[^\\w\\s]'", ",", "''", ",", "metadata", ")", "# Remove any non-words.", "metadata", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "metadata", ")", "# Reduce multiple spaces to a single space.", "metadata", "=", "re", ".", "sub", "(", "r'^\\s+'", ",", "''", ",", "metadata", ")", "# Remove leading space.", "metadata", "=", "re", ".", "sub", "(", "r'\\s+$'", ",", "''", ",", "metadata", ")", "# Remove trailing space.", "metadata", "=", "re", ".", "sub", "(", "r'^the\\s+'", ",", "''", ",", "metadata", ",", "re", ".", "I", ")", "# Remove leading \"the\".", "return", "metadata" ]
48.9375
0.02005
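One subtlety in the record above: the final substitution passes `re.I` as the fourth positional argument of `re.sub`, which is `count`, not `flags` (re.I == 2). The call still behaves because the string was already lower-cased, but the flag is silently ignored, as this small demo shows:

import re

print(re.sub(r'^the\s+', '', 'The Beatles', re.I))        # 'The Beatles' -- no match
print(re.sub(r'^the\s+', '', 'the beatles', re.I))        # 'beatles'
print(re.sub(r'^the\s+', '', 'The Beatles', flags=re.I))  # 'Beatles'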
def _intern( m: lmap.Map, sym: sym.Symbol, new_var: Var, force: bool = False ) -> lmap.Map: """Swap function used by intern to atomically intern a new variable in the symbol mapping for this Namespace.""" var = m.entry(sym, None) if var is None or force: return m.assoc(sym, new_var) return m
[ "def", "_intern", "(", "m", ":", "lmap", ".", "Map", ",", "sym", ":", "sym", ".", "Symbol", ",", "new_var", ":", "Var", ",", "force", ":", "bool", "=", "False", ")", "->", "lmap", ".", "Map", ":", "var", "=", "m", ".", "entry", "(", "sym", ",", "None", ")", "if", "var", "is", "None", "or", "force", ":", "return", "m", ".", "assoc", "(", "sym", ",", "new_var", ")", "return", "m" ]
38.666667
0.008427
def get_qubits(self, indices=True): """ Returns all of the qubit indices used in this program, including gate applications and allocated qubits. e.g. >>> p = Program() >>> p.inst(("H", 1)) >>> p.get_qubits() {1} >>> q = p.alloc() >>> p.inst(H(q)) >>> len(p.get_qubits()) 2 :param indices: Return qubit indices as integers instead of the wrapping :py:class:`Qubit` object :return: A set of all the qubit indices used in this program :rtype: set """ qubits = set() for instr in self.instructions: if isinstance(instr, (Gate, Measurement)): qubits |= instr.get_qubits(indices=indices) return qubits
[ "def", "get_qubits", "(", "self", ",", "indices", "=", "True", ")", ":", "qubits", "=", "set", "(", ")", "for", "instr", "in", "self", ".", "instructions", ":", "if", "isinstance", "(", "instr", ",", "(", "Gate", ",", "Measurement", ")", ")", ":", "qubits", "|=", "instr", ".", "get_qubits", "(", "indices", "=", "indices", ")", "return", "qubits" ]
32.875
0.003695
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec: """ Further reading: `CF Broker API#Deprovisioning <https://docs.cloudfoundry.org/services/api.html#deprovisioning>`_ :param instance_id: Instance id provided by the platform :param details: Details about the service to delete :param async_allowed: Client allows async creation :rtype: DeprovisionServiceSpec :raises ErrInstanceDoesNotExist: If the instance does not exist :raises ErrAsyncRequired: If async is required but not supported """ raise NotImplementedError()
[ "def", "deprovision", "(", "self", ",", "instance_id", ":", "str", ",", "details", ":", "DeprovisionDetails", ",", "async_allowed", ":", "bool", ")", "->", "DeprovisionServiceSpec", ":", "raise", "NotImplementedError", "(", ")" ]
51.692308
0.005848
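A hypothetical override sketch for that abstract method; `self._instances` is an assumed in-memory store, the exception and spec classes are the ones named in the docstring, and the `is_async` constructor argument is an assumption about DeprovisionServiceSpec:

def deprovision(self, instance_id, details, async_allowed):
    instance = self._instances.get(instance_id)          # assumed backend store
    if instance is None:
        raise ErrInstanceDoesNotExist()
    if instance.get('requires_async') and not async_allowed:
        raise ErrAsyncRequired()
    del self._instances[instance_id]
    return DeprovisionServiceSpec(is_async=False)        # assumed signature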
def _is_valid_templates_dict(policy_templates_dict, schema=None): """ Is this a valid policy template dictionary :param dict policy_templates_dict: Data to be validated :param dict schema: Optional, dictionary containing JSON Schema representing policy template :return: True, if it is valid. :raises ValueError: If the template dictionary doesn't match up with the schema """ if not schema: schema = PolicyTemplatesProcessor._read_schema() try: jsonschema.validate(policy_templates_dict, schema) except ValidationError as ex: # Stringifying the exception will give us useful error message raise ValueError(str(ex)) return True
[ "def", "_is_valid_templates_dict", "(", "policy_templates_dict", ",", "schema", "=", "None", ")", ":", "if", "not", "schema", ":", "schema", "=", "PolicyTemplatesProcessor", ".", "_read_schema", "(", ")", "try", ":", "jsonschema", ".", "validate", "(", "policy_templates_dict", ",", "schema", ")", "except", "ValidationError", "as", "ex", ":", "# Stringifying the exception will give us useful error message", "raise", "ValueError", "(", "str", "(", "ex", ")", ")", "return", "True" ]
37.4
0.005215
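A runnable sketch of the same jsonschema pattern with a toy schema (the schema here is illustrative, not the real policy-template schema):

from jsonschema import validate, ValidationError

schema = {'type': 'object', 'required': ['Templates']}
try:
    validate({'NotTemplates': {}}, schema)
    print('valid')
except ValidationError as ex:
    # Stringifying the exception gives a useful error message.
    print('invalid:', str(ex).splitlines()[0])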
def json(self, data): """Set the POST/PUT body content in JSON format for this request.""" if data is not None: self._body = json.dumps(data) self.add_header('Content-Type', 'application/json')
[ "def", "json", "(", "self", ",", "data", ")", ":", "if", "data", "is", "not", "None", ":", "self", ".", "_body", "=", "json", ".", "dumps", "(", "data", ")", "self", ".", "add_header", "(", "'Content-Type'", ",", "'application/json'", ")" ]
45.8
0.008584
def get_policies_from_aws(client, scope='Local'): """Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the policies for the specified scope Args: client (:obj:`boto3.session.Session`): A boto3 Session object scope (`str`): The policy scope to use. Default: Local Returns: :obj:`list` of `dict` """ done = False marker = None policies = [] while not done: if marker: response = client.list_policies(Marker=marker, Scope=scope) else: response = client.list_policies(Scope=scope) policies += response['Policies'] if response['IsTruncated']: marker = response['Marker'] else: done = True return policies
[ "def", "get_policies_from_aws", "(", "client", ",", "scope", "=", "'Local'", ")", ":", "done", "=", "False", "marker", "=", "None", "policies", "=", "[", "]", "while", "not", "done", ":", "if", "marker", ":", "response", "=", "client", ".", "list_policies", "(", "Marker", "=", "marker", ",", "Scope", "=", "scope", ")", "else", ":", "response", "=", "client", ".", "list_policies", "(", "Scope", "=", "scope", ")", "policies", "+=", "response", "[", "'Policies'", "]", "if", "response", "[", "'IsTruncated'", "]", ":", "marker", "=", "response", "[", "'Marker'", "]", "else", ":", "done", "=", "True", "return", "policies" ]
29.965517
0.003344
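The Marker/IsTruncated pagination loop above, condensed and run against a stubbed client so the pattern works without AWS credentials (`FakeClient` is invented for the demo):

class FakeClient:
    def __init__(self, pages):
        self.pages = pages
    def list_policies(self, Scope='Local', Marker=None):
        i = int(Marker or 0)
        resp = {'Policies': self.pages[i],
                'IsTruncated': i + 1 < len(self.pages)}
        if resp['IsTruncated']:
            resp['Marker'] = str(i + 1)
        return resp

def collect_policies(client, scope='Local'):
    policies, marker = [], None
    while True:
        resp = (client.list_policies(Marker=marker, Scope=scope)
                if marker else client.list_policies(Scope=scope))
        policies += resp['Policies']
        if not resp['IsTruncated']:
            return policies
        marker = resp['Marker']

print(collect_policies(FakeClient([[{'PolicyName': 'a'}],
                                   [{'PolicyName': 'b'}]])))
# [{'PolicyName': 'a'}, {'PolicyName': 'b'}]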
def getStreamNetworkAsGeoJson(self, session, withNodes=True): """ Retrieve the stream network geometry in GeoJSON format. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database withNodes (bool, optional): Include nodes. Defaults to True. Returns: str: GeoJSON string. """ features_list = [] # Assemble link features for link in self.streamLinks: link_geoJson = link.getAsGeoJson(session) if link_geoJson: link_geometry = json.loads(link.getAsGeoJson(session)) link_properties = {"link_number": link.linkNumber, "type": link.type, "num_elements": link.numElements, "dx": link.dx, "erode": link.erode, "subsurface": link.subsurface} link_feature = {"type": "Feature", "geometry": link_geometry, "properties": link_properties, "id": link.id} features_list.append(link_feature) # Assemble node features if withNodes: for node in link.nodes: node_geoJson = node.getAsGeoJson(session) if node_geoJson: node_geometry = json.loads(node_geoJson) node_properties = {"link_number": link.linkNumber, "node_number": node.nodeNumber, "elevation": node.elevation} node_feature = {"type": "Feature", "geometry": node_geometry, "properties": node_properties, "id": node.id} features_list.append(node_feature) feature_collection = {"type": "FeatureCollection", "features": features_list} return json.dumps(feature_collection)
[ "def", "getStreamNetworkAsGeoJson", "(", "self", ",", "session", ",", "withNodes", "=", "True", ")", ":", "features_list", "=", "[", "]", "# Assemble link features", "for", "link", "in", "self", ".", "streamLinks", ":", "link_geoJson", "=", "link", ".", "getAsGeoJson", "(", "session", ")", "if", "link_geoJson", ":", "link_geometry", "=", "json", ".", "loads", "(", "link", ".", "getAsGeoJson", "(", "session", ")", ")", "link_properties", "=", "{", "\"link_number\"", ":", "link", ".", "linkNumber", ",", "\"type\"", ":", "link", ".", "type", ",", "\"num_elements\"", ":", "link", ".", "numElements", ",", "\"dx\"", ":", "link", ".", "dx", ",", "\"erode\"", ":", "link", ".", "erode", ",", "\"subsurface\"", ":", "link", ".", "subsurface", "}", "link_feature", "=", "{", "\"type\"", ":", "\"Feature\"", ",", "\"geometry\"", ":", "link_geometry", ",", "\"properties\"", ":", "link_properties", ",", "\"id\"", ":", "link", ".", "id", "}", "features_list", ".", "append", "(", "link_feature", ")", "# Assemble node features", "if", "withNodes", ":", "for", "node", "in", "link", ".", "nodes", ":", "node_geoJson", "=", "node", ".", "getAsGeoJson", "(", "session", ")", "if", "node_geoJson", ":", "node_geometry", "=", "json", ".", "loads", "(", "node_geoJson", ")", "node_properties", "=", "{", "\"link_number\"", ":", "link", ".", "linkNumber", ",", "\"node_number\"", ":", "node", ".", "nodeNumber", ",", "\"elevation\"", ":", "node", ".", "elevation", "}", "node_feature", "=", "{", "\"type\"", ":", "\"Feature\"", ",", "\"geometry\"", ":", "node_geometry", ",", "\"properties\"", ":", "node_properties", ",", "\"id\"", ":", "node", ".", "id", "}", "features_list", ".", "append", "(", "node_feature", ")", "feature_collection", "=", "{", "\"type\"", ":", "\"FeatureCollection\"", ",", "\"features\"", ":", "features_list", "}", "return", "json", ".", "dumps", "(", "feature_collection", ")" ]
38.263158
0.001341
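The GeoJSON envelope the method assembles, shown with one invented link feature (the coordinates, the 'TRAPEZOID' type value, and the other property values are made up):

import json

link_feature = {
    'type': 'Feature',
    'geometry': {'type': 'LineString',
                 'coordinates': [[-122.40, 45.50], [-122.41, 45.51]]},
    'properties': {'link_number': 1, 'type': 'TRAPEZOID', 'num_elements': 10,
                   'dx': 90.0, 'erode': False, 'subsurface': False},
    'id': 1,
}
feature_collection = {'type': 'FeatureCollection', 'features': [link_feature]}
print(json.dumps(feature_collection))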
def periodrec_worker(task): '''This is a parallel worker for running period-recovery. Parameters ---------- task : tuple This is used to pass args to the `periodicvar_recovery` function:: task[0] = period-finding result pickle to work on task[1] = simbasedir task[2] = period_tolerance Returns ------- dict This is the dict produced by the `periodicvar_recovery` function for the input period-finding result pickle. ''' pfpkl, simbasedir, period_tolerance = task try: return periodicvar_recovery(pfpkl, simbasedir, period_tolerance=period_tolerance) except Exception as e: LOGEXCEPTION('periodic var recovery failed for %s' % repr(task)) return None
[ "def", "periodrec_worker", "(", "task", ")", ":", "pfpkl", ",", "simbasedir", ",", "period_tolerance", "=", "task", "try", ":", "return", "periodicvar_recovery", "(", "pfpkl", ",", "simbasedir", ",", "period_tolerance", "=", "period_tolerance", ")", "except", "Exception", "as", "e", ":", "LOGEXCEPTION", "(", "'periodic var recovery failed for %s'", "%", "repr", "(", "task", ")", ")", "return", "None" ]
25.84375
0.002331
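The unpack-and-delegate worker shape above pairs naturally with multiprocessing.Pool; a runnable sketch with a stand-in for `periodicvar_recovery` (the file names below are invented):

from multiprocessing import Pool

def worker(task):
    pfpkl, simbasedir, period_tolerance = task   # same unpacking as above
    try:
        return (pfpkl, period_tolerance)         # stand-in for the real recovery call
    except Exception:
        return None

if __name__ == '__main__':
    tasks = [('pf-%d.pkl' % i, 'sims/', 1.0e-3) for i in range(4)]
    with Pool(2) as pool:
        print(pool.map(worker, tasks))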
def addif(self, iname): """ Add an interface to the bridge """ _runshell([brctlexe, 'addif', self.name, iname], "Could not add interface %s to %s." % (iname, self.name))
[ "def", "addif", "(", "self", ",", "iname", ")", ":", "_runshell", "(", "[", "brctlexe", ",", "'addif'", ",", "self", ".", "name", ",", "iname", "]", ",", "\"Could not add interface %s to %s.\"", "%", "(", "iname", ",", "self", ".", "name", ")", ")" ]
48.5
0.015228
def funnel(self, steps, timeframe=None, timezone=None, max_age=None, all_keys=False): """ Performs a Funnel query Returns an object containing the results for each step of the funnel. :param steps: array of dictionaries, one for each step. example: [{"event_collection":"signup","actor_property":"user.id"}, {"event_collection":"purchase","actor_property":"user.id"}] :param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days" :param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds :param all_keys: set to True to return all keys on response (i.e. "result", "actors", "steps") """ params = self.get_params( steps=steps, timeframe=timeframe, timezone=timezone, max_age=max_age, ) return self.api.query("funnel", params, all_keys=all_keys)
[ "def", "funnel", "(", "self", ",", "steps", ",", "timeframe", "=", "None", ",", "timezone", "=", "None", ",", "max_age", "=", "None", ",", "all_keys", "=", "False", ")", ":", "params", "=", "self", ".", "get_params", "(", "steps", "=", "steps", ",", "timeframe", "=", "timeframe", ",", "timezone", "=", "timezone", ",", "max_age", "=", "max_age", ",", ")", "return", "self", ".", "api", ".", "query", "(", "\"funnel\"", ",", "params", ",", "all_keys", "=", "all_keys", ")" ]
44.64
0.004386
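Building the `steps` argument from the docstring's example, assuming `client` is an instance of the class defining `funnel` (the result values are invented):

steps = [
    {'event_collection': 'signup',   'actor_property': 'user.id'},
    {'event_collection': 'purchase', 'actor_property': 'user.id'},
]
# result = client.funnel(steps, timeframe='previous_7_days', max_age=300)
# e.g. result == [3102, 1079]  -- one count per funnel step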
def message_create(self, originator, recipients, body, params=None): """Create a new message.""" if params is None: params = {} if type(recipients) == list: recipients = ','.join(recipients) params.update({'originator': originator, 'body': body, 'recipients': recipients}) return Message().load(self.request('messages', 'POST', params))
[ "def", "message_create", "(", "self", ",", "originator", ",", "recipients", ",", "body", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "type", "(", "recipients", ")", "==", "list", ":", "recipients", "=", "','", ".", "join", "(", "recipients", ")", "params", ".", "update", "(", "{", "'originator'", ":", "originator", ",", "'body'", ":", "body", ",", "'recipients'", ":", "recipients", "}", ")", "return", "Message", "(", ")", ".", "load", "(", "self", ".", "request", "(", "'messages'", ",", "'POST'", ",", "params", ")", ")" ]
47.75
0.010283
def run(items, background=None): """Detect copy number variations from batched set of samples using cn.mops. """ if not background: background = [] names = [tz.get_in(["rgnames", "sample"], x) for x in items + background] work_bams = [x["align_bam"] for x in items + background] if len(items + background) < 2: raise ValueError("cn.mops only works on batches with multiple samples") data = items[0] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", names[0], "cn_mops")) parallel = {"type": "local", "cores": data["config"]["algorithm"].get("num_cores", 1), "progs": ["delly"]} with pysam.Samfile(work_bams[0], "rb") as pysam_work_bam: chroms = [None] if _get_regional_bed_file(items[0]) else pysam_work_bam.references out_files = run_multicore(_run_on_chrom, [(chrom, work_bams, names, work_dir, items) for chrom in chroms], data["config"], parallel) out_file = _combine_out_files(out_files, work_dir, data) out = [] for data in items: if "sv" not in data: data["sv"] = [] data["sv"].append({"variantcaller": "cn_mops", "vrn_file": _prep_sample_cnvs(out_file, data)}) out.append(data) return out
[ "def", "run", "(", "items", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "names", "=", "[", "tz", ".", "get_in", "(", "[", "\"rgnames\"", ",", "\"sample\"", "]", ",", "x", ")", "for", "x", "in", "items", "+", "background", "]", "work_bams", "=", "[", "x", "[", "\"align_bam\"", "]", "for", "x", "in", "items", "+", "background", "]", "if", "len", "(", "items", "+", "background", ")", "<", "2", ":", "raise", "ValueError", "(", "\"cn.mops only works on batches with multiple samples\"", ")", "data", "=", "items", "[", "0", "]", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"structural\"", ",", "names", "[", "0", "]", ",", "\"cn_mops\"", ")", ")", "parallel", "=", "{", "\"type\"", ":", "\"local\"", ",", "\"cores\"", ":", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"num_cores\"", ",", "1", ")", ",", "\"progs\"", ":", "[", "\"delly\"", "]", "}", "with", "pysam", ".", "Samfile", "(", "work_bams", "[", "0", "]", ",", "\"rb\"", ")", "as", "pysam_work_bam", ":", "chroms", "=", "[", "None", "]", "if", "_get_regional_bed_file", "(", "items", "[", "0", "]", ")", "else", "pysam_work_bam", ".", "references", "out_files", "=", "run_multicore", "(", "_run_on_chrom", ",", "[", "(", "chrom", ",", "work_bams", ",", "names", ",", "work_dir", ",", "items", ")", "for", "chrom", "in", "chroms", "]", ",", "data", "[", "\"config\"", "]", ",", "parallel", ")", "out_file", "=", "_combine_out_files", "(", "out_files", ",", "work_dir", ",", "data", ")", "out", "=", "[", "]", "for", "data", "in", "items", ":", "if", "\"sv\"", "not", "in", "data", ":", "data", "[", "\"sv\"", "]", "=", "[", "]", "data", "[", "\"sv\"", "]", ".", "append", "(", "{", "\"variantcaller\"", ":", "\"cn_mops\"", ",", "\"vrn_file\"", ":", "_prep_sample_cnvs", "(", "out_file", ",", "data", ")", "}", ")", "out", ".", "append", "(", "data", ")", "return", "out" ]
51.444444
0.00424
def route(rule=None, blueprint=None, defaults=None, endpoint=None, is_member=False, methods=None, only_if=None, **rule_options): """ Decorator to set default route rules for a view function. The arguments this function accepts are very similar to Flask's :meth:`~flask.Flask.route`, however, the ``is_member`` perhaps deserves an example:: class UserResource(ModelResource): class Meta: model = User member_param = '<int:id>' include_methods = ['list', 'get'] @route(is_member=True, methods=['POST']) def set_profile_pic(user): # do stuff # registered like so in your ``app_bundle/routes.py``: routes = lambda: [ resource(UserResource), ] # results in the following routes: # UserResource.list => GET /users # UserResource.get => GET /users/<int:id> # UserResource.set_profile_pic => POST /users/<int:id>/set-profile-pic :param rule: The URL rule. :param defaults: Any default values for parameters in the URL rule. :param endpoint: The endpoint name of this view. Determined automatically if left unspecified. :param is_member: Whether or not this view is for a :class:`~flask_unchained.bundles.resource.resource.Resource` member method. :param methods: A list of HTTP methods supported by this view. Defaults to ``['GET']``. :param only_if: A boolean or callable to dynamically determine whether or not to register this route with the app. :param rule_options: Other kwargs passed on to :class:`~werkzeug.routing.Rule`. """ def wrapper(fn): fn_routes = getattr(fn, FN_ROUTES_ATTR, []) route = Route(rule, fn, blueprint=blueprint, defaults=defaults, endpoint=endpoint, is_member=is_member, methods=methods, only_if=only_if, **rule_options) setattr(fn, FN_ROUTES_ATTR, fn_routes + [route]) return fn if callable(rule): fn = rule rule = None return wrapper(fn) return wrapper
[ "def", "route", "(", "rule", "=", "None", ",", "blueprint", "=", "None", ",", "defaults", "=", "None", ",", "endpoint", "=", "None", ",", "is_member", "=", "False", ",", "methods", "=", "None", ",", "only_if", "=", "None", ",", "*", "*", "rule_options", ")", ":", "def", "wrapper", "(", "fn", ")", ":", "fn_routes", "=", "getattr", "(", "fn", ",", "FN_ROUTES_ATTR", ",", "[", "]", ")", "route", "=", "Route", "(", "rule", ",", "fn", ",", "blueprint", "=", "blueprint", ",", "defaults", "=", "defaults", ",", "endpoint", "=", "endpoint", ",", "is_member", "=", "is_member", ",", "methods", "=", "methods", ",", "only_if", "=", "only_if", ",", "*", "*", "rule_options", ")", "setattr", "(", "fn", ",", "FN_ROUTES_ATTR", ",", "fn_routes", "+", "[", "route", "]", ")", "return", "fn", "if", "callable", "(", "rule", ")", ":", "fn", "=", "rule", "rule", "=", "None", "return", "wrapper", "(", "fn", ")", "return", "wrapper" ]
41.641509
0.002213
def window(self, vec): """Apply a window to the coefficients defined by *vec*. *vec* must have length *nmax* + 1. This is a good way to filter the pattern by windowing in the coefficient domain. Example:: >>> vec = numpy.linspace(0, 1, c.nmax + 1) >>> c.window(vec) Args: vec (numpy.array): Vector of values to apply in the n direction of the data. Has length *nmax* + 1. Returns: Nothing, applies the window to the data in place. """ slce = slice(None, None, None) self.__setitem__((slce, 0), self.__getitem__((slce, 0)) * vec) for m in xrange(1, self.mmax + 1): self.__setitem__((slce, -m), self.__getitem__((slce, -m)) * vec[m:]) self.__setitem__((slce, m), self.__getitem__((slce, m)) * vec[m:])
[ "def", "window", "(", "self", ",", "vec", ")", ":", "slce", "=", "slice", "(", "None", ",", "None", ",", "None", ")", "self", ".", "__setitem__", "(", "(", "slce", ",", "0", ")", ",", "self", ".", "__getitem__", "(", "(", "slce", ",", "0", ")", ")", "*", "vec", ")", "for", "m", "in", "xrange", "(", "1", ",", "self", ".", "mmax", "+", "1", ")", ":", "self", ".", "__setitem__", "(", "(", "slce", ",", "-", "m", ")", ",", "self", ".", "__getitem__", "(", "(", "slce", ",", "-", "m", ")", ")", "*", "vec", "[", "m", ":", "]", ")", "self", ".", "__setitem__", "(", "(", "slce", ",", "m", ")", ",", "self", ".", "__getitem__", "(", "(", "slce", ",", "m", ")", ")", "*", "vec", "[", "m", ":", "]", ")" ]
34.92
0.005574
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset): '''Mirror Image Visualization: Shows the encoding space juxtaposed against the coincidence space. The encoding space is the bottom-up sensory encoding and the coincidence space depicts the corresponding activation of coincidences in the SP. Hence, the mirror image visualization is a visual depiction of the mapping of SP cells to the input representations. Note: * The files spBUOut and sensorBUOut are assumed to be in the output format used for LPF experiment outputs. * BU outputs for some sample datasets are provided. Specify the name of the dataset as an option while running this script. ''' lines = activeCoincsFile.readlines() inputs = encodingsFile.readlines() w = len(inputs[0].split(' '))-1 patterns = set([]) encodings = set([]) coincs = [] #The set of all coincidences that have won at least once reUsedCoincs = [] firstLine = inputs[0].split(' ') size = int(firstLine.pop(0)) spOutput = np.zeros((len(lines),40)) inputBits = np.zeros((len(lines),w)) print 'Total n:', size print 'Total number of records in the file:', len(lines), '\n' print 'w:', w count = 0 for x in xrange(len(lines)): inputSpace = [] #Encoded representation for each input spBUout = [int(z) for z in lines[x].split(' ')] spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP temp = set(spBUout) spOutput[x]=spBUout input = [int(z) for z in inputs[x].split(' ')] input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space tempInput = set(input) inputBits[x]=input #Creating the encoding space for m in xrange(size): if m in tempInput: inputSpace.append(m) else: inputSpace.append('|') #A non-active bit repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active #Dividing the coincidences into two difference categories. if len(reUsed)==0: coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary) else: reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput)) patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once encodings = encodings.union(tempInput) count +=1 overlap = {} overlapVal = 0 seen = [] seen = (printOverlaps(coincs, coincs, seen)) print len(seen), 'sets of 40 cells' seen = printOverlaps(reUsedCoincs, coincs, seen) Summ=[] for z in coincs: c=0 for y in reUsedCoincs: c += len(z[1].intersection(y[1])) Summ.append(c) print 'Sum: ', Summ for m in xrange(3): displayLimit = min(51, len(spOutput[m*200:])) if displayLimit>0: drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1) else: print 'No more records to display' pyl.show()
[ "def", "analyzeOverlaps", "(", "activeCoincsFile", ",", "encodingsFile", ",", "dataset", ")", ":", "lines", "=", "activeCoincsFile", ".", "readlines", "(", ")", "inputs", "=", "encodingsFile", ".", "readlines", "(", ")", "w", "=", "len", "(", "inputs", "[", "0", "]", ".", "split", "(", "' '", ")", ")", "-", "1", "patterns", "=", "set", "(", "[", "]", ")", "encodings", "=", "set", "(", "[", "]", ")", "coincs", "=", "[", "]", "#The set of all coincidences that have won at least once", "reUsedCoincs", "=", "[", "]", "firstLine", "=", "inputs", "[", "0", "]", ".", "split", "(", "' '", ")", "size", "=", "int", "(", "firstLine", ".", "pop", "(", "0", ")", ")", "spOutput", "=", "np", ".", "zeros", "(", "(", "len", "(", "lines", ")", ",", "40", ")", ")", "inputBits", "=", "np", ".", "zeros", "(", "(", "len", "(", "lines", ")", ",", "w", ")", ")", "print", "'Total n:'", ",", "size", "print", "'Total number of records in the file:'", ",", "len", "(", "lines", ")", ",", "'\\n'", "print", "'w:'", ",", "w", "count", "=", "0", "for", "x", "in", "xrange", "(", "len", "(", "lines", ")", ")", ":", "inputSpace", "=", "[", "]", "#Encoded representation for each input ", "spBUout", "=", "[", "int", "(", "z", ")", "for", "z", "in", "lines", "[", "x", "]", ".", "split", "(", "' '", ")", "]", "spBUout", ".", "pop", "(", "0", ")", "#The first element of each row of spBUOut is the size of the SP ", "temp", "=", "set", "(", "spBUout", ")", "spOutput", "[", "x", "]", "=", "spBUout", "input", "=", "[", "int", "(", "z", ")", "for", "z", "in", "inputs", "[", "x", "]", ".", "split", "(", "' '", ")", "]", "input", ".", "pop", "(", "0", ")", "#The first element of each row of sensorBUout is the size of the encoding space", "tempInput", "=", "set", "(", "input", ")", "inputBits", "[", "x", "]", "=", "input", "#Creating the encoding space ", "for", "m", "in", "xrange", "(", "size", ")", ":", "if", "m", "in", "tempInput", ":", "inputSpace", ".", "append", "(", "m", ")", "else", ":", "inputSpace", ".", "append", "(", "'|'", ")", "#A non-active bit", "repeatedBits", "=", "tempInput", ".", "intersection", "(", "encodings", ")", "#Storing the bits that have been previously active", "reUsed", "=", "temp", ".", "intersection", "(", "patterns", ")", "#Checking if any of the active cells have been previously active ", "#Dividing the coincidences into two difference categories. 
", "if", "len", "(", "reUsed", ")", "==", "0", ":", "coincs", ".", "append", "(", "(", "count", ",", "temp", ",", "repeatedBits", ",", "inputSpace", ",", "tempInput", ")", ")", "#Pattern no, active cells, repeated bits, encoding (full), encoding (summary)", "else", ":", "reUsedCoincs", ".", "append", "(", "(", "count", ",", "temp", ",", "repeatedBits", ",", "inputSpace", ",", "tempInput", ")", ")", "patterns", "=", "patterns", ".", "union", "(", "temp", ")", "#Adding the active cells to the set of coincs that have been active at least once", "encodings", "=", "encodings", ".", "union", "(", "tempInput", ")", "count", "+=", "1", "overlap", "=", "{", "}", "overlapVal", "=", "0", "seen", "=", "[", "]", "seen", "=", "(", "printOverlaps", "(", "coincs", ",", "coincs", ",", "seen", ")", ")", "print", "len", "(", "seen", ")", ",", "'sets of 40 cells'", "seen", "=", "printOverlaps", "(", "reUsedCoincs", ",", "coincs", ",", "seen", ")", "Summ", "=", "[", "]", "for", "z", "in", "coincs", ":", "c", "=", "0", "for", "y", "in", "reUsedCoincs", ":", "c", "+=", "len", "(", "z", "[", "1", "]", ".", "intersection", "(", "y", "[", "1", "]", ")", ")", "Summ", ".", "append", "(", "c", ")", "print", "'Sum: '", ",", "Summ", "for", "m", "in", "xrange", "(", "3", ")", ":", "displayLimit", "=", "min", "(", "51", ",", "len", "(", "spOutput", "[", "m", "*", "200", ":", "]", ")", ")", "if", "displayLimit", ">", "0", ":", "drawFile", "(", "dataset", ",", "np", ".", "zeros", "(", "[", "len", "(", "inputBits", "[", ":", "(", "m", "+", "1", ")", "*", "displayLimit", "]", ")", ",", "len", "(", "inputBits", "[", ":", "(", "m", "+", "1", ")", "*", "displayLimit", "]", ")", "]", ")", ",", "inputBits", "[", ":", "(", "m", "+", "1", ")", "*", "displayLimit", "]", ",", "spOutput", "[", ":", "(", "m", "+", "1", ")", "*", "displayLimit", "]", ",", "w", ",", "m", "+", "1", ")", "else", ":", "print", "'No more records to display'", "pyl", ".", "show", "(", ")" ]
36.454545
0.028528
def docCopyNode(self, doc, extended): """Do a copy of the node to a given document. """ if doc is None: doc__o = None else: doc__o = doc._o ret = libxml2mod.xmlDocCopyNode(self._o, doc__o, extended) if ret is None:raise treeError('xmlDocCopyNode() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "docCopyNode", "(", "self", ",", "doc", ",", "extended", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlDocCopyNode", "(", "self", ".", "_o", ",", "doc__o", ",", "extended", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlDocCopyNode() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
43
0.017094
def deserialize(self, value, **kwargs): """Return a deserialized copy of the dict""" kwargs.update({'trusted': kwargs.get('trusted', False)}) if self.deserializer is not None: return self.deserializer(value, **kwargs) if value is None: return None output_tuples = [ ( self.key_prop.deserialize(key, **kwargs), self.value_prop.deserialize(val, **kwargs) ) for key, val in iteritems(value) ] try: output_dict = {key: val for key, val in output_tuples} except TypeError as err: raise TypeError('Dictionary property {} cannot be deserialized - ' 'keys contain {}'.format(self.name, err)) return self._class_container(output_dict)
[ "def", "deserialize", "(", "self", ",", "value", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'trusted'", ":", "kwargs", ".", "get", "(", "'trusted'", ",", "False", ")", "}", ")", "if", "self", ".", "deserializer", "is", "not", "None", ":", "return", "self", ".", "deserializer", "(", "value", ",", "*", "*", "kwargs", ")", "if", "value", "is", "None", ":", "return", "None", "output_tuples", "=", "[", "(", "self", ".", "key_prop", ".", "deserialize", "(", "key", ",", "*", "*", "kwargs", ")", ",", "self", ".", "value_prop", ".", "deserialize", "(", "val", ",", "*", "*", "kwargs", ")", ")", "for", "key", ",", "val", "in", "iteritems", "(", "value", ")", "]", "try", ":", "output_dict", "=", "{", "key", ":", "val", "for", "key", ",", "val", "in", "output_tuples", "}", "except", "TypeError", "as", "err", ":", "raise", "TypeError", "(", "'Dictionary property {} cannot be deserialized - '", "'keys contain {}'", ".", "format", "(", "self", ".", "name", ",", "err", ")", ")", "return", "self", ".", "_class_container", "(", "output_dict", ")" ]
41.1
0.002378
def format_ascii(sensor_graph):
    """Format this sensor graph as a loadable ascii file format.

    This includes commands to reset and clear previously stored
    sensor graphs.

    NB. This format does not include any required configuration
    variables that were specified in this sensor graph, so you
    should also output that information separately in, e.g., the
    config format.

    Args:
        sensor_graph (SensorGraph): the sensor graph that we want to format

    Returns:
        str: The ascii output lines concatenated as a single string
    """

    cmdfile = CommandFile("Sensor Graph", "1.0")

    # Clear any old sensor graph
    cmdfile.add("set_online", False)
    cmdfile.add("clear")
    cmdfile.add("reset")

    # Load in the nodes
    for node in sensor_graph.dump_nodes():
        cmdfile.add('add_node', node)

    # Load in the streamers
    for streamer in sensor_graph.streamers:
        other = 0xFF
        if streamer.with_other is not None:
            other = streamer.with_other

        args = [streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type, other]
        cmdfile.add('add_streamer', *args)

    # Load all the constants
    for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()):
        cmdfile.add("push_reading", stream, value)

    # Persist the sensor graph
    cmdfile.add("persist")
    cmdfile.add("set_online", True)

    return cmdfile.dump()
[ "def", "format_ascii", "(", "sensor_graph", ")", ":", "cmdfile", "=", "CommandFile", "(", "\"Sensor Graph\"", ",", "\"1.0\"", ")", "# Clear any old sensor graph", "cmdfile", ".", "add", "(", "\"set_online\"", ",", "False", ")", "cmdfile", ".", "add", "(", "\"clear\"", ")", "cmdfile", ".", "add", "(", "\"reset\"", ")", "# Load in the nodes", "for", "node", "in", "sensor_graph", ".", "dump_nodes", "(", ")", ":", "cmdfile", ".", "add", "(", "'add_node'", ",", "node", ")", "# Load in the streamers", "for", "streamer", "in", "sensor_graph", ".", "streamers", ":", "other", "=", "0xFF", "if", "streamer", ".", "with_other", "is", "not", "None", ":", "other", "=", "streamer", ".", "with_other", "args", "=", "[", "streamer", ".", "selector", ",", "streamer", ".", "dest", ",", "streamer", ".", "automatic", ",", "streamer", ".", "format", ",", "streamer", ".", "report_type", ",", "other", "]", "cmdfile", ".", "add", "(", "'add_streamer'", ",", "*", "args", ")", "# Load all the constants", "for", "stream", ",", "value", "in", "sorted", "(", "sensor_graph", ".", "constant_database", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ".", "encode", "(", ")", ")", ":", "cmdfile", ".", "add", "(", "\"push_reading\"", ",", "stream", ",", "value", ")", "# Persist the sensor graph", "cmdfile", ".", "add", "(", "\"persist\"", ")", "cmdfile", ".", "add", "(", "\"set_online\"", ",", "True", ")", "return", "cmdfile", ".", "dump", "(", ")" ]
30.659574
0.002017
def hmsm_to_days(hour=0,min=0,sec=0,micro=0): """ Convert hours, minutes, seconds, and microseconds to fractional days. Parameters ---------- hour : int, optional Hour number. Defaults to 0. min : int, optional Minute number. Defaults to 0. sec : int, optional Second number. Defaults to 0. micro : int, optional Microsecond number. Defaults to 0. Returns ------- days : float Fractional days. Examples -------- >>> hmsm_to_days(hour=6) 0.25 """ days = sec + (micro / 1.e6) days = min + (days / 60.) days = hour + (days / 60.) return days / 24.
[ "def", "hmsm_to_days", "(", "hour", "=", "0", ",", "min", "=", "0", ",", "sec", "=", "0", ",", "micro", "=", "0", ")", ":", "days", "=", "sec", "+", "(", "micro", "/", "1.e6", ")", "days", "=", "min", "+", "(", "days", "/", "60.", ")", "days", "=", "hour", "+", "(", "days", "/", "60.", ")", "return", "days", "/", "24." ]
17.75
0.005935
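A worked check of hmsm_to_days above; the values follow directly from the nested divisions in the function body, so they are exact:

# 12:30:00 -> ((0/60 + 30)/60 + 12)/24 = 12.5/24
print(hmsm_to_days(hour=12, min=30))  # 0.5208333333333334
print(hmsm_to_days(hour=6))           # 0.25, matching the docstring example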
def _filter(msgdata, mailparser, mdfolder, mailfilters): """Filter msgdata by mailfilters""" if mailfilters: for f in mailfilters: msg = mailparser.parse(StringIO(msgdata)) rule = f(msg, folder=mdfolder) if rule: yield rule return
[ "def", "_filter", "(", "msgdata", ",", "mailparser", ",", "mdfolder", ",", "mailfilters", ")", ":", "if", "mailfilters", ":", "for", "f", "in", "mailfilters", ":", "msg", "=", "mailparser", ".", "parse", "(", "StringIO", "(", "msgdata", ")", ")", "rule", "=", "f", "(", "msg", ",", "folder", "=", "mdfolder", ")", "if", "rule", ":", "yield", "rule", "return" ]
32.666667
0.003311
def delete_logstore(self, project_name, logstore_name):
    """ delete log store
    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :return: DeleteLogStoreResponse

    :raise: LogException
    """
    headers = {}
    params = {}
    resource = "/logstores/" + logstore_name
    (resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
    return DeleteLogStoreResponse(header, resp)
[ "def", "delete_logstore", "(", "self", ",", "project_name", ",", "logstore_name", ")", ":", "headers", "=", "{", "}", "params", "=", "{", "}", "resource", "=", "\"/logstores/\"", "+", "logstore_name", "(", "resp", ",", "header", ")", "=", "self", ".", "_send", "(", "\"DELETE\"", ",", "project_name", ",", "None", ",", "resource", ",", "params", ",", "headers", ")", "return", "DeleteLogStoreResponse", "(", "header", ",", "resp", ")" ]
33.526316
0.007634
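A hedged usage sketch for delete_logstore; the client class name, endpoint, and credentials below are illustrative placeholders, not values taken from this record:

# Placeholders throughout -- the endpoint and keys are not real.
client = LogClient('cn-hangzhou.log.aliyuncs.com', 'ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
# Raises LogException if the project or logstore does not exist.
resp = client.delete_logstore('my-project', 'my-logstore')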
def list_is_type(ls, t): '''Assert that a list contains only elements of type t Return True if list contains elements of type t Raise TypeError if t is not a class Raise TypeError if ls is not a list Raise TypeError if ls contains non-t elements :param ls: LIST :param t: python class ''' if not isclass(t): raise TypeError("{} is not a class".format(t)) elif not isinstance(ls, list): raise TypeError("{} is not a list".format(ls)) else: ls_bad_types = [i for i in ls if not isinstance(i, t)] if len(ls_bad_types) > 0: raise TypeError("{} are not {}".format(ls_bad_types, t)) return True
[ "def", "list_is_type", "(", "ls", ",", "t", ")", ":", "if", "not", "isclass", "(", "t", ")", ":", "raise", "TypeError", "(", "\"{} is not a class\"", ".", "format", "(", "t", ")", ")", "elif", "not", "isinstance", "(", "ls", ",", "list", ")", ":", "raise", "TypeError", "(", "\"{} is not a list\"", ".", "format", "(", "ls", ")", ")", "else", ":", "ls_bad_types", "=", "[", "i", "for", "i", "in", "ls", "if", "not", "isinstance", "(", "i", ",", "t", ")", "]", "if", "len", "(", "ls_bad_types", ")", ">", "0", ":", "raise", "TypeError", "(", "\"{} are not {}\"", ".", "format", "(", "ls_bad_types", ",", "t", ")", ")", "return", "True" ]
33.15
0.001466
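Because list_is_type either returns True or raises TypeError, it works both as an inline assertion and inside try/except validation:

assert list_is_type([1, 2, 3], int)       # True: every element is an int

try:
    list_is_type([1, 'a', 3], int)
except TypeError as err:
    print(err)                            # ['a'] are not <class 'int'>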
def _get_powercfg_minute_values(scheme, guid, subguid, safe_name):
    '''
    Returns the AC/DC values in a dict for a guid and subguid for the given scheme
    '''
    if scheme is None:
        scheme = _get_current_scheme()

    if __grains__['osrelease'] == '7':
        cmd = 'powercfg /q {0} {1}'.format(scheme, guid)
    else:
        cmd = 'powercfg /q {0} {1} {2}'.format(scheme, guid, subguid)
    out = __salt__['cmd.run'](cmd, python_shell=False)

    split = out.split('\r\n\r\n')
    if len(split) > 1:
        for s in split:
            if safe_name in s or subguid in s:
                out = s
                break
    else:
        out = split[0]

    raw_settings = re.findall(r'Power Setting Index: ([0-9a-fx]+)', out)
    return {'ac': int(raw_settings[0], 0) / 60,
            'dc': int(raw_settings[1], 0) / 60}
[ "def", "_get_powercfg_minute_values", "(", "scheme", ",", "guid", ",", "subguid", ",", "safe_name", ")", ":", "if", "scheme", "is", "None", ":", "scheme", "=", "_get_current_scheme", "(", ")", "if", "__grains__", "[", "'osrelease'", "]", "==", "'7'", ":", "cmd", "=", "'powercfg /q {0} {1}'", ".", "format", "(", "scheme", ",", "guid", ")", "else", ":", "cmd", "=", "'powercfg /q {0} {1} {2}'", ".", "format", "(", "scheme", ",", "guid", ",", "subguid", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "split", "=", "out", ".", "split", "(", "'\\r\\n\\r\\n'", ")", "if", "len", "(", "split", ")", ">", "1", ":", "for", "s", "in", "split", ":", "if", "safe_name", "in", "s", "or", "subguid", "in", "s", ":", "out", "=", "s", "break", "else", ":", "out", "=", "split", "[", "0", "]", "raw_settings", "=", "re", ".", "findall", "(", "r'Power Setting Index: ([0-9a-fx]+)'", ",", "out", ")", "return", "{", "'ac'", ":", "int", "(", "raw_settings", "[", "0", "]", ",", "0", ")", "/", "60", ",", "'dc'", ":", "int", "(", "raw_settings", "[", "1", "]", ",", "0", ")", "/", "60", "}" ]
31.5
0.001185
def database_clone(targetcall, databasepath, complete=False): """ Checks to see if the database has already been downloaded. If not, runs the system call to download the database, and writes stdout and stderr to the logfile :param targetcall: system call to download, and possibly set-up the database :param databasepath: absolute path of the database :param complete: boolean variable to determine whether the complete file should be created """ # Create a file to store the logs; it will be used to determine if the database was downloaded and set-up completefile = os.path.join(databasepath, 'complete') # Run the system call if the database is not already downloaded if not os.path.isfile(completefile): out, err = run_subprocess(targetcall) if complete: # Create the database completeness assessment file and populate it with the out and err streams with open(completefile, 'w') as complete: complete.write(out) complete.write(err)
[ "def", "database_clone", "(", "targetcall", ",", "databasepath", ",", "complete", "=", "False", ")", ":", "# Create a file to store the logs; it will be used to determine if the database was downloaded and set-up", "completefile", "=", "os", ".", "path", ".", "join", "(", "databasepath", ",", "'complete'", ")", "# Run the system call if the database is not already downloaded", "if", "not", "os", ".", "path", ".", "isfile", "(", "completefile", ")", ":", "out", ",", "err", "=", "run_subprocess", "(", "targetcall", ")", "if", "complete", ":", "# Create the database completeness assessment file and populate it with the out and err streams", "with", "open", "(", "completefile", ",", "'w'", ")", "as", "complete", ":", "complete", ".", "write", "(", "out", ")", "complete", ".", "write", "(", "err", ")" ]
61.277778
0.00625
def pip(filename): """Parse pip reqs file and transform it to setuptools requirements.""" requirements = [] for line in open(join(ROOT, 'requirements', filename)): line = line.strip() if not line or '://' in line: continue match = RE_REQUIREMENT.match(line) if match: requirements.extend(pip(match.group('filename'))) else: requirements.append(line) return requirements
[ "def", "pip", "(", "filename", ")", ":", "requirements", "=", "[", "]", "for", "line", "in", "open", "(", "join", "(", "ROOT", ",", "'requirements'", ",", "filename", ")", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "'://'", "in", "line", ":", "continue", "match", "=", "RE_REQUIREMENT", ".", "match", "(", "line", ")", "if", "match", ":", "requirements", ".", "extend", "(", "pip", "(", "match", ".", "group", "(", "'filename'", ")", ")", ")", "else", ":", "requirements", ".", "append", "(", "line", ")", "return", "requirements" ]
34.538462
0.002169
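RE_REQUIREMENT and ROOT are module globals that do not appear in this record. A regular expression consistent with the match.group('filename') recursion above would be the following; this is an assumption about its shape, not the verified original:

import re

# Assumed shape: matches "-r other-file.txt" include lines and captures the path.
RE_REQUIREMENT = re.compile(r'^\s*-r\s+(?P<filename>[^\s#]+)')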
def fix(args): """ %prog fix ahrd.csv > ahrd.fixed.csv Fix ugly names from Uniprot. """ p = OptionParser(fix.__doc__) p.add_option("--ignore_sym_pat", default=False, action="store_true", help="Do not fix names matching symbol patterns i.e." + \ " names beginning or ending with gene symbols or a series of numbers." + \ " e.g. `ARM repeat superfamily protein`, `beta-hexosaminidase 3`," + \ " `CYCLIN A3;4`, `WALL ASSOCIATED KINASE (WAK)-LIKE 10`") p.set_outfile() opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) csvfile, = args fp = open(csvfile) fw = must_open(opts.outfile, "w") for row in fp: if row[0] == '#': continue if row.strip() == "": continue atoms = row.rstrip("\r\n").split("\t") name, hit, ahrd_code, desc = atoms[:4] \ if len(atoms) > 2 else \ (atoms[0], None, None, atoms[-1]) newdesc = fix_text(desc, ignore_sym_pat=opts.ignore_sym_pat) if hit and hit.strip() != "" and newdesc == Hypothetical: newdesc = "conserved " + newdesc print("\t".join(atoms[:4] + [newdesc] + atoms[4:]), file=fw)
[ "def", "fix", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fix", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--ignore_sym_pat\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Do not fix names matching symbol patterns i.e.\"", "+", "\" names beginning or ending with gene symbols or a series of numbers.\"", "+", "\" e.g. `ARM repeat superfamily protein`, `beta-hexosaminidase 3`,\"", "+", "\" `CYCLIN A3;4`, `WALL ASSOCIATED KINASE (WAK)-LIKE 10`\"", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "csvfile", ",", "=", "args", "fp", "=", "open", "(", "csvfile", ")", "fw", "=", "must_open", "(", "opts", ".", "outfile", ",", "\"w\"", ")", "for", "row", "in", "fp", ":", "if", "row", "[", "0", "]", "==", "'#'", ":", "continue", "if", "row", ".", "strip", "(", ")", "==", "\"\"", ":", "continue", "atoms", "=", "row", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "name", ",", "hit", ",", "ahrd_code", ",", "desc", "=", "atoms", "[", ":", "4", "]", "if", "len", "(", "atoms", ")", ">", "2", "else", "(", "atoms", "[", "0", "]", ",", "None", ",", "None", ",", "atoms", "[", "-", "1", "]", ")", "newdesc", "=", "fix_text", "(", "desc", ",", "ignore_sym_pat", "=", "opts", ".", "ignore_sym_pat", ")", "if", "hit", "and", "hit", ".", "strip", "(", ")", "!=", "\"\"", "and", "newdesc", "==", "Hypothetical", ":", "newdesc", "=", "\"conserved \"", "+", "newdesc", "print", "(", "\"\\t\"", ".", "join", "(", "atoms", "[", ":", "4", "]", "+", "[", "newdesc", "]", "+", "atoms", "[", "4", ":", "]", ")", ",", "file", "=", "fw", ")" ]
34.942857
0.008751
def computeDistortion(self, eEye, fU, fV): """ Gets the result of the distortion function for the specified eye and input UVs. UVs go from 0,0 in the upper left of that eye's viewport and 1,1 in the lower right of that eye's viewport. Returns true for success. Otherwise, returns false, and distortion coordinates are not suitable. """ fn = self.function_table.computeDistortion pDistortionCoordinates = DistortionCoordinates_t() result = fn(eEye, fU, fV, byref(pDistortionCoordinates)) return result, pDistortionCoordinates
[ "def", "computeDistortion", "(", "self", ",", "eEye", ",", "fU", ",", "fV", ")", ":", "fn", "=", "self", ".", "function_table", ".", "computeDistortion", "pDistortionCoordinates", "=", "DistortionCoordinates_t", "(", ")", "result", "=", "fn", "(", "eEye", ",", "fU", ",", "fV", ",", "byref", "(", "pDistortionCoordinates", ")", ")", "return", "result", ",", "pDistortionCoordinates" ]
53.454545
0.010033
def output(self, filename):
    """
    Output the inheritance relation. _filename is not used.

    Args:
        _filename(string)
    """
    info = 'Inheritance\n'

    if not self.contracts:
        return

    info += blue('Child_Contract -> ') + green('Immediate_Base_Contracts')
    info += green(' [Not_Immediate_Base_Contracts]')
    for child in self.contracts:
        info += blue(f'\n+ {child.name}')
        if child.inheritance:
            immediate = child.immediate_inheritance
            not_immediate = [i for i in child.inheritance if i not in immediate]
            info += ' -> ' + green(", ".join(map(str, immediate)))
            if not_immediate:
                info += ", ["+ green(", ".join(map(str, not_immediate))) + "]"

    info += green('\n\nBase_Contract -> ') + blue('Immediate_Child_Contracts')
    info += blue(' [Not_Immediate_Child_Contracts]')
    for base in self.contracts:
        info += green(f'\n+ {base.name}')
        children = list(self._get_child_contracts(base))
        if children:
            immediate = [child for child in children if base in child.immediate_inheritance]
            not_immediate = [child for child in children if not child in immediate]
            info += ' -> ' + blue(", ".join(map(str, immediate)))
            if not_immediate:
                info += ', [' + blue(", ".join(map(str, not_immediate))) + ']'

    self.info(info)
[ "def", "output", "(", "self", ",", "filename", ")", ":", "info", "=", "'Inheritance\\n'", "if", "not", "self", ".", "contracts", ":", "return", "info", "+=", "blue", "(", "'Child_Contract -> '", ")", "+", "green", "(", "'Immediate_Base_Contracts'", ")", "info", "+=", "green", "(", "' [Not_Immediate_Base_Contracts]'", ")", "for", "child", "in", "self", ".", "contracts", ":", "info", "+=", "blue", "(", "f'\\n+ {child.name}'", ")", "if", "child", ".", "inheritance", ":", "immediate", "=", "child", ".", "immediate_inheritance", "not_immediate", "=", "[", "i", "for", "i", "in", "child", ".", "inheritance", "if", "i", "not", "in", "immediate", "]", "info", "+=", "' -> '", "+", "green", "(", "\", \"", ".", "join", "(", "map", "(", "str", ",", "immediate", ")", ")", ")", "if", "not_immediate", ":", "info", "+=", "\", [\"", "+", "green", "(", "\", \"", ".", "join", "(", "map", "(", "str", ",", "not_immediate", ")", ")", ")", "+", "\"]\"", "info", "+=", "green", "(", "'\\n\\nBase_Contract -> '", ")", "+", "blue", "(", "'Immediate_Child_Contracts'", ")", "info", "+=", "blue", "(", "' [Not_Immediate_Child_Contracts]'", ")", "for", "base", "in", "self", ".", "contracts", ":", "info", "+=", "green", "(", "f'\\n+ {base.name}'", ")", "children", "=", "list", "(", "self", ".", "_get_child_contracts", "(", "base", ")", ")", "if", "children", ":", "immediate", "=", "[", "child", "for", "child", "in", "children", "if", "base", "in", "child", ".", "immediate_inheritance", "]", "not_immediate", "=", "[", "child", "for", "child", "in", "children", "if", "not", "child", "in", "immediate", "]", "info", "+=", "' -> '", "+", "blue", "(", "\", \"", ".", "join", "(", "map", "(", "str", ",", "immediate", ")", ")", ")", "if", "not_immediate", ":", "info", "+=", "', ['", "+", "blue", "(", "\", \"", ".", "join", "(", "map", "(", "str", ",", "not_immediate", ")", ")", ")", "+", "']'", "self", ".", "info", "(", "info", ")" ]
42.138889
0.006443
def find_root(self): """Finds the index of the root node of the tree.""" self.find_parents() index = 0 while len(self.vertices[index].parents)>0: index = self.vertices[index].parents[0] return index
[ "def", "find_root", "(", "self", ")", ":", "self", ".", "find_parents", "(", ")", "index", "=", "0", "while", "len", "(", "self", ".", "vertices", "[", "index", "]", ".", "parents", ")", ">", "0", ":", "index", "=", "self", ".", "vertices", "[", "index", "]", ".", "parents", "[", "0", "]", "return", "index" ]
34.857143
0.012
def around_me_in(self, leaderboard_name, member, **options): ''' Retrieve a page of leaders from the named leaderboard around a given member. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a page of leaders from the named leaderboard around a given member. Returns an empty array for a non-existent member. ''' reverse_rank_for_member = None if self.order == self.DESC: reverse_rank_for_member = self.redis_connection.zrevrank( leaderboard_name, member) else: reverse_rank_for_member = self.redis_connection.zrank( leaderboard_name, member) if reverse_rank_for_member is None: return [] page_size = options.get('page_size', self.page_size) starting_offset = reverse_rank_for_member - (page_size // 2) if starting_offset < 0: starting_offset = 0 ending_offset = (starting_offset + page_size) - 1 raw_leader_data = self._range_method( self.redis_connection, leaderboard_name, int(starting_offset), int(ending_offset), withscores=False) return self._parse_raw_members( leaderboard_name, raw_leader_data, **options)
[ "def", "around_me_in", "(", "self", ",", "leaderboard_name", ",", "member", ",", "*", "*", "options", ")", ":", "reverse_rank_for_member", "=", "None", "if", "self", ".", "order", "==", "self", ".", "DESC", ":", "reverse_rank_for_member", "=", "self", ".", "redis_connection", ".", "zrevrank", "(", "leaderboard_name", ",", "member", ")", "else", ":", "reverse_rank_for_member", "=", "self", ".", "redis_connection", ".", "zrank", "(", "leaderboard_name", ",", "member", ")", "if", "reverse_rank_for_member", "is", "None", ":", "return", "[", "]", "page_size", "=", "options", ".", "get", "(", "'page_size'", ",", "self", ".", "page_size", ")", "starting_offset", "=", "reverse_rank_for_member", "-", "(", "page_size", "//", "2", ")", "if", "starting_offset", "<", "0", ":", "starting_offset", "=", "0", "ending_offset", "=", "(", "starting_offset", "+", "page_size", ")", "-", "1", "raw_leader_data", "=", "self", ".", "_range_method", "(", "self", ".", "redis_connection", ",", "leaderboard_name", ",", "int", "(", "starting_offset", ")", ",", "int", "(", "ending_offset", ")", ",", "withscores", "=", "False", ")", "return", "self", ".", "_parse_raw_members", "(", "leaderboard_name", ",", "raw_leader_data", ",", "*", "*", "options", ")" ]
38.131579
0.003365
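A hedged usage sketch for around_me_in; the constructor and rank_member call follow the conventions of the Python leaderboard package this method appears to come from, but treat the names as assumptions:

lb = Leaderboard('highscores')          # assumed constructor
lb.rank_member('alice', 1200)
# Up to page_size leaders centred on alice; [] if alice is not ranked.
page = lb.around_me_in('highscores', 'alice', page_size=5)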
def export(self, version, export_dir): """ Create prov entities and activities. """ atts = [ (PROV['type'], self.type), (PROV['label'], self.label), ] if version['num'] == "1.0.0": atts += [ (NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type), (PROV['value'], self.stat_threshold), (NIDM_P_VALUE_UNCORRECTED, self.p_uncorr_threshold), (NIDM_P_VALUE_FWER, self.p_corr_threshold) ] else: atts += [ (PROV['type'], self.threshold_type), (PROV['value'], self.value) ] if self.equiv_thresh is not None: for equiv in self.equiv_thresh: atts += [ (NIDM_EQUIVALENT_THRESHOLD, equiv.id) ] self.add_attributes([(k, v) for k, v in atts if v is not None])
[ "def", "export", "(", "self", ",", "version", ",", "export_dir", ")", ":", "atts", "=", "[", "(", "PROV", "[", "'type'", "]", ",", "self", ".", "type", ")", ",", "(", "PROV", "[", "'label'", "]", ",", "self", ".", "label", ")", ",", "]", "if", "version", "[", "'num'", "]", "==", "\"1.0.0\"", ":", "atts", "+=", "[", "(", "NIDM_USER_SPECIFIED_THRESHOLD_TYPE", ",", "self", ".", "user_threshold_type", ")", ",", "(", "PROV", "[", "'value'", "]", ",", "self", ".", "stat_threshold", ")", ",", "(", "NIDM_P_VALUE_UNCORRECTED", ",", "self", ".", "p_uncorr_threshold", ")", ",", "(", "NIDM_P_VALUE_FWER", ",", "self", ".", "p_corr_threshold", ")", "]", "else", ":", "atts", "+=", "[", "(", "PROV", "[", "'type'", "]", ",", "self", ".", "threshold_type", ")", ",", "(", "PROV", "[", "'value'", "]", ",", "self", ".", "value", ")", "]", "if", "self", ".", "equiv_thresh", "is", "not", "None", ":", "for", "equiv", "in", "self", ".", "equiv_thresh", ":", "atts", "+=", "[", "(", "NIDM_EQUIVALENT_THRESHOLD", ",", "equiv", ".", "id", ")", "]", "self", ".", "add_attributes", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "atts", "if", "v", "is", "not", "None", "]", ")" ]
31.333333
0.002064
def local_datetime(utcdatetime, format=None, timezone=None): """ Return local datetime based on the timezone It will automatically format the date. To not format the date, set format=False :param utcdatetime: Arrow or string :param format: string of format or False :param timezone: string, ie: US/Eastern :return: """ if utcdatetime is None: return None timezone = timezone or config("DATETIME_TIMEZONE", "US/Eastern") dt = utcdatetime.to(timezone) \ if isinstance(utcdatetime, arrow.Arrow) \ else arrow.get(utcdatetime, timezone) if format is False: return dt _ = config("DATETIME_FORMAT") format = _.get("default") or "MM/DD/YYYY" if not format else _.get(format) return dt.format(format)
[ "def", "local_datetime", "(", "utcdatetime", ",", "format", "=", "None", ",", "timezone", "=", "None", ")", ":", "if", "utcdatetime", "is", "None", ":", "return", "None", "timezone", "=", "timezone", "or", "config", "(", "\"DATETIME_TIMEZONE\"", ",", "\"US/Eastern\"", ")", "dt", "=", "utcdatetime", ".", "to", "(", "timezone", ")", "if", "isinstance", "(", "utcdatetime", ",", "arrow", ".", "Arrow", ")", "else", "arrow", ".", "get", "(", "utcdatetime", ",", "timezone", ")", "if", "format", "is", "False", ":", "return", "dt", "_", "=", "config", "(", "\"DATETIME_FORMAT\"", ")", "format", "=", "_", ".", "get", "(", "\"default\"", ")", "or", "\"MM/DD/YYYY\"", "if", "not", "format", "else", "_", ".", "get", "(", "format", ")", "return", "dt", ".", "format", "(", "format", ")" ]
32.166667
0.003774
def level_order(tree, include_all=False): """ Returns an iterator over the tree in level-order If include_all is set to True, empty parts of the tree are filled with dummy entries and the iterator becomes infinite. """ q = deque() q.append(tree) while q: node = q.popleft() yield node if include_all or node.left: q.append(node.left or node.__class__()) if include_all or node.right: q.append(node.right or node.__class__())
[ "def", "level_order", "(", "tree", ",", "include_all", "=", "False", ")", ":", "q", "=", "deque", "(", ")", "q", ".", "append", "(", "tree", ")", "while", "q", ":", "node", "=", "q", ".", "popleft", "(", ")", "yield", "node", "if", "include_all", "or", "node", ".", "left", ":", "q", ".", "append", "(", "node", ".", "left", "or", "node", ".", "__class__", "(", ")", ")", "if", "include_all", "or", "node", ".", "right", ":", "q", ".", "append", "(", "node", ".", "right", "or", "node", ".", "__class__", "(", ")", ")" ]
28.941176
0.001969
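A self-contained demonstration of level_order; the Node class below is a minimal stub. Note that include_all=True requires the node class to be constructible with no arguments, since dummies are created via node.__class__():

from collections import deque  # needed by level_order itself

class Node(object):
    def __init__(self, value=None, left=None, right=None):
        self.value, self.left, self.right = value, left, right

root = Node(1, Node(2, Node(4)), Node(3))
print([n.value for n in level_order(root)])  # [1, 2, 3, 4]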
def releaseCompleteNetToMs(Cause_presence=0, Facility_presence=0, UserUser_presence=0): """RELEASE COMPLETE Section 9.3.19.1""" a = TpPd(pd=0x3) b = MessageType(mesType=0x2a) # 00101010 packet = a / b if Cause_presence is 1: c = CauseHdr(ieiC=0x08, eightBitC=0x0) packet = packet / c if Facility_presence is 1: d = FacilityHdr(ieiF=0x1C, eightBitF=0x0) packet = packet / d if UserUser_presence is 1: e = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0) packet = packet / e return packet
[ "def", "releaseCompleteNetToMs", "(", "Cause_presence", "=", "0", ",", "Facility_presence", "=", "0", ",", "UserUser_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x3", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x2a", ")", "# 00101010", "packet", "=", "a", "/", "b", "if", "Cause_presence", "is", "1", ":", "c", "=", "CauseHdr", "(", "ieiC", "=", "0x08", ",", "eightBitC", "=", "0x0", ")", "packet", "=", "packet", "/", "c", "if", "Facility_presence", "is", "1", ":", "d", "=", "FacilityHdr", "(", "ieiF", "=", "0x1C", ",", "eightBitF", "=", "0x0", ")", "packet", "=", "packet", "/", "d", "if", "UserUser_presence", "is", "1", ":", "e", "=", "UserUserHdr", "(", "ieiUU", "=", "0x7E", ",", "eightBitUU", "=", "0x0", ")", "packet", "=", "packet", "/", "e", "return", "packet" ]
35.625
0.001709
def get_logical_plan(cluster, environ, topology, role=None): ''' Get the logical plan state of a topology in a cluster :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(LOGICALPLAN_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
[ "def", "get_logical_plan", "(", "cluster", ",", "environ", ",", "topology", ",", "role", "=", "None", ")", ":", "params", "=", "dict", "(", "cluster", "=", "cluster", ",", "environ", "=", "environ", ",", "topology", "=", "topology", ")", "if", "role", "is", "not", "None", ":", "params", "[", "'role'", "]", "=", "role", "request_url", "=", "tornado", ".", "httputil", ".", "url_concat", "(", "create_url", "(", "LOGICALPLAN_URL_FMT", ")", ",", "params", ")", "raise", "tornado", ".", "gen", ".", "Return", "(", "(", "yield", "fetch_url_as_json", "(", "request_url", ")", ")", ")" ]
31.466667
0.012346
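Because get_logical_plan raises tornado.gen.Return, it is a generator-style coroutine and must be consumed with yield from inside another coroutine; a sketch, where the cluster/environ/topology names are placeholders:

import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def show_plan():
    plan = yield get_logical_plan('local', 'default', 'WordCountTopology')
    raise tornado.gen.Return(plan)

plan = tornado.ioloop.IOLoop.current().run_sync(show_plan)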
def render_to_texture(self, data, texture, offset, size): """Render a SDF to a texture at a given offset and size Parameters ---------- data : array Must be 2D with type np.ubyte. texture : instance of Texture2D The texture to render to. offset : tuple of int Offset (x, y) to render to inside the texture. size : tuple of int Size (w, h) to render inside the texture. """ assert isinstance(texture, Texture2D) set_state(blend=False, depth_test=False) # calculate the negative half (within object) orig_tex = Texture2D(255 - data, format='luminance', wrapping='clamp_to_edge', interpolation='nearest') edf_neg_tex = self._render_edf(orig_tex) # calculate positive half (outside object) orig_tex[:, :, 0] = data edf_pos_tex = self._render_edf(orig_tex) # render final product to output texture self.program_insert['u_texture'] = orig_tex self.program_insert['u_pos_texture'] = edf_pos_tex self.program_insert['u_neg_texture'] = edf_neg_tex self.fbo_to[-1].color_buffer = texture with self.fbo_to[-1]: set_viewport(tuple(offset) + tuple(size)) self.program_insert.draw('triangle_strip')
[ "def", "render_to_texture", "(", "self", ",", "data", ",", "texture", ",", "offset", ",", "size", ")", ":", "assert", "isinstance", "(", "texture", ",", "Texture2D", ")", "set_state", "(", "blend", "=", "False", ",", "depth_test", "=", "False", ")", "# calculate the negative half (within object)", "orig_tex", "=", "Texture2D", "(", "255", "-", "data", ",", "format", "=", "'luminance'", ",", "wrapping", "=", "'clamp_to_edge'", ",", "interpolation", "=", "'nearest'", ")", "edf_neg_tex", "=", "self", ".", "_render_edf", "(", "orig_tex", ")", "# calculate positive half (outside object)", "orig_tex", "[", ":", ",", ":", ",", "0", "]", "=", "data", "edf_pos_tex", "=", "self", ".", "_render_edf", "(", "orig_tex", ")", "# render final product to output texture", "self", ".", "program_insert", "[", "'u_texture'", "]", "=", "orig_tex", "self", ".", "program_insert", "[", "'u_pos_texture'", "]", "=", "edf_pos_tex", "self", ".", "program_insert", "[", "'u_neg_texture'", "]", "=", "edf_neg_tex", "self", ".", "fbo_to", "[", "-", "1", "]", ".", "color_buffer", "=", "texture", "with", "self", ".", "fbo_to", "[", "-", "1", "]", ":", "set_viewport", "(", "tuple", "(", "offset", ")", "+", "tuple", "(", "size", ")", ")", "self", ".", "program_insert", ".", "draw", "(", "'triangle_strip'", ")" ]
38.314286
0.002909
def linkify_h_by_hd(self, hosts): """Add dependency in host objects :param hosts: hosts list :type hosts: alignak.objects.host.Hosts :return: None """ for hostdep in self: # Only used for debugging purpose when loops are detected setattr(hostdep, "host_name_string", "undefined") setattr(hostdep, "dependent_host_name_string", "undefined") # if the host dep conf is bad, pass this one if getattr(hostdep, 'host_name', None) is None or\ getattr(hostdep, 'dependent_host_name', None) is None: continue if hostdep.host_name not in hosts or hostdep.dependent_host_name not in hosts: continue hosts.add_act_dependency(hostdep.dependent_host_name, hostdep.host_name, hostdep.notification_failure_criteria, getattr(hostdep, 'dependency_period', ''), hostdep.inherits_parent) hosts.add_chk_dependency(hostdep.dependent_host_name, hostdep.host_name, hostdep.execution_failure_criteria, getattr(hostdep, 'dependency_period', ''), hostdep.inherits_parent) # Only used for debugging purpose when loops are detected setattr(hostdep, "host_name_string", hosts[hostdep.host_name].get_name()) setattr(hostdep, "dependent_host_name_string", hosts[hostdep.dependent_host_name].get_name())
[ "def", "linkify_h_by_hd", "(", "self", ",", "hosts", ")", ":", "for", "hostdep", "in", "self", ":", "# Only used for debugging purpose when loops are detected", "setattr", "(", "hostdep", ",", "\"host_name_string\"", ",", "\"undefined\"", ")", "setattr", "(", "hostdep", ",", "\"dependent_host_name_string\"", ",", "\"undefined\"", ")", "# if the host dep conf is bad, pass this one", "if", "getattr", "(", "hostdep", ",", "'host_name'", ",", "None", ")", "is", "None", "or", "getattr", "(", "hostdep", ",", "'dependent_host_name'", ",", "None", ")", "is", "None", ":", "continue", "if", "hostdep", ".", "host_name", "not", "in", "hosts", "or", "hostdep", ".", "dependent_host_name", "not", "in", "hosts", ":", "continue", "hosts", ".", "add_act_dependency", "(", "hostdep", ".", "dependent_host_name", ",", "hostdep", ".", "host_name", ",", "hostdep", ".", "notification_failure_criteria", ",", "getattr", "(", "hostdep", ",", "'dependency_period'", ",", "''", ")", ",", "hostdep", ".", "inherits_parent", ")", "hosts", ".", "add_chk_dependency", "(", "hostdep", ".", "dependent_host_name", ",", "hostdep", ".", "host_name", ",", "hostdep", ".", "execution_failure_criteria", ",", "getattr", "(", "hostdep", ",", "'dependency_period'", ",", "''", ")", ",", "hostdep", ".", "inherits_parent", ")", "# Only used for debugging purpose when loops are detected", "setattr", "(", "hostdep", ",", "\"host_name_string\"", ",", "hosts", "[", "hostdep", ".", "host_name", "]", ".", "get_name", "(", ")", ")", "setattr", "(", "hostdep", ",", "\"dependent_host_name_string\"", ",", "hosts", "[", "hostdep", ".", "dependent_host_name", "]", ".", "get_name", "(", ")", ")" ]
47.558824
0.003636
def open_streaming_interface(self): """Called when someone opens a streaming interface to the device. This method will automatically notify sensor_graph that there is a streaming interface opened. Returns: list: A list of IOTileReport objects that should be sent out the streaming interface. """ super(ReferenceDevice, self).open_streaming_interface() self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_OPEN) return []
[ "def", "open_streaming_interface", "(", "self", ")", ":", "super", "(", "ReferenceDevice", ",", "self", ")", ".", "open_streaming_interface", "(", ")", "self", ".", "rpc", "(", "8", ",", "rpcs", ".", "SG_GRAPH_INPUT", ",", "8", ",", "streams", ".", "COMM_TILE_OPEN", ")", "return", "[", "]" ]
33.533333
0.003868
def create_message(self): """Returns a message body to send in this email. Should be from email.mime.*""" body = dedent("""\ Received exception {exception} on {queue} from worker {worker}: {traceback} Payload: {payload} """).format(exception=self._exception, traceback=self._traceback, queue=self._queue, payload=self._payload, worker=self._worker) return MIMEText(body)
[ "def", "create_message", "(", "self", ")", ":", "body", "=", "dedent", "(", "\"\"\"\\\n Received exception {exception} on {queue} from worker {worker}:\n\n {traceback}\n\n Payload:\n {payload}\n\n \"\"\"", ")", ".", "format", "(", "exception", "=", "self", ".", "_exception", ",", "traceback", "=", "self", ".", "_traceback", ",", "queue", "=", "self", ".", "_queue", ",", "payload", "=", "self", ".", "_payload", ",", "worker", "=", "self", ".", "_worker", ")", "return", "MIMEText", "(", "body", ")" ]
27.666667
0.013592
def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)): """ Create VTK image data directly from numpy arrays. A uniform grid is defined by the node spacings for each axis (uniform along each individual axis) and the number of nodes on each axis. These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``). Parameters ---------- dims : tuple(int) Length 3 tuple of ints specifying how many nodes along each axis spacing : tuple(float) Length 3 tuple of floats/ints specifying the node spacings for each axis origin : tuple(float) Length 3 tuple of floats/ints specifying minimum value for each axis """ xn, yn, zn = dims[0], dims[1], dims[2] xs, ys, zs = spacing[0], spacing[1], spacing[2] xo, yo, zo = origin[0], origin[1], origin[2] self.SetDimensions(xn, yn, zn) self.SetOrigin(xo, yo, zo) self.SetSpacing(xs, ys, zs)
[ "def", "_from_specs", "(", "self", ",", "dims", ",", "spacing", "=", "(", "1.0", ",", "1.0", ",", "1.0", ")", ",", "origin", "=", "(", "0.0", ",", "0.0", ",", "0.0", ")", ")", ":", "xn", ",", "yn", ",", "zn", "=", "dims", "[", "0", "]", ",", "dims", "[", "1", "]", ",", "dims", "[", "2", "]", "xs", ",", "ys", ",", "zs", "=", "spacing", "[", "0", "]", ",", "spacing", "[", "1", "]", ",", "spacing", "[", "2", "]", "xo", ",", "yo", ",", "zo", "=", "origin", "[", "0", "]", ",", "origin", "[", "1", "]", ",", "origin", "[", "2", "]", "self", ".", "SetDimensions", "(", "xn", ",", "yn", ",", "zn", ")", "self", ".", "SetOrigin", "(", "xo", ",", "yo", ",", "zo", ")", "self", ".", "SetSpacing", "(", "xs", ",", "ys", ",", "zs", ")" ]
41.916667
0.005831
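_from_specs is a thin wrapper over vtkImageData setters inherited by the grid class; the raw VTK equivalent is the following sketch, using plain vtk rather than the wrapper:

import vtk

img = vtk.vtkImageData()
img.SetDimensions(10, 10, 5)     # node counts along x, y, z
img.SetOrigin(0.0, 0.0, 0.0)     # minimum coordinate on each axis
img.SetSpacing(1.0, 1.0, 2.0)    # uniform node spacing per axis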
def policy_evaluation(pi, U, mdp, k=20): """Return an updated utility mapping U from each state in the MDP to its utility, using an approximation (modified policy iteration).""" R, T, gamma = mdp.R, mdp.T, mdp.gamma for i in range(k): for s in mdp.states: U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])]) return U
[ "def", "policy_evaluation", "(", "pi", ",", "U", ",", "mdp", ",", "k", "=", "20", ")", ":", "R", ",", "T", ",", "gamma", "=", "mdp", ".", "R", ",", "mdp", ".", "T", ",", "mdp", ".", "gamma", "for", "i", "in", "range", "(", "k", ")", ":", "for", "s", "in", "mdp", ".", "states", ":", "U", "[", "s", "]", "=", "R", "(", "s", ")", "+", "gamma", "*", "sum", "(", "[", "p", "*", "U", "[", "s1", "]", "for", "(", "p", ",", "s1", ")", "in", "T", "(", "s", ",", "pi", "[", "s", "]", ")", "]", ")", "return", "U" ]
45.375
0.002703
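A minimal self-contained run of policy_evaluation; TwoStateMDP is a stub providing only the R/T/gamma/states interface the function actually touches, not the real MDP class from this codebase:

class TwoStateMDP(object):
    states = ['a', 'b']
    gamma = 0.9
    def R(self, s):
        return 1.0 if s == 'b' else 0.0
    def T(self, s, a):
        return [(1.0, 'b')]  # every action deterministically moves to 'b'

mdp = TwoStateMDP()
pi = {'a': 'go', 'b': 'go'}
U = policy_evaluation(pi, {'a': 0.0, 'b': 0.0}, mdp, k=20)
print(U['b'])  # approaches R(b) / (1 - gamma) = 10 as k grows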
def encode(self, inputRow): """Encodes the given input row as a dict, with the keys being the field names. This also adds in some meta fields: '_category': The value from the category field (if any) '_reset': True if the reset field was True (if any) '_sequenceId': the value from the sequenceId field (if any) :param inputRow: sequence of values corresponding to a single input metric data row :rtype: dict """ # Create the return dict result = dict(zip(self._fieldNames, inputRow)) # Add in the special fields if self._categoryFieldIndex is not None: # category value can be an int or a list if isinstance(inputRow[self._categoryFieldIndex], int): result['_category'] = [inputRow[self._categoryFieldIndex]] else: result['_category'] = (inputRow[self._categoryFieldIndex] if inputRow[self._categoryFieldIndex] else [None]) else: result['_category'] = [None] if self._resetFieldIndex is not None: result['_reset'] = int(bool(inputRow[self._resetFieldIndex])) else: result['_reset'] = 0 if self._learningFieldIndex is not None: result['_learning'] = int(bool(inputRow[self._learningFieldIndex])) result['_timestampRecordIdx'] = None if self._timestampFieldIndex is not None: result['_timestamp'] = inputRow[self._timestampFieldIndex] # Compute the record index based on timestamp result['_timestampRecordIdx'] = self._computeTimestampRecordIdx( inputRow[self._timestampFieldIndex]) else: result['_timestamp'] = None # ----------------------------------------------------------------------- # Figure out the sequence ID hasReset = self._resetFieldIndex is not None hasSequenceId = self._sequenceFieldIndex is not None if hasReset and not hasSequenceId: # Reset only if result['_reset']: self._sequenceId += 1 sequenceId = self._sequenceId elif not hasReset and hasSequenceId: sequenceId = inputRow[self._sequenceFieldIndex] result['_reset'] = int(sequenceId != self._sequenceId) self._sequenceId = sequenceId elif hasReset and hasSequenceId: sequenceId = inputRow[self._sequenceFieldIndex] else: sequenceId = 0 if sequenceId is not None: result['_sequenceId'] = hash(sequenceId) else: result['_sequenceId'] = None return result
[ "def", "encode", "(", "self", ",", "inputRow", ")", ":", "# Create the return dict", "result", "=", "dict", "(", "zip", "(", "self", ".", "_fieldNames", ",", "inputRow", ")", ")", "# Add in the special fields", "if", "self", ".", "_categoryFieldIndex", "is", "not", "None", ":", "# category value can be an int or a list", "if", "isinstance", "(", "inputRow", "[", "self", ".", "_categoryFieldIndex", "]", ",", "int", ")", ":", "result", "[", "'_category'", "]", "=", "[", "inputRow", "[", "self", ".", "_categoryFieldIndex", "]", "]", "else", ":", "result", "[", "'_category'", "]", "=", "(", "inputRow", "[", "self", ".", "_categoryFieldIndex", "]", "if", "inputRow", "[", "self", ".", "_categoryFieldIndex", "]", "else", "[", "None", "]", ")", "else", ":", "result", "[", "'_category'", "]", "=", "[", "None", "]", "if", "self", ".", "_resetFieldIndex", "is", "not", "None", ":", "result", "[", "'_reset'", "]", "=", "int", "(", "bool", "(", "inputRow", "[", "self", ".", "_resetFieldIndex", "]", ")", ")", "else", ":", "result", "[", "'_reset'", "]", "=", "0", "if", "self", ".", "_learningFieldIndex", "is", "not", "None", ":", "result", "[", "'_learning'", "]", "=", "int", "(", "bool", "(", "inputRow", "[", "self", ".", "_learningFieldIndex", "]", ")", ")", "result", "[", "'_timestampRecordIdx'", "]", "=", "None", "if", "self", ".", "_timestampFieldIndex", "is", "not", "None", ":", "result", "[", "'_timestamp'", "]", "=", "inputRow", "[", "self", ".", "_timestampFieldIndex", "]", "# Compute the record index based on timestamp", "result", "[", "'_timestampRecordIdx'", "]", "=", "self", ".", "_computeTimestampRecordIdx", "(", "inputRow", "[", "self", ".", "_timestampFieldIndex", "]", ")", "else", ":", "result", "[", "'_timestamp'", "]", "=", "None", "# -----------------------------------------------------------------------", "# Figure out the sequence ID", "hasReset", "=", "self", ".", "_resetFieldIndex", "is", "not", "None", "hasSequenceId", "=", "self", ".", "_sequenceFieldIndex", "is", "not", "None", "if", "hasReset", "and", "not", "hasSequenceId", ":", "# Reset only", "if", "result", "[", "'_reset'", "]", ":", "self", ".", "_sequenceId", "+=", "1", "sequenceId", "=", "self", ".", "_sequenceId", "elif", "not", "hasReset", "and", "hasSequenceId", ":", "sequenceId", "=", "inputRow", "[", "self", ".", "_sequenceFieldIndex", "]", "result", "[", "'_reset'", "]", "=", "int", "(", "sequenceId", "!=", "self", ".", "_sequenceId", ")", "self", ".", "_sequenceId", "=", "sequenceId", "elif", "hasReset", "and", "hasSequenceId", ":", "sequenceId", "=", "inputRow", "[", "self", ".", "_sequenceFieldIndex", "]", "else", ":", "sequenceId", "=", "0", "if", "sequenceId", "is", "not", "None", ":", "result", "[", "'_sequenceId'", "]", "=", "hash", "(", "sequenceId", ")", "else", ":", "result", "[", "'_sequenceId'", "]", "=", "None", "return", "result" ]
34.070423
0.008839
def tangent_bundle(self): """The tangent bundle associated with `domain` using `partition`. The tangent bundle of a space ``X`` of functions ``R^d --> F`` can be interpreted as the space of vector-valued functions ``R^d --> F^d``. This space can be identified with the power space ``X^d`` as used in this implementation. """ if self.ndim == 0: return ProductSpace(field=self.field) else: return ProductSpace(self, self.ndim)
[ "def", "tangent_bundle", "(", "self", ")", ":", "if", "self", ".", "ndim", "==", "0", ":", "return", "ProductSpace", "(", "field", "=", "self", ".", "field", ")", "else", ":", "return", "ProductSpace", "(", "self", ",", "self", ".", "ndim", ")" ]
41.833333
0.003899
def create(self, validated_data): """ This is a standard method called indirectly by calling 'save' on the serializer. This method expects the 'parent_field' and 'parent_instance' to be included in the Serializer context. """ if self.context.get('parent_field') \ and self.context.get('parent_instance'): validated_data.update({ self.context.get('parent_field'): self.context.get('parent_instance')}) instance = self.Meta.model(**validated_data) instance.full_clean() instance.save() return instance
[ "def", "create", "(", "self", ",", "validated_data", ")", ":", "if", "self", ".", "context", ".", "get", "(", "'parent_field'", ")", "and", "self", ".", "context", ".", "get", "(", "'parent_instance'", ")", ":", "validated_data", ".", "update", "(", "{", "self", ".", "context", ".", "get", "(", "'parent_field'", ")", ":", "self", ".", "context", ".", "get", "(", "'parent_instance'", ")", "}", ")", "instance", "=", "self", ".", "Meta", ".", "model", "(", "*", "*", "validated_data", ")", "instance", ".", "full_clean", "(", ")", "instance", ".", "save", "(", ")", "return", "instance" ]
38.8125
0.004717
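A hedged sketch of how the create method above is reached in Django REST Framework; ChildSerializer, the data field, and parent_obj are illustrative names, while the two context keys are exactly those the method checks for:

serializer = ChildSerializer(
    data={'title': 'example'},
    context={'parent_field': 'parent', 'parent_instance': parent_obj},
)
serializer.is_valid(raise_exception=True)
child = serializer.save()  # DRF routes save() through the create() above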
def GetServiceDll(self, key):
    """Get the Service DLL for a service, if it exists.

    Checks for a ServiceDll value in the Parameters subkey of a service key in
    the Registry.

    Args:
      key (dfwinreg.WinRegistryKey): a Windows Registry key.

    Returns:
      str: path of the service DLL or None.
    """
    parameters_key = key.GetSubkeyByName('Parameters')
    if not parameters_key:
        return None

    service_dll = parameters_key.GetValueByName('ServiceDll')
    if not service_dll:
        return None

    return service_dll.GetDataAsObject()
[ "def", "GetServiceDll", "(", "self", ",", "key", ")", ":", "parameters_key", "=", "key", ".", "GetSubkeyByName", "(", "'Parameters'", ")", "if", "not", "parameters_key", ":", "return", "None", "service_dll", "=", "parameters_key", ".", "GetValueByName", "(", "'ServiceDll'", ")", "if", "not", "service_dll", ":", "return", "None", "return", "service_dll", ".", "GetDataAsObject", "(", ")" ]
26
0.0053
def auto_zip_open(filepath, mode): """Convenience function for opening potentially-compressed files.""" if filepath.endswith('.gz'): outfile = gzip.open(filepath, mode) elif filepath.endswith('.bz2'): outfile = bz2.BZ2File(filepath, mode) else: outfile = open(filepath, mode) return outfile
[ "def", "auto_zip_open", "(", "filepath", ",", "mode", ")", ":", "if", "filepath", ".", "endswith", "(", "'.gz'", ")", ":", "outfile", "=", "gzip", ".", "open", "(", "filepath", ",", "mode", ")", "elif", "filepath", ".", "endswith", "(", "'.bz2'", ")", ":", "outfile", "=", "bz2", ".", "BZ2File", "(", "filepath", ",", "mode", ")", "else", ":", "outfile", "=", "open", "(", "filepath", ",", "mode", ")", "return", "outfile" ]
36.222222
0.002994
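auto_zip_open dispatches purely on the filename suffix, so call sites stay identical across compressed and plain files:

with auto_zip_open('data.txt.gz', 'rt') as fh:   # handled by gzip.open
    header = fh.readline()

with auto_zip_open('data.txt', 'r') as fh:       # plain built-in open
    header = fh.readline()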
def change_note_duration(self, at, to): """Change the note duration at the given index to the given duration.""" if valid_beat_duration(to): diff = 0 for x in self.bar: if diff != 0: x[0][0] -= diff if x[0] == at: cur = x[0][1] x[0][1] = to diff = 1 / cur - 1 / to
[ "def", "change_note_duration", "(", "self", ",", "at", ",", "to", ")", ":", "if", "valid_beat_duration", "(", "to", ")", ":", "diff", "=", "0", "for", "x", "in", "self", ".", "bar", ":", "if", "diff", "!=", "0", ":", "x", "[", "0", "]", "[", "0", "]", "-=", "diff", "if", "x", "[", "0", "]", "==", "at", ":", "cur", "=", "x", "[", "0", "]", "[", "1", "]", "x", "[", "0", "]", "[", "1", "]", "=", "to", "diff", "=", "1", "/", "cur", "-", "1", "/", "to" ]
34.416667
0.004717
def user_absent(name, htpasswd_file=None, runas=None): ''' Make sure the user is not in the specified htpasswd file name User name htpasswd_file Path to the htpasswd file runas The system user to run htpasswd command with ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': None} exists = __salt__['file.grep']( htpasswd_file, '^{0}:'.format(name))['retcode'] == 0 if not exists: if __opts__['test']: ret['result'] = None else: ret['result'] = True ret['comment'] = 'User already not in file' else: if __opts__['test']: ret['result'] = None ret['comment'] = 'User \'{0}\' is set to be removed from htpasswd file'.format(name) ret['changes'] = {name: True} else: userdel_ret = __salt__['webutil.userdel']( htpasswd_file, name, runas=runas, all_results=True) ret['result'] = userdel_ret['retcode'] == 0 ret['comment'] = userdel_ret['stderr'] if ret['result']: ret['changes'] = {name: True} return ret
[ "def", "user_absent", "(", "name", ",", "htpasswd_file", "=", "None", ",", "runas", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'result'", ":", "None", "}", "exists", "=", "__salt__", "[", "'file.grep'", "]", "(", "htpasswd_file", ",", "'^{0}:'", ".", "format", "(", "name", ")", ")", "[", "'retcode'", "]", "==", "0", "if", "not", "exists", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "else", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'User already not in file'", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'User \\'{0}\\' is set to be removed from htpasswd file'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "True", "}", "else", ":", "userdel_ret", "=", "__salt__", "[", "'webutil.userdel'", "]", "(", "htpasswd_file", ",", "name", ",", "runas", "=", "runas", ",", "all_results", "=", "True", ")", "ret", "[", "'result'", "]", "=", "userdel_ret", "[", "'retcode'", "]", "==", "0", "ret", "[", "'comment'", "]", "=", "userdel_ret", "[", "'stderr'", "]", "if", "ret", "[", "'result'", "]", ":", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "True", "}", "return", "ret" ]
26.5
0.001654
def parse_simple_duration(duration):
    """
    Attempt to parse an ISO8601 formatted duration, using a naive calculation.

    Accepts a ``duration`` which must be an ISO8601 formatted string, and
    assumes 365 days in a year and 30 days in a month for the calculation.

    Returns a ``datetime.timedelta`` object.
    """
    elements = _parse_duration_string(_clean(duration))

    if not elements:
        raise ParseError()

    return _timedelta_from_elements(elements)
[ "def", "parse_simple_duration", "(", "duration", ")", ":", "elements", "=", "_parse_duration_string", "(", "_clean", "(", "duration", ")", ")", "if", "not", "elements", ":", "raise", "ParseError", "(", ")", "return", "_timedelta_from_elements", "(", "elements", ")" ]
31
0.002088
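Assuming the helper functions behave as the docstring describes, the naive 365-day-year / 30-day-month convention gives the following; the values are computed from the stated assumptions, not from running the original helpers:

print(parse_simple_duration('P1Y2M3D'))   # 428 days, 0:00:00  (365 + 2*30 + 3)
print(parse_simple_duration('PT1H30M'))   # 1:30:00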
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
    """Construct topo mapping file for ``excluded``.

    ``excluded`` must be a **dictionary** of
    ``{"rest-of-world label": ["names", "of", "excluded", "locations"]}``.

    Topo mapping has the data format:

    .. code-block:: python

        {
            'data': [
                ['location label', ['topo face integer ids']],
            ],
            'metadata': {
                'filename': 'name of face definitions file',
                'field': 'field which uniquely identifies the fields in ``filename``',
                'sha256': 'SHA 256 hash of ``filename``'
            }
        }

    """
    metadata = {
        'filename': 'faces.gpkg',
        'field': 'id',
        'sha256': sha256(self.faces_fp)
    }
    data = []
    for key, locations in excluded.items():
        for location in locations:
            assert location in self.locations, "Can't find location {}".format(location)
        included = self.all_faces.difference(
            {face for loc in locations for face in self.data[loc]}
        )
        data.append((key, sorted(included)))
    obj = {'data': data, 'metadata': metadata}
    if fp:
        with open(fp, "w") as f:
            json.dump(obj, f, indent=2)
    else:
        return obj
[ "def", "construct_rest_of_worlds_mapping", "(", "self", ",", "excluded", ",", "fp", "=", "None", ")", ":", "metadata", "=", "{", "'filename'", ":", "'faces.gpkg'", ",", "'field'", ":", "'id'", ",", "'sha256'", ":", "sha256", "(", "self", ".", "faces_fp", ")", "}", "data", "=", "[", "]", "for", "key", ",", "locations", "in", "excluded", ".", "items", "(", ")", ":", "for", "location", "in", "locations", ":", "assert", "location", "in", "self", ".", "locations", ",", "\"Can't find location {}\"", ".", "format", "(", "location", ")", "included", "=", "self", ".", "all_faces", ".", "difference", "(", "{", "face", "for", "loc", "in", "locations", "for", "face", "in", "self", ".", "data", "[", "loc", "]", "}", ")", "data", ".", "append", "(", "(", "key", ",", "sorted", "(", "included", ")", ")", ")", "obj", "=", "{", "'data'", ":", "data", ",", "'metadata'", ":", "metadata", "}", "if", "fp", ":", "with", "open", "(", "fp", ",", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "obj", ",", "f", ",", "indent", "=", "2", ")", "else", ":", "return", "obj" ]
34.95
0.003479
def has_aligned_reads(align_bam, region=None): """Check if the aligned BAM file has any reads in the region. region can be a chromosome string ("chr22"), a tuple region (("chr22", 1, 100)) or a file of regions. """ import pybedtools if region is not None: if isinstance(region, six.string_types) and os.path.isfile(region): regions = [tuple(r) for r in pybedtools.BedTool(region)] else: regions = [region] with pysam.Samfile(align_bam, "rb") as cur_bam: if region is not None: for region in regions: if isinstance(region, six.string_types): for item in cur_bam.fetch(str(region)): return True else: for item in cur_bam.fetch(str(region[0]), int(region[1]), int(region[2])): return True else: for item in cur_bam: if not item.is_unmapped: return True return False
[ "def", "has_aligned_reads", "(", "align_bam", ",", "region", "=", "None", ")", ":", "import", "pybedtools", "if", "region", "is", "not", "None", ":", "if", "isinstance", "(", "region", ",", "six", ".", "string_types", ")", "and", "os", ".", "path", ".", "isfile", "(", "region", ")", ":", "regions", "=", "[", "tuple", "(", "r", ")", "for", "r", "in", "pybedtools", ".", "BedTool", "(", "region", ")", "]", "else", ":", "regions", "=", "[", "region", "]", "with", "pysam", ".", "Samfile", "(", "align_bam", ",", "\"rb\"", ")", "as", "cur_bam", ":", "if", "region", "is", "not", "None", ":", "for", "region", "in", "regions", ":", "if", "isinstance", "(", "region", ",", "six", ".", "string_types", ")", ":", "for", "item", "in", "cur_bam", ".", "fetch", "(", "str", "(", "region", ")", ")", ":", "return", "True", "else", ":", "for", "item", "in", "cur_bam", ".", "fetch", "(", "str", "(", "region", "[", "0", "]", ")", ",", "int", "(", "region", "[", "1", "]", ")", ",", "int", "(", "region", "[", "2", "]", ")", ")", ":", "return", "True", "else", ":", "for", "item", "in", "cur_bam", ":", "if", "not", "item", ".", "is_unmapped", ":", "return", "True", "return", "False" ]
38.692308
0.00194
def map_volume_to_sdc(self, volumeObj, sdcObj=None, allowMultipleMappings=False, **kwargs): """ Map a Volume to SDC :param volumeObj: ScaleIO Volume object :param sdcObj: ScaleIO SDC object :param allowMultipleMappings: True to allow more than one SDC to be mapped to volume :return: POST request response :rtype: Requests POST response object """ self.conn.connection._check_login() if kwargs: for key, value in kwargs.iteritems(): if key == 'enableMapAllSdcs': if value == True: mapVolumeToSdcDict = {'allSdcs': 'True'} else: mapVolumeToSdcDict = {'sdcId': sdcObj.id, 'allowMultipleMappings': str(allowMultipleMappings).upper()} response = self.conn.connection._do_post("{}/{}{}/{}".format(self._api_url, "instances/Volume::", volumeObj.id, 'action/addMappedSdc'), json=mapVolumeToSdcDict) return response
[ "def", "map_volume_to_sdc", "(", "self", ",", "volumeObj", ",", "sdcObj", "=", "None", ",", "allowMultipleMappings", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "conn", ".", "connection", ".", "_check_login", "(", ")", "if", "kwargs", ":", "for", "key", ",", "value", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "key", "==", "'enableMapAllSdcs'", ":", "if", "value", "==", "True", ":", "mapVolumeToSdcDict", "=", "{", "'allSdcs'", ":", "'True'", "}", "else", ":", "mapVolumeToSdcDict", "=", "{", "'sdcId'", ":", "sdcObj", ".", "id", ",", "'allowMultipleMappings'", ":", "str", "(", "allowMultipleMappings", ")", ".", "upper", "(", ")", "}", "response", "=", "self", ".", "conn", ".", "connection", ".", "_do_post", "(", "\"{}/{}{}/{}\"", ".", "format", "(", "self", ".", "_api_url", ",", "\"instances/Volume::\"", ",", "volumeObj", ".", "id", ",", "'action/addMappedSdc'", ")", ",", "json", "=", "mapVolumeToSdcDict", ")", "return", "response" ]
51.421053
0.007035
def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin} or I{subclass of} L{Object} """ simulation = kwargs[self.injkey] msg = simulation.get('msg') reply = simulation.get('reply') fault = simulation.get('fault') if msg is None: if reply is not None: return self.__reply(reply, args, kwargs) if fault is not None: return self.__fault(fault) raise Exception('(reply|fault) expected when msg=None') sax = Parser() msg = sax.parse(string=msg) return self.send(msg)
[ "def", "invoke", "(", "self", ",", "args", ",", "kwargs", ")", ":", "simulation", "=", "kwargs", "[", "self", ".", "injkey", "]", "msg", "=", "simulation", ".", "get", "(", "'msg'", ")", "reply", "=", "simulation", ".", "get", "(", "'reply'", ")", "fault", "=", "simulation", ".", "get", "(", "'fault'", ")", "if", "msg", "is", "None", ":", "if", "reply", "is", "not", "None", ":", "return", "self", ".", "__reply", "(", "reply", ",", "args", ",", "kwargs", ")", "if", "fault", "is", "not", "None", ":", "return", "self", ".", "__fault", "(", "fault", ")", "raise", "Exception", "(", "'(reply|fault) expected when msg=None'", ")", "sax", "=", "Parser", "(", ")", "msg", "=", "sax", ".", "parse", "(", "string", "=", "msg", ")", "return", "self", ".", "send", "(", "msg", ")" ]
39.043478
0.002174
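A minimal standalone restatement of the dispatch rules invoke implements (a raw msg bypasses the canned reply and fault, and reply is checked before fault); this is an illustration of the control flow, not the suds API.

def dispatch(simulation):
    # Mirrors the precedence in invoke(): msg, then reply, then fault.
    if simulation.get('msg') is not None:
        return 'send path'
    if simulation.get('reply') is not None:
        return 'reply path'
    if simulation.get('fault') is not None:
        return 'fault path'
    raise Exception('(reply|fault) expected when msg=None')

assert dispatch({'msg': '<m/>'}) == 'send path'
assert dispatch({'reply': '<r/>'}) == 'reply path'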
def saveTargetsToFile(self, filename):
        """
        Deprecated. Writes one whitespace-separated float vector per line.
        """
        with open(filename, 'w') as fp:
            for target in self.targets:
                vec = self.replacePatterns(target)
                for item in vec:
                    fp.write("%f " % item)
                fp.write("\n")
[ "def", "saveTargetsToFile", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fp", ":", "for", "target", "in", "self", ".", "targets", ":", "vec", "=", "self", ".", "replacePatterns", "(", "target", ")", "for", "item", "in", "vec", ":", "fp", ".", "write", "(", "\"%f \"", "%", "item", ")", "fp", ".", "write", "(", "\"\\n\"", ")" ]
28.4
0.006826
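A round-trip sketch for the format saveTargetsToFile writes (one whitespace-separated float vector per line); the reader below is an assumed companion for illustration, not part of the original class.

def load_targets_from_file(filename):
    # Inverse of saveTargetsToFile: one float vector per non-blank line.
    with open(filename) as fp:
        return [[float(x) for x in line.split()] for line in fp if line.strip()]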
def finder(self, figsize=(7,7), **kwargs):
        '''
        Plot a finder chart. This *does* create a new figure.
        '''

        try:
            center = self.meta['center']
            radius = self.meta['radius']
        except KeyError:
            return self.allskyfinder(**kwargs)
        plt.figure(figsize=figsize)
        scatter = self.plot(**kwargs)
        plt.xlabel(r'Right Ascension ($^\circ$)')
        plt.ylabel(r'Declination ($^\circ$)')
        #plt.title('{} in {:.1f}'.format(self.name, epoch))
        r = radius.to('deg').value
        plt.xlim(center.ra.deg + r/np.cos(center.dec), center.ra.deg - r/np.cos(center.dec))
        plt.ylim(center.dec.deg - r, center.dec.deg + r)
        ax = plt.gca()
        ax.set_aspect(1.0/np.cos(center.dec))
        return scatter
[ "def", "finder", "(", "self", ",", "figsize", "=", "(", "7", ",", "7", ")", ",", "*", "*", "kwargs", ")", ":", "try", ":", "center", "=", "self", ".", "meta", "[", "'center'", "]", "radius", "=", "self", ".", "meta", "[", "'radius'", "]", "except", "KeyError", ":", "return", "self", ".", "allskyfinder", "(", "*", "*", "kwargs", ")", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "scatter", "=", "self", ".", "plot", "(", "*", "*", "kwargs", ")", "plt", ".", "xlabel", "(", "r'Right Ascension ($^\\circ$)'", ")", "plt", ".", "ylabel", "(", "r'Declination ($^\\circ$)'", ")", "#plt.title('{} in {:.1f}'.format(self.name, epoch))", "r", "=", "radius", ".", "to", "(", "'deg'", ")", ".", "value", "plt", ".", "xlim", "(", "center", ".", "ra", ".", "deg", "+", "r", "/", "np", ".", "cos", "(", "center", ".", "dec", ")", ",", "center", ".", "ra", ".", "deg", "-", "r", "/", "np", ".", "cos", "(", "center", ".", "dec", ")", ")", "plt", ".", "ylim", "(", "center", ".", "dec", ".", "deg", "-", "r", ",", "center", ".", "dec", ".", "deg", "+", "r", ")", "ax", "=", "plt", ".", "gca", "(", ")", "ax", ".", "set_aspect", "(", "1.0", "/", "np", ".", "cos", "(", "center", ".", "dec", ")", ")", "return", "scatter" ]
35.318182
0.008772
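A worked check of why the RA limits in finder are divided by cos(dec): one degree of right ascension spans only cos(dec) degrees on the sky, so the window must widen in RA to stay square; the declination value is illustrative.

import numpy as np

dec = np.deg2rad(60.0)
print(1.0 / np.cos(dec))  # ~2.0: at dec = 60 deg the RA window is twice as wide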
def time_to_number(self, time):
        """
        Converts a datetime.time instance to the corresponding fraction of a
        day (microseconds are ignored).
        """
        if not isinstance(time, datetime.time):
            raise TypeError(time)
        return ((time.second / 60.0 + time.minute) / 60.0 + time.hour) / 24.0
[ "def", "time_to_number", "(", "self", ",", "time", ")", ":", "if", "not", "isinstance", "(", "time", ",", "datetime", ".", "time", ")", ":", "raise", "TypeError", "(", "time", ")", "return", "(", "(", "time", ".", "second", "/", "60.0", "+", "time", ".", "minute", ")", "/", "60.0", "+", "time", ".", "hour", ")", "/", "24.0" ]
39.142857
0.007143
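A standalone check of the conversion above; since the method never touches self, the arithmetic can be exercised as a plain function. Microseconds are ignored by design.

import datetime

def time_to_number(time):
    # Same arithmetic as the method above, as a free function.
    return ((time.second / 60.0 + time.minute) / 60.0 + time.hour) / 24.0

assert time_to_number(datetime.time(6, 0, 0)) == 0.25
assert time_to_number(datetime.time(12, 0, 0)) == 0.5
assert abs(time_to_number(datetime.time(23, 59, 59)) - 1.0) < 1e-4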
def TP2(dv, u, jac=False):
    '''Demo problem 2 for horsetail matching. Takes two input vectors of size
    2 and returns just the qoi if jac is False, or the qoi and its gradient
    if jac is True.'''

    y = dv[0]/2.
    z = dv[1]/2. + 12
    q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7

    if not jac:
        return q
    else:
        dqdx1 = (1./8.)*( 2*y/10. )
        dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3
        return q, [dqdx1, dqdx2]
[ "def", "TP2", "(", "dv", ",", "u", ",", "jac", "=", "False", ")", ":", "y", "=", "dv", "[", "0", "]", "/", "2.", "z", "=", "dv", "[", "1", "]", "/", "2.", "+", "12", "q", "=", "0.25", "*", "(", "(", "y", "**", "2", "+", "z", "**", "2", ")", "/", "10", "+", "5", "*", "u", "[", "0", "]", "*", "u", "[", "1", "]", "-", "z", "*", "u", "[", "1", "]", "**", "2", ")", "+", "0.2", "*", "z", "*", "u", "[", "1", "]", "**", "3", "+", "7", "if", "not", "jac", ":", "return", "q", "else", ":", "dqdx1", "=", "(", "1.", "/", "8.", ")", "*", "(", "2", "*", "y", "/", "10.", ")", "dqdx2", "=", "(", "1.", "/", "8.", ")", "*", "(", "2", "*", "z", "/", "10.", "-", "u", "[", "1", "]", "**", "2", ")", "+", "0.1", "*", "u", "[", "1", "]", "**", "3", "return", "q", ",", "[", "dqdx1", ",", "dqdx2", "]" ]
31.8
0.008147
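A finite-difference check of the analytic gradient TP2 returns, assuming the function above is in scope; the sample point and tolerance are illustrative.

dv, u = [1.0, 2.0], [0.3, -0.4]
q, grad = TP2(dv, u, jac=True)
h = 1e-6
for i in range(2):
    dvp = list(dv)
    dvp[i] += h
    fd = (TP2(dvp, u) - q) / h   # forward difference in dv[i]
    assert abs(fd - grad[i]) < 1e-4, (i, fd, grad[i])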
def _process_command(self, command, name=None): """Process ``make_ndx`` command and return name and temp index file.""" self._command_counter += 1 if name is None: name = "CMD{0:03d}".format(self._command_counter) # Need to build it with two make_ndx calls because I cannot reliably # name the new group without knowing its number. try: fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='tmp_'+name+'__') cmd = [command, '', 'q'] # empty command '' necessary to get list # This sometimes fails with 'OSError: Broken Pipe' --- hard to debug rc,out,err = self.make_ndx(o=tmp_ndx, input=cmd) self.check_output(out, "No atoms found for selection {command!r}.".format(**vars()), err=err) # For debugging, look at out and err or set stdout=True, stderr=True # TODO: check ' 0 r_300_&_ALA_&_O : 1 atoms' has at least 1 atom ##print "DEBUG: _process_command()" ##print out groups = parse_ndxlist(out) last = groups[-1] # reduce and name this group fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__') name_cmd = ["keep {0:d}".format(last['nr']), "name 0 {0!s}".format(name), 'q'] rc,out,err = self.make_ndx(n=tmp_ndx, o=ndx, input=name_cmd) finally: utilities.unlink_gmx(tmp_ndx) return name, ndx
[ "def", "_process_command", "(", "self", ",", "command", ",", "name", "=", "None", ")", ":", "self", ".", "_command_counter", "+=", "1", "if", "name", "is", "None", ":", "name", "=", "\"CMD{0:03d}\"", ".", "format", "(", "self", ".", "_command_counter", ")", "# Need to build it with two make_ndx calls because I cannot reliably", "# name the new group without knowing its number.", "try", ":", "fd", ",", "tmp_ndx", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.ndx'", ",", "prefix", "=", "'tmp_'", "+", "name", "+", "'__'", ")", "cmd", "=", "[", "command", ",", "''", ",", "'q'", "]", "# empty command '' necessary to get list", "# This sometimes fails with 'OSError: Broken Pipe' --- hard to debug", "rc", ",", "out", ",", "err", "=", "self", ".", "make_ndx", "(", "o", "=", "tmp_ndx", ",", "input", "=", "cmd", ")", "self", ".", "check_output", "(", "out", ",", "\"No atoms found for selection {command!r}.\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ",", "err", "=", "err", ")", "# For debugging, look at out and err or set stdout=True, stderr=True", "# TODO: check ' 0 r_300_&_ALA_&_O : 1 atoms' has at least 1 atom", "##print \"DEBUG: _process_command()\"", "##print out", "groups", "=", "parse_ndxlist", "(", "out", ")", "last", "=", "groups", "[", "-", "1", "]", "# reduce and name this group", "fd", ",", "ndx", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.ndx'", ",", "prefix", "=", "name", "+", "'__'", ")", "name_cmd", "=", "[", "\"keep {0:d}\"", ".", "format", "(", "last", "[", "'nr'", "]", ")", ",", "\"name 0 {0!s}\"", ".", "format", "(", "name", ")", ",", "'q'", "]", "rc", ",", "out", ",", "err", "=", "self", ".", "make_ndx", "(", "n", "=", "tmp_ndx", ",", "o", "=", "ndx", ",", "input", "=", "name_cmd", ")", "finally", ":", "utilities", ".", "unlink_gmx", "(", "tmp_ndx", ")", "return", "name", ",", "ndx" ]
49.2
0.009302
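A hedged illustration of the default naming scheme in _process_command; the commented call sketches a typical invocation with make_ndx's own selection syntax, and 'ndx' is a placeholder instance of the surrounding class.

# Default group names follow the internal counter: CMD001, CMD002, ...
for counter in (1, 2, 10):
    print("CMD{0:03d}".format(counter))
# name, ndx_file = ndx._process_command("r 300 & a O", name="r300_O")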
def issuer(self, value):
        """
        An asn1crypto.x509.Certificate object of the issuer. Used to populate
        both the issuer field and the authority key identifier extension.
        """

        is_oscrypto = isinstance(value, asymmetric.Certificate)
        if not isinstance(value, x509.Certificate) and not is_oscrypto:
            raise TypeError(_pretty_message(
                '''
                issuer must be an instance of asn1crypto.x509.Certificate or
                oscrypto.asymmetric.Certificate, not %s
                ''',
                _type_name(value)
            ))

        if is_oscrypto:
            value = value.asn1

        self._issuer = value.subject

        self._key_identifier = self._subject_public_key.sha1
        self._authority_key_identifier = x509.AuthorityKeyIdentifier({
            'key_identifier': value.public_key.sha1
        })
[ "def", "issuer", "(", "self", ",", "value", ")", ":", "is_oscrypto", "=", "isinstance", "(", "value", ",", "asymmetric", ".", "Certificate", ")", "if", "not", "isinstance", "(", "value", ",", "x509", ".", "Certificate", ")", "and", "not", "is_oscrypto", ":", "raise", "TypeError", "(", "_pretty_message", "(", "'''\n issuer must be an instance of asn1crypto.x509.Certificate or\n oscrypto.asymmetric.Certificate, not %s\n '''", ",", "_type_name", "(", "value", ")", ")", ")", "if", "is_oscrypto", ":", "value", "=", "value", ".", "asn1", "self", ".", "_issuer", "=", "value", ".", "subject", "self", ".", "_key_identifier", "=", "self", ".", "_subject_public_key", ".", "sha1", "self", ".", "_authority_key_identifier", "=", "x509", ".", "AuthorityKeyIdentifier", "(", "{", "'key_identifier'", ":", "value", ".", "public_key", ".", "sha1", "}", ")" ]
35.08
0.00222
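A hedged sketch of the setter's contract: it accepts either an asn1crypto or an oscrypto certificate. The path and the 'builder' instance are placeholders; oscrypto.asymmetric.load_certificate is oscrypto's documented loader.

from oscrypto import asymmetric

ca_cert = asymmetric.load_certificate('/path/to/ca.crt')  # placeholder path
builder.issuer = ca_cert  # 'builder': instance of the surrounding class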
def empy_hankel(ftype, zsrc, zrec, res, freqtime, depth=None, aniso=None,
                epermH=None, epermV=None, mpermH=None, mpermV=None,
                htarg=None, verblhs=0, verbrhs=0):
    r"""Numerical transform pair with empymod.

    All parameters except ``ftype``, ``verblhs``, and ``verbrhs`` correspond
    to the input parameters to ``empymod.dipole``. See there for more
    information.

    Note that if depth=None or [], the analytical full-space solutions will
    be used (much faster).

    Parameters
    ----------
    ftype : str or list of strings
        Either of: {'j0', 'j1', 'j2', ['j0', 'j1']}

        - 'j0': Analyze J0-term with ab=11, angle=45°
        - 'j1': Analyze J1-term with ab=31, angle=0°
        - 'j2': Analyze J0- and J1-terms jointly with ab=12, angle=45°
        - ['j0', 'j1']: Same as calling empy_hankel twice, once with 'j0' and
          once with 'j1'; can be provided like this to fdesign.design.

    verblhs, verbrhs: int
        verb-values provided to empymod for lhs and rhs.

    Note that ftype='j2' only works for fC, not for fI.

    """

    # Loop over ftypes, if there are several
    if isinstance(ftype, list):
        out = []
        for f in ftype:
            out.append(empy_hankel(f, zsrc, zrec, res, freqtime, depth,
                                   aniso, epermH, epermV, mpermH, mpermV,
                                   htarg, verblhs, verbrhs))
        return out

    # Collect model
    model = {'src': [0, 0, zsrc],
             'depth': depth,
             'res': res,
             'aniso': aniso,
             'epermH': epermH,
             'epermV': epermV,
             'mpermH': mpermH,
             'mpermV': mpermV}

    # Finalize model depending on ftype
    if ftype == 'j0':     # J0: 11, 45°
        model['ab'] = 11
        x = 1/np.sqrt(2)
        y = 1/np.sqrt(2)

    elif ftype == 'j1':   # J1: 31, 0°
        model['ab'] = 31
        x = 1
        y = 0

    elif ftype == 'j2':   # J2: 12, 45°
        model['ab'] = 12
        x = 1/np.sqrt(2)
        y = 1/np.sqrt(2)

    # rhs: empymod.model.dipole
    # If depth=[], the analytical full-space solution will be used internally
    def rhs(r):
        out = dipole(rec=[r*x, r*y, zrec], ht='qwe', xdirect=True,
                     verb=verbrhs, htarg=htarg, freqtime=freqtime, **model)
        return out

    # lhs: empymod.model.dipole_k
    def lhs(k):
        lhs0, lhs1 = dipole_k(rec=[x, y, zrec], wavenumber=k, verb=verblhs,
                              freq=freqtime, **model)
        if ftype == 'j0':
            return lhs0
        elif ftype == 'j1':
            return lhs1
        elif ftype == 'j2':
            return (lhs0, lhs1)

    return Ghosh(ftype, lhs, rhs)
[ "def", "empy_hankel", "(", "ftype", ",", "zsrc", ",", "zrec", ",", "res", ",", "freqtime", ",", "depth", "=", "None", ",", "aniso", "=", "None", ",", "epermH", "=", "None", ",", "epermV", "=", "None", ",", "mpermH", "=", "None", ",", "mpermV", "=", "None", ",", "htarg", "=", "None", ",", "verblhs", "=", "0", ",", "verbrhs", "=", "0", ")", ":", "# Loop over ftypes, if there are several", "if", "isinstance", "(", "ftype", ",", "list", ")", ":", "out", "=", "[", "]", "for", "f", "in", "ftype", ":", "out", ".", "append", "(", "empy_hankel", "(", "f", ",", "zsrc", ",", "zrec", ",", "res", ",", "freqtime", ",", "depth", ",", "aniso", ",", "epermH", ",", "epermV", ",", "mpermH", ",", "mpermV", ",", "htarg", ",", "verblhs", ",", "verbrhs", ")", ")", "return", "out", "# Collect model", "model", "=", "{", "'src'", ":", "[", "0", ",", "0", ",", "zsrc", "]", ",", "'depth'", ":", "depth", ",", "'res'", ":", "res", ",", "'aniso'", ":", "aniso", ",", "'epermH'", ":", "epermH", ",", "'epermV'", ":", "epermV", ",", "'mpermH'", ":", "mpermH", ",", "'mpermV'", ":", "mpermV", "}", "# Finalize model depending on ftype", "if", "ftype", "==", "'j0'", ":", "# J0: 11, 45°", "model", "[", "'ab'", "]", "=", "11", "x", "=", "1", "/", "np", ".", "sqrt", "(", "2", ")", "y", "=", "1", "/", "np", ".", "sqrt", "(", "2", ")", "elif", "ftype", "==", "'j1'", ":", "# J1: 31, 0°", "model", "[", "'ab'", "]", "=", "31", "x", "=", "1", "y", "=", "0", "elif", "ftype", "==", "'j2'", ":", "# J2: 12, 45°", "model", "[", "'ab'", "]", "=", "12", "x", "=", "1", "/", "np", ".", "sqrt", "(", "2", ")", "y", "=", "1", "/", "np", ".", "sqrt", "(", "2", ")", "# rhs: empymod.model.dipole", "# If depth=[], the analytical full-space solution will be used internally", "def", "rhs", "(", "r", ")", ":", "out", "=", "dipole", "(", "rec", "=", "[", "r", "*", "x", ",", "r", "*", "y", ",", "zrec", "]", ",", "ht", "=", "'qwe'", ",", "xdirect", "=", "True", ",", "verb", "=", "verbrhs", ",", "htarg", "=", "htarg", ",", "freqtime", "=", "freqtime", ",", "*", "*", "model", ")", "return", "out", "# lhs: empymod.model.dipole_k", "def", "lhs", "(", "k", ")", ":", "lhs0", ",", "lhs1", "=", "dipole_k", "(", "rec", "=", "[", "x", ",", "y", ",", "zrec", "]", ",", "wavenumber", "=", "k", ",", "verb", "=", "verblhs", ",", "freq", "=", "freqtime", ",", "*", "*", "model", ")", "if", "ftype", "==", "'j0'", ":", "return", "lhs0", "elif", "ftype", "==", "'j1'", ":", "return", "lhs1", "elif", "ftype", "==", "'j2'", ":", "return", "(", "lhs0", ",", "lhs1", ")", "return", "Ghosh", "(", "ftype", ",", "lhs", ",", "rhs", ")" ]
32
0.000361
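A hedged construction of a transform pair for a simple air/half-space model; the numeric values are placeholders, and the returned Ghosh instance bundles the lhs/rhs pair for use with fdesign.

# Placeholder model: air (2e14 Ohm.m) over a 1 Ohm.m half-space.
tp = empy_hankel('j0', zsrc=50, zrec=100, res=[2e14, 1], freqtime=1,
                 depth=[0])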
def _handle_bugged_tarfile(self, destination, skip_top_level): """ Handle tar file that tarfile library mistakenly reports as invalid. Happens with tar files created on FAT systems. See: http://stackoverflow.com/questions/25552162/tarfile-readerror-file-could-not-be-opened-successfully """ args = ['tar', '-xzf', self.destination, '-C', destination] if skip_top_level: args.extend(['--strip-components', '1']) subprocess.check_call(args)
[ "def", "_handle_bugged_tarfile", "(", "self", ",", "destination", ",", "skip_top_level", ")", ":", "args", "=", "[", "'tar'", ",", "'-xzf'", ",", "self", ".", "destination", ",", "'-C'", ",", "destination", "]", "if", "skip_top_level", ":", "args", ".", "extend", "(", "[", "'--strip-components'", ",", "'1'", "]", ")", "subprocess", ".", "check_call", "(", "args", ")" ]
50.6
0.003883
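The subprocess call above is equivalent to running tar directly; a standalone sketch with placeholder paths, assuming GNU tar is on PATH.

import subprocess

subprocess.check_call(['tar', '-xzf', 'archive.tar.gz', '-C', '/tmp/out',
                       '--strip-components', '1'])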