text
stringlengths
75
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
0.18
def is_tone(char, strict=True): """ Check whether the character is a tone or word accent symbol. In strict mode return True only for the symbols listed in the last group of the chart. If strict=False, also accept symbols that belong to the Modifier Tone Letters Unicode block [1]. [1]: http://www.unicode.org/charts/PDF/UA700.pdf """ if char in chart.tones: return True if not strict: return 0xA700 <= ord(char) <= 0xA71F return False
[ "def", "is_tone", "(", "char", ",", "strict", "=", "True", ")", ":", "if", "char", "in", "chart", ".", "tones", ":", "return", "True", "if", "not", "strict", ":", "return", "0xA700", "<=", "ord", "(", "char", ")", "<=", "0xA71F", "return", "False" ]
27.25
0.028825
def _listen(self): """Listen for messages passed from parent This method distributes messages received via stdin to their corresponding channel. Based on the format of the incoming message, the message is forwarded to its corresponding channel to be processed by its corresponding handler. """ def _listen(): """This runs in a thread""" for line in iter(sys.stdin.readline, b""): try: response = json.loads(line) except Exception as e: # The parent has passed on a message that # isn't formatted in any particular way. # This is likely a bug. raise e else: if response.get("header") == "pyblish-qml:popen.response": self.channels["response"].put(line) elif response.get("header") == "pyblish-qml:popen.parent": self.channels["parent"].put(line) elif response.get("header") == "pyblish-qml:server.pulse": self._kill.cancel() # reset timer self._self_destruct() else: # The parent has passed on a message that # is JSON, but not in any format we recognise. # This is likely a bug. raise Exception("Unhandled message " "passed to Popen, '%s'" % line) thread = threading.Thread(target=_listen) thread.daemon = True thread.start()
[ "def", "_listen", "(", "self", ")", ":", "def", "_listen", "(", ")", ":", "\"\"\"This runs in a thread\"\"\"", "for", "line", "in", "iter", "(", "sys", ".", "stdin", ".", "readline", ",", "b\"\"", ")", ":", "try", ":", "response", "=", "json", ".", "lo...
38.534884
0.001177
def sum(context, key, value, multiplier=1): """ Adds the given value to the total value currently held in ``key``. Use the multiplier if you want to turn a positive value into a negative and actually substract from the current total sum. Usage:: {% sum "MY_TOTAL" 42 -1 %} {{ MY_TOTAL }} """ if key not in context.dicts[0]: context.dicts[0][key] = 0 context.dicts[0][key] += value * multiplier return ''
[ "def", "sum", "(", "context", ",", "key", ",", "value", ",", "multiplier", "=", "1", ")", ":", "if", "key", "not", "in", "context", ".", "dicts", "[", "0", "]", ":", "context", ".", "dicts", "[", "0", "]", "[", "key", "]", "=", "0", "context", ...
26.529412
0.002141
def date_line_to_text(self, date_line): """ Return the textual representation of the given :class:`~taxi.timesheet.lines.DateLine` instance. The date format is set by the `date_format` parameter given when instanciating the parser instance. """ # Changing the date in a dateline is not supported yet, but if it gets implemented someday this will need to be # changed if date_line._text is not None: return date_line._text else: return date_utils.unicode_strftime(date_line.date, self.date_format)
[ "def", "date_line_to_text", "(", "self", ",", "date_line", ")", ":", "# Changing the date in a dateline is not supported yet, but if it gets implemented someday this will need to be", "# changed", "if", "date_line", ".", "_text", "is", "not", "None", ":", "return", "date_line",...
52.181818
0.010274
def view_graph(graph_str, parent=None, prune_to=None): """View a graph.""" from rezgui.dialogs.ImageViewerDialog import ImageViewerDialog from rez.config import config # check for already written tempfile h = hash((graph_str, prune_to)) filepath = graph_file_lookup.get(h) if filepath and not os.path.exists(filepath): filepath = None # write graph to tempfile if filepath is None: suffix = ".%s" % config.dot_image_format fd, filepath = tempfile.mkstemp(suffix=suffix, prefix="rez-graph-") os.close(fd) dlg = WriteGraphDialog(graph_str, filepath, parent, prune_to=prune_to) if not dlg.write_graph(): return # display graph graph_file_lookup[h] = filepath dlg = ImageViewerDialog(filepath, parent) dlg.exec_()
[ "def", "view_graph", "(", "graph_str", ",", "parent", "=", "None", ",", "prune_to", "=", "None", ")", ":", "from", "rezgui", ".", "dialogs", ".", "ImageViewerDialog", "import", "ImageViewerDialog", "from", "rez", ".", "config", "import", "config", "# check for...
32.04
0.001212
def Stat(self, device_filename): """Get a file's stat() information.""" connection = self.protocol_handler.Open(self._handle, destination=b'sync:') mode, size, mtime = self.filesync_handler.Stat( connection, device_filename) connection.Close() return mode, size, mtime
[ "def", "Stat", "(", "self", ",", "device_filename", ")", ":", "connection", "=", "self", ".", "protocol_handler", ".", "Open", "(", "self", ".", "_handle", ",", "destination", "=", "b'sync:'", ")", "mode", ",", "size", ",", "mtime", "=", "self", ".", "...
44.857143
0.009375
def persist(name, value, config=None): ''' Assign and persist a simple sysctl parameter for this minion. If ``config`` is not specified, a sensible default will be chosen using :mod:`sysctl.default_config <salt.modules.linux_sysctl.default_config>`. CLI Example: .. code-block:: bash salt '*' sysctl.persist net.ipv4.ip_forward 1 ''' if config is None: config = default_config() edited = False # If the sysctl.conf is not present, add it if not os.path.isfile(config): sysctl_dir = os.path.dirname(config) if not os.path.exists(sysctl_dir): os.makedirs(sysctl_dir) try: with salt.utils.files.fopen(config, 'w+') as _fh: _fh.write('#\n# Kernel sysctl configuration\n#\n') except (IOError, OSError): msg = 'Could not write to file: {0}' raise CommandExecutionError(msg.format(config)) # Read the existing sysctl.conf nlines = [] try: with salt.utils.files.fopen(config, 'r') as _fh: # Use readlines because this should be a small file # and it seems unnecessary to indent the below for # loop since it is a fairly large block of code. config_data = salt.utils.data.decode(_fh.readlines()) except (IOError, OSError): msg = 'Could not read from file: {0}' raise CommandExecutionError(msg.format(config)) for line in config_data: if line.startswith('#'): nlines.append(line) continue if '=' not in line: nlines.append(line) continue # Strip trailing whitespace and split the k,v comps = [i.strip() for i in line.split('=', 1)] # On Linux procfs, files such as /proc/sys/net/ipv4/tcp_rmem or any # other sysctl with whitespace in it consistently uses 1 tab. Lets # allow our users to put a space or tab between multi-value sysctls # and have salt not try to set it every single time. 
if isinstance(comps[1], string_types) and ' ' in comps[1]: comps[1] = re.sub(r'\s+', '\t', comps[1]) # Do the same thing for the value 'just in case' if isinstance(value, string_types) and ' ' in value: value = re.sub(r'\s+', '\t', value) if len(comps) < 2: nlines.append(line) continue if name == comps[0]: # This is the line to edit if six.text_type(comps[1]) == six.text_type(value): # It is correct in the config, check if it is correct in /proc if six.text_type(get(name)) != six.text_type(value): assign(name, value) return 'Updated' else: return 'Already set' nlines.append('{0} = {1}\n'.format(name, value)) edited = True continue else: nlines.append(line) if not edited: nlines.append('{0} = {1}\n'.format(name, value)) try: with salt.utils.files.fopen(config, 'wb') as _fh: _fh.writelines(salt.utils.data.encode(nlines)) except (IOError, OSError): msg = 'Could not write to file: {0}' raise CommandExecutionError(msg.format(config)) assign(name, value) return 'Updated'
[ "def", "persist", "(", "name", ",", "value", ",", "config", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "=", "default_config", "(", ")", "edited", "=", "False", "# If the sysctl.conf is not present, add it", "if", "not", "os", ".", ...
36.444444
0.000297
def normalize_parameters(params): """ Normalize parameters """ params = params or {} normalized_parameters = OrderedDict() def get_value_like_as_php(val): """ Prepare value for quote """ try: base = basestring except NameError: base = (str, bytes) if isinstance(val, base): return val elif isinstance(val, bool): return "1" if val else "" elif isinstance(val, int): return str(val) elif isinstance(val, float): return str(int(val)) if val % 1 == 0 else str(val) else: return "" for key, value in params.items(): value = get_value_like_as_php(value) key = quote(unquote(str(key))).replace("%", "%25") value = quote(unquote(str(value))).replace("%", "%25") normalized_parameters[key] = value return normalized_parameters
[ "def", "normalize_parameters", "(", "params", ")", ":", "params", "=", "params", "or", "{", "}", "normalized_parameters", "=", "OrderedDict", "(", ")", "def", "get_value_like_as_php", "(", "val", ")", ":", "\"\"\" Prepare value for quote \"\"\"", "try", ":", "base...
33.233333
0.001949
def set(self, value): """ Sets the value of the object :param value: A unicode string containing an IPv4 address, IPv4 address with CIDR, an IPv6 address or IPv6 address with CIDR """ if not isinstance(value, str_cls): raise TypeError(unwrap( ''' %s value must be a unicode string, not %s ''', type_name(self), type_name(value) )) original_value = value has_cidr = value.find('/') != -1 cidr = 0 if has_cidr: parts = value.split('/', 1) value = parts[0] cidr = int(parts[1]) if cidr < 0: raise ValueError(unwrap( ''' %s value contains a CIDR range less than 0 ''', type_name(self) )) if value.find(':') != -1: family = socket.AF_INET6 if cidr > 128: raise ValueError(unwrap( ''' %s value contains a CIDR range bigger than 128, the maximum value for an IPv6 address ''', type_name(self) )) cidr_size = 128 else: family = socket.AF_INET if cidr > 32: raise ValueError(unwrap( ''' %s value contains a CIDR range bigger than 32, the maximum value for an IPv4 address ''', type_name(self) )) cidr_size = 32 cidr_bytes = b'' if has_cidr: cidr_mask = '1' * cidr cidr_mask += '0' * (cidr_size - len(cidr_mask)) cidr_bytes = int_to_bytes(int(cidr_mask, 2)) cidr_bytes = (b'\x00' * ((cidr_size // 8) - len(cidr_bytes))) + cidr_bytes self._native = original_value self.contents = inet_pton(family, value) + cidr_bytes self._bytes = self.contents self._header = None if self._trailer != b'': self._trailer = b''
[ "def", "set", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str_cls", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n %s value must be a unicode string, not %s\n '''", ",", "type_name", "...
30.928571
0.001791
def _send_broker_unaware_request(self, payloads, encoder_fn, decoder_fn): """ Attempt to send a broker-agnostic request to one of the available brokers. Keep trying until you succeed. """ hosts = set() for broker in self.brokers.values(): host, port, afi = get_ip_port_afi(broker.host) hosts.add((host, broker.port, afi)) hosts.update(self.hosts) hosts = list(hosts) random.shuffle(hosts) for (host, port, afi) in hosts: try: conn = self._get_conn(host, port, afi) except KafkaConnectionError: log.warning("Skipping unconnected connection: %s:%s (AFI %s)", host, port, afi) continue request = encoder_fn(payloads=payloads) future = conn.send(request) # Block while not future.is_done: for r, f in conn.recv(): f.success(r) if future.failed(): log.error("Request failed: %s", future.exception) continue return decoder_fn(future.value) raise KafkaUnavailableError('All servers failed to process request: %s' % (hosts,))
[ "def", "_send_broker_unaware_request", "(", "self", ",", "payloads", ",", "encoder_fn", ",", "decoder_fn", ")", ":", "hosts", "=", "set", "(", ")", "for", "broker", "in", "self", ".", "brokers", ".", "values", "(", ")", ":", "host", ",", "port", ",", "...
34.444444
0.002353
def get_dump_names(self, names, dumps=None): """ Find and return all dump names required (by dependancies) for a given dump names list Beware, the returned name list does not respect order, you should only use it when walking throught the "original" dict builded by OrderedDict """ # Default value for dumps argument is an empty set (setting directly # as a python argument would result as a shared value between # instances) if dumps is None: dumps = set([]) # Add name to the dumps and find its dependancies for item in names: if item not in self: if not self.silent_key_error: raise KeyError("Dump name '{0}' is unknowed".format(item)) else: continue dumps.add(item) # Add dependancies names to the dumps deps = self.__getitem__(item).get('dependancies', []) dumps.update(deps) # Avoid maximum recursion when we allready find all dependancies if names == dumps: return dumps # Seems we don't have finded other dependancies yet, recurse to do it return self.get_dump_names(dumps.copy(), dumps)
[ "def", "get_dump_names", "(", "self", ",", "names", ",", "dumps", "=", "None", ")", ":", "# Default value for dumps argument is an empty set (setting directly", "# as a python argument would result as a shared value between", "# instances)", "if", "dumps", "is", "None", ":", ...
36.823529
0.001556
def get_or_guess_labels(self, x, kwargs): """ Get the label to use in generating an adversarial example for x. The kwargs are fed directly from the kwargs of the attack. If 'y' is in kwargs, then assume it's an untargeted attack and use that as the label. If 'y_target' is in kwargs and is not none, then assume it's a targeted attack and use that as the label. Otherwise, use the model's prediction as the label and perform an untargeted attack. """ if 'y' in kwargs and 'y_target' in kwargs: raise ValueError("Can not set both 'y' and 'y_target'.") elif 'y' in kwargs: labels = kwargs['y'] elif 'y_target' in kwargs and kwargs['y_target'] is not None: labels = kwargs['y_target'] else: preds = self.model.get_probs(x) preds_max = reduce_max(preds, 1, keepdims=True) original_predictions = tf.to_float(tf.equal(preds, preds_max)) labels = tf.stop_gradient(original_predictions) del preds if isinstance(labels, np.ndarray): nb_classes = labels.shape[1] else: nb_classes = labels.get_shape().as_list()[1] return labels, nb_classes
[ "def", "get_or_guess_labels", "(", "self", ",", "x", ",", "kwargs", ")", ":", "if", "'y'", "in", "kwargs", "and", "'y_target'", "in", "kwargs", ":", "raise", "ValueError", "(", "\"Can not set both 'y' and 'y_target'.\"", ")", "elif", "'y'", "in", "kwargs", ":"...
40.357143
0.009507
def format_as_html(explanation, # type: Explanation include_styles=True, # type: bool force_weights=True, # type: bool show=fields.ALL, preserve_density=None, # type: Optional[bool] highlight_spaces=None, # type: Optional[bool] horizontal_layout=True, # type: bool show_feature_values=False # type: bool ): # type: (...) -> str """ Format explanation as html. Most styles are inline, but some are included separately in <style> tag, you can omit them by passing ``include_styles=False`` and call ``format_html_styles`` to render them separately (or just omit them). With ``force_weights=False``, weights will not be displayed in a table for predictions where it is possible to show feature weights highlighted in the document. If ``highlight_spaces`` is None (default), spaces will be highlighted in feature names only if there are any spaces at the start or at the end of the feature. Setting it to True forces space highlighting, and setting it to False turns it off. If ``horizontal_layout`` is True (default), multiclass classifier weights are laid out horizontally. If ``show_feature_values`` is True, feature values are shown if present. Default is False. 
""" template = template_env.get_template('explain.html') if highlight_spaces is None: highlight_spaces = should_highlight_spaces(explanation) targets = explanation.targets or [] if len(targets) == 1: horizontal_layout = False explaining_prediction = has_any_values_for_weights(explanation) show_feature_values = show_feature_values and explaining_prediction rendered_weighted_spans = render_targets_weighted_spans( targets, preserve_density) weighted_spans_others = [ t.weighted_spans.other if t.weighted_spans else None for t in targets] return template.render( include_styles=include_styles, force_weights=force_weights, target_table_styles= 'border-collapse: collapse; border: none; margin-top: 0em; table-layout: auto;', tr_styles='border: none;', # Weight (th and td) td1_styles='padding: 0 1em 0 0.5em; text-align: right; border: none;', # N more positive/negative tdm_styles='padding: 0 0.5em 0 0.5em; text-align: center; border: none; ' 'white-space: nowrap;', # Feature (th and td) td2_styles='padding: 0 0.5em 0 0.5em; text-align: left; border: none;', # Value (th and td) td3_styles='padding: 0 0.5em 0 1em; text-align: right; border: none;', horizontal_layout_table_styles= 'border-collapse: collapse; border: none; margin-bottom: 1.5em;', horizontal_layout_td_styles= 'padding: 0px; border: 1px solid black; vertical-align: top;', horizontal_layout_header_styles= 'padding: 0.5em; border: 1px solid black; text-align: center;', show=show, expl=explanation, hl_spaces=highlight_spaces, horizontal_layout=horizontal_layout, any_weighted_spans=any(t.weighted_spans for t in targets), feat_imp_weight_range=max_or_0( abs(fw.weight) for fw in explanation.feature_importances.importances) if explanation.feature_importances else 0, target_weight_range=max_or_0( get_weight_range(t.feature_weights) for t in targets), other_weight_range=max_or_0( get_weight_range(other) for other in weighted_spans_others if other), targets_with_weighted_spans=list( zip(targets, rendered_weighted_spans, 
weighted_spans_others)), show_feature_values=show_feature_values, weights_table_span=3 if show_feature_values else 2, explaining_prediction=explaining_prediction, weight_help=html_escape(WEIGHT_HELP), contribution_help=html_escape(CONTRIBUTION_HELP), )
[ "def", "format_as_html", "(", "explanation", ",", "# type: Explanation", "include_styles", "=", "True", ",", "# type: bool", "force_weights", "=", "True", ",", "# type: bool", "show", "=", "fields", ".", "ALL", ",", "preserve_density", "=", "None", ",", "# type: O...
48.536585
0.002216
def _add_non_batch(self, TX_nodes, PmtInf_nodes): """ Method to add a transaction as non batch, will fold the transaction together with the payment info node and append to the main xml. """ PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode']) PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node']) PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode']) if 'ReqdExctnDtNode' in PmtInf_nodes: PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode']) PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode']) PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node']) PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode']) if 'BIC' in self._config: PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node']) PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode']) TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node']) TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode']) if TX_nodes['BIC_CdtrAgt_Node'].text is not None: TX_nodes['FinInstnId_CdtrAgt_Node'].append( TX_nodes['BIC_CdtrAgt_Node']) TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode']) TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node']) 
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode']) TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node']) TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode']) TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode']) PmtInf_nodes['PmtInfNode'].append(TX_nodes['CdtTrfTxInfNode']) CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn') CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
[ "def", "_add_non_batch", "(", "self", ",", "TX_nodes", ",", "PmtInf_nodes", ")", ":", "PmtInf_nodes", "[", "'PmtInfNode'", "]", ".", "append", "(", "PmtInf_nodes", "[", "'PmtInfIdNode'", "]", ")", "PmtInf_nodes", "[", "'PmtInfNode'", "]", ".", "append", "(", ...
52.592593
0.001728
def as_xml(self,parent): """ Create XML representation of `self`. :Parameters: - `parent`: the element to which the created node should be linked to. :Types: - `parent`: `libxml2.xmlNode` :return: an XML node. :returntype: `libxml2.xmlNode` """ n=parent.newChild(None,"status",None) n.setProp("code","%03i" % (self.code,)) return n
[ "def", "as_xml", "(", "self", ",", "parent", ")", ":", "n", "=", "parent", ".", "newChild", "(", "None", ",", "\"status\"", ",", "None", ")", "n", ".", "setProp", "(", "\"code\"", ",", "\"%03i\"", "%", "(", "self", ".", "code", ",", ")", ")", "re...
28.2
0.018307
def parse_headers(self, req: Request, name: str, field: Field) -> typing.Any: """Pull a value from the header data.""" return core.get_value(req.headers, name, field)
[ "def", "parse_headers", "(", "self", ",", "req", ":", "Request", ",", "name", ":", "str", ",", "field", ":", "Field", ")", "->", "typing", ".", "Any", ":", "return", "core", ".", "get_value", "(", "req", ".", "headers", ",", "name", ",", "field", "...
60
0.010989
def debug_text_simple(self, text: str): """ Draws a text in the top left corner of the screen (up to a max of 6 messages it seems). Don't forget to add 'await self._client.send_debug'. """ self._debug_texts.append(self.to_debug_message(text))
[ "def", "debug_text_simple", "(", "self", ",", "text", ":", "str", ")", ":", "self", ".", "_debug_texts", ".", "append", "(", "self", ".", "to_debug_message", "(", "text", ")", ")" ]
85.333333
0.011628
def get_read_buffers(self, size): """Get buffer(s) from which we can read data. When done reading, use :meth:`advance_read_index` to make the memory available for writing again. :param size: The number of elements desired. :type size: int :returns: * The number of elements available for reading (which might be less than the requested *size*). * The first buffer. * The second buffer. :rtype: (int, buffer, buffer) """ ptr1 = self._ffi.new('void**') ptr2 = self._ffi.new('void**') size1 = self._ffi.new('ring_buffer_size_t*') size2 = self._ffi.new('ring_buffer_size_t*') return (self._lib.PaUtil_GetRingBufferReadRegions( self._ptr, size, ptr1, size1, ptr2, size2), self._ffi.buffer(ptr1[0], size1[0] * self.elementsize), self._ffi.buffer(ptr2[0], size2[0] * self.elementsize))
[ "def", "get_read_buffers", "(", "self", ",", "size", ")", ":", "ptr1", "=", "self", ".", "_ffi", ".", "new", "(", "'void**'", ")", "ptr2", "=", "self", ".", "_ffi", ".", "new", "(", "'void**'", ")", "size1", "=", "self", ".", "_ffi", ".", "new", ...
38.56
0.002024
def total_size(obj): """Returns the approximate total memory footprint an object.""" seen = set() def sizeof(current_obj): try: return _sizeof(current_obj) except Exception: # pylint: disable=broad-except # Not sure what just happened, but let's assume it's a reference. return struct.calcsize('P') def _sizeof(current_obj): """Do a depth-first acyclic traversal of all reachable objects.""" if id(current_obj) in seen: # A rough approximation of the size cost of an additional reference. return struct.calcsize('P') seen.add(id(current_obj)) size = sys.getsizeof(current_obj) if isinstance(current_obj, dict): size += sum(map(sizeof, itertools.chain.from_iterable( six.iteritems(current_obj)))) elif (isinstance(current_obj, collections.Iterable) and not isinstance(current_obj, six.string_types)): size += sum(sizeof(item) for item in current_obj) elif isinstance(current_obj, records.RecordClass): size += sum(sizeof(getattr(current_obj, attr)) for attr in current_obj.__slots__) return size return sizeof(obj)
[ "def", "total_size", "(", "obj", ")", ":", "seen", "=", "set", "(", ")", "def", "sizeof", "(", "current_obj", ")", ":", "try", ":", "return", "_sizeof", "(", "current_obj", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "# Not sure what jus...
37.466667
0.01301
def RetryOnUnavailable(func): """Function decorator to retry on a service unavailable exception.""" @functools.wraps(func) def Wrapper(*args, **kwargs): while True: try: response = func(*args, **kwargs) except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: time.sleep(5) if (isinstance(e, urlerror.HTTPError) and e.getcode() == httpclient.SERVICE_UNAVAILABLE): continue elif isinstance(e, socket.timeout): continue raise else: if response.getcode() == httpclient.OK: return response else: raise StatusException(response) return Wrapper
[ "def", "RetryOnUnavailable", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "Wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "while", "True", ":", "try", ":", "response", "=", "func", "(", "*", "args"...
30.545455
0.018759
def _execute_callback(self, status, message, job, res, err, stacktrace): """Execute the callback. :param status: Job status. Possible values are "invalid" (job could not be deserialized or was malformed), "failure" (job raised an error), "timeout" (job timed out), or "success" (job finished successfully and returned a result). :type status: str :param message: Kafka message. :type message: :doc:`kq.Message <message>` :param job: Job object, or None if **status** was "invalid". :type job: kq.Job :param res: Job result, or None if an exception was raised. :type res: object | None :param err: Exception raised by job, or None if there was none. :type err: Exception | None :param stacktrace: Exception traceback, or None if there was none. :type stacktrace: str | None """ if self._callback is not None: try: self._logger.info('Executing callback ...') self._callback(status, message, job, res, err, stacktrace) except Exception as e: self._logger.exception( 'Callback raised an exception: {}'.format(e))
[ "def", "_execute_callback", "(", "self", ",", "status", ",", "message", ",", "job", ",", "res", ",", "err", ",", "stacktrace", ")", ":", "if", "self", ".", "_callback", "is", "not", "None", ":", "try", ":", "self", ".", "_logger", ".", "info", "(", ...
47.384615
0.001591
def without(seq1, seq2): r"""Return a list with all elements in `seq2` removed from `seq1`, order preserved. Examples: >>> without([1,2,3,1,2], [1]) [2, 3, 2] """ if isSet(seq2): d2 = seq2 else: d2 = set(seq2) return [elt for elt in seq1 if elt not in d2]
[ "def", "without", "(", "seq1", ",", "seq2", ")", ":", "if", "isSet", "(", "seq2", ")", ":", "d2", "=", "seq2", "else", ":", "d2", "=", "set", "(", "seq2", ")", "return", "[", "elt", "for", "elt", "in", "seq1", "if", "elt", "not", "in", "d2", ...
23.5
0.010239
def add_arguments(self, parser): """Adds the arguments for the emulator command. Args: self (EmulatorCommand): the ``EmulatorCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None`` """ group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-l', '--list', nargs='?', type=str.lower, default='_', choices=['usb', 'ip'], help='list all the connected emulators') group.add_argument('-s', '--supported', nargs=1, help='query whether a device is supported') group.add_argument('-t', '--test', action='store_true', help='perform a self-test') return None
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "True", ")", "group", ".", "add_argument", "(", "'-l'", ",", "'--list'", ",", "nargs", "=", "'?'", ",", "type",...
41.8
0.002339
def _validate(self): """ The purpose of this method is to verify that the user has set sensible values for the training program before rendering. The user will still be able to render, but error messages will be printed. This method: * Validates that the average intensity is in the range [65, 85]. * Validates that the number of repetitions is in the range [15, 45]. * Validates that 'reps_to_intensity_func' maps to [0, 100]. * Validates that 'reps_to_intensity_func' is a decreasing function. * Validates that the exercises do not grow more than 2.5% per week. Apart from these sanity checks, the user is on his own. """ # Validate the intensity if max([s * self.intensity for s in self._intensity_scalers]) > 85: warnings.warn('\nWARNING: Average intensity is > 85.') if min([s * self.intensity for s in self._intensity_scalers]) < 65: warnings.warn('\nWARNING: Average intensity is < 65.') # Validate the repetitions if max([s * self.reps_per_exercise for s in self._rep_scalers]) > 45: warnings.warn('\nWARNING: Number of repetitions > 45.') if min([s * self.reps_per_exercise for s in self._rep_scalers]) < 15: warnings.warn('\nWARNING: Number of repetitions < 15.') # Validate the 'reps_to_intensity_func' for x1, x2 in zip(range(1, 20), range(2, 21)): y1 = self.reps_to_intensity_func(x1) y2 = self.reps_to_intensity_func(x2) if y1 < y2: warnings.warn("\n'reps_to_intensity_func' is not decreasing.") if any(self.reps_to_intensity_func(x) > 100 for x in range(1, 20)): warnings.warn("\n'reps_to_intensity_func' maps to > 100.") if any(self.reps_to_intensity_func(x) < 0 for x in range(1, 20)): warnings.warn("\n'reps_to_intensity_func' maps to < 0.") # Validate the exercises for day in self.days: for dynamic_ex in day.dynamic_exercises: start, end = dynamic_ex.start_weight, dynamic_ex.final_weight percentage_growth = (end / start) ** (1 / self.duration) percentage_growth = dynamic_ex.weekly_growth(self.duration) if percentage_growth > 4: msg = '\n"{}" grows with {}% each 
week.'.format( dynamic_ex.name, percentage_growth) warnings.warn(msg)
[ "def", "_validate", "(", "self", ")", ":", "# Validate the intensity", "if", "max", "(", "[", "s", "*", "self", ".", "intensity", "for", "s", "in", "self", ".", "_intensity_scalers", "]", ")", ">", "85", ":", "warnings", ".", "warn", "(", "'\\nWARNING: A...
49.156863
0.001955
def _init_sys_auto_lookup(self): """Return a list of tuples of available init systems on the current machine. Note that in some situations (Ubuntu 14.04 for instance) more than one init system can be found. """ # TODO: Instead, check for executables for systemd and upstart # systemctl for systemd and initctl for upstart. # An alternative might be to check the second answer here: # http://unix.stackexchange.com/questions/196166/how-to-find-out-if-a-system-uses-sysv-upstart-or-systemd-initsystem # TODO: Move to each system's implementation init_systems = [] if self._is_init_system_installed('/usr/lib/systemd'): init_systems.append('systemd') if self._is_init_system_installed('/usr/share/upstart'): init_systems.append('upstart') if self._is_init_system_installed('/etc/init.d'): init_systems.append('sysv') return init_systems
[ "def", "_init_sys_auto_lookup", "(", "self", ")", ":", "# TODO: Instead, check for executables for systemd and upstart", "# systemctl for systemd and initctl for upstart.", "# An alternative might be to check the second answer here:", "# http://unix.stackexchange.com/questions/196166/how-to-find-o...
48.55
0.00202
def set_status(self, value): """ Set the status of the motor to the specified value if not already set. """ if not self._status == value: old = self._status self._status = value logger.info("{} changing status from {} to {}".format(self, old.name, value.name)) self._statusChanged(old, value)
[ "def", "set_status", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "_status", "==", "value", ":", "old", "=", "self", ".", "_status", "self", ".", "_status", "=", "value", "logger", ".", "info", "(", "\"{} changing status from {} to {}\"",...
40.444444
0.008065
def get(self, orig_key): """Get cache entry for key, or return None.""" resp = requests.Response() key = self._clean_key(orig_key) path = os.path.join(self.cache_dir, key) try: with open(path, 'rb') as f: # read lines one at a time while True: line = f.readline().decode('utf8').strip('\r\n') # set headers if self.check_last_modified and re.search("last-modified", line, flags=re.I): # line contains last modified header head_resp = requests.head(orig_key) try: new_lm = head_resp.headers['last-modified'] old_lm = line[string.find(line, ':') + 1:].strip() if old_lm != new_lm: # last modified timestamps don't match, need to download again return None except KeyError: # no last modified header present, so redownload return None header = self._header_re.match(line) if header: resp.headers[header.group(1)] = header.group(2) else: break # everything left is the real content resp._content = f.read() # status & encoding will be in headers, but are faked # need to split spaces out of status to get code (e.g. '200 OK') resp.status_code = int(resp.headers.pop('status').split(' ')[0]) resp.encoding = resp.headers.pop('encoding') resp.url = resp.headers.get('content-location', orig_key) # TODO: resp.request = request return resp except IOError: return None
[ "def", "get", "(", "self", ",", "orig_key", ")", ":", "resp", "=", "requests", ".", "Response", "(", ")", "key", "=", "self", ".", "_clean_key", "(", "orig_key", ")", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", ...
42.444444
0.002047
def store(self, key, pk, value): """Store the value/pk in the sorted set index For the parameters, see BaseRangeIndex.store We simple store the pk as a member of the sorted set with the value being the score """ self.connection.zadd(key, value, pk)
[ "def", "store", "(", "self", ",", "key", ",", "pk", ",", "value", ")", ":", "self", ".", "connection", ".", "zadd", "(", "key", ",", "value", ",", "pk", ")" ]
31.444444
0.010309
def from_xarray(da, crs=None, apply_transform=False, nan_nodata=False, **kwargs): """ Returns an RGB or Image element given an xarray DataArray loaded using xr.open_rasterio. If a crs attribute is present on the loaded data it will attempt to decode it into a cartopy projection otherwise it will default to a non-geographic HoloViews element. Parameters ---------- da: xarray.DataArray DataArray to convert to element crs: Cartopy CRS or EPSG string (optional) Overrides CRS inferred from the data apply_transform: boolean Whether to apply affine transform if defined on the data nan_nodata: boolean If data contains nodata values convert them to NaNs **kwargs: Keyword arguments passed to the HoloViews/GeoViews element Returns ------- element: Image/RGB/QuadMesh element """ if crs: kwargs['crs'] = crs elif hasattr(da, 'crs'): try: kwargs['crs'] = process_crs(da.crs) except: param.main.warning('Could not decode projection from crs string %r, ' 'defaulting to non-geographic element.' % da.crs) coords = list(da.coords) if coords not in (['band', 'y', 'x'], ['y', 'x']): from .element.geo import Dataset, HvDataset el = Dataset if 'crs' in kwargs else HvDataset return el(da, **kwargs) if len(coords) == 2: y, x = coords bands = 1 else: y, x = coords[1:] bands = len(da.coords[coords[0]]) if apply_transform: from affine import Affine transform = Affine.from_gdal(*da.attrs['transform'][:6]) nx, ny = da.sizes[x], da.sizes[y] xs, ys = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform data = (xs, ys) else: xres, yres = da.attrs['res'] if 'res' in da.attrs else (1, 1) xs = da.coords[x][::-1] if xres < 0 else da.coords[x] ys = da.coords[y][::-1] if yres < 0 else da.coords[y] data = (xs, ys) for b in range(bands): values = da[b].values if nan_nodata and da.attrs.get('nodatavals', []): values = values.astype(float) for d in da.attrs['nodatavals']: values[values==d] = np.NaN data += (values,) if 'datatype' not in kwargs: kwargs['datatype'] = ['xarray', 'grid', 'image'] if 
xs.ndim > 1: from .element.geo import QuadMesh, HvQuadMesh el = QuadMesh if 'crs' in kwargs else HvQuadMesh el = el(data, [x, y], **kwargs) elif bands < 3: from .element.geo import Image, HvImage el = Image if 'crs' in kwargs else HvImage el = el(data, [x, y], **kwargs) else: from .element.geo import RGB, HvRGB el = RGB if 'crs' in kwargs else HvRGB vdims = el.vdims[:bands] el = el(data, [x, y], vdims, **kwargs) if hasattr(el.data, 'attrs'): el.data.attrs = da.attrs return el
[ "def", "from_xarray", "(", "da", ",", "crs", "=", "None", ",", "apply_transform", "=", "False", ",", "nan_nodata", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "crs", ":", "kwargs", "[", "'crs'", "]", "=", "crs", "elif", "hasattr", "(", "...
33.159091
0.001997
def post_document(self, data): """ Create and analyze a new document data -- A Dictionary representing the new document """ data.update({ 'client' : CLIENT }) return self._call('POST', self._generate_url_path('documents'), data)
[ "def", "post_document", "(", "self", ",", "data", ")", ":", "data", ".", "update", "(", "{", "'client'", ":", "CLIENT", "}", ")", "return", "self", ".", "_call", "(", "'POST'", ",", "self", ".", "_generate_url_path", "(", "'documents'", ")", ",", "data...
35.142857
0.015873
def start(self): """ Start animation thread. """ self.thread = threading.Thread(target=self._animate) self.thread.start() return
[ "def", "start", "(", "self", ")", ":", "self", ".", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_animate", ")", "self", ".", "thread", ".", "start", "(", ")", "return" ]
24.285714
0.011364
def _request(self, req_and_resp, **kwargs): """ Take a request_and_response object from pyswagger.App and check auth, token, headers, prepare the actual request and fill the response Note on performance : if you need more performance (because you are using this in a batch) you'd rather set raw_body_only=True, as parsed body is really slow. You'll then have to get data from response.raw and convert it to json using "json.loads(response.raw)" :param req_and_resp: the request and response object from pyswagger.App :param raw_body_only: define if we want the body to be parsed as object instead of staying a raw dict. [Default: False] :param opt: options, see pyswagger/blob/master/pyswagger/io.py#L144 :param raise_on_error: boolean to raise an error if HTTP Code >= 400 :return: the final response. """ opt = kwargs.pop('opt', {}) # reset the request and response to reuse existing req_and_resp req_and_resp[0].reset() req_and_resp[1].reset() # required because of inheritance request, response = super(EsiClient, self).request(req_and_resp, opt) # check cache here so we have all headers, formed url and params cache_key = make_cache_key(request) res = self.__make_request(request, opt, cache_key) if res.status_code == 200: self.__cache_response(cache_key, res, request.method.upper()) # generate the Response object from requests response response.raw_body_only = kwargs.pop( 'raw_body_only', self.raw_body_only ) try: response.apply_with( status=res.status_code, header=res.headers, raw=six.BytesIO(res.content).getvalue() ) except ValueError: # catch JSONDecodeError/ValueError when response is not JSON raise APIException( request.url, res.status_code, response=res.content, request_param=request.query, response_header=res.headers ) if 'warning' in res.headers: # send in logger and warnings, so the user doesn't have to use # logging to see it (at least once) LOGGER.warning("[%s] %s", res.url, res.headers['warning']) warnings.warn("[%s] %s" % (res.url, res.headers['warning'])) if 
res.status_code >= 400 and kwargs.pop('raise_on_error', False): raise APIException( request.url, res.status_code, response=response.raw, request_param=request.query, response_header=response.header ) return response
[ "def", "_request", "(", "self", ",", "req_and_resp", ",", "*", "*", "kwargs", ")", ":", "opt", "=", "kwargs", ".", "pop", "(", "'opt'", ",", "{", "}", ")", "# reset the request and response to reuse existing req_and_resp\r", "req_and_resp", "[", "0", "]", ".",...
38.891892
0.000678
def node(self, parent=None, tag='g', attrib=None, **extras): """Make a new svg node""" if parent is None: parent = self.root attrib = attrib or {} attrib.update(extras) def in_attrib_and_number(key): return key in attrib and isinstance(attrib[key], Number) for pos, dim in (('x', 'width'), ('y', 'height')): if in_attrib_and_number(dim) and attrib[dim] < 0: attrib[dim] = -attrib[dim] if in_attrib_and_number(pos): attrib[pos] = attrib[pos] - attrib[dim] for key, value in dict(attrib).items(): if value is None: del attrib[key] attrib[key] = to_str(value) if key.endswith('_'): attrib[key.rstrip('_')] = attrib[key] del attrib[key] elif key == 'href': attrib[etree.QName('http://www.w3.org/1999/xlink', key)] = attrib[key] del attrib[key] return etree.SubElement(parent, tag, attrib)
[ "def", "node", "(", "self", ",", "parent", "=", "None", ",", "tag", "=", "'g'", ",", "attrib", "=", "None", ",", "*", "*", "extras", ")", ":", "if", "parent", "is", "None", ":", "parent", "=", "self", ".", "root", "attrib", "=", "attrib", "or", ...
37.068966
0.001813
def predict_variant_effect_on_transcript(variant, transcript): """Return the transcript effect (such as FrameShift) that results from applying this genomic variant to a particular transcript. Parameters ---------- transcript : Transcript Transcript we're going to apply mutation to. """ if transcript.__class__ is not Transcript: raise TypeError( "Expected %s : %s to have type Transcript" % ( transcript, type(transcript))) # check for non-coding transcripts first, since # every non-coding transcript is "incomplete". if not transcript.is_protein_coding: return NoncodingTranscript(variant, transcript) if not transcript.complete: return IncompleteTranscript(variant, transcript) # since we're using inclusive base-1 coordinates, # checking for overlap requires special logic for insertions is_insertion = variant.is_insertion # determine if any exons are deleted, and if not, # what is the closest exon and how far is this variant # from that exon (overlapping the exon = 0 distance) completely_lost_exons = [] # list of which (exon #, Exon) pairs this mutation overlaps overlapping_exon_numbers_and_exons = [] distance_to_nearest_exon = float("inf") start_in_exon = False end_in_exon = False nearest_exon = None variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end for i, exon in enumerate(transcript.exons): if variant_start <= exon.start and variant_end >= exon.end: completely_lost_exons.append(exon) if is_insertion and exon.strand == "+" and variant_end == exon.end: # insertions after an exon don't overlap the exon distance = 1 elif is_insertion and exon.strand == "-" and variant_start == exon.start: distance = 1 else: distance = exon.distance_to_interval(variant_start, variant_end) if distance == 0: overlapping_exon_numbers_and_exons.append((i + 1, exon)) # start is contained in current exon if exon.start <= variant_start <= exon.end: start_in_exon = True # end is contained in current exon if exon.end >= variant_end >= exon.start: end_in_exon = 
True elif distance < distance_to_nearest_exon: distance_to_nearest_exon = distance nearest_exon = exon if len(overlapping_exon_numbers_and_exons) == 0: intronic_effect_class = choose_intronic_effect_class( variant=variant, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) return intronic_effect_class( variant=variant, transcript=transcript, nearest_exon=nearest_exon, distance_to_exon=distance_to_nearest_exon) elif len(completely_lost_exons) > 0 or ( len(overlapping_exon_numbers_and_exons) > 1): # if spanning multiple exons, or completely deleted an exon # then consider that an ExonLoss mutation exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons] return ExonLoss(variant, transcript, exons) assert len(overlapping_exon_numbers_and_exons) == 1 exon_number, exon = overlapping_exon_numbers_and_exons[0] exonic_effect_annotation = exonic_transcript_effect( variant, exon, exon_number, transcript) # simple case: both start and end are in the same if start_in_exon and end_in_exon: return exonic_effect_annotation elif isinstance(exonic_effect_annotation, ExonicSpliceSite): # if mutation bleeds over into intro but even just # the exonic portion got annotated as an exonic splice site # then return it return exonic_effect_annotation return ExonicSpliceSite( variant=variant, transcript=transcript, exon=exon, alternate_effect=exonic_effect_annotation)
[ "def", "predict_variant_effect_on_transcript", "(", "variant", ",", "transcript", ")", ":", "if", "transcript", ".", "__class__", "is", "not", "Transcript", ":", "raise", "TypeError", "(", "\"Expected %s : %s to have type Transcript\"", "%", "(", "transcript", ",", "t...
40.242991
0.001133
async def print_what_is_playing(loop): """Connect to device and print what is playing.""" details = conf.AppleTV(ADDRESS, NAME) details.add_service(conf.DmapService(HSGID)) print('Connecting to {}'.format(details.address)) atv = pyatv.connect_to_apple_tv(details, loop) try: print((await atv.metadata.playing())) finally: # Do not forget to logout await atv.logout()
[ "async", "def", "print_what_is_playing", "(", "loop", ")", ":", "details", "=", "conf", ".", "AppleTV", "(", "ADDRESS", ",", "NAME", ")", "details", ".", "add_service", "(", "conf", ".", "DmapService", "(", "HSGID", ")", ")", "print", "(", "'Connecting to ...
31.384615
0.002381
def generate_fileattr_metadata(local_path, metadata): # type: (blobxfer.models.upload.LocalPath, dict) -> dict """Generate file attribute metadata dict :param blobxfer.models.upload.LocalPath local_path: local path :param dict metadata: existing metadata dict :rtype: dict :return: merged metadata dictionary """ if blobxfer.util.on_windows(): global _FILEATTR_WARNED_ON_WINDOWS if not _FILEATTR_WARNED_ON_WINDOWS: _FILEATTR_WARNED_ON_WINDOWS = True logger.warning( 'file attributes store/restore on Windows is not ' 'supported yet') return None else: md = { _JSON_KEY_FILE_ATTRIBUTES: { _JSON_KEY_FILE_ATTRIBUTES_POSIX: { _JSON_KEY_FILE_ATTRIBUTES_MODE: local_path.mode, _JSON_KEY_FILE_ATTRIBUTES_UID: local_path.uid, _JSON_KEY_FILE_ATTRIBUTES_GID: local_path.gid, } } } return blobxfer.util.merge_dict(metadata, md)
[ "def", "generate_fileattr_metadata", "(", "local_path", ",", "metadata", ")", ":", "# type: (blobxfer.models.upload.LocalPath, dict) -> dict", "if", "blobxfer", ".", "util", ".", "on_windows", "(", ")", ":", "global", "_FILEATTR_WARNED_ON_WINDOWS", "if", "not", "_FILEATTR...
38.777778
0.000932
def is_docstring(tokens, previous_logical): """Return found docstring 'A docstring is a string literal that occurs as the first statement in a module, function, class,' http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring """ for token_type, text, start, _, _ in tokens: if token_type == tokenize.STRING: break elif token_type != tokenize.INDENT: return False else: return False line = text.lstrip() start, start_triple = _find_first_of(line, START_DOCSTRING_TRIPLE) if (previous_logical.startswith("def ") or previous_logical.startswith("class ")): if start == 0: return text
[ "def", "is_docstring", "(", "tokens", ",", "previous_logical", ")", ":", "for", "token_type", ",", "text", ",", "start", ",", "_", ",", "_", "in", "tokens", ":", "if", "token_type", "==", "tokenize", ".", "STRING", ":", "break", "elif", "token_type", "!=...
34.3
0.001418
def _unpack_msg(self, *msg): """ Convert all message elements to string """ l = [] for m in msg: l.append(str(m)) return " ".join(l)
[ "def", "_unpack_msg", "(", "self", ",", "*", "msg", ")", ":", "l", "=", "[", "]", "for", "m", "in", "msg", ":", "l", ".", "append", "(", "str", "(", "m", ")", ")", "return", "\" \"", ".", "join", "(", "l", ")" ]
23.125
0.015625
def write(self, chunk: Union[str, bytes, dict]) -> None: """Writes the given chunk to the output buffer. To write the output to the network, use the `flush()` method below. If the given chunk is a dictionary, we write it as JSON and set the Content-Type of the response to be ``application/json``. (if you want to send JSON as a different ``Content-Type``, call ``set_header`` *after* calling ``write()``). Note that lists are not converted to JSON because of a potential cross-site security vulnerability. All JSON output should be wrapped in a dictionary. More details at http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and https://github.com/facebook/tornado/issues/1009 """ if self._finished: raise RuntimeError("Cannot write() after finish()") if not isinstance(chunk, (bytes, unicode_type, dict)): message = "write() only accepts bytes, unicode, and dict objects" if isinstance(chunk, list): message += ( ". Lists not accepted for security reasons; see " + "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" # noqa: E501 ) raise TypeError(message) if isinstance(chunk, dict): chunk = escape.json_encode(chunk) self.set_header("Content-Type", "application/json; charset=UTF-8") chunk = utf8(chunk) self._write_buffer.append(chunk)
[ "def", "write", "(", "self", ",", "chunk", ":", "Union", "[", "str", ",", "bytes", ",", "dict", "]", ")", "->", "None", ":", "if", "self", ".", "_finished", ":", "raise", "RuntimeError", "(", "\"Cannot write() after finish()\"", ")", "if", "not", "isinst...
49.387097
0.001281
def DateTimeField(formatter=types.DEFAULT_DATETIME_FORMAT, default=NOTHING, required=True, repr=True, cmp=True, key=None): """ Create new datetime field on a model. :param formatter: datetime formatter string (default: "ISO_FORMAT") :param default: any datetime or string that can be converted to a datetime :param bool required: whether or not the object is invalid if not provided. :param bool repr: include this field should appear in object's repr. :param bool cmp: include this field in generated comparison. :param string key: override name of the value when converted to dict. """ default = _init_fields.init_default(required, default, None) validator = _init_fields.init_validator(required, datetime) converter = converters.to_datetime_field(formatter) return attrib(default=default, converter=converter, validator=validator, repr=repr, cmp=cmp, metadata=dict(formatter=formatter, key=key))
[ "def", "DateTimeField", "(", "formatter", "=", "types", ".", "DEFAULT_DATETIME_FORMAT", ",", "default", "=", "NOTHING", ",", "required", "=", "True", ",", "repr", "=", "True", ",", "cmp", "=", "True", ",", "key", "=", "None", ")", ":", "default", "=", ...
54.888889
0.000995
def createStyle(self, body, verbose=None): """ Creates a new Visual Style using the message body. Returns the title of the new Visual Style. If the title of the Visual Style already existed in the session, a new one will be automatically generated and returned. :param body: The details of the new Visual Style to be created. :param verbose: print more :returns: 200: successful operation """ PARAMS=set_param(['body'],[body]) response=api(url=self.___url+'styles', PARAMS=PARAMS, method="POST", verbose=verbose) return response
[ "def", "createStyle", "(", "self", ",", "body", ",", "verbose", "=", "None", ")", ":", "PARAMS", "=", "set_param", "(", "[", "'body'", "]", ",", "[", "body", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'styles'",...
40.4
0.012903
def obfn_reg(self): r"""Compute regularisation term, :math:`\| x \|_1`, and contribution to objective function. """ l1 = np.sum(mp_wl1*np.abs(self.obfn_gvar())) return (self.lmbda*l1, l1)
[ "def", "obfn_reg", "(", "self", ")", ":", "l1", "=", "np", ".", "sum", "(", "mp_wl1", "*", "np", ".", "abs", "(", "self", ".", "obfn_gvar", "(", ")", ")", ")", "return", "(", "self", ".", "lmbda", "*", "l1", ",", "l1", ")" ]
37
0.008811
def scaled_pressure2_encode(self, time_boot_ms, press_abs, press_diff, temperature): ''' Barometer readings for 2nd barometer time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) press_abs : Absolute pressure (hectopascal) (float) press_diff : Differential pressure 1 (hectopascal) (float) temperature : Temperature measurement (0.01 degrees celsius) (int16_t) ''' return MAVLink_scaled_pressure2_message(time_boot_ms, press_abs, press_diff, temperature)
[ "def", "scaled_pressure2_encode", "(", "self", ",", "time_boot_ms", ",", "press_abs", ",", "press_diff", ",", "temperature", ")", ":", "return", "MAVLink_scaled_pressure2_message", "(", "time_boot_ms", ",", "press_abs", ",", "press_diff", ",", "temperature", ")" ]
58.909091
0.012158
def tob32(val): """Return provided 32 bit value as a string of four bytes.""" ret = bytearray(4) ret[0] = (val>>24)&M8 ret[1] = (val>>16)&M8 ret[2] = (val>>8)&M8 ret[3] = val&M8 return ret
[ "def", "tob32", "(", "val", ")", ":", "ret", "=", "bytearray", "(", "4", ")", "ret", "[", "0", "]", "=", "(", "val", ">>", "24", ")", "&", "M8", "ret", "[", "1", "]", "=", "(", "val", ">>", "16", ")", "&", "M8", "ret", "[", "2", "]", "=...
26.125
0.037037
def html_table_from_query(rows: Iterable[Iterable[Optional[str]]], descriptions: Iterable[Optional[str]]) -> str: """ Converts rows from an SQL query result to an HTML table. Suitable for processing output from the defunct function ``rnc_db.fetchall_with_fieldnames(sql)``. """ html = u"<table>\n" # Header row html += u"<tr>" for x in descriptions: if x is None: x = u"" html += u"<th>{}</th>".format(webify(x)) html += u"</tr>\n" # Data rows for row in rows: html += u"<tr>" for x in row: if x is None: x = u"" html += u"<td>{}</td>".format(webify(x)) html += u"<tr>\n" html += u"</table>\n" return html
[ "def", "html_table_from_query", "(", "rows", ":", "Iterable", "[", "Iterable", "[", "Optional", "[", "str", "]", "]", "]", ",", "descriptions", ":", "Iterable", "[", "Optional", "[", "str", "]", "]", ")", "->", "str", ":", "html", "=", "u\"<table>\\n\"",...
26.964286
0.001279
def squared_error(y, y_pred): """Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squared differences between target and prediction """ y, y_pred = convert_assert(y, y_pred) return np.sum((y - y_pred) ** 2)
[ "def", "squared_error", "(", "y", ",", "y_pred", ")", ":", "y", ",", "y_pred", "=", "convert_assert", "(", "y", ",", "y_pred", ")", "return", "np", ".", "sum", "(", "(", "y", "-", "y_pred", ")", "**", "2", ")" ]
22.761905
0.008032
def eigenvalues(T, k=None, reversible=False, mu=None): r"""Compute eigenvalues of given transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T reversible : bool, optional Indicate that transition matrix is reversible mu : (d,) ndarray, optional Stationary distribution of T Returns ------- eig : (n,) ndarray, The eigenvalues of T ordered with decreasing absolute value. If k is None then n=d, if k is int then n=k otherwise n is the length of the given tuple of eigenvalue indices. Notes ----- Eigenvalues are computed using the numpy.linalg interface for the corresponding LAPACK routines. If reversible=True the the eigenvalues of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be computed. The precomputed stationary distribution will only be used if reversible=True. """ if reversible: try: evals = eigenvalues_rev(T, k=k, mu=mu) except: evals = eigvals(T).real # use fallback code but cast to real else: evals = eigvals(T) # nonreversible """Sort by decreasing absolute value""" ind = np.argsort(np.abs(evals))[::-1] evals = evals[ind] if isinstance(k, (list, set, tuple)): try: return [evals[n] for n in k] except IndexError: raise ValueError("given indices do not exist: ", k) elif k is not None: return evals[: k] else: return evals
[ "def", "eigenvalues", "(", "T", ",", "k", "=", "None", ",", "reversible", "=", "False", ",", "mu", "=", "None", ")", ":", "if", "reversible", ":", "try", ":", "evals", "=", "eigenvalues_rev", "(", "T", ",", "k", "=", "k", ",", "mu", "=", "mu", ...
29.592593
0.001211
def stage_pywbem_args(self, method, **kwargs): """ Log request method and all args. Normally called before the cmd is executed to record request parameters. This method does not support the summary detail_level because that seems to add little info to the log that is not also in the response. """ # pylint: disable=attribute-defined-outside-init self._pywbem_method = method if self.enabled and self.api_detail_level is not None and \ self.apilogger.isEnabledFor(logging.DEBUG): # TODO: future bypassed code to only ouput name and method if the # detail is summary. We are not doing this because this is # effectively the same information in the response so the only # additional infomation is the time stamp. # if self.api_detail_level == summary: # self.apilogger.debug('Request:%s %s', self._conn_id, method) # return # Order kwargs. Note that this is done automatically starting # with python 3.6 kwstr = ', '.join([('{0}={1!r}'.format(key, kwargs[key])) for key in sorted(six.iterkeys(kwargs))]) if self.api_maxlen and (len(kwstr) > self.api_maxlen): kwstr = kwstr[:self.api_maxlen] + '...' # pylint: disable=bad-continuation self.apilogger.debug('Request:%s %s(%s)', self._conn_id, method, kwstr)
[ "def", "stage_pywbem_args", "(", "self", ",", "method", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=attribute-defined-outside-init", "self", ".", "_pywbem_method", "=", "method", "if", "self", ".", "enabled", "and", "self", ".", "api_detail_level", "is",...
46.151515
0.001286
def weather_at_places(self, pattern, searchtype, limit=None): """ Queries the OWM Weather API for the currently observed weather in all the locations whose name is matching the specified text search parameters. A twofold search can be issued: *'accurate'* (exact matching) and *'like'* (matches names that are similar to the supplied pattern). :param pattern: the string pattern (not a regex) to be searched for the toponym :type pattern: str :param searchtype: the search mode to be used, must be *'accurate'* for an exact matching or *'like'* for a likelihood matching :type: searchtype: str :param limit: the maximum number of *Observation* items in the returned list (default is ``None``, which stands for any number of items) :param limit: int or ``None`` :returns: a list of *Observation* objects or ``None`` if no weather data is available :raises: *ParseResponseException* when OWM Weather API responses' data cannot be parsed, *APICallException* when OWM Weather API can not be reached, *ValueError* when bad value is supplied for the search type or the maximum number of items retrieved """ assert isinstance(pattern, str), "'pattern' must be a str" assert isinstance(searchtype, str), "'searchtype' must be a str" if searchtype != "accurate" and searchtype != "like": raise ValueError("'searchtype' value must be 'accurate' or 'like'") if limit is not None: assert isinstance(limit, int), "'limit' must be an int or None" if limit < 1: raise ValueError("'limit' must be None or greater than zero") params = {'q': pattern, 'type': searchtype, 'lang': self._language} if limit is not None: # fix for OWM 2.5 API bug! params['cnt'] = limit - 1 uri = http_client.HttpClient.to_url(FIND_OBSERVATIONS_URL, self._API_key, self._subscription_type, self._use_ssl) _, json_data = self._wapi.cacheable_get_json(uri, params=params) return self._parsers['observation_list'].parse_JSON(json_data)
[ "def", "weather_at_places", "(", "self", ",", "pattern", ",", "searchtype", ",", "limit", "=", "None", ")", ":", "assert", "isinstance", "(", "pattern", ",", "str", ")", ",", "\"'pattern' must be a str\"", "assert", "isinstance", "(", "searchtype", ",", "str",...
56.780488
0.001689
def hover(self): """ Hovers the element """ def do_hover(): """ Perform hover """ ActionChains(self.driver_wrapper.driver).move_to_element(self.element).perform() return self.execute_and_handle_webelement_exceptions(do_hover, 'hover')
[ "def", "hover", "(", "self", ")", ":", "def", "do_hover", "(", ")", ":", "\"\"\"\n Perform hover\n \"\"\"", "ActionChains", "(", "self", ".", "driver_wrapper", ".", "driver", ")", ".", "move_to_element", "(", "self", ".", "element", ")", "....
31.3
0.009317
def new_mountpoint(self, name): """Create a new mountpoint""" url = posixpath.join(self.path, name) r = self._jfs.post(url, extra_headers={'content-type': 'application/x-www-form-urlencoded'}) return r
[ "def", "new_mountpoint", "(", "self", ",", "name", ")", ":", "url", "=", "posixpath", ".", "join", "(", "self", ".", "path", ",", "name", ")", "r", "=", "self", ".", "_jfs", ".", "post", "(", "url", ",", "extra_headers", "=", "{", "'content-type'", ...
45.8
0.012876
def have_same_structure(d1, d2): """ Given two dictionaries (possibly with other nested dictionaries as values), this function checks whether they have the same key structure. >>> from sem import DatabaseManager >>> d1 = {'a': 1, 'b': 2} >>> d2 = {'a': [], 'b': 3} >>> d3 = {'a': 4, 'c': 5} >>> DatabaseManager.have_same_structure(d1, d2) True >>> DatabaseManager.have_same_structure(d1, d3) False >>> d4 = {'a': {'c': 1}, 'b': 2} >>> d5 = {'a': {'c': 3}, 'b': 4} >>> d6 = {'a': {'c': 5, 'd': 6}, 'b': 7} >>> DatabaseManager.have_same_structure(d1, d4) False >>> DatabaseManager.have_same_structure(d4, d5) True >>> DatabaseManager.have_same_structure(d4, d6) False """ # Keys of this level are the same if set(d1.keys()) != set(d2.keys()): return False # Check nested dictionaries for k1, k2 in zip(sorted(d1.keys()), sorted(d2.keys())): # If one of the values is a dictionary and the other is not if isinstance(d1[k1], dict) != isinstance(d2[k2], dict): return False # If both are dictionaries, recur elif isinstance(d1[k1], dict) and isinstance(d2[k2], dict): if not DatabaseManager.have_same_structure(d1[k1], d2[k2]): return False return True
[ "def", "have_same_structure", "(", "d1", ",", "d2", ")", ":", "# Keys of this level are the same", "if", "set", "(", "d1", ".", "keys", "(", ")", ")", "!=", "set", "(", "d2", ".", "keys", "(", ")", ")", ":", "return", "False", "# Check nested dictionaries"...
36.74359
0.00136
def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years # repeats exactly every 400 years. The basic strategy is to find the # closest 400-year boundary at or before n, then work with the offset # from that boundary to n. Life is much clearer if we subtract 1 from # n first -- then the values of n at 400-year boundaries are exactly # those divisible by _DI400Y: # # D M Y n n-1 # -- --- ---- ---------- ---------------- # 31 Dec -400 -_DI400Y -_DI400Y -1 # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary # ... # 30 Dec 000 -1 -2 # 31 Dec 000 0 -1 # 1 Jan 001 1 0 400-year boundary # 2 Jan 001 2 1 # 3 Jan 001 3 2 # ... # 31 Dec 400 _DI400Y _DI400Y -1 # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary n -= 1 n400, n = divmod(n, _DI400Y) year = n400 * 400 + 1 # ..., -399, 1, 401, ... # Now n is the (non-negative) offset, in days, from January 1 of year, to # the desired date. Now compute how many 100-year cycles precede n. # Note that it's possible for n100 to equal 4! In that case 4 full # 100-year cycles precede the desired day, which implies the desired # day is December 31 at the end of a 400-year cycle. n100, n = divmod(n, _DI100Y) # Now compute how many 4-year cycles precede it. n4, n = divmod(n, _DI4Y) # And now how many single years. Again n1 can be 4, and again meaning # that the desired day is December 31 at the end of the 4-year cycle. n1, n = divmod(n, 365) year += n100 * 100 + n4 * 4 + n1 if n1 == 4 or n100 == 4: assert n == 0 return year-1, 12, 31 # Now the year is correct, and n is the offset from January 1. We find # the month via an estimate that's either exact or one too large. 
leapyear = n1 == 3 and (n4 != 24 or n100 == 3) assert leapyear == _is_leap(year) month = (n + 50) >> 5 preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) if preceding > n: # estimate is too large month -= 1 preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) n -= preceding assert 0 <= n < _days_in_month(year, month) # Now the year and month are correct, and n is the offset from the # start of that month: we're done! return year, month, n+1
[ "def", "_ord2ymd", "(", "n", ")", ":", "# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years", "# repeats exactly every 400 years. The basic strategy is to find the", "# closest 400-year boundary at or before n, then work with the offset", "# from that boundary to n. Life is m...
43.344262
0.00037
def is_rigid(matrix): """ Check to make sure a homogeonous transformation matrix is a rigid body transform. Parameters ----------- matrix: possibly a transformation matrix Returns ----------- check: bool, True if matrix is a valid (4,4) rigid body transform. """ matrix = np.asanyarray(matrix, dtype=np.float64) if matrix.shape != (4, 4): return False if not np.allclose(matrix[-1], [0, 0, 0, 1]): return False check = np.dot(matrix[:3, :3], matrix[:3, :3].T) return np.allclose(check, np.eye(3))
[ "def", "is_rigid", "(", "matrix", ")", ":", "matrix", "=", "np", ".", "asanyarray", "(", "matrix", ",", "dtype", "=", "np", ".", "float64", ")", "if", "matrix", ".", "shape", "!=", "(", "4", ",", "4", ")", ":", "return", "False", "if", "not", "np...
22.038462
0.001672
def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] rel = '' if src is None: return rel if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] except KeyError: e = 'Could not derive openstack release for '\ 'this Ubuntu release: %s' % ubuntu_rel error_out(e) return rel if src.startswith('cloud:'): ca_rel = src.split(':')[1] ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb string provided if (src.startswith('deb') or src.startswith('ppa') or src.startswith('snap')): for v in OPENSTACK_CODENAMES.values(): if v in src: return v
[ "def", "get_os_codename_install_source", "(", "src", ")", ":", "ubuntu_rel", "=", "lsb_release", "(", ")", "[", "'DISTRIB_CODENAME'", "]", "rel", "=", "''", "if", "src", "is", "None", ":", "return", "rel", "if", "src", "in", "[", "'distro'", ",", "'distro-...
33.444444
0.001076
def join(self, glue=" "): """ Javascript's join implementation """ j = glue.join([str(x) for x in self.obj]) return self._wrap(j)
[ "def", "join", "(", "self", ",", "glue", "=", "\" \"", ")", ":", "j", "=", "glue", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "self", ".", "obj", "]", ")", "return", "self", ".", "_wrap", "(", "j", ")" ]
31.4
0.012422
def handle_input(self, input_hdr): """ This method tries to ensure that the input data has the correct dimensions. INPUTS: input_hdr (no default) Header from which data shape is to be extracted. """ input_slice = input_hdr['NAXIS']*[0] for i in range(input_hdr['NAXIS']): if input_hdr['CTYPE%d'%(i+1)].startswith("RA"): input_slice[-1] = slice(None) if input_hdr['CTYPE%d'%(i+1)].startswith("DEC"): input_slice[-2] = slice(None) return input_slice
[ "def", "handle_input", "(", "self", ",", "input_hdr", ")", ":", "input_slice", "=", "input_hdr", "[", "'NAXIS'", "]", "*", "[", "0", "]", "for", "i", "in", "range", "(", "input_hdr", "[", "'NAXIS'", "]", ")", ":", "if", "input_hdr", "[", "'CTYPE%d'", ...
32.941176
0.010417
def _auth(profile=None): ''' Set up neutron credentials ''' credentials = __salt__['config.option'](profile) kwargs = { 'username': credentials['keystone.user'], 'password': credentials['keystone.password'], 'tenant_name': credentials['keystone.tenant'], 'auth_url': credentials['keystone.auth_url'], 'region_name': credentials.get('keystone.region_name', None), 'service_type': credentials['keystone.service_type'], } return suoneu.SaltNeutron(**kwargs)
[ "def", "_auth", "(", "profile", "=", "None", ")", ":", "credentials", "=", "__salt__", "[", "'config.option'", "]", "(", "profile", ")", "kwargs", "=", "{", "'username'", ":", "credentials", "[", "'keystone.user'", "]", ",", "'password'", ":", "credentials",...
34.466667
0.001883
def LL(n): """constructs the LL context""" if (n<=0):return Context('0') else: LL1=LL(n-1) r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1 r2 = LL1 - LL1 - LL1 return r1 + r2
[ "def", "LL", "(", "n", ")", ":", "if", "(", "n", "<=", "0", ")", ":", "return", "Context", "(", "'0'", ")", "else", ":", "LL1", "=", "LL", "(", "n", "-", "1", ")", "r1", "=", "C1", "(", "3", "**", "(", "n", "-", "1", ")", ",", "2", "*...
25.25
0.028708
def decrypt(receiver_prvhex: str, msg: bytes) -> bytes: """ Decrypt with eth private key Parameters ---------- receiver_pubhex: str Receiver's ethereum private key hex string msg: bytes Data to decrypt Returns ------- bytes Plain text """ pubkey = msg[0:65] # pubkey's length is 65 bytes encrypted = msg[65:] sender_public_key = hex2pub(pubkey.hex()) private_key = hex2prv(receiver_prvhex) aes_key = derive(private_key, sender_public_key) return aes_decrypt(aes_key, encrypted)
[ "def", "decrypt", "(", "receiver_prvhex", ":", "str", ",", "msg", ":", "bytes", ")", "->", "bytes", ":", "pubkey", "=", "msg", "[", "0", ":", "65", "]", "# pubkey's length is 65 bytes", "encrypted", "=", "msg", "[", "65", ":", "]", "sender_public_key", "...
24.863636
0.001761
def reset (self): """ Initialize FTP url data. """ super(FtpUrl, self).reset() # list of files for recursion self.files = [] # last part of URL filename self.filename = None self.filename_encoding = 'iso-8859-1'
[ "def", "reset", "(", "self", ")", ":", "super", "(", "FtpUrl", ",", "self", ")", ".", "reset", "(", ")", "# list of files for recursion", "self", ".", "files", "=", "[", "]", "# last part of URL filename", "self", ".", "filename", "=", "None", "self", ".",...
27.4
0.010601
def get_all(jail=None): ''' Return a list of all available services .. versionchanged:: 2016.3.4 jail: optional jid or jail name CLI Example: .. code-block:: bash salt '*' service.get_all ''' ret = [] service = _cmd(jail) for srv in __salt__['cmd.run']('{0} -l'.format(service)).splitlines(): if not srv.isupper(): ret.append(srv) return sorted(ret)
[ "def", "get_all", "(", "jail", "=", "None", ")", ":", "ret", "=", "[", "]", "service", "=", "_cmd", "(", "jail", ")", "for", "srv", "in", "__salt__", "[", "'cmd.run'", "]", "(", "'{0} -l'", ".", "format", "(", "service", ")", ")", ".", "splitlines"...
20.35
0.002347
def computePCsPlink(plink_path,k,out_dir,bfile,ffile): """ computing the covariance matrix via plink """ print("Using plink to compute principal components") cmd = '%s --bfile %s --pca %d '%(plink_path,bfile,k) cmd+= '--out %s'%(os.path.join(out_dir,'plink')) subprocess.call(cmd,shell=True) plink_fn = os.path.join(out_dir, 'plink.eigenvec') M = sp.loadtxt(plink_fn,dtype=str) U = sp.array(M[:,2:],dtype=float) U-= U.mean(0) U/= U.std(0) sp.savetxt(ffile,U)
[ "def", "computePCsPlink", "(", "plink_path", ",", "k", ",", "out_dir", ",", "bfile", ",", "ffile", ")", ":", "print", "(", "\"Using plink to compute principal components\"", ")", "cmd", "=", "'%s --bfile %s --pca %d '", "%", "(", "plink_path", ",", "bfile", ",", ...
35.5
0.035294
def detect(self, G): """Detect a single core-periphery pair using the Borgatti-Everett algorithm. Parameters ---------- G : NetworkX graph object Examples -------- >>> import networkx as nx >>> import cpalgorithm as cpa >>> G = nx.karate_club_graph() # load the karate club network. >>> be = cpa.BE() >>> be.detect(G) """ node_pairs, w, node2id, id2node = self._to_edge_list(G) cppairs = _cp.detect_be(edges=node_pairs, ws=w, num_of_runs = self.num_runs) N = len(id2node) self.c_ = dict(zip( [id2node[i] for i in range(N)], cppairs[0].astype(int))) self.x_ = dict(zip( [id2node[i] for i in range(N)], cppairs[1])) self.Q_ = cppairs[2][0] self.qs_ = cppairs[3].tolist()
[ "def", "detect", "(", "self", ",", "G", ")", ":", "node_pairs", ",", "w", ",", "node2id", ",", "id2node", "=", "self", ".", "_to_edge_list", "(", "G", ")", "cppairs", "=", "_cp", ".", "detect_be", "(", "edges", "=", "node_pairs", ",", "ws", "=", "w...
26.730769
0.045833
def display_event(div, attributes=[]): """ Function to build a suitable CustomJS to display the current event in the div model. """ style = 'float: left; clear: left; font-size: 10pt' return CustomJS(args=dict(div=div), code=""" var attrs = %s; var args = []; for (var i = 0; i<attrs.length; i++ ) { var val = JSON.stringify(cb_obj[attrs[i]], function(key, val) { return val.toFixed ? Number(val.toFixed(2)) : val; }) args.push(attrs[i] + '=' + val) } var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n"; var text = div.text.concat(line); var lines = text.split("\\n") if (lines.length > 35) lines.shift(); div.text = lines.join("\\n"); """ % (attributes, style))
[ "def", "display_event", "(", "div", ",", "attributes", "=", "[", "]", ")", ":", "style", "=", "'float: left; clear: left; font-size: 10pt'", "return", "CustomJS", "(", "args", "=", "dict", "(", "div", "=", "div", ")", ",", "code", "=", "\"\"\"\n var att...
38.636364
0.002296
def add_server(self,address,port=default_port,password=None,speed=None,valid_times=None,invalid_times=None): ''' :address: remote address of server, or special string ``local`` to run the command locally :valid_times: times when this server is available, given as a list of tuples of 2 strings of form "HH:MM" that define the start and end times. Alternatively, a list of 7 lists can be given to define times on a per-day-of-week basis E.g.,:: [('4:30','14:30'),('17:00','23:00')] # or [ [('4:30','14:30'),('17:00','23:00')], # S [('4:30','14:30'),('17:00','23:00')], # M [('4:30','14:30'),('17:00','23:00')], # T [('4:30','14:30'),('17:00','23:00')], # W [('4:30','14:30'),('17:00','23:00')], # R [('4:30','14:30'),('17:00','23:00')], # F [('4:30','14:30'),('17:00','23:00')] # S ] :invalid_times: uses the same format as ``valid_times`` but defines times when the server should not be used ''' for t in [valid_times,invalid_times]: if t: if not (self._is_list_of_tuples(t) or self._is_list_of_tuples(t,True)): raise ValueError('valid_times and invalid_times must either be lists of strings or lists') self.servers.append({ 'address':address, 'port':port, 'password':password, 'speed':speed, 'valid_times':valid_times, 'invalid_times':invalid_times })
[ "def", "add_server", "(", "self", ",", "address", ",", "port", "=", "default_port", ",", "password", "=", "None", ",", "speed", "=", "None", ",", "valid_times", "=", "None", ",", "invalid_times", "=", "None", ")", ":", "for", "t", "in", "[", "valid_tim...
48.351351
0.014247
def get(self, jid=None): # pylint: disable=W0221 ''' A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. code-block:: text GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - '20121130104633606931': Arguments: - '3' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: text GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - '3' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: '*' Target-type: glob User: saltdev jid: '20121130104633606931' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06 ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return if jid: self.lowstate = [{ 'fun': 'jobs.list_job', 'jid': jid, 'client': 'runner', }] else: self.lowstate = [{ 'fun': 'jobs.list_jobs', 'client': 'runner', }] self.disbatch()
[ "def", "get", "(", "self", ",", "jid", "=", "None", ")", ":", "# pylint: disable=W0221", "# if you aren't authenticated, redirect to login", "if", "not", "self", ".", "_verify_auth", "(", ")", ":", "self", ".", "redirect", "(", "'/login'", ")", "return", "if", ...
24
0.000808
def train_transform(self, images, surpress_mapping_check = False): r""" See also -------- train, transform """ ret = self.train(images) outputs = [self.transform(i, surpress_mapping_check) for i in images] return ret, outputs
[ "def", "train_transform", "(", "self", ",", "images", ",", "surpress_mapping_check", "=", "False", ")", ":", "ret", "=", "self", ".", "train", "(", "images", ")", "outputs", "=", "[", "self", ".", "transform", "(", "i", ",", "surpress_mapping_check", ")", ...
31.222222
0.013841
def queriessam2dalignbed(cfg): """ Processes SAM file to get the genomic coordinates in BED format step#2 :param cfg: configuration dict """ datatmpd=cfg['datatmpd'] alignmentbedp=cfg['alignmentbedp'] dalignbedp=cfg['dalignbedp'] logging.info(basename(dalignbedp)) if not exists(alignmentbedp) or cfg['force']: #input/s queriessamps=glob(f'{datatmpd}/01_queries_queryl*.sam') for queriessamp in queriessamps: if stat(queriessamp).st_size != 0: samfile=pysam.AlignmentFile(queriessamp, "rb") dalignbed=pd.DataFrame(columns=bed_colns) for read in samfile.fetch(): algnids=[] # read_position=read.positions[0] # tag_NM=read.get_tag('NM') if len(read.positions)!=0: read_position=read.positions[0] else: logging.error('no alignments found') print(read) continue # read_position=[None] try: tag_NM=read.get_tag('NM') except: logging.error('no NM tag found') print(read) continue # tag_NM=None algnids.append(f"{read.reference_name}|{'-' if read.is_reverse else '+'}{read_position}|{read.cigarstring}|{tag_NM}") if read.has_tag('XA'): algnids+=['|'.join(s.split(',')) for s in read.get_tag('XA').split(';') if len(s.split(','))>1] # print(len(algnids)) chroms=[] starts=[] ends=[] algnids=algnids[:20] NMs=[] strands=[] for a in algnids: strand=a.split('|')[1][0] chroms.append(a.split('|')[0]) if strand=='+': offset=0 elif strand=='-': offset=0 starts.append(int(a.split('|')[1][1:])+offset) ends.append(int(a.split('|')[1][1:])+str2num(a.split('|')[2])+offset) NMs.append(a.split('|')[3]) strands.append(strand) del strand,offset col2dalignbed={'chromosome':chroms, 'start':starts, 'end':ends, 'id':algnids, 'NM':NMs, 'strand':strands} # col2dalignbed=dict(zip(cols,[a.split('|')[0],a.split('|')[1],a.split('|')[2],a,a.split('|')[3],a.split('|')[4] for a in algnids])) dalignbed_=pd.DataFrame(col2dalignbed) dalignbed_['query id']=read.qname.replace('_',' ') dalignbed = dalignbed.append(dalignbed_,ignore_index=True,sort=True) # break samfile.close() else: logging.warning(f"file is empty: 
{queriessamp}") dalignbed.to_csv(dalignbedp,sep='\t') from rohan.dandage.io_nums import str2numorstr dalignbed['chromosome']=dalignbed.apply(lambda x : str2numorstr(x['chromosome']),axis=1) dalignbed=dalignbed.sort_values(['chromosome','start','end'], ascending=[True, True, True]) dalignbed.loc[:,bed_colns].to_csv(alignmentbedp,sep='\t', header=False,index=False, chunksize=5000) return cfg
[ "def", "queriessam2dalignbed", "(", "cfg", ")", ":", "datatmpd", "=", "cfg", "[", "'datatmpd'", "]", "alignmentbedp", "=", "cfg", "[", "'alignmentbedp'", "]", "dalignbedp", "=", "cfg", "[", "'dalignbedp'", "]", "logging", ".", "info", "(", "basename", "(", ...
46.851852
0.016258
def is_prune(self): """ Return True, if `git fetch --prune` is allowed. Because of possible incompatibilities, this requires special treatment. """ required_version = "1.6.6" config_value = self.settings['fetch.prune'] if self.git.is_version_min(required_version): return config_value is not False else: # pragma: no cover if config_value == 'true': print(colored( "Warning: fetch.prune is set to 'true' but your git" "version doesn't seem to support it ({0} < {1})." "Defaulting to 'false'.".format(self.git.version, required_version), 'yellow' ))
[ "def", "is_prune", "(", "self", ")", ":", "required_version", "=", "\"1.6.6\"", "config_value", "=", "self", ".", "settings", "[", "'fetch.prune'", "]", "if", "self", ".", "git", ".", "is_version_min", "(", "required_version", ")", ":", "return", "config_value...
38.619048
0.002407
def __we_c(cls, calib, tc, temp, we_v, ae_v): """ Compute weC from sensor temperature compensation of weV, aeV """ we_t = we_v - (calib.we_elc_mv / 1000.0) # remove electronic we zero ae_t = ae_v - (calib.ae_elc_mv / 1000.0) # remove electronic ae zero we_c = tc.correct(calib, temp, we_t, ae_t) # print("A4Datum__we_c: we_t:%f ae_t:%f we_c:%s" % (we_t, ae_t, we_c), file=sys.stderr) return we_c
[ "def", "__we_c", "(", "cls", ",", "calib", ",", "tc", ",", "temp", ",", "we_v", ",", "ae_v", ")", ":", "we_t", "=", "we_v", "-", "(", "calib", ".", "we_elc_mv", "/", "1000.0", ")", "# remove electronic we zero", "ae_t", "=", "ae_v", "-", "(", "calib"...
38.75
0.010504
def _parse_args(func, variables, annotations=None): """Return a list of arguments with the variable it reads. NOTE: Multiple arguments may read the same variable. """ arg_read_var = [] for arg_name, anno in (annotations or func.__annotations__).items(): if arg_name == 'return': continue var, read = _parse_arg(func, variables, arg_name, anno) arg = Argument(name=arg_name, read=read) arg_read_var.append((arg, var)) return arg_read_var
[ "def", "_parse_args", "(", "func", ",", "variables", ",", "annotations", "=", "None", ")", ":", "arg_read_var", "=", "[", "]", "for", "arg_name", ",", "anno", "in", "(", "annotations", "or", "func", ".", "__annotations__", ")", ".", "items", "(", ")", ...
37.923077
0.00198
def get_response(self, url, timeout=None): """Return http request response. """ if not timeout: timeout = self.default_timeout if self.default_sleeptime: time.sleep(self.default_sleeptime) try: return self.auth.get(url, headers=self.default_header, timeout=self.default_timeout) except: return None
[ "def", "get_response", "(", "self", ",", "url", ",", "timeout", "=", "None", ")", ":", "if", "not", "timeout", ":", "timeout", "=", "self", ".", "default_timeout", "if", "self", ".", "default_sleeptime", ":", "time", ".", "sleep", "(", "self", ".", "de...
29.538462
0.010101
def popleft(self): """Removes and returns the oldest read pulse.""" self._mq.send("^", True, type=1) message = self._wait_receive_msg() reply = int(message[0].decode('utf-8')) #print(reply) if reply == -1: raise IndexError("pop from empty list") return reply
[ "def", "popleft", "(", "self", ")", ":", "self", ".", "_mq", ".", "send", "(", "\"^\"", ",", "True", ",", "type", "=", "1", ")", "message", "=", "self", ".", "_wait_receive_msg", "(", ")", "reply", "=", "int", "(", "message", "[", "0", "]", ".", ...
35.333333
0.009202
def _sort_layers(self): """Sort the layers by depth.""" self._layers = OrderedDict(sorted(self._layers.items(), key=lambda t: t[0]))
[ "def", "_sort_layers", "(", "self", ")", ":", "self", ".", "_layers", "=", "OrderedDict", "(", "sorted", "(", "self", ".", "_layers", ".", "items", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")" ]
48.666667
0.02027
def is_ambiguous(self, dt, idx=None): """ Whether or not the "wall time" of a given datetime is ambiguous in this zone. :param dt: A :py:class:`datetime.datetime`, naive or time zone aware. :return: Returns ``True`` if ambiguous, ``False`` otherwise. .. versionadded:: 2.6.0 """ if idx is None: idx = self._find_last_transition(dt) # Calculate the difference in offsets from current to previous timestamp = _datetime_to_timestamp(dt) tti = self._get_ttinfo(idx) if idx is None or idx <= 0: return False od = self._get_ttinfo(idx - 1).offset - tti.offset tt = self._trans_list[idx] # Transition time return timestamp < tt + od
[ "def", "is_ambiguous", "(", "self", ",", "dt", ",", "idx", "=", "None", ")", ":", "if", "idx", "is", "None", ":", "idx", "=", "self", ".", "_find_last_transition", "(", "dt", ")", "# Calculate the difference in offsets from current to previous", "timestamp", "="...
27.964286
0.002469
def validatefeatures(self,features): """Returns features in validated form, or raises an Exception. Mostly for internal use""" validatedfeatures = [] for feature in features: if isinstance(feature, int) or isinstance(feature, float): validatedfeatures.append( str(feature) ) elif self.delimiter in feature and not self.sklearn: raise ValueError("Feature contains delimiter: " + feature) elif self.sklearn and isinstance(feature, str): #then is sparse added together validatedfeatures.append(feature) else: validatedfeatures.append(feature) return validatedfeatures
[ "def", "validatefeatures", "(", "self", ",", "features", ")", ":", "validatedfeatures", "=", "[", "]", "for", "feature", "in", "features", ":", "if", "isinstance", "(", "feature", ",", "int", ")", "or", "isinstance", "(", "feature", ",", "float", ")", ":...
53.538462
0.012712
def average_neighbor_distance(points, num_neigh): """! @brief Returns average distance for establish links between specified number of nearest neighbors. @param[in] points (list): Input data, list of points where each point represented by list. @param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation. @return (double) Average distance for establish links between 'num_neigh' in data set 'points'. """ if num_neigh > len(points) - 1: raise NameError('Impossible to calculate average distance to neighbors when number of object is less than number of neighbors.'); dist_matrix = [ [ 0.0 for i in range(len(points)) ] for j in range(len(points)) ]; for i in range(0, len(points), 1): for j in range(i + 1, len(points), 1): distance = euclidean_distance(points[i], points[j]); dist_matrix[i][j] = distance; dist_matrix[j][i] = distance; dist_matrix[i] = sorted(dist_matrix[i]); total_distance = 0; for i in range(0, len(points), 1): # start from 0 - first element is distance to itself. for j in range(0, num_neigh, 1): total_distance += dist_matrix[i][j + 1]; return ( total_distance / (num_neigh * len(points)) );
[ "def", "average_neighbor_distance", "(", "points", ",", "num_neigh", ")", ":", "if", "num_neigh", ">", "len", "(", "points", ")", "-", "1", ":", "raise", "NameError", "(", "'Impossible to calculate average distance to neighbors when number of object is less than number of n...
44.6
0.021214
def convert_to_shape(x): """Converts input to a Shape. Args: x: Shape, str, or None. Returns: Shape or None. Raises: ValueError: If x cannot be converted to a Shape. """ if x is None: return None if isinstance(x, Shape): return x if isinstance(x, str): x = _parse_string_to_list_of_pairs(x, seconds_to_int=True) return Shape(x)
[ "def", "convert_to_shape", "(", "x", ")", ":", "if", "x", "is", "None", ":", "return", "None", "if", "isinstance", "(", "x", ",", "Shape", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "_parse_string_to_list_o...
18.631579
0.016129
def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated
[ "def", "image_update", "(", "id", "=", "None", ",", "name", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=C0103", "if", "id", ":", "image", "=", "image_show", "(", "id", "=", "id", ",", "profile", "="...
31.3
0.001239
def notify_barriers(self, slot_key, cursor, use_barrier_indexes, max_to_notify=_MAX_BARRIERS_TO_NOTIFY): """Searches for barriers affected by a slot and triggers completed ones. Args: slot_key: db.Key or stringified key of the _SlotRecord that was filled. cursor: Stringified Datastore cursor where the notification query should pick up. use_barrier_indexes: When True, use _BarrierIndex records to determine which _Barriers to trigger by having this _SlotRecord filled. When False, use the old method that queries for _BarrierRecords by the blocking_slots parameter. max_to_notify: Used for testing. Raises: PipelineStatusError: If any of the barriers are in a bad state. """ if not isinstance(slot_key, db.Key): slot_key = db.Key(slot_key) logging.debug('Notifying slot %r', slot_key) if use_barrier_indexes: # Please see models.py:_BarrierIndex to understand how _BarrierIndex # entities relate to _BarrierRecord entities. query = ( _BarrierIndex.all(cursor=cursor, keys_only=True) .ancestor(slot_key)) barrier_index_list = query.fetch(max_to_notify) barrier_key_list = [ _BarrierIndex.to_barrier_key(key) for key in barrier_index_list] # If there are task and pipeline kickoff retries it's possible for a # _BarrierIndex to exist for a _BarrierRecord that was not successfully # written. It's safe to ignore this because the original task that wrote # the _BarrierIndex and _BarrierRecord would not have made progress to # kick off a real pipeline or child pipeline unless all of the writes for # these dependent entities went through. We assume that the instigator # retried from scratch and somehwere there exists a good _BarrierIndex and # corresponding _BarrierRecord that tries to accomplish the same thing. 
barriers = db.get(barrier_key_list) results = [] for barrier_key, barrier in zip(barrier_key_list, barriers): if barrier is None: logging.debug('Ignoring that Barrier "%r" is missing, ' 'relies on Slot "%r"', barrier_key, slot_key) else: results.append(barrier) else: # TODO(user): Delete this backwards compatible codepath and # make use_barrier_indexes the assumed default in all cases. query = ( _BarrierRecord.all(cursor=cursor) .filter('blocking_slots =', slot_key)) results = query.fetch(max_to_notify) # Fetch all blocking _SlotRecords for any potentially triggered barriers. blocking_slot_keys = [] for barrier in results: blocking_slot_keys.extend(barrier.blocking_slots) blocking_slot_dict = {} for slot_record in db.get(blocking_slot_keys): if slot_record is None: continue blocking_slot_dict[slot_record.key()] = slot_record task_list = [] updated_barriers = [] for barrier in results: ready_slots = [] for blocking_slot_key in barrier.blocking_slots: slot_record = blocking_slot_dict.get(blocking_slot_key) if slot_record is None: raise UnexpectedPipelineError( 'Barrier "%r" relies on Slot "%r" which is missing.' % (barrier.key(), blocking_slot_key)) if slot_record.status == _SlotRecord.FILLED: ready_slots.append(blocking_slot_key) # When all of the blocking_slots have been filled, consider the barrier # ready to trigger. We'll trigger it regardless of the current # _BarrierRecord status, since there could be task queue failures at any # point in this flow; this rolls forward the state and de-dupes using # the task name tombstones. 
pending_slots = set(barrier.blocking_slots) - set(ready_slots) if not pending_slots: if barrier.status != _BarrierRecord.FIRED: barrier.status = _BarrierRecord.FIRED barrier.trigger_time = self._gettime() updated_barriers.append(barrier) purpose = barrier.key().name() if purpose == _BarrierRecord.START: path = self.pipeline_handler_path countdown = None else: path = self.finalized_handler_path # NOTE: Wait one second before finalization to prevent # contention on the _PipelineRecord entity. countdown = 1 pipeline_key = _BarrierRecord.target.get_value_for_datastore(barrier) pipeline_record = db.get(pipeline_key) logging.debug('Firing barrier %r', barrier.key()) task_list.append(taskqueue.Task( url=path, countdown=countdown, name='ae-barrier-fire-%s-%s' % (pipeline_key.name(), purpose), params=dict(pipeline_key=pipeline_key, purpose=purpose), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target'])) else: logging.debug('Not firing barrier %r, Waiting for slots: %r', barrier.key(), pending_slots) # Blindly overwrite _BarrierRecords that have an updated status. This is # acceptable because by this point all finalization barriers for # generator children should have already had their final outputs assigned. if updated_barriers: db.put(updated_barriers) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-ae-barrier-notify-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-ae-barrier-notify-%d' % (prefix, end), url=self.barrier_handler_path, params=dict( slot_key=slot_key, cursor=query.cursor(), use_barrier_indexes=use_barrier_indexes))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
[ "def", "notify_barriers", "(", "self", ",", "slot_key", ",", "cursor", ",", "use_barrier_indexes", ",", "max_to_notify", "=", "_MAX_BARRIERS_TO_NOTIFY", ")", ":", "if", "not", "isinstance", "(", "slot_key", ",", "db", ".", "Key", ")", ":", "slot_key", "=", "...
42.275862
0.009563
def dict_to_nvlist(dict):
    '''Convert a dictionary into a CORBA namevalue list.

    Each (key, value) pair becomes an SDOPackage.NameValue with the
    value wrapped in a CORBA Any.
    '''
    return [SDOPackage.NameValue(key, omniORB.any.to_any(value))
            for key, value in dict.items()]
[ "def", "dict_to_nvlist", "(", "dict", ")", ":", "result", "=", "[", "]", "for", "item", "in", "list", "(", "dict", ".", "keys", "(", ")", ")", ":", "result", ".", "append", "(", "SDOPackage", ".", "NameValue", "(", "item", ",", "omniORB", ".", "any...
39.333333
0.008299
def tcp_server(tcp_addr, settings):
    """Start up the tcp server, send the settings."""
    is_ipv6 = ":" in tcp_addr.ip
    sock = socket.socket(socket.AF_INET6 if is_ipv6 else socket.AF_INET,
                         socket.SOCK_STREAM, socket.IPPROTO_TCP)
    sock.bind(tcp_addr)
    sock.listen(1)
    logging.info("Waiting for connection on %s", tcp_addr)
    conn, addr = sock.accept()
    logging.info("Accepted connection from %s", Addr(*addr))

    # Send map_data independently for py2/3 and json encoding reasons.
    write_tcp(conn, settings["map_data"])
    send_settings = {key: val for key, val in settings.items()
                     if key != "map_data"}
    logging.debug("settings: %s", send_settings)
    write_tcp(conn, json.dumps(send_settings).encode())
    return conn
[ "def", "tcp_server", "(", "tcp_addr", ",", "settings", ")", ":", "family", "=", "socket", ".", "AF_INET6", "if", "\":\"", "in", "tcp_addr", ".", "ip", "else", "socket", ".", "AF_INET", "sock", "=", "socket", ".", "socket", "(", "family", ",", "socket", ...
43.375
0.021157
def split_unescaped(char, string, include_empty_strings=False):
    '''
    :param char: The character on which to split the string
    :type char: string
    :param string: The string to split
    :type string: string
    :param include_empty_strings: If True, keep zero-length substrings
        produced by adjacent, leading, or trailing separators
    :type include_empty_strings: boolean
    :returns: List of substrings of *string*
    :rtype: list of strings

    Splits *string* whenever *char* appears without an odd number of
    backslashes ('\\') preceding it, discarding any empty string
    elements.

    '''
    words = []
    # Scan right-to-left: get_last_pos_of_char presumably returns the
    # rightmost unescaped occurrence of char in the prefix, or a
    # negative value when there is none (the loop exits on pos < 0).
    pos = len(string)
    lastpos = pos
    while pos >= 0:
        pos = get_last_pos_of_char(char, string[:lastpos])
        if pos >= 0:
            # Segment between this separator and the previous one; an
            # empty segment (adjacent separators) is kept only on request.
            if pos + 1 != lastpos or include_empty_strings:
                words.append(string[pos + 1: lastpos])
            lastpos = pos
    # Leading segment before the first separator.
    if lastpos != 0 or include_empty_strings:
        words.append(string[:lastpos])
    # Segments were collected right-to-left; restore original order.
    words.reverse()
    return words
[ "def", "split_unescaped", "(", "char", ",", "string", ",", "include_empty_strings", "=", "False", ")", ":", "words", "=", "[", "]", "pos", "=", "len", "(", "string", ")", "lastpos", "=", "pos", "while", "pos", ">=", "0", ":", "pos", "=", "get_last_pos_...
31.148148
0.001153
def _elimination_trees(theta, decision_variables): """From Theta and the decision variables, determine the elimination order and the induced trees. """ # auxiliary variables are any variables that are not decision auxiliary_variables = set(n for n in theta.linear if n not in decision_variables) # get the adjacency of the auxiliary subgraph adj = {v: {u for u in theta.adj[v] if u in auxiliary_variables} for v in theta.adj if v in auxiliary_variables} # get the elimination order that minimizes treewidth tw, order = dnx.treewidth_branch_and_bound(adj) ancestors = {} for n in order: ancestors[n] = set(adj[n]) # now make v simplicial by making its neighborhood a clique, then # continue neighbors = adj[n] for u, v in itertools.combinations(neighbors, 2): adj[u].add(v) adj[v].add(u) for v in neighbors: adj[v].discard(n) del adj[n] roots = {} nodes = {v: {} for v in ancestors} for vidx in range(len(order) - 1, -1, -1): v = order[vidx] if ancestors[v]: for u in order[vidx + 1:]: if u in ancestors[v]: # v is a child of u nodes[u][v] = nodes[v] # nodes[u][v] = children of v break else: roots[v] = nodes[v] # roots[v] = children of v return roots, ancestors
[ "def", "_elimination_trees", "(", "theta", ",", "decision_variables", ")", ":", "# auxiliary variables are any variables that are not decision", "auxiliary_variables", "=", "set", "(", "n", "for", "n", "in", "theta", ".", "linear", "if", "n", "not", "in", "decision_va...
32.930233
0.002058
def unset_value(self, key):
    # type: (str) -> None
    """Unset a value in the configuration.

    Removes *key* from the in-memory configuration for the currently
    loaded variant and from the backing parser, pruning the section if
    the removal left it empty.

    Raises:
        ConfigurationError: if the key does not exist, or if a parser
            should exist for it but could not be obtained.
    """
    self._ensure_have_load_only()

    if key not in self._config[self.load_only]:
        raise ConfigurationError("No such key - {}".format(key))

    fname, parser = self._get_parser_to_modify()

    if parser is not None:
        section, name = _disassemble_key(key)

        # Remove the key in the parser
        modified_something = False
        if parser.has_section(section):
            # Returns whether the option was removed or not
            modified_something = parser.remove_option(section, name)

        if modified_something:
            # name removed from parser, section may now be empty.
            # next(iter, default) replaces the previous
            # six.next/StopIteration dance: it yields None exactly when
            # the section has no remaining items.
            if next(iter(parser.items(section)), None) is None:
                parser.remove_section(section)

            self._mark_as_modified(fname, parser)
        else:
            raise ConfigurationError(
                "Fatal Internal error [id=1]. Please report as a bug."
            )

    del self._config[self.load_only][key]
[ "def", "unset_value", "(", "self", ",", "key", ")", ":", "# type: (str) -> None", "self", ".", "_ensure_have_load_only", "(", ")", "if", "key", "not", "in", "self", ".", "_config", "[", "self", ".", "load_only", "]", ":", "raise", "ConfigurationError", "(", ...
33.710526
0.002276
def wrap_as_node(self, func):
    """Wrap a function as a graph node.

    The wrapper extracts a message from the call arguments, invokes
    *func* with it, and routes the wrapped result(s) onward under the
    node's name.  A function may "emit" multiple values by yielding
    instead of returning; all yielded values are collected before any
    routing happens (so a failing generator never sends a partially
    processed input down the graph).

    Returns:
        The wrapped callable.  It returns a tuple of wrapped results for
        generator functions, the single wrapped result otherwise, or
        NoResult unchanged when the function produced it.
    """
    name = self.get_name(func)

    @wraps(func)
    def wrapped(*args, **kwargs):
        'wrapped version of func'
        message = self.get_message_from_call(*args, **kwargs)
        self.logger.info('calling "%s" with %r', name, message)
        result = func(message)
        # functions can return multiple values ("emit" multiple times)
        # by yielding instead of returning. Handle this case by making
        # a list of the results and processing them all after the
        # generator successfully exits. If we were to process them as
        # they came out of the generator, we might get a partially
        # processed input sent down the graph. This may be possible in
        # the future via a flag.
        if isinstance(result, GeneratorType):
            results = [
                self.wrap_result(name, item)
                for item in result
                if item is not NoResult
            ]
            self.logger.debug(
                '%s returned generator yielding %d items', func, len(results)
            )
            # Route each emitted value.  A plain loop (rather than a
            # side-effect list comprehension) makes the intent explicit.
            for item in results:
                self.route(name, item)
            return tuple(results)
        # the case of a direct return is simpler. wrap, route, and
        # return the value.
        else:
            if result is NoResult:
                return result
            result = self.wrap_result(name, result)
            self.logger.debug(
                '%s returned single value %s', func, result
            )
            self.route(name, result)
            return result

    return wrapped
[ "def", "wrap_as_node", "(", "self", ",", "func", ")", ":", "name", "=", "self", ".", "get_name", "(", "func", ")", "@", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "'wrapped version of func'", "me...
38.4
0.001693
def add_to_typedef(typedef_curr, obo_line):
    """Add new fields to the current typedef.

    Parses one line of an OBO [Typedef] stanza and stores its value on
    *typedef_curr*.  For relationship tags (transitive_over,
    inverse_of) any trailing "! comment" text is stripped.
    """
    if obo_line[:4] == "id: ":
        assert not typedef_curr.item_id
        item_id = obo_line[4:]
        typedef_curr.item_id = item_id
    elif obo_line[:6] == "name: ":
        assert not typedef_curr.name
        typedef_curr.name = obo_line[6:]
    elif obo_line[:11] == "namespace: ":
        assert not typedef_curr.namespace
        typedef_curr.namespace = obo_line[11:]
    elif obo_line[:17] == "transitive_over: ":
        # BUG FIX: the test previously sliced from the wrong end
        # (obo_line[17:] == "transitive_over: "), so this branch could
        # never match.
        field_value = obo_line[17:].split('!')[0].rstrip()
        typedef_curr.transitive_over.append(field_value)
    elif obo_line[:12] == "inverse_of: ":
        # BUG FIX: same inverted slice (obo_line[12:] == "inverse_of");
        # compare the 12-char prefix "inverse_of: " so the value slice
        # below lines up with the tag.
        assert not typedef_curr.inverse_of
        field_value = obo_line[12:].split('!')[0].rstrip()
        typedef_curr.inverse_of = field_value
[ "def", "add_to_typedef", "(", "typedef_curr", ",", "obo_line", ")", ":", "if", "obo_line", "[", ":", "4", "]", "==", "\"id: \"", ":", "assert", "not", "typedef_curr", ".", "item_id", "item_id", "=", "obo_line", "[", "4", ":", "]", "typedef_curr", ".", "i...
42.578947
0.001209
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'): """Try import all public attributes from module into global namespace. Existing attributes with name clashes are renamed with prefix. Attributes starting with underscore are ignored by default. Return True on successful import. """ try: module = __import__(module_name) except ImportError: if warn: warnings.warn("Failed to import module " + module_name) else: for attr in dir(module): if ignore and attr.startswith(ignore): continue if prefix: if attr in globals(): globals()[prefix + attr] = globals()[attr] elif warn: warnings.warn("No Python implementation of " + attr) globals()[attr] = getattr(module, attr) return True
[ "def", "_import_module", "(", "module_name", ",", "warn", "=", "True", ",", "prefix", "=", "'_py_'", ",", "ignore", "=", "'_'", ")", ":", "try", ":", "module", "=", "__import__", "(", "module_name", ")", "except", "ImportError", ":", "if", "warn", ":", ...
35
0.001112
def setMinimumPixmapSize(self, size):
    """
    Sets the minimum pixmap size that will be displayed to the user
    for the dock widget.

    :param      size | <int>
    """
    self._minimumPixmapSize = size

    # Re-apply the current position so the widget recomputes its
    # layout with the new minimum size (clearing the cached position
    # first forces setPosition to do the work).
    current = self.position()
    self._position = None
    self.setPosition(current)
[ "def", "setMinimumPixmapSize", "(", "self", ",", "size", ")", ":", "self", ".", "_minimumPixmapSize", "=", "size", "position", "=", "self", ".", "position", "(", ")", "self", ".", "_position", "=", "None", "self", ".", "setPosition", "(", "position", ")" ]
31.090909
0.008523
def set_dword_at_offset(self, offset, dword):
    """Set the double word value at the given file offset."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_offset(offset, packed)
[ "def", "set_dword_at_offset", "(", "self", ",", "offset", ",", "dword", ")", ":", "return", "self", ".", "set_bytes_at_offset", "(", "offset", ",", "self", ".", "get_data_from_dword", "(", "dword", ")", ")" ]
63.333333
0.015625
def collect(self, dataset_readers_list):
    """collect results

    Returns:
        a list of results, one per component collector
    """
    total = len(self.components)
    results = []
    for index, collector in enumerate(self.components):
        progress = ProgressReport(name='collecting results',
                                  done=(index + 1), total=total)
        alphatwirl.progressbar.report_progress(progress)
        # pick the index-th reader out of each composite for this collector
        pairs = [
            (dataset, tuple(r.readers[index] for r in composites))
            for dataset, composites in dataset_readers_list
        ]
        results.append(collector.collect(pairs))
    return results
[ "def", "collect", "(", "self", ",", "dataset_readers_list", ")", ":", "ret", "=", "[", "]", "for", "i", ",", "collector", "in", "enumerate", "(", "self", ".", "components", ")", ":", "report", "=", "ProgressReport", "(", "name", "=", "'collecting results'"...
35.625
0.010256
def createSynapses(self):
    """Add an exponentially decaying synapse """
    # ExpSyn at the middle (0.5) of the soma section; tau is the decay
    # time constant and e the reversal potential (NEURON ExpSyn fields)
    synsoma = h.ExpSyn(self.soma(0.5))
    synsoma.tau = 2
    synsoma.e = 0
    # matching synapse at the middle of the dendrite section
    syndend = h.ExpSyn(self.dend(0.5))
    syndend.tau = 2
    syndend.e = 0
    self.synlist.append(synsoma) # synlist is defined in Cell
    self.synlist.append(syndend)
[ "def", "createSynapses", "(", "self", ")", ":", "synsoma", "=", "h", ".", "ExpSyn", "(", "self", ".", "soma", "(", "0.5", ")", ")", "synsoma", ".", "tau", "=", "2", "synsoma", ".", "e", "=", "0", "syndend", "=", "h", ".", "ExpSyn", "(", "self", ...
35
0.008357
def run_conditional_decorators(self, context):
    """Evaluate the step decorators to decide whether to run step or not.

    Use pypyr.dsl.Step.run_step if you intend on executing the step the
    same way pypyr does.

    Honors run/skip/swallow/retry: the step runs only if run is truthy
    and skip is falsy; errors are logged-and-ignored when swallow is
    truthy, re-raised otherwise.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.
    """
    logger.debug("starting")

    # The decorator attributes might contain formatting expressions that
    # change whether they evaluate True or False, thus apply formatting at
    # last possible instant.
    run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
    skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
    swallow_me = context.get_formatted_as_type(self.swallow_me,
                                               out_type=bool)

    if run_me:
        if not skip_me:
            try:
                if self.retry_decorator:
                    # delegate invocation to the retry loop, which will
                    # call invoke_step per its retry configuration
                    self.retry_decorator.retry_loop(context,
                                                    self.invoke_step)
                else:
                    self.invoke_step(context=context)
            except Exception as ex_info:
                if swallow_me:
                    # swallow=True: log and continue the pipeline
                    logger.error(
                        f"{self.name} Ignoring error because swallow "
                        "is True for this step.\n"
                        f"{type(ex_info).__name__}: {ex_info}")
                else:
                    raise
        else:
            logger.info(
                f"{self.name} not running because skip is True.")
    else:
        logger.info(f"{self.name} not running because run is False.")

    logger.debug("done")
[ "def", "run_conditional_decorators", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "# The decorator attributes might contain formatting expressions that", "# change whether they evaluate True or False, thus apply formatting at", "# last poss...
41.27907
0.001101
def save_function_tuple(self, func):
    """  Pickles an actual func object.

    A func comprises: code, globals, defaults, closure, and dict.  We
    extract and save these, injecting reducing functions at certain points
    to recreate the func object.  Keep in mind that some of these pieces
    can contain a ref to the func itself.  Thus, a naive save on these
    pieces could trigger an infinite loop of save's.  To get around that,
    we first create a skeleton func object using just the code (this is
    safe, since this won't contain a ref to the func), and memoize it as
    soon as it's created.  The other stuff can then be filled in later.
    """
    # Tornado coroutines cannot be pickled directly; pickle the wrapped
    # generator function and rebuild the coroutine on load instead.
    if is_tornado_coroutine(func):
        self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,),
                         obj=func)
        return

    save = self.save
    write = self.write

    code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func)

    save(_fill_function)  # skeleton function updater
    write(pickle.MARK)    # beginning of tuple that _fill_function expects

    # Save submodules referenced from globals/closure so they are
    # importable when the skeleton function is filled in on load.
    self._save_subimports(
        code,
        itertools.chain(f_globals.values(), closure_values or ()),
    )

    # create a skeleton function object and memoize it
    save(_make_skel_func)
    save((
        code,
        len(closure_values) if closure_values is not None else -1,
        base_globals,
    ))
    write(pickle.REDUCE)
    # Memoize BEFORE saving state: self-referential pieces below must
    # resolve to the already-created skeleton, not recurse forever.
    self.memoize(func)

    # save the rest of the func data needed by _fill_function
    state = {
        'globals': f_globals,
        'defaults': defaults,
        'dict': dct,
        'closure_values': closure_values,
        'module': func.__module__,
        'name': func.__name__,
        'doc': func.__doc__,
    }
    if hasattr(func, '__annotations__') and sys.version_info >= (3, 7):
        state['annotations'] = func.__annotations__
    if hasattr(func, '__qualname__'):
        state['qualname'] = func.__qualname__
    save(state)
    write(pickle.TUPLE)
    # applies _fill_function on (skeleton, state) to finish the func
    write(pickle.REDUCE)
[ "def", "save_function_tuple", "(", "self", ",", "func", ")", ":", "if", "is_tornado_coroutine", "(", "func", ")", ":", "self", ".", "save_reduce", "(", "_rebuild_tornado_coroutine", ",", "(", "func", ".", "__wrapped__", ",", ")", ",", "obj", "=", "func", "...
38.175439
0.001344
def get_parent_tag(mention):
    """Return the HTML tag of the Mention's parent.

    These may be tags such as 'p', 'h2', 'table', 'div', etc.
    If a candidate is passed in, only the tag of its first Mention is
    returned.

    :param mention: The Mention to evaluate
    :rtype: string
    """
    node = _get_node(_to_span(mention).sentence)
    parent = node.getparent()
    return str(parent.tag) if parent is not None else None
[ "def", "get_parent_tag", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "return", "str", "(", "i", ".", "getparent", "(", ")", ".", "tag", ")", "if", "i", ".", "getp...
34.916667
0.002326
def constrain_centers(self):
    """
    Constrain the centers of linked `EPSFStar` objects (i.e. the same
    physical star) to have the same sky coordinate.

    Only `EPSFStar` objects that have not been excluded during the
    ePSF build process will be used to constrain the centers.

    The single sky coordinate is calculated as the mean of sky
    coordinates of the linked stars.
    """
    if len(self._data) < 2:   # no linked stars
        return

    # indices of stars that survived the fit (boolean mask inverted)
    idx = np.logical_not(self._excluded_from_fit).nonzero()[0]
    if len(idx) == 0:
        warnings.warn('Cannot constrain centers of linked stars because '
                      'all the stars have been excluded during the ePSF '
                      'build process.', AstropyUserWarning)
        return

    good_stars = [self._data[i] for i in idx]

    # per-star pixel center -> sky (lon, lat) in degrees
    coords = []
    for star in good_stars:
        coords.append(star.wcs_large.all_pix2world(star.center[0],
                                                   star.center[1], 0))

    # compute mean cartesian coordinates
    # (averaging on the unit sphere avoids the wrap-around problems of
    # averaging angles directly)
    lon, lat = np.transpose(coords)
    lon *= np.pi / 180.
    lat *= np.pi / 180.
    x_mean = np.mean(np.cos(lat) * np.cos(lon))
    y_mean = np.mean(np.cos(lat) * np.sin(lon))
    z_mean = np.mean(np.sin(lat))

    # convert mean cartesian coordinates back to spherical
    hypot = np.hypot(x_mean, y_mean)
    lon = np.arctan2(y_mean, x_mean)
    lat = np.arctan2(z_mean, hypot)
    lon *= 180. / np.pi
    lat *= 180. / np.pi

    # convert mean sky coordinates back to center pixel coordinates
    # for each star
    for star in good_stars:
        center = np.array(star.wcs_large.all_world2pix(lon, lat, 0))
        star.cutout_center = center - star.origin
[ "def", "constrain_centers", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_data", ")", "<", "2", ":", "# no linked stars", "return", "idx", "=", "np", ".", "logical_not", "(", "self", ".", "_excluded_from_fit", ")", ".", "nonzero", "(", ")", "...
37.102041
0.001072
def _get_type(points, soma_class):
    '''get the type of the soma

    Args:
        points: Soma points
        soma_class(str): one of 'contour' or 'cylinder' to specify the type

    Returns:
        The Soma subclass matching the point count and class, or None
        when the point count is invalid for that representation.
    '''
    assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)

    npoints = len(points)
    if soma_class == SOMA_CONTOUR:
        # 0 and 2 points are explicitly invalid for a contour soma;
        # 1 point is a single-point soma; 3+ a simple contour
        return {0: None,
                1: SomaSinglePoint,
                2: None}.get(npoints, SomaSimpleContour)

    # cylinder soma: recognize the NeuroMorpho three-point convention by
    # its characteristic parent IDs (-1, 1, 1)
    if(npoints == 3 and
       points[0][COLS.P] == -1 and
       points[1][COLS.P] == 1 and
       points[2][COLS.P] == 1):
        L.warning('Using neuromorpho 3-Point soma')
        # NeuroMorpho is the main provider of morphologies, but they
        # with SWC as their default file format: they convert all
        # uploads to SWC.  In the process of conversion, they turn all
        # somas into their custom 'Three-point soma representation':
        #  http://neuromorpho.org/SomaFormat.html
        return SomaNeuromorphoThreePointCylinders

    return {0: None, 1: SomaSinglePoint}.get(npoints, SomaCylinders)
[ "def", "_get_type", "(", "points", ",", "soma_class", ")", ":", "assert", "soma_class", "in", "(", "SOMA_CONTOUR", ",", "SOMA_CYLINDER", ")", "npoints", "=", "len", "(", "points", ")", "if", "soma_class", "==", "SOMA_CONTOUR", ":", "return", "{", "0", ":",...
34.366667
0.000943
def restore_catalog_to_ckan(catalog, origin_portal_url, destination_portal_url,
                            apikey, download_strategy=None,
                            generate_new_access_url=None):
    """Restore the datasets of a source catalog into the given portal.
    Themes present in the DataJson but missing from the CKAN portal are
    created.

    Args:
        catalog (DataJson): The source catalog being restored.
        origin_portal_url (str): URL of the origin CKAN portal.
        destination_portal_url (str): URL of the destination CKAN portal.
        apikey (str): The apikey of a user with permissions to create
            or update the datasets.
        download_strategy(callable): A function
            (catalog, distribution) -> bool. For distributions on which
            it evaluates True, the resource at downloadURL is downloaded
            and uploaded to the destination portal. By default no
            distribution is uploaded.
        generate_new_access_url(list): IDs of the distributions whose
            accessURL must be regenerated on the destination portal. For
            the rest, the portal keeps the value given in the DataJson.

    Returns:
        dict: Dictionary keyed by organization, with the list of dataset
            ids pushed to that organization as values.
    """
    catalog['homepage'] = catalog.get('homepage') or origin_portal_url
    res = {}
    origin_portal = RemoteCKAN(origin_portal_url)

    try:
        org_list = origin_portal.action.organization_list()

    except CKANAPIError as e:
        # best-effort: log and return whatever was restored so far
        logger.exception(
            'Ocurrió un error buscando las organizaciones del portal {}: {}'
            .format(origin_portal_url, str(e)))
        print(e)
        return res

    for org in org_list:
        print("Restaurando organizacion {}".format(org))
        response = origin_portal.action.organization_show(
            id=org, include_datasets=True)
        datasets = [package['id'] for package in response['packages']]
        pushed_datasets = restore_organization_to_ckan(
            catalog, org, destination_portal_url, apikey,
            dataset_list=datasets,
            download_strategy=download_strategy,
            generate_new_access_url=generate_new_access_url
        )
        res[org] = pushed_datasets
    return res
[ "def", "restore_catalog_to_ckan", "(", "catalog", ",", "origin_portal_url", ",", "destination_portal_url", ",", "apikey", ",", "download_strategy", "=", "None", ",", "generate_new_access_url", "=", "None", ")", ":", "catalog", "[", "'homepage'", "]", "=", "catalog",...
45.218182
0.000394
def howPlotArgs(goodFormat):
    '''Plot using the argparse flags if given, otherwise fall back to
    the interactive howPlotAsk() prompt.

    Arguments:
        goodFormat {dict} -- module : [results for module]
    '''
    if args.exportplots is not None:
        exportPlotsPath = pathlib.Path(args.exportplots)
        # Export to the requested path; also show interactively only
        # when --showplots was passed.  (Collapses the previous
        # duplicated if/else that differed only in this boolean.)
        plotter(exportPlotsPath, bool(args.showplots), goodFormat)
    elif args.showplots:
        plotter(None, True, goodFormat)
    else:
        howPlotAsk(goodFormat)
[ "def", "howPlotArgs", "(", "goodFormat", ")", ":", "if", "args", ".", "exportplots", "is", "not", "None", ":", "exportPlotsPath", "=", "pathlib", ".", "Path", "(", "args", ".", "exportplots", ")", "if", "args", ".", "showplots", ":", "plotter", "(", "exp...
29.941176
0.001905