Dataset columns:
    text: string (lengths 75 to 104k)
    code_tokens: sequence
    avg_line_len: float64 (7.91 to 980)
    score: float64 (0 to 0.18)
def send_put(self, mri, attribute_name, value):
    """Abstract method to dispatch a Put to the server

    Args:
        mri (str): The mri of the Block
        attribute_name (str): The name of the Attribute within the Block
        value: The value to put
    """
    path = attribute_name + ".value"
    typ, value = convert_to_type_tuple_value(serialize_object(value))
    if isinstance(typ, tuple):
        # Structure, make into a Value
        _, typeid, fields = typ
        value = Value(Type(fields, typeid), value)
    try:
        self._ctxt.put(mri, {path: value}, path)
    except RemoteError:
        if attribute_name == "exports":
            # TODO: use a tag instead of a name
            # This will change the structure of the block
            # Wait for reconnect
            self._queues[mri].get(timeout=DEFAULT_TIMEOUT)
        else:
            # Not expected, raise
            raise
[ "def", "send_put", "(", "self", ",", "mri", ",", "attribute_name", ",", "value", ")", ":", "path", "=", "attribute_name", "+", "\".value\"", "typ", ",", "value", "=", "convert_to_type_tuple_value", "(", "serialize_object", "(", "value", ")", ")", "if", "isinstance", "(", "typ", ",", "tuple", ")", ":", "# Structure, make into a Value", "_", ",", "typeid", ",", "fields", "=", "typ", "value", "=", "Value", "(", "Type", "(", "fields", ",", "typeid", ")", ",", "value", ")", "try", ":", "self", ".", "_ctxt", ".", "put", "(", "mri", ",", "{", "path", ":", "value", "}", ",", "path", ")", "except", "RemoteError", ":", "if", "attribute_name", "==", "\"exports\"", ":", "# TODO: use a tag instead of a name", "# This will change the structure of the block", "# Wait for reconnect", "self", ".", "_queues", "[", "mri", "]", ".", "get", "(", "timeout", "=", "DEFAULT_TIMEOUT", ")", "else", ":", "# Not expected, raise", "raise" ]
39.2
0.001992
def iter_qs(qs, adapter):
    '''Safely iterate over a DB QuerySet yielding ES documents'''
    for obj in qs.no_cache().no_dereference().timeout(False):
        if adapter.is_indexable(obj):
            try:
                doc = adapter.from_model(obj).to_dict(include_meta=True)
                yield doc
            except Exception as e:
                model = adapter.model.__name__
                log.error('Unable to index %s "%s": %s',
                          model, str(obj.id), str(e), exc_info=True)
[ "def", "iter_qs", "(", "qs", ",", "adapter", ")", ":", "for", "obj", "in", "qs", ".", "no_cache", "(", ")", ".", "no_dereference", "(", ")", ".", "timeout", "(", "False", ")", ":", "if", "adapter", ".", "is_indexable", "(", "obj", ")", ":", "try", ":", "doc", "=", "adapter", ".", "from_model", "(", "obj", ")", ".", "to_dict", "(", "include_meta", "=", "True", ")", "yield", "doc", "except", "Exception", "as", "e", ":", "model", "=", "adapter", ".", "model", ".", "__name__", "log", ".", "error", "(", "'Unable to index %s \"%s\": %s'", ",", "model", ",", "str", "(", "obj", ".", "id", ")", ",", "str", "(", "e", ")", ",", "exc_info", "=", "True", ")" ]
45.909091
0.001942
def __interact_copy(self, escape_character=None,
                    input_filter=None, output_filter=None):
    """This is used by the interact() method."""
    while self.isalive():
        r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
        if self.child_fd in r:
            data = self.__interact_read(self.child_fd)
            if output_filter:
                data = output_filter(data)
            if self.logfile is not None:
                self.logfile.write(data)
                self.logfile.flush()
            os.write(self.STDOUT_FILENO, data)
        if self.STDIN_FILENO in r:
            data = self.__interact_read(self.STDIN_FILENO)
            if input_filter:
                data = input_filter(data)
            i = data.rfind(escape_character)
            if i != -1:
                data = data[:i]
                self.__interact_writen(self.child_fd, data)
                break
            self.__interact_writen(self.child_fd, data)
[ "def", "__interact_copy", "(", "self", ",", "escape_character", "=", "None", ",", "input_filter", "=", "None", ",", "output_filter", "=", "None", ")", ":", "while", "self", ".", "isalive", "(", ")", ":", "r", ",", "w", ",", "e", "=", "self", ".", "__select", "(", "[", "self", ".", "child_fd", ",", "self", ".", "STDIN_FILENO", "]", ",", "[", "]", ",", "[", "]", ")", "if", "self", ".", "child_fd", "in", "r", ":", "data", "=", "self", ".", "__interact_read", "(", "self", ".", "child_fd", ")", "if", "output_filter", ":", "data", "=", "output_filter", "(", "data", ")", "if", "self", ".", "logfile", "is", "not", "None", ":", "self", ".", "logfile", ".", "write", "(", "data", ")", "self", ".", "logfile", ".", "flush", "(", ")", "os", ".", "write", "(", "self", ".", "STDOUT_FILENO", ",", "data", ")", "if", "self", ".", "STDIN_FILENO", "in", "r", ":", "data", "=", "self", ".", "__interact_read", "(", "self", ".", "STDIN_FILENO", ")", "if", "input_filter", ":", "data", "=", "input_filter", "(", "data", ")", "i", "=", "data", ".", "rfind", "(", "escape_character", ")", "if", "i", "!=", "-", "1", ":", "data", "=", "data", "[", ":", "i", "]", "self", ".", "__interact_writen", "(", "self", ".", "child_fd", ",", "data", ")", "break", "self", ".", "__interact_writen", "(", "self", ".", "child_fd", ",", "data", ")" ]
43.73913
0.013619
def import_package(rel_path_to_package, package_name):
    """Imports a python package into the current namespace.

    Parameters
    ----------
    rel_path_to_package : str
        Path to the directory containing the package, relative to this
        script's directory.
    package_name : str
        The name of the package to be imported.

    Returns
    -------
    package :
        The imported package object.
    """
    try:
        curr_dir = os.path.dirname(os.path.realpath(__file__))
    except NameError:
        curr_dir = os.path.dirname(os.path.realpath(os.getcwd()))
    package_path = os.path.join(curr_dir, rel_path_to_package)
    if package_path not in sys.path:
        sys.path = [package_path] + sys.path
    package = __import__(package_name)
    return package
[ "def", "import_package", "(", "rel_path_to_package", ",", "package_name", ")", ":", "try", ":", "curr_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "except", "NameError", ":", "curr_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "getcwd", "(", ")", ")", ")", "package_path", "=", "os", ".", "path", ".", "join", "(", "curr_dir", ",", "rel_path_to_package", ")", "if", "package_path", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", "=", "[", "package_path", "]", "+", "sys", ".", "path", "package", "=", "__import__", "(", "package_name", ")", "return", "package" ]
30.36
0.001277
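A hedged usage sketch for import_package above, assuming the function is in scope; the layout and the names 'vendored' and 'mylib' are hypothetical, chosen only to illustrate the relative-path argument:

import os

# Hypothetical layout:
#   project/tools/loader.py            <- the module calling import_package
#   project/vendored/mylib/__init__.py
# From loader.py, the package can then be imported by name:
mylib = import_package(os.path.join('..', 'vendored'), 'mylib')
print(mylib.__name__)  # -> 'mylib'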
def create_incidence_matrix(self, weights=None, fmt='coo', drop_zeros=False):
    r"""
    Creates a weighted incidence matrix in the desired sparse format

    Parameters
    ----------
    weights : array_like, optional
        An array containing the throat values to enter into the matrix
        (in graph theory these are known as the 'weights').  If omitted,
        ones are used to create a standard incidence matrix representing
        connectivity only.
    fmt : string, optional
        The sparse storage format to return.  Options are:

        **'coo'** : (default) This is the native format of OpenPNM's data
        **'lil'** : Enables row-wise slice of the matrix
        **'csr'** : Favored by most linear algebra routines
        **'dok'** : Enables subscript access of locations
    drop_zeros : boolean (default is ``False``)
        If ``True``, applies the ``eliminate_zeros`` method of the sparse
        array to remove all zero locations.

    Returns
    -------
    An incidence matrix in the specified sparse format

    Notes
    -----
    The incidence matrix is a cousin to the adjacency matrix, and used by
    OpenPNM for finding the throats connected to a given pore or set of
    pores.  Specifically, an incidence matrix has Np rows and Nt columns,
    and each row represents a pore, containing non-zero values at the
    locations corresponding to the indices of the throats connected to
    that pore.  The ``weights`` argument indicates what value to place at
    each location, with the default being 1's to simply indicate
    connections.  Another useful option is throat indices, such that the
    data values on each row indicate which throats are connected to the
    pore, though this is redundant as it is identical to the locations of
    non-zeros.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> weights = sp.rand(pn.num_throats(), ) < 0.5
    >>> im = pn.create_incidence_matrix(weights=weights, fmt='csr')
    """
    # Check if provided data is valid
    if weights is None:
        weights = sp.ones((self.Nt,), dtype=int)
    elif sp.shape(weights)[0] != self.Nt:
        raise Exception('Received dataset of incorrect length')
    conn = self['throat.conns']
    row = conn[:, 0]
    row = sp.append(row, conn[:, 1])
    col = sp.arange(self.Nt)
    col = sp.append(col, col)
    weights = sp.append(weights, weights)
    temp = sprs.coo.coo_matrix((weights, (row, col)), (self.Np, self.Nt))
    if drop_zeros:
        temp.eliminate_zeros()
    # Convert to requested format
    if fmt == 'coo':
        pass  # temp is already in coo format
    elif fmt == 'csr':
        temp = temp.tocsr()
    elif fmt == 'lil':
        temp = temp.tolil()
    elif fmt == 'dok':
        temp = temp.todok()
    return temp
[ "def", "create_incidence_matrix", "(", "self", ",", "weights", "=", "None", ",", "fmt", "=", "'coo'", ",", "drop_zeros", "=", "False", ")", ":", "# Check if provided data is valid", "if", "weights", "is", "None", ":", "weights", "=", "sp", ".", "ones", "(", "(", "self", ".", "Nt", ",", ")", ",", "dtype", "=", "int", ")", "elif", "sp", ".", "shape", "(", "weights", ")", "[", "0", "]", "!=", "self", ".", "Nt", ":", "raise", "Exception", "(", "'Received dataset of incorrect length'", ")", "conn", "=", "self", "[", "'throat.conns'", "]", "row", "=", "conn", "[", ":", ",", "0", "]", "row", "=", "sp", ".", "append", "(", "row", ",", "conn", "[", ":", ",", "1", "]", ")", "col", "=", "sp", ".", "arange", "(", "self", ".", "Nt", ")", "col", "=", "sp", ".", "append", "(", "col", ",", "col", ")", "weights", "=", "sp", ".", "append", "(", "weights", ",", "weights", ")", "temp", "=", "sprs", ".", "coo", ".", "coo_matrix", "(", "(", "weights", ",", "(", "row", ",", "col", ")", ")", ",", "(", "self", ".", "Np", ",", "self", ".", "Nt", ")", ")", "if", "drop_zeros", ":", "temp", ".", "eliminate_zeros", "(", ")", "# Convert to requested format", "if", "fmt", "==", "'coo'", ":", "pass", "# temp is already in coo format", "elif", "fmt", "==", "'csr'", ":", "temp", "=", "temp", ".", "tocsr", "(", ")", "elif", "fmt", "==", "'lil'", ":", "temp", "=", "temp", ".", "tolil", "(", ")", "elif", "fmt", "==", "'dok'", ":", "temp", "=", "temp", ".", "todok", "(", ")", "return", "temp" ]
37.604938
0.00096
def _eval_call(self, node):
    """
    Evaluate a function call

    :param node: Node to eval
    :return: Result of node
    """
    try:
        func = self.functions[node.func.id]
    except KeyError:
        raise NameError(node.func.id)

    value = func(
        *(self._eval(a) for a in node.args),
        **dict(self._eval(k) for k in node.keywords)
    )

    if value is True:
        return 1
    elif value is False:
        return 0
    else:
        return value
[ "def", "_eval_call", "(", "self", ",", "node", ")", ":", "try", ":", "func", "=", "self", ".", "functions", "[", "node", ".", "func", ".", "id", "]", "except", "KeyError", ":", "raise", "NameError", "(", "node", ".", "func", ".", "id", ")", "value", "=", "func", "(", "*", "(", "self", ".", "_eval", "(", "a", ")", "for", "a", "in", "node", ".", "args", ")", ",", "*", "*", "dict", "(", "self", ".", "_eval", "(", "k", ")", "for", "k", "in", "node", ".", "keywords", ")", ")", "if", "value", "is", "True", ":", "return", "1", "elif", "value", "is", "False", ":", "return", "0", "else", ":", "return", "value" ]
23.173913
0.003604
def evalPDF(self, u_values):
    '''Returns the PDF of the uncertain parameter evaluated at the values
    provided in u_values.

    :param iterable u_values: values of the uncertain parameter at
        which to evaluate the PDF

    *Example Usage* ::

        >>> u = UniformParameter()
        >>> X = numpy.linspace(-1, 1, 100)
        >>> Y = [u.evalPDF(x) for x in X]

    '''
    if isinstance(u_values, np.ndarray):
        return self._evalPDF(u_values)
    else:
        try:
            iter(u_values)
            return [self._evalPDF(u) for u in u_values]
        except:
            return self._evalPDF(u_values)
[ "def", "evalPDF", "(", "self", ",", "u_values", ")", ":", "if", "isinstance", "(", "u_values", ",", "np", ".", "ndarray", ")", ":", "return", "self", ".", "_evalPDF", "(", "u_values", ")", "else", ":", "try", ":", "iter", "(", "u_values", ")", "return", "[", "self", ".", "_evalPDF", "(", "u", ")", "for", "u", "in", "u_values", "]", "except", ":", "return", "self", ".", "_evalPDF", "(", "u_values", ")" ]
29.391304
0.004298
def format_py2js(cls, datetime_format):
    """Convert python datetime format to moment datetime format."""
    for js_format, py_format in cls.format_map:
        datetime_format = datetime_format.replace(py_format, js_format)
    return datetime_format
[ "def", "format_py2js", "(", "cls", ",", "datetime_format", ")", ":", "for", "js_format", ",", "py_format", "in", "cls", ".", "format_map", ":", "datetime_format", "=", "datetime_format", ".", "replace", "(", "py_format", ",", "js_format", ")", "return", "datetime_format" ]
53.2
0.007407
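A minimal self-contained sketch of how format_py2js above behaves; the three format_map entries are an assumed subset for illustration, not the library's actual table:

class MomentFormat:
    # Assumed subset of format_map: (moment.js token, strftime token) pairs.
    format_map = [('YYYY', '%Y'), ('MM', '%m'), ('DD', '%d')]

    @classmethod
    def format_py2js(cls, datetime_format):
        for js_format, py_format in cls.format_map:
            datetime_format = datetime_format.replace(py_format, js_format)
        return datetime_format

print(MomentFormat.format_py2js('%Y-%m-%d'))  # -> 'YYYY-MM-DD'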
def get_catalog_by_name(name):
    """
    Grabs a catalog by name, if it's there on the api key.
    Otherwise, an error is thrown (mirroring the API)
    """
    kwargs = {
        'name': name,
    }
    result = util.callm("%s/%s" % ('catalog', 'profile'), kwargs)
    return Catalog(**util.fix(result['response']['catalog']))
[ "def", "get_catalog_by_name", "(", "name", ")", ":", "kwargs", "=", "{", "'name'", ":", "name", ",", "}", "result", "=", "util", ".", "callm", "(", "\"%s/%s\"", "%", "(", "'catalog'", ",", "'profile'", ")", ",", "kwargs", ")", "return", "Catalog", "(", "*", "*", "util", ".", "fix", "(", "result", "[", "'response'", "]", "[", "'catalog'", "]", ")", ")" ]
32.9
0.005917
def distribution_files(self) -> Iterator[str]:
    """Find distribution packages."""
    # This is verbatim from flake8
    if self.distribution.packages:
        package_dirs = self.distribution.package_dir or {}
        for package in self.distribution.packages:
            pkg_dir = package
            if package in package_dirs:
                pkg_dir = package_dirs[package]
            elif '' in package_dirs:
                pkg_dir = package_dirs[''] + os.path.sep + pkg_dir
            yield pkg_dir.replace('.', os.path.sep)

    if self.distribution.py_modules:
        for filename in self.distribution.py_modules:
            yield "%s.py" % filename
    # Don't miss the setup.py file itself
    yield "setup.py"
[ "def", "distribution_files", "(", "self", ")", "->", "Iterator", "[", "str", "]", ":", "# This is verbatim from flake8", "if", "self", ".", "distribution", ".", "packages", ":", "package_dirs", "=", "self", ".", "distribution", ".", "package_dir", "or", "{", "}", "for", "package", "in", "self", ".", "distribution", ".", "packages", ":", "pkg_dir", "=", "package", "if", "package", "in", "package_dirs", ":", "pkg_dir", "=", "package_dirs", "[", "package", "]", "elif", "''", "in", "package_dirs", ":", "pkg_dir", "=", "package_dirs", "[", "''", "]", "+", "os", ".", "path", ".", "sep", "+", "pkg_dir", "yield", "pkg_dir", ".", "replace", "(", "'.'", ",", "os", ".", "path", ".", "sep", ")", "if", "self", ".", "distribution", ".", "py_modules", ":", "for", "filename", "in", "self", ".", "distribution", ".", "py_modules", ":", "yield", "\"%s.py\"", "%", "filename", "# Don't miss the setup.py file itself", "yield", "\"setup.py\"" ]
43.166667
0.002519
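A sketch of what distribution_files above yields, calling the method as a plain function on stand-in objects; only the attribute names it reads are real, the values are illustrative:

from types import SimpleNamespace

dist = SimpleNamespace(packages=['pkg', 'pkg.sub'],
                       package_dir={'': 'src'},
                       py_modules=['single'])
cmd = SimpleNamespace(distribution=dist)
# On POSIX this prints ['src/pkg', 'src/pkg/sub', 'single.py', 'setup.py']
print(list(distribution_files(cmd)))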
def _do_report(self, report, in_port, msg):
    """The process run when the querier receives a REPORT message."""
    datapath = msg.datapath
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
        size = 65535
    else:
        size = ofproto.OFPCML_MAX
    update = False
    self._mcast.setdefault(report.address, {})
    if in_port not in self._mcast[report.address]:
        update = True
        self._mcast[report.address][in_port] = True
    if update:
        actions = []
        for port in self._mcast[report.address]:
            actions.append(parser.OFPActionOutput(port))
        self._set_flow_entry(
            datapath, actions, self.server_port, report.address)
        self._set_flow_entry(
            datapath,
            [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, size)],
            in_port, report.address)
[ "def", "_do_report", "(", "self", ",", "report", ",", "in_port", ",", "msg", ")", ":", "datapath", "=", "msg", ".", "datapath", "ofproto", "=", "datapath", ".", "ofproto", "parser", "=", "datapath", ".", "ofproto_parser", "if", "ofproto", ".", "OFP_VERSION", "==", "ofproto_v1_0", ".", "OFP_VERSION", ":", "size", "=", "65535", "else", ":", "size", "=", "ofproto", ".", "OFPCML_MAX", "update", "=", "False", "self", ".", "_mcast", ".", "setdefault", "(", "report", ".", "address", ",", "{", "}", ")", "if", "in_port", "not", "in", "self", ".", "_mcast", "[", "report", ".", "address", "]", ":", "update", "=", "True", "self", ".", "_mcast", "[", "report", ".", "address", "]", "[", "in_port", "]", "=", "True", "if", "update", ":", "actions", "=", "[", "]", "for", "port", "in", "self", ".", "_mcast", "[", "report", ".", "address", "]", ":", "actions", ".", "append", "(", "parser", ".", "OFPActionOutput", "(", "port", ")", ")", "self", ".", "_set_flow_entry", "(", "datapath", ",", "actions", ",", "self", ".", "server_port", ",", "report", ".", "address", ")", "self", ".", "_set_flow_entry", "(", "datapath", ",", "[", "parser", ".", "OFPActionOutput", "(", "ofproto", ".", "OFPP_CONTROLLER", ",", "size", ")", "]", ",", "in_port", ",", "report", ".", "address", ")" ]
36.185185
0.001994
def _find_detections(cum_net_resp, nodes, threshold, thresh_type,
                     samp_rate, realstations, length):
    """
    Find detections within the cumulative network response.

    :type cum_net_resp: numpy.ndarray
    :param cum_net_resp: Array of cumulative network response for nodes
    :type nodes: list
    :param nodes: Nodes associated with the source of energy in the
        cum_net_resp
    :type threshold: float
    :param threshold: Threshold value
    :type thresh_type: str
    :param thresh_type: Either MAD (Median Absolute Deviation) or abs
        (absolute) or RMS (Root Mean Squared)
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz
    :type realstations: list
    :param realstations:
        List of stations used to make the cumulative network response,
        will be reported in the
        :class:`eqcorrscan.core.match_filter.Detection`
    :type length: float
    :param length: Maximum length of peak to look for in seconds

    :returns:
        Detections as :class:`eqcorrscan.core.match_filter.Detection`
        objects.
    :rtype: list
    """
    cum_net_resp = np.nan_to_num(cum_net_resp)  # Force no NaNs
    if np.isnan(cum_net_resp).any():
        raise ValueError("Nans present")
    print('Mean of data is: ' + str(np.median(cum_net_resp)))
    print('RMS of data is: ' + str(np.sqrt(np.mean(np.square(cum_net_resp)))))
    print('MAD of data is: ' + str(np.median(np.abs(cum_net_resp))))
    if thresh_type == 'MAD':
        thresh = (np.median(np.abs(cum_net_resp)) * threshold)
    elif thresh_type == 'abs':
        thresh = threshold
    elif thresh_type == 'RMS':
        thresh = _rms(cum_net_resp) * threshold
    print('Threshold is set to: ' + str(thresh))
    print('Max of data is: ' + str(max(cum_net_resp)))
    peaks = findpeaks.find_peaks2_short(cum_net_resp, thresh,
                                        length * samp_rate, debug=0)
    detections = []
    if peaks:
        for peak in peaks:
            node = nodes[peak[1]]
            detections.append(
                Detection(template_name=str(node[0]) + '_' + str(node[1]) +
                          '_' + str(node[2]),
                          detect_time=peak[1] / samp_rate,
                          no_chans=len(realstations), detect_val=peak[0],
                          threshold=thresh, typeofdet='brightness',
                          chans=realstations,
                          id=str(node[0]) + '_' + str(node[1]) + '_' +
                          str(node[2]) + str(peak[1] / samp_rate),
                          threshold_type=thresh_type,
                          threshold_input=threshold))
    else:
        detections = []
    print('I have found ' + str(len(peaks)) + ' possible detections')
    return detections
[ "def", "_find_detections", "(", "cum_net_resp", ",", "nodes", ",", "threshold", ",", "thresh_type", ",", "samp_rate", ",", "realstations", ",", "length", ")", ":", "cum_net_resp", "=", "np", ".", "nan_to_num", "(", "cum_net_resp", ")", "# Force no NaNs", "if", "np", ".", "isnan", "(", "cum_net_resp", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"Nans present\"", ")", "print", "(", "'Mean of data is: '", "+", "str", "(", "np", ".", "median", "(", "cum_net_resp", ")", ")", ")", "print", "(", "'RMS of data is: '", "+", "str", "(", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "square", "(", "cum_net_resp", ")", ")", ")", ")", ")", "print", "(", "'MAD of data is: '", "+", "str", "(", "np", ".", "median", "(", "np", ".", "abs", "(", "cum_net_resp", ")", ")", ")", ")", "if", "thresh_type", "==", "'MAD'", ":", "thresh", "=", "(", "np", ".", "median", "(", "np", ".", "abs", "(", "cum_net_resp", ")", ")", "*", "threshold", ")", "elif", "thresh_type", "==", "'abs'", ":", "thresh", "=", "threshold", "elif", "thresh_type", "==", "'RMS'", ":", "thresh", "=", "_rms", "(", "cum_net_resp", ")", "*", "threshold", "print", "(", "'Threshold is set to: '", "+", "str", "(", "thresh", ")", ")", "print", "(", "'Max of data is: '", "+", "str", "(", "max", "(", "cum_net_resp", ")", ")", ")", "peaks", "=", "findpeaks", ".", "find_peaks2_short", "(", "cum_net_resp", ",", "thresh", ",", "length", "*", "samp_rate", ",", "debug", "=", "0", ")", "detections", "=", "[", "]", "if", "peaks", ":", "for", "peak", "in", "peaks", ":", "node", "=", "nodes", "[", "peak", "[", "1", "]", "]", "detections", ".", "append", "(", "Detection", "(", "template_name", "=", "str", "(", "node", "[", "0", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "1", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "2", "]", ")", ",", "detect_time", "=", "peak", "[", "1", "]", "/", "samp_rate", ",", "no_chans", "=", "len", "(", "realstations", ")", ",", "detect_val", "=", "peak", "[", "0", "]", ",", "threshold", "=", "thresh", ",", "typeofdet", "=", "'brightness'", ",", "chans", "=", "realstations", ",", "id", "=", "str", "(", "node", "[", "0", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "1", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "2", "]", ")", "+", "str", "(", "peak", "[", "1", "]", "/", "samp_rate", ")", ",", "threshold_type", "=", "thresh_type", ",", "threshold_input", "=", "threshold", ")", ")", "else", ":", "detections", "=", "[", "]", "print", "(", "'I have found '", "+", "str", "(", "len", "(", "peaks", ")", ")", "+", "' possible detections'", ")", "return", "detections" ]
43.253968
0.000359
def _message_hostgroup_parse(self, message):
    """ Parse given message and return list of group names and socket
    information. Socket information is parsed in
    :meth:`.WBeaconGouverneurMessenger._message_address_parse` method

    :param message: bytes
    :return: tuple of list of group names and WIPV4SocketInfo
    """
    splitter_count = message.count(WHostgroupBeaconMessenger.__message_groups_splitter__)
    if splitter_count == 0:
        return [], WBeaconGouverneurMessenger._message_address_parse(self, message)
    elif splitter_count == 1:
        splitter_pos = message.find(WHostgroupBeaconMessenger.__message_groups_splitter__)
        groups = []
        group_splitter = WHostgroupBeaconMessenger.__group_splitter__
        for group_name in message[(splitter_pos + 1):].split(group_splitter):
            groups.append(group_name.strip())
        address = WBeaconGouverneurMessenger._message_address_parse(self, message[:splitter_pos])
        return groups, address
    else:
        raise ValueError('Invalid message. Too many separators')
[ "def", "_message_hostgroup_parse", "(", "self", ",", "message", ")", ":", "splitter_count", "=", "message", ".", "count", "(", "WHostgroupBeaconMessenger", ".", "__message_groups_splitter__", ")", "if", "splitter_count", "==", "0", ":", "return", "[", "]", ",", "WBeaconGouverneurMessenger", ".", "_message_address_parse", "(", "self", ",", "message", ")", "elif", "splitter_count", "==", "1", ":", "splitter_pos", "=", "message", ".", "find", "(", "WHostgroupBeaconMessenger", ".", "__message_groups_splitter__", ")", "groups", "=", "[", "]", "group_splitter", "=", "WHostgroupBeaconMessenger", ".", "__group_splitter__", "for", "group_name", "in", "message", "[", "(", "splitter_pos", "+", "1", ")", ":", "]", ".", "split", "(", "group_splitter", ")", ":", "groups", ".", "append", "(", "group_name", ".", "strip", "(", ")", ")", "address", "=", "WBeaconGouverneurMessenger", ".", "_message_address_parse", "(", "self", ",", "message", "[", ":", "splitter_pos", "]", ")", "return", "groups", ",", "address", "else", ":", "raise", "ValueError", "(", "'Invalid message. Too many separators'", ")" ]
49.1
0.024975
def get_env():
    """
    Read environment from ENV and mangle it to a (lower case) representation
    Note: gcdt.utils get_env() is used in many cloudformation.py templates
    :return: Environment as lower case string (or None if not matched)
    """
    env = os.getenv('ENV', os.getenv('env', None))
    if env:
        env = env.lower()
    return env
[ "def", "get_env", "(", ")", ":", "env", "=", "os", ".", "getenv", "(", "'ENV'", ",", "os", ".", "getenv", "(", "'env'", ",", "None", ")", ")", "if", "env", ":", "env", "=", "env", ".", "lower", "(", ")", "return", "env" ]
34.8
0.002801
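Usage of get_env above is straightforward, assuming the function is importable:

import os

os.environ['ENV'] = 'PROD'
print(get_env())  # -> 'prod'

del os.environ['ENV']
print(get_env())  # -> None, unless a lower-case 'env' variable is set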
def UpdateAcqEraEndDate(self, acquisition_era_name="", end_date=0):
    """
    Input dictionary has to have the following keys:
    acquisition_era_name, end_date.
    """
    if acquisition_era_name == "" or end_date == 0:
        dbsExceptionHandler('dbsException-invalid-input',
                            "acquisition_era_name and end_date are required")
    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        self.acqud.execute(conn, acquisition_era_name, end_date, tran)
        if tran:
            tran.commit()
        tran = None
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
[ "def", "UpdateAcqEraEndDate", "(", "self", ",", "acquisition_era_name", "=", "\"\"", ",", "end_date", "=", "0", ")", ":", "if", "acquisition_era_name", "==", "\"\"", "or", "end_date", "==", "0", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"acquisition_era_name and end_date are required\"", ")", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "tran", "=", "conn", ".", "begin", "(", ")", "try", ":", "self", ".", "acqud", ".", "execute", "(", "conn", ",", "acquisition_era_name", ",", "end_date", ",", "tran", ")", "if", "tran", ":", "tran", ".", "commit", "(", ")", "tran", "=", "None", "finally", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
39.75
0.018433
def store_policy(self, pol_id, policy):
    """Store the policy.

    Policy is maintained as a dictionary of pol ID.
    """
    if pol_id not in self.policies:
        self.policies[pol_id] = policy
        self.policy_cnt += 1
[ "def", "store_policy", "(", "self", ",", "pol_id", ",", "policy", ")", ":", "if", "pol_id", "not", "in", "self", ".", "policies", ":", "self", ".", "policies", "[", "pol_id", "]", "=", "policy", "self", ".", "policy_cnt", "+=", "1" ]
30.75
0.007905
def InitMapping(self, values):
    """Initializes with a map from value to probability.

    values: map from value to probability
    """
    for value, prob in values.iteritems():
        self.Set(value, prob)
[ "def", "InitMapping", "(", "self", ",", "values", ")", ":", "for", "value", ",", "prob", "in", "values", ".", "iteritems", "(", ")", ":", "self", ".", "Set", "(", "value", ",", "prob", ")" ]
32.142857
0.008658
def compare_networks(self, other):
    """Compare two IP objects.

    This is only concerned about the comparison of the integer
    representation of the network addresses.  This means that the host
    bits aren't considered at all in this method.  If you want to compare
    host bits, you can easily enough do a 'HostA._ip < HostB._ip'

    Args:
        other: An IP object.

    Returns:
        If the IP versions of self and other are the same, returns:

        -1 if self < other:
            eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
            IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
        0 if self == other
            eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
            IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
        1 if self > other
            eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
            IPv6('1080::1:200C:417A/112') > IPv6('1080::0:200C:417A/112')

        If the IP versions of self and other are different, returns:

        -1 if self._version < other._version
            eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
        1 if self._version > other._version
            eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
    """
    if self._version < other._version:
        return -1
    if self._version > other._version:
        return 1
    # self._version == other._version below here:
    if self.network < other.network:
        return -1
    if self.network > other.network:
        return 1
    # self.network == other.network below here:
    if self.netmask < other.netmask:
        return -1
    if self.netmask > other.netmask:
        return 1
    # self.network == other.network and self.netmask == other.netmask
    return 0
[ "def", "compare_networks", "(", "self", ",", "other", ")", ":", "if", "self", ".", "_version", "<", "other", ".", "_version", ":", "return", "-", "1", "if", "self", ".", "_version", ">", "other", ".", "_version", ":", "return", "1", "# self._version == other._version below here:", "if", "self", ".", "network", "<", "other", ".", "network", ":", "return", "-", "1", "if", "self", ".", "network", ">", "other", ".", "network", ":", "return", "1", "# self.network == other.network below here:", "if", "self", ".", "netmask", "<", "other", ".", "netmask", ":", "return", "-", "1", "if", "self", ".", "netmask", ">", "other", ".", "netmask", ":", "return", "1", "# self.network == other.network and self.netmask == other.netmask", "return", "0" ]
36.58
0.001065
def record_list_projects(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /record-xxxx/listProjects API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2FlistProjects
    """
    return DXHTTPRequest('/%s/listProjects' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
[ "def", "record_list_projects", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/listProjects'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
54.428571
0.010336
def add_tip_labels_to_axes(self):
    """
    Add text offset from tips of tree with correction for orientation,
    and fixed_order which is usually used in multitree plotting.
    """
    # get tip-coords and replace if using fixed_order
    if self.style.orient in ("up", "down"):
        ypos = np.zeros(self.ntips)
        xpos = np.arange(self.ntips)

    if self.style.orient in ("right", "left"):
        xpos = np.zeros(self.ntips)
        ypos = np.arange(self.ntips)

    # pop fill from color dict if using color
    if self.style.tip_labels_colors:
        self.style.tip_labels_style.pop("fill")

    # fill anchor shift if None
    # (Toytrees fill this at draw() normally when tip_labels != None)
    if self.style.tip_labels_style["-toyplot-anchor-shift"] is None:
        self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px"

    # add tip names to coordinates calculated above
    self.axes.text(
        xpos,
        ypos,
        self.tip_labels,
        angle=(0 if self.style.orient in ("right", "left") else -90),
        style=self.style.tip_labels_style,
        color=self.style.tip_labels_colors,
    )

    # get stroke-width for aligned tip-label lines (optional)
    # copy stroke-width from the edge_style unless user set it
    if not self.style.edge_align_style.get("stroke-width"):
        self.style.edge_align_style['stroke-width'] = (
            self.style.edge_style['stroke-width'])
[ "def", "add_tip_labels_to_axes", "(", "self", ")", ":", "# get tip-coords and replace if using fixed_order", "if", "self", ".", "style", ".", "orient", "in", "(", "\"up\"", ",", "\"down\"", ")", ":", "ypos", "=", "np", ".", "zeros", "(", "self", ".", "ntips", ")", "xpos", "=", "np", ".", "arange", "(", "self", ".", "ntips", ")", "if", "self", ".", "style", ".", "orient", "in", "(", "\"right\"", ",", "\"left\"", ")", ":", "xpos", "=", "np", ".", "zeros", "(", "self", ".", "ntips", ")", "ypos", "=", "np", ".", "arange", "(", "self", ".", "ntips", ")", "# pop fill from color dict if using color", "if", "self", ".", "style", ".", "tip_labels_colors", ":", "self", ".", "style", ".", "tip_labels_style", ".", "pop", "(", "\"fill\"", ")", "# fill anchor shift if None ", "# (Toytrees fill this at draw() normally when tip_labels != None)", "if", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", "is", "None", ":", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", "=", "\"15px\"", "# add tip names to coordinates calculated above", "self", ".", "axes", ".", "text", "(", "xpos", ",", "ypos", ",", "self", ".", "tip_labels", ",", "angle", "=", "(", "0", "if", "self", ".", "style", ".", "orient", "in", "(", "\"right\"", ",", "\"left\"", ")", "else", "-", "90", ")", ",", "style", "=", "self", ".", "style", ".", "tip_labels_style", ",", "color", "=", "self", ".", "style", ".", "tip_labels_colors", ",", ")", "# get stroke-width for aligned tip-label lines (optional)", "# copy stroke-width from the edge_style unless user set it", "if", "not", "self", ".", "style", ".", "edge_align_style", ".", "get", "(", "\"stroke-width\"", ")", ":", "self", ".", "style", ".", "edge_align_style", "[", "'stroke-width'", "]", "=", "(", "self", ".", "style", ".", "edge_style", "[", "'stroke-width'", "]", ")" ]
41.27027
0.003199
def clear_rtag(opts):
    '''
    Remove the rtag file
    '''
    try:
        os.remove(rtag(opts))
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            # Using __str__() here to get the fully-formatted error message
            # (error number, error message, path)
            log.warning('Encountered error removing rtag: %s', exc.__str__())
[ "def", "clear_rtag", "(", "opts", ")", ":", "try", ":", "os", ".", "remove", "(", "rtag", "(", "opts", ")", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "# Using __str__() here to get the fully-formatted error message", "# (error number, error message, path)", "log", ".", "warning", "(", "'Encountered error removing rtag: %s'", ",", "exc", ".", "__str__", "(", ")", ")" ]
32.727273
0.002703
def setbit(self, name, offset, value):
    """
    Flag the ``offset`` in the key as ``value``.
    Returns a boolean indicating the previous value of ``offset``.

    :param name: str     the name of the redis key
    :param offset: int
    :param value:
    :return: Future()
    """
    with self.pipe as pipe:
        return pipe.setbit(self.redis_key(name), offset, value)
[ "def", "setbit", "(", "self", ",", "name", ",", "offset", ",", "value", ")", ":", "with", "self", ".", "pipe", "as", "pipe", ":", "return", "pipe", ".", "setbit", "(", "self", ".", "redis_key", "(", "name", ")", ",", "offset", ",", "value", ")" ]
33.916667
0.004785
def _overwrite(self, filename, func, force=False):
    """Overwrite a file with the specified contents.

    Write times are tracked, too-frequent overwrites are skipped, for
    performance reasons.

    :param filename: The path under the html dir to write to.
    :param func: A no-arg function that returns the contents to write.
    :param force: Whether to force a write now, regardless of the last
                  overwrite time.
    """
    now = int(time.time() * 1000)
    last_overwrite_time = self._last_overwrite_time.get(filename) or now
    # Overwrite only once per second.
    if (now - last_overwrite_time >= 1000) or force:
        if os.path.exists(self._html_dir):  # Make sure we're not immediately after a clean-all.
            with open(os.path.join(self._html_dir, filename), 'w') as f:
                f.write(func())
        self._last_overwrite_time[filename] = now
[ "def", "_overwrite", "(", "self", ",", "filename", ",", "func", ",", "force", "=", "False", ")", ":", "now", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "last_overwrite_time", "=", "self", ".", "_last_overwrite_time", ".", "get", "(", "filename", ")", "or", "now", "# Overwrite only once per second.", "if", "(", "now", "-", "last_overwrite_time", ">=", "1000", ")", "or", "force", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_html_dir", ")", ":", "# Make sure we're not immediately after a clean-all.", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_html_dir", ",", "filename", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "func", "(", ")", ")", "self", ".", "_last_overwrite_time", "[", "filename", "]", "=", "now" ]
49.647059
0.00814
def compute(self):
    """
    This method can be used for computing one MaxSAT solution, i.e. for
    computing an assignment satisfying all hard clauses of the input
    formula and maximizing the sum of weights of satisfied soft clauses.
    It is a wrapper for the internal :func:`compute_` method, which does
    the job, followed by the model extraction.

    Note that the method returns ``None`` if no MaxSAT model exists. The
    method can be called multiple times, each being followed by blocking
    the last model. This way one can enumerate top-:math:`k` MaxSAT
    solutions (this can also be done by calling :meth:`enumerate()`).

    :returns: a MaxSAT model
    :rtype: list(int)

    .. code-block:: python

        >>> from pysat.examples.rc2 import RC2
        >>> from pysat.formula import WCNF
        >>>
        >>> rc2 = RC2(WCNF())  # passing an empty WCNF() formula
        >>> rc2.add_clause([-1, -2])
        >>> rc2.add_clause([-1, -3])
        >>> rc2.add_clause([-2, -3])
        >>>
        >>> rc2.add_clause([1], weight=1)
        >>> rc2.add_clause([2], weight=1)
        >>> rc2.add_clause([3], weight=1)
        >>>
        >>> model = rc2.compute()
        >>> print model
        [-1, -2, 3]
        >>> print rc2.cost
        2
        >>> rc2.delete()
    """
    # simply apply MaxSAT only once
    res = self.compute_()

    if res:
        # extracting a model
        self.model = self.oracle.get_model()
        self.model = filter(lambda l: abs(l) in self.vmap.i2e, self.model)
        self.model = map(lambda l: int(copysign(self.vmap.i2e[abs(l)], l)),
                         self.model)
        self.model = sorted(self.model, key=lambda l: abs(l))

        return self.model
[ "def", "compute", "(", "self", ")", ":", "# simply apply MaxSAT only once", "res", "=", "self", ".", "compute_", "(", ")", "if", "res", ":", "# extracting a model", "self", ".", "model", "=", "self", ".", "oracle", ".", "get_model", "(", ")", "self", ".", "model", "=", "filter", "(", "lambda", "l", ":", "abs", "(", "l", ")", "in", "self", ".", "vmap", ".", "i2e", ",", "self", ".", "model", ")", "self", ".", "model", "=", "map", "(", "lambda", "l", ":", "int", "(", "copysign", "(", "self", ".", "vmap", ".", "i2e", "[", "abs", "(", "l", ")", "]", ",", "l", ")", ")", ",", "self", ".", "model", ")", "self", ".", "model", "=", "sorted", "(", "self", ".", "model", ",", "key", "=", "lambda", "l", ":", "abs", "(", "l", ")", ")", "return", "self", ".", "model" ]
38.470588
0.001491
def get_doc_id(document_pb, expected_prefix):
    """Parse a document ID from a document protobuf.

    Args:
        document_pb (google.cloud.proto.firestore.v1beta1.\
            document_pb2.Document): A protobuf for a document that
            was created in a ``CreateDocument`` RPC.
        expected_prefix (str): The expected collection prefix for the
            fully-qualified document name.

    Returns:
        str: The document ID from the protobuf.

    Raises:
        ValueError: If the name does not begin with the prefix.
    """
    prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)
    if prefix != expected_prefix:
        raise ValueError(
            "Unexpected document name",
            document_pb.name,
            "Expected to begin with",
            expected_prefix,
        )

    return document_id
[ "def", "get_doc_id", "(", "document_pb", ",", "expected_prefix", ")", ":", "prefix", ",", "document_id", "=", "document_pb", ".", "name", ".", "rsplit", "(", "DOCUMENT_PATH_DELIMITER", ",", "1", ")", "if", "prefix", "!=", "expected_prefix", ":", "raise", "ValueError", "(", "\"Unexpected document name\"", ",", "document_pb", ".", "name", ",", "\"Expected to begin with\"", ",", "expected_prefix", ",", ")", "return", "document_id" ]
32.038462
0.001166
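A sketch of the expected input for get_doc_id above, assuming the function is in scope; the resource name is made up, and DOCUMENT_PATH_DELIMITER is assumed to be '/' (Firestore resource paths use slash-separated segments):

from types import SimpleNamespace

DOCUMENT_PATH_DELIMITER = '/'  # assumed value of the module-level constant

doc = SimpleNamespace(
    name='projects/p/databases/(default)/documents/users/alice')
prefix = 'projects/p/databases/(default)/documents/users'
print(get_doc_id(doc, prefix))  # -> 'alice'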
def run(self, background=False):
    """
    Method to start the worker.

    Parameters
    ----------
    background: bool
        If set to False (default), the worker is executed in the current
        thread. If True, a new daemon thread is created that runs the
        worker. This is useful in a single worker scenario/when the
        compute function only simulates work.
    """
    if background:
        self.worker_id += str(threading.get_ident())
        self.thread = threading.Thread(target=self._run,
                                       name='worker %s thread' % self.worker_id)
        self.thread.daemon = True
        self.thread.start()
    else:
        self._run()
[ "def", "run", "(", "self", ",", "background", "=", "False", ")", ":", "if", "background", ":", "self", ".", "worker_id", "+=", "str", "(", "threading", ".", "get_ident", "(", ")", ")", "self", ".", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run", ",", "name", "=", "'worker %s thread'", "%", "self", ".", "worker_id", ")", "self", ".", "thread", ".", "daemon", "=", "True", "self", ".", "thread", ".", "start", "(", ")", "else", ":", "self", ".", "_run", "(", ")" ]
30.315789
0.040404
def _prepare_if_args(stmt):
    """Removes parentheses from an "if" directive's arguments"""
    args = stmt['args']
    if args and args[0].startswith('(') and args[-1].endswith(')'):
        args[0] = args[0][1:].lstrip()
        args[-1] = args[-1][:-1].rstrip()
        start = int(not args[0])
        end = len(args) - int(not args[-1])
        args[:] = args[start:end]
[ "def", "_prepare_if_args", "(", "stmt", ")", ":", "args", "=", "stmt", "[", "'args'", "]", "if", "args", "and", "args", "[", "0", "]", ".", "startswith", "(", "'('", ")", "and", "args", "[", "-", "1", "]", ".", "endswith", "(", "')'", ")", ":", "args", "[", "0", "]", "=", "args", "[", "0", "]", "[", "1", ":", "]", ".", "lstrip", "(", ")", "args", "[", "-", "1", "]", "=", "args", "[", "-", "1", "]", "[", ":", "-", "1", "]", ".", "rstrip", "(", ")", "start", "=", "int", "(", "not", "args", "[", "0", "]", ")", "end", "=", "len", "(", "args", ")", "-", "int", "(", "not", "args", "[", "-", "1", "]", ")", "args", "[", ":", "]", "=", "args", "[", "start", ":", "end", "]" ]
40.888889
0.00266
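Two worked examples of the stripping behavior of _prepare_if_args above, assuming the function is in scope; the nginx-style arguments are illustrative:

stmt = {'args': ['($request_method', '=', 'POST)']}
_prepare_if_args(stmt)
print(stmt['args'])  # -> ['$request_method', '=', 'POST']

# Free-standing parentheses collapse to empty strings and are dropped:
stmt = {'args': ['(', '$slow', ')']}
_prepare_if_args(stmt)
print(stmt['args'])  # -> ['$slow']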
def size(self):
    """Calculate and return the file size in bytes."""
    old = self.__file.tell()      # old position
    self.__file.seek(0, 2)        # end of file
    n_bytes = self.__file.tell()  # file size in bytes
    self.__file.seek(old)         # back to old position
    return n_bytes
[ "def", "size", "(", "self", ")", ":", "old", "=", "self", ".", "__file", ".", "tell", "(", ")", "# old position", "self", ".", "__file", ".", "seek", "(", "0", ",", "2", ")", "# end of file", "n_bytes", "=", "self", ".", "__file", ".", "tell", "(", ")", "# file size in bytes", "self", ".", "__file", ".", "seek", "(", "old", ")", "# back to old position", "return", "n_bytes" ]
32.4
0.006006
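The tell/seek dance in size above works on any seekable stream; a self-contained demonstration with io.BytesIO:

import io

f = io.BytesIO(b'hello world')
f.seek(3)          # pretend a read is in progress
old = f.tell()     # remember the current position
f.seek(0, 2)       # whence=2 means os.SEEK_END
print(f.tell())    # -> 11, the size in bytes
f.seek(old)        # restore the caller's position
print(f.tell())    # -> 3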
def parse_logical_form(self,
                       logical_form: str,
                       remove_var_function: bool = True) -> Expression:
    """
    Takes a logical form as a string, maps its tokens using the mapping and
    returns a parsed expression.

    Parameters
    ----------
    logical_form : ``str``
        Logical form to parse
    remove_var_function : ``bool`` (optional)
        ``var`` is a special function that some languages use within lambda
        functions to indicate the usage of a variable. If your language uses
        it, and you do not want to include it in the parsed expression, set
        this flag. You may want to do this if you are generating an action
        sequence from this parsed expression, because it is easier to let the
        decoder not produce this function due to the way constrained decoding
        is currently implemented.
    """
    if not logical_form.startswith("("):
        logical_form = f"({logical_form})"
    if remove_var_function:
        # Replace "(x)" with "x"
        logical_form = re.sub(r'\(([x-z])\)', r'\1', logical_form)
        # Replace "(var x)" with "(x)"
        logical_form = re.sub(r'\(var ([x-z])\)', r'(\1)', logical_form)
    parsed_lisp = semparse_util.lisp_to_nested_expression(logical_form)
    translated_string = self._process_nested_expression(parsed_lisp)
    type_signature = self.local_type_signatures.copy()
    type_signature.update(self.global_type_signatures)
    return self._logic_parser.parse(translated_string, signature=type_signature)
[ "def", "parse_logical_form", "(", "self", ",", "logical_form", ":", "str", ",", "remove_var_function", ":", "bool", "=", "True", ")", "->", "Expression", ":", "if", "not", "logical_form", ".", "startswith", "(", "\"(\"", ")", ":", "logical_form", "=", "f\"({logical_form})\"", "if", "remove_var_function", ":", "# Replace \"(x)\" with \"x\"", "logical_form", "=", "re", ".", "sub", "(", "r'\\(([x-z])\\)'", ",", "r'\\1'", ",", "logical_form", ")", "# Replace \"(var x)\" with \"(x)\"", "logical_form", "=", "re", ".", "sub", "(", "r'\\(var ([x-z])\\)'", ",", "r'(\\1)'", ",", "logical_form", ")", "parsed_lisp", "=", "semparse_util", ".", "lisp_to_nested_expression", "(", "logical_form", ")", "translated_string", "=", "self", ".", "_process_nested_expression", "(", "parsed_lisp", ")", "type_signature", "=", "self", ".", "local_type_signatures", ".", "copy", "(", ")", "type_signature", ".", "update", "(", "self", ".", "global_type_signatures", ")", "return", "self", ".", "_logic_parser", ".", "parse", "(", "translated_string", ",", "signature", "=", "type_signature", ")" ]
54.066667
0.006663
def __expand_meta_datas(meta_datas, meta_datas_expanded):
    """ expand meta_datas to one level

    Args:
        meta_datas (dict/list): maybe in nested format

    Returns:
        list: expanded list in one level

    Examples:
        >>> meta_datas = [
                [
                    dict1,
                    dict2
                ],
                dict3
            ]
        >>> meta_datas_expanded = []
        >>> __expand_meta_datas(meta_datas, meta_datas_expanded)
        >>> print(meta_datas_expanded)
        [dict1, dict2, dict3]

    """
    if isinstance(meta_datas, dict):
        meta_datas_expanded.append(meta_datas)
    elif isinstance(meta_datas, list):
        for meta_data in meta_datas:
            __expand_meta_datas(meta_data, meta_datas_expanded)
[ "def", "__expand_meta_datas", "(", "meta_datas", ",", "meta_datas_expanded", ")", ":", "if", "isinstance", "(", "meta_datas", ",", "dict", ")", ":", "meta_datas_expanded", ".", "append", "(", "meta_datas", ")", "elif", "isinstance", "(", "meta_datas", ",", "list", ")", ":", "for", "meta_data", "in", "meta_datas", ":", "__expand_meta_datas", "(", "meta_data", ",", "meta_datas_expanded", ")" ]
27.357143
0.001261
def server_bind(self):
    """
    Called by constructor to bind the socket.
    """
    if self.allow_reuse_address:
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.socket.bind(self.server_address)
[ "def", "server_bind", "(", "self", ")", ":", "if", "self", ".", "allow_reuse_address", ":", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "self", ".", "socket", ".", "bind", "(", "self", ".", "server_address", ")" ]
31.375
0.007752
def undelay(self):
    '''resolves all delayed arguments'''
    i = 0
    while i < len(self):
        op = self[i]
        i += 1
        if hasattr(op, 'arg1'):
            if isinstance(op.arg1, DelayedArg):
                op.arg1 = op.arg1.resolve()
            if isinstance(op.arg1, CodeBlock):
                op.arg1.undelay()
[ "def", "undelay", "(", "self", ")", ":", "i", "=", "0", "while", "i", "<", "len", "(", "self", ")", ":", "op", "=", "self", "[", "i", "]", "i", "+=", "1", "if", "hasattr", "(", "op", ",", "'arg1'", ")", ":", "if", "isinstance", "(", "op", ".", "arg1", ",", "DelayedArg", ")", ":", "op", ".", "arg1", "=", "op", ".", "arg1", ".", "resolve", "(", ")", "if", "isinstance", "(", "op", ".", "arg1", ",", "CodeBlock", ")", ":", "op", ".", "arg1", ".", "undelay", "(", ")" ]
33.909091
0.010444
def choice(self, question, choices, default=None, attempts=None, multiple=False):
    """
    Give the user a single choice from a list of answers.
    """
    question = ChoiceQuestion(question, choices, default)

    question.set_max_attempts(attempts)
    question.set_multi_select(multiple)

    return self._io.ask_question(question)
[ "def", "choice", "(", "self", ",", "question", ",", "choices", ",", "default", "=", "None", ",", "attempts", "=", "None", ",", "multiple", "=", "False", ")", ":", "question", "=", "ChoiceQuestion", "(", "question", ",", "choices", ",", "default", ")", "question", ".", "set_max_attempts", "(", "attempts", ")", "question", ".", "set_multi_select", "(", "multiple", ")", "return", "self", ".", "_io", ".", "ask_question", "(", "question", ")" ]
35.8
0.008174
def _return_response_and_status_code(response, json_results=True):
    """ Output the requests response content or content as json and status code

    :rtype : dict
    :param response: requests response object
    :param json_results: Should return JSON or raw content
    :return: dict containing the response content and/or the status code with error string.
    """
    if response.status_code == requests.codes.ok:
        return dict(results=response.json() if json_results else response.content,
                    response_code=response.status_code)
    elif response.status_code == 400:
        return dict(
            error='package sent is either malformed or not within the past 24 hours.',
            response_code=response.status_code)
    elif response.status_code == 204:
        return dict(
            error='You exceeded the public API request rate limit (4 requests of any nature per minute)',
            response_code=response.status_code)
    elif response.status_code == 403:
        return dict(
            error='You tried to perform calls to functions for which you require a Private API key.',
            response_code=response.status_code)
    elif response.status_code == 404:
        return dict(error='File not found.', response_code=response.status_code)
    else:
        return dict(response_code=response.status_code)
[ "def", "_return_response_and_status_code", "(", "response", ",", "json_results", "=", "True", ")", ":", "if", "response", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "return", "dict", "(", "results", "=", "response", ".", "json", "(", ")", "if", "json_results", "else", "response", ".", "content", ",", "response_code", "=", "response", ".", "status_code", ")", "elif", "response", ".", "status_code", "==", "400", ":", "return", "dict", "(", "error", "=", "'package sent is either malformed or not within the past 24 hours.'", ",", "response_code", "=", "response", ".", "status_code", ")", "elif", "response", ".", "status_code", "==", "204", ":", "return", "dict", "(", "error", "=", "'You exceeded the public API request rate limit (4 requests of any nature per minute)'", ",", "response_code", "=", "response", ".", "status_code", ")", "elif", "response", ".", "status_code", "==", "403", ":", "return", "dict", "(", "error", "=", "'You tried to perform calls to functions for which you require a Private API key.'", ",", "response_code", "=", "response", ".", "status_code", ")", "elif", "response", ".", "status_code", "==", "404", ":", "return", "dict", "(", "error", "=", "'File not found.'", ",", "response_code", "=", "response", ".", "status_code", ")", "else", ":", "return", "dict", "(", "response_code", "=", "response", ".", "status_code", ")" ]
50.884615
0.005193
def serialize_dict(self, attr, dict_type, **kwargs):
    """Serialize a dictionary of objects.

    :param dict attr: Object to be serialized.
    :param str dict_type: Type of object in the dictionary.
    :param bool required: Whether the objects in the dictionary must
        not be None or empty.
    :rtype: dict
    """
    serialization_ctxt = kwargs.get("serialization_ctxt", {})
    serialized = {}
    for key, value in attr.items():
        try:
            serialized[self.serialize_unicode(key)] = self.serialize_data(
                value, dict_type, **kwargs)
        except ValueError:
            serialized[self.serialize_unicode(key)] = None

    if 'xml' in serialization_ctxt:
        # XML serialization is more complicated
        xml_desc = serialization_ctxt['xml']
        xml_name = xml_desc['name']

        final_result = _create_xml_node(
            xml_name,
            xml_desc.get('prefix', None),
            xml_desc.get('ns', None)
        )
        for key, value in serialized.items():
            ET.SubElement(final_result, key).text = value
        return final_result

    return serialized
[ "def", "serialize_dict", "(", "self", ",", "attr", ",", "dict_type", ",", "*", "*", "kwargs", ")", ":", "serialization_ctxt", "=", "kwargs", ".", "get", "(", "\"serialization_ctxt\"", ",", "{", "}", ")", "serialized", "=", "{", "}", "for", "key", ",", "value", "in", "attr", ".", "items", "(", ")", ":", "try", ":", "serialized", "[", "self", ".", "serialize_unicode", "(", "key", ")", "]", "=", "self", ".", "serialize_data", "(", "value", ",", "dict_type", ",", "*", "*", "kwargs", ")", "except", "ValueError", ":", "serialized", "[", "self", ".", "serialize_unicode", "(", "key", ")", "]", "=", "None", "if", "'xml'", "in", "serialization_ctxt", ":", "# XML serialization is more complicated", "xml_desc", "=", "serialization_ctxt", "[", "'xml'", "]", "xml_name", "=", "xml_desc", "[", "'name'", "]", "final_result", "=", "_create_xml_node", "(", "xml_name", ",", "xml_desc", ".", "get", "(", "'prefix'", ",", "None", ")", ",", "xml_desc", ".", "get", "(", "'ns'", ",", "None", ")", ")", "for", "key", ",", "value", "in", "serialized", ".", "items", "(", ")", ":", "ET", ".", "SubElement", "(", "final_result", ",", "key", ")", ".", "text", "=", "value", "return", "final_result", "return", "serialized" ]
36.757576
0.001606
def disable_host_event_handler(self, host):
    """Disable event handlers for a host

    Format of the line that triggers function call::

        DISABLE_HOST_EVENT_HANDLER;<host_name>

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    if host.event_handler_enabled:
        host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
        host.event_handler_enabled = False
        self.send_an_element(host.get_update_status_brok())
[ "def", "disable_host_event_handler", "(", "self", ",", "host", ")", ":", "if", "host", ".", "event_handler_enabled", ":", "host", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_EVENT_HANDLER_ENABLED\"", "]", ".", "value", "host", ".", "event_handler_enabled", "=", "False", "self", ".", "send_an_element", "(", "host", ".", "get_update_status_brok", "(", ")", ")" ]
38.357143
0.005455
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info at the end of the archive."""
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    # XXX support bz2, xz when available
    zip = zipfile.ZipFile(open(zip_filename, "wb+"), "w",
                          compression=zipfile.ZIP_DEFLATED)

    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
    deferred = []

    def writefile(path):
        zip.write(path, path)
        log.info("adding '%s'" % path)

    for dirpath, dirnames, filenames in os.walk(base_dir):
        for name in filenames:
            path = os.path.normpath(os.path.join(dirpath, name))

            if os.path.isfile(path):
                if dirpath.endswith('.dist-info'):
                    deferred.append((score.get(name, 0), path))
                else:
                    writefile(path)

    deferred.sort()
    for score, path in deferred:
        writefile(path)

    zip.close()

    return zip_filename
[ "def", "make_wheelfile_inner", "(", "base_name", ",", "base_dir", "=", "'.'", ")", ":", "zip_filename", "=", "base_name", "+", "\".whl\"", "log", ".", "info", "(", "\"creating '%s' and adding '%s' to it\"", ",", "zip_filename", ",", "base_dir", ")", "# XXX support bz2, xz when available", "zip", "=", "zipfile", ".", "ZipFile", "(", "open", "(", "zip_filename", ",", "\"wb+\"", ")", ",", "\"w\"", ",", "compression", "=", "zipfile", ".", "ZIP_DEFLATED", ")", "score", "=", "{", "'WHEEL'", ":", "1", ",", "'METADATA'", ":", "2", ",", "'RECORD'", ":", "3", "}", "deferred", "=", "[", "]", "def", "writefile", "(", "path", ")", ":", "zip", ".", "write", "(", "path", ",", "path", ")", "log", ".", "info", "(", "\"adding '%s'\"", "%", "path", ")", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "base_dir", ")", ":", "for", "name", "in", "filenames", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "if", "dirpath", ".", "endswith", "(", "'.dist-info'", ")", ":", "deferred", ".", "append", "(", "(", "score", ".", "get", "(", "name", ",", "0", ")", ",", "path", ")", ")", "else", ":", "writefile", "(", "path", ")", "deferred", ".", "sort", "(", ")", "for", "score", ",", "path", "in", "deferred", ":", "writefile", "(", "path", ")", "zip", ".", "close", "(", ")", "return", "zip_filename" ]
28.621622
0.000913
def isdir(self, path, follow_symlinks=True):
    """Determine if path identifies a directory.

    Args:
        path: Path to filesystem object.

    Returns:
        `True` if path points to a directory (following symlinks).

    Raises:
        TypeError: if path is None.
    """
    return self._is_of_type(path, S_IFDIR, follow_symlinks)
[ "def", "isdir", "(", "self", ",", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "self", ".", "_is_of_type", "(", "path", ",", "S_IFDIR", ",", "follow_symlinks", ")" ]
28.230769
0.005277
def _set_cam_share(self, v, load=False):
    """
    Setter method for cam_share, mapped from YANG variable /hardware/profile/tcam/cam_share (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cam_share is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cam_share() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=cam_share.cam_share, is_container='container', presence=False, yang_name="cam-share", rest_name="cam-share", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable cam-sharing for features'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cam_share must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=cam_share.cam_share, is_container='container', presence=False, yang_name="cam-share", rest_name="cam-share", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable cam-sharing for features'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""",
        })

    self.__cam_share = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_cam_share", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "cam_share", ".", "cam_share", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"cam-share\"", ",", "rest_name", "=", "\"cam-share\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Enable cam-sharing for features'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-hardware'", ",", "defining_module", "=", "'brocade-hardware'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"cam_share must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=cam_share.cam_share, is_container='container', presence=False, yang_name=\"cam-share\", rest_name=\"cam-share\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable cam-sharing for features'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__cam_share", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
73.227273
0.006127
def load_module(name, globals_dict=None, symb_list=None): """Loads a Scapy module to make variables, objects and functions available globally. """ _load("scapy.modules." + name, globals_dict=globals_dict, symb_list=symb_list)
[ "def", "load_module", "(", "name", ",", "globals_dict", "=", "None", ",", "symb_list", "=", "None", ")", ":", "_load", "(", "\"scapy.modules.\"", "+", "name", ",", "globals_dict", "=", "globals_dict", ",", "symb_list", "=", "symb_list", ")" ]
35.142857
0.003968
def url_mod(url: str, new_params: dict) -> str: """ Modifies existing URL by setting/overriding specified query string parameters. Note: Does not support multiple querystring parameters with identical name. :param url: Base URL/path to modify :param new_params: Querystring parameters to set/override (dict) :return: New URL/path """ from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode res = urlparse(url) query_params = dict(parse_qsl(res.query)) for k, v in new_params.items(): if v is None: query_params[str(k)] = '' else: query_params[str(k)] = str(v) parts = list(res) parts[4] = urlencode(query_params) return urlunparse(parts)
[ "def", "url_mod", "(", "url", ":", "str", ",", "new_params", ":", "dict", ")", "->", "str", ":", "from", "urllib", ".", "parse", "import", "urlparse", ",", "parse_qsl", ",", "urlunparse", ",", "urlencode", "res", "=", "urlparse", "(", "url", ")", "query_params", "=", "dict", "(", "parse_qsl", "(", "res", ".", "query", ")", ")", "for", "k", ",", "v", "in", "new_params", ".", "items", "(", ")", ":", "if", "v", "is", "None", ":", "query_params", "[", "str", "(", "k", ")", "]", "=", "''", "else", ":", "query_params", "[", "str", "(", "k", ")", "]", "=", "str", "(", "v", ")", "parts", "=", "list", "(", "res", ")", "parts", "[", "4", "]", "=", "urlencode", "(", "query_params", ")", "return", "urlunparse", "(", "parts", ")" ]
38.263158
0.002685
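A quick usage sketch for url_mod above; the URL and parameter names are illustrative, and the output key order follows dict insertion order on Python 3.7+.

# Override one querystring parameter and add another (illustrative values).
print(url_mod('/search?page=1&q=cats', {'page': 2, 'sort': None}))
# -> '/search?page=2&q=cats&sort='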
def _post_start(self):
        """Set stdout to non-blocking

        VLC does not always return a newline when reading status so in
        order to be lazy and still use the read API without caring about
        how much output there is we switch stdout to nonblocking mode
        and just read a large chunk of data.
        """
        flags = fcntl.fcntl(self._process.stdout, fcntl.F_GETFL)
        fcntl.fcntl(self._process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
[ "def", "_post_start", "(", "self", ")", ":", "flags", "=", "fcntl", ".", "fcntl", "(", "self", ".", "_process", ".", "stdout", ",", "fcntl", ".", "F_GETFL", ")", "fcntl", ".", "fcntl", "(", "self", ".", "_process", ".", "stdout", ",", "fcntl", ".", "F_SETFL", ",", "flags", "|", "os", ".", "O_NONBLOCK", ")" ]
53.833333
0.003044
def multiplyQuats(q1, q2): """q1, q2 must be [scalar, x, y, z] but those may be arrays or scalars""" return np.array([ q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3], q1[2]*q2[3] - q2[2]*q1[3] + q1[0]*q2[1] + q2[0]*q1[1], q1[3]*q2[1] - q2[3]*q1[1] + q1[0]*q2[2] + q2[0]*q1[2], q1[1]*q2[2] - q2[1]*q1[2] + q1[0]*q2[3] + q2[0]*q1[3]])
[ "def", "multiplyQuats", "(", "q1", ",", "q2", ")", ":", "return", "np", ".", "array", "(", "[", "q1", "[", "0", "]", "*", "q2", "[", "0", "]", "-", "q1", "[", "1", "]", "*", "q2", "[", "1", "]", "-", "q1", "[", "2", "]", "*", "q2", "[", "2", "]", "-", "q1", "[", "3", "]", "*", "q2", "[", "3", "]", ",", "q1", "[", "2", "]", "*", "q2", "[", "3", "]", "-", "q2", "[", "2", "]", "*", "q1", "[", "3", "]", "+", "q1", "[", "0", "]", "*", "q2", "[", "1", "]", "+", "q2", "[", "0", "]", "*", "q1", "[", "1", "]", ",", "q1", "[", "3", "]", "*", "q2", "[", "1", "]", "-", "q2", "[", "3", "]", "*", "q1", "[", "1", "]", "+", "q1", "[", "0", "]", "*", "q2", "[", "2", "]", "+", "q2", "[", "0", "]", "*", "q1", "[", "2", "]", ",", "q1", "[", "1", "]", "*", "q2", "[", "2", "]", "-", "q2", "[", "1", "]", "*", "q1", "[", "2", "]", "+", "q1", "[", "0", "]", "*", "q2", "[", "3", "]", "+", "q2", "[", "0", "]", "*", "q1", "[", "3", "]", "]", ")" ]
55.571429
0.002532
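A sanity check for multiplyQuats using its [scalar, x, y, z] convention: the unit quaternions i and j should multiply to k.

import numpy as np

i = np.array([0.0, 1.0, 0.0, 0.0])  # [scalar, x, y, z]
j = np.array([0.0, 0.0, 1.0, 0.0])
print(multiplyQuats(i, j))  # expected: [0. 0. 0. 1.], i.e. k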
def _update_position(self, change): """ Keep position in sync with x,y,z """ if change['type']!='update': return pt = gp_Pnt(self.x,self.y,self.z) if not pt.IsEqual(self.position,self.tolerance): self.position = pt
[ "def", "_update_position", "(", "self", ",", "change", ")", ":", "if", "change", "[", "'type'", "]", "!=", "'update'", ":", "return", "pt", "=", "gp_Pnt", "(", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "z", ")", "if", "not", "pt", ".", "IsEqual", "(", "self", ".", "position", ",", "self", ".", "tolerance", ")", ":", "self", ".", "position", "=", "pt" ]
38.571429
0.021739
def _get_nadir_pixel(earth_mask, sector): """Find the nadir pixel Args: earth_mask: Mask identifying earth and space pixels sector: Specifies the scanned sector Returns: nadir row, nadir column """ if sector == FULL_DISC: logger.debug('Computing nadir pixel') # The earth is not centered in the image, compute bounding box # of the earth disc first rmin, rmax, cmin, cmax = bbox(earth_mask) # The nadir pixel is approximately at the centre of the earth disk nadir_row = rmin + (rmax - rmin) // 2 nadir_col = cmin + (cmax - cmin) // 2 return nadir_row, nadir_col return None, None
[ "def", "_get_nadir_pixel", "(", "earth_mask", ",", "sector", ")", ":", "if", "sector", "==", "FULL_DISC", ":", "logger", ".", "debug", "(", "'Computing nadir pixel'", ")", "# The earth is not centered in the image, compute bounding box", "# of the earth disc first", "rmin", ",", "rmax", ",", "cmin", ",", "cmax", "=", "bbox", "(", "earth_mask", ")", "# The nadir pixel is approximately at the centre of the earth disk", "nadir_row", "=", "rmin", "+", "(", "rmax", "-", "rmin", ")", "//", "2", "nadir_col", "=", "cmin", "+", "(", "cmax", "-", "cmin", ")", "//", "2", "return", "nadir_row", ",", "nadir_col", "return", "None", ",", "None" ]
32.26087
0.002618
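A standalone sketch of the full-disc branch above; the module's bbox helper is not shown, so a minimal numpy stand-in is defined here.

import numpy as np

def bbox(mask):  # hypothetical stand-in for the module's bbox helper
    rows = np.any(mask, axis=1)
    cols = np.any(mask, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return rmin, rmax, cmin, cmax

# Synthetic off-centre "earth disc" in a 10x10 image
mask = np.zeros((10, 10), dtype=bool)
mask[2:8, 3:9] = True
rmin, rmax, cmin, cmax = bbox(mask)
print(rmin + (rmax - rmin) // 2, cmin + (cmax - cmin) // 2)  # -> 4 5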
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine( neutron_config.database.connection, poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata) try: with context.begin_transaction(): context.run_migrations() finally: connection.close()
[ "def", "run_migrations_online", "(", ")", ":", "engine", "=", "create_engine", "(", "neutron_config", ".", "database", ".", "connection", ",", "poolclass", "=", "pool", ".", "NullPool", ")", "connection", "=", "engine", ".", "connect", "(", ")", "context", ".", "configure", "(", "connection", "=", "connection", ",", "target_metadata", "=", "target_metadata", ")", "try", ":", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")", "finally", ":", "connection", ".", "close", "(", ")" ]
24.761905
0.001852
def _prt_line_detail(self, prt, line, lnum=""): """Print each field and its value.""" data = zip(self.flds, line.split('\t')) txt = ["{:2}) {:13} {}".format(i, hdr, val) for i, (hdr, val) in enumerate(data)] prt.write("{LNUM}\n{TXT}\n".format(LNUM=lnum, TXT='\n'.join(txt)))
[ "def", "_prt_line_detail", "(", "self", ",", "prt", ",", "line", ",", "lnum", "=", "\"\"", ")", ":", "data", "=", "zip", "(", "self", ".", "flds", ",", "line", ".", "split", "(", "'\\t'", ")", ")", "txt", "=", "[", "\"{:2}) {:13} {}\"", ".", "format", "(", "i", ",", "hdr", ",", "val", ")", "for", "i", ",", "(", "hdr", ",", "val", ")", "in", "enumerate", "(", "data", ")", "]", "prt", ".", "write", "(", "\"{LNUM}\\n{TXT}\\n\"", ".", "format", "(", "LNUM", "=", "lnum", ",", "TXT", "=", "'\\n'", ".", "join", "(", "txt", ")", ")", ")" ]
60.4
0.009804
def get_indices(self, axis=0, index_func=None, old_blocks=None): """This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: This axis to extract the labels. (0 - index, 1 - columns). index_func: The function to be used to extract the function. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object. """ ErrorMessage.catch_bugs_and_request_email(not callable(index_func)) func = self.preprocess_func(index_func) if axis == 0: # We grab the first column of blocks and extract the indices # Note: We use _partitions_cache in the context of this function to make # sure that none of the partitions are modified or filtered out before we # get the index information. # DO NOT CHANGE TO self.partitions under any circumstance. new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [] ) # This is important because sometimes we have resized the data. The new # sizes will not be valid if we are trying to compute the index on a # new object that has a different length. if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [] ) if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = new_indices[0] if len(new_indices) else new_indices if old_blocks is not None: for i in range(len(new_indices)): # If the length is 0 there is nothing to append. if i == 0 or len(new_indices[i]) == 0: continue # The try-except here is intended to catch issues where we are # trying to get a string index out of the internal index. try: append_val = new_indices[i] + cumulative_block_lengths[i - 1] except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
[ "def", "get_indices", "(", "self", ",", "axis", "=", "0", ",", "index_func", "=", "None", ",", "old_blocks", "=", "None", ")", ":", "ErrorMessage", ".", "catch_bugs_and_request_email", "(", "not", "callable", "(", "index_func", ")", ")", "func", "=", "self", ".", "preprocess_func", "(", "index_func", ")", "if", "axis", "==", "0", ":", "# We grab the first column of blocks and extract the indices", "# Note: We use _partitions_cache in the context of this function to make", "# sure that none of the partitions are modified or filtered out before we", "# get the index information.", "# DO NOT CHANGE TO self.partitions under any circumstance.", "new_indices", "=", "(", "[", "idx", ".", "apply", "(", "func", ")", ".", "get", "(", ")", "for", "idx", "in", "self", ".", "_partitions_cache", ".", "T", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ".", "T", ")", "else", "[", "]", ")", "# This is important because sometimes we have resized the data. The new", "# sizes will not be valid if we are trying to compute the index on a", "# new object that has a different length.", "if", "old_blocks", "is", "not", "None", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "old_blocks", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "else", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "self", ".", "block_lengths", ")", ".", "cumsum", "(", ")", "else", ":", "new_indices", "=", "(", "[", "idx", ".", "apply", "(", "func", ")", ".", "get", "(", ")", "for", "idx", "in", "self", ".", "_partitions_cache", "[", "0", "]", "]", "if", "len", "(", "self", ".", "_partitions_cache", ")", "else", "[", "]", ")", "if", "old_blocks", "is", "not", "None", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "old_blocks", ".", "block_widths", ")", ".", "cumsum", "(", ")", "else", ":", "cumulative_block_lengths", "=", "np", ".", "array", "(", "self", ".", "block_widths", ")", ".", "cumsum", "(", ")", "full_indices", "=", "new_indices", "[", "0", "]", "if", "len", "(", "new_indices", ")", "else", "new_indices", "if", "old_blocks", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "new_indices", ")", ")", ":", "# If the length is 0 there is nothing to append.", "if", "i", "==", "0", "or", "len", "(", "new_indices", "[", "i", "]", ")", "==", "0", ":", "continue", "# The try-except here is intended to catch issues where we are", "# trying to get a string index out of the internal index.", "try", ":", "append_val", "=", "new_indices", "[", "i", "]", "+", "cumulative_block_lengths", "[", "i", "-", "1", "]", "except", "TypeError", ":", "append_val", "=", "new_indices", "[", "i", "]", "full_indices", "=", "full_indices", ".", "append", "(", "append_val", ")", "else", ":", "full_indices", "=", "full_indices", ".", "append", "(", "new_indices", "[", "1", ":", "]", ")", "return", "full_indices" ]
47.203125
0.003243
def _handle_invite(self, room_id: _RoomID, state: dict): """ Join rooms invited by whitelisted partners """ if self._stop_event.ready(): return self.log.debug('Got invite', room_id=room_id) invite_events = [ event for event in state['events'] if event['type'] == 'm.room.member' and event['content'].get('membership') == 'invite' and event['state_key'] == self._user_id ] if not invite_events: self.log.debug('Invite: no invite event found', room_id=room_id) return # there should always be one and only one invite membership event for us invite_event = invite_events[0] sender = invite_event['sender'] sender_join_events = [ event for event in state['events'] if event['type'] == 'm.room.member' and event['content'].get('membership') == 'join' and event['state_key'] == sender ] if not sender_join_events: self.log.debug('Invite: no sender join event', room_id=room_id) return # there should always be one and only one join membership event for the sender sender_join_event = sender_join_events[0] user = self._get_user(sender) user.displayname = sender_join_event['content'].get('displayname') or user.displayname peer_address = validate_userid_signature(user) if not peer_address: self.log.debug( 'Got invited to a room by invalid signed user - ignoring', room_id=room_id, user=user, ) return if not self._address_mgr.is_address_known(peer_address): self.log.debug( 'Got invited by a non-whitelisted user - ignoring', room_id=room_id, user=user, ) return join_rules_events = [ event for event in state['events'] if event['type'] == 'm.room.join_rules' ] # room privacy as seen from the event private_room: bool = False if join_rules_events: join_rules_event = join_rules_events[0] private_room = join_rules_event['content'].get('join_rule') == 'invite' # we join room and _set_room_id_for_address despite room privacy and requirements, # _get_room_ids_for_address will take care of returning only matching rooms and # _leave_unused_rooms will clear it in the future, if and when needed room: Room = None last_ex: Optional[Exception] = None retry_interval = 0.1 for _ in range(JOIN_RETRIES): try: room = self._client.join_room(room_id) except MatrixRequestError as e: last_ex = e if self._stop_event.wait(retry_interval): break retry_interval = retry_interval * 2 else: break else: assert last_ex is not None raise last_ex # re-raise if couldn't succeed in retries if not room.listeners: room.add_listener(self._handle_message, 'm.room.message') # room state may not populated yet, so we populate 'invite_only' from event room.invite_only = private_room self._set_room_id_for_address(address=peer_address, room_id=room_id) self.log.debug( 'Joined from invite', room_id=room_id, aliases=room.aliases, peer=to_checksum_address(peer_address), )
[ "def", "_handle_invite", "(", "self", ",", "room_id", ":", "_RoomID", ",", "state", ":", "dict", ")", ":", "if", "self", ".", "_stop_event", ".", "ready", "(", ")", ":", "return", "self", ".", "log", ".", "debug", "(", "'Got invite'", ",", "room_id", "=", "room_id", ")", "invite_events", "=", "[", "event", "for", "event", "in", "state", "[", "'events'", "]", "if", "event", "[", "'type'", "]", "==", "'m.room.member'", "and", "event", "[", "'content'", "]", ".", "get", "(", "'membership'", ")", "==", "'invite'", "and", "event", "[", "'state_key'", "]", "==", "self", ".", "_user_id", "]", "if", "not", "invite_events", ":", "self", ".", "log", ".", "debug", "(", "'Invite: no invite event found'", ",", "room_id", "=", "room_id", ")", "return", "# there should always be one and only one invite membership event for us", "invite_event", "=", "invite_events", "[", "0", "]", "sender", "=", "invite_event", "[", "'sender'", "]", "sender_join_events", "=", "[", "event", "for", "event", "in", "state", "[", "'events'", "]", "if", "event", "[", "'type'", "]", "==", "'m.room.member'", "and", "event", "[", "'content'", "]", ".", "get", "(", "'membership'", ")", "==", "'join'", "and", "event", "[", "'state_key'", "]", "==", "sender", "]", "if", "not", "sender_join_events", ":", "self", ".", "log", ".", "debug", "(", "'Invite: no sender join event'", ",", "room_id", "=", "room_id", ")", "return", "# there should always be one and only one join membership event for the sender", "sender_join_event", "=", "sender_join_events", "[", "0", "]", "user", "=", "self", ".", "_get_user", "(", "sender", ")", "user", ".", "displayname", "=", "sender_join_event", "[", "'content'", "]", ".", "get", "(", "'displayname'", ")", "or", "user", ".", "displayname", "peer_address", "=", "validate_userid_signature", "(", "user", ")", "if", "not", "peer_address", ":", "self", ".", "log", ".", "debug", "(", "'Got invited to a room by invalid signed user - ignoring'", ",", "room_id", "=", "room_id", ",", "user", "=", "user", ",", ")", "return", "if", "not", "self", ".", "_address_mgr", ".", "is_address_known", "(", "peer_address", ")", ":", "self", ".", "log", ".", "debug", "(", "'Got invited by a non-whitelisted user - ignoring'", ",", "room_id", "=", "room_id", ",", "user", "=", "user", ",", ")", "return", "join_rules_events", "=", "[", "event", "for", "event", "in", "state", "[", "'events'", "]", "if", "event", "[", "'type'", "]", "==", "'m.room.join_rules'", "]", "# room privacy as seen from the event", "private_room", ":", "bool", "=", "False", "if", "join_rules_events", ":", "join_rules_event", "=", "join_rules_events", "[", "0", "]", "private_room", "=", "join_rules_event", "[", "'content'", "]", ".", "get", "(", "'join_rule'", ")", "==", "'invite'", "# we join room and _set_room_id_for_address despite room privacy and requirements,", "# _get_room_ids_for_address will take care of returning only matching rooms and", "# _leave_unused_rooms will clear it in the future, if and when needed", "room", ":", "Room", "=", "None", "last_ex", ":", "Optional", "[", "Exception", "]", "=", "None", "retry_interval", "=", "0.1", "for", "_", "in", "range", "(", "JOIN_RETRIES", ")", ":", "try", ":", "room", "=", "self", ".", "_client", ".", "join_room", "(", "room_id", ")", "except", "MatrixRequestError", "as", "e", ":", "last_ex", "=", "e", "if", "self", ".", "_stop_event", ".", "wait", "(", "retry_interval", ")", ":", "break", "retry_interval", "=", "retry_interval", "*", "2", "else", ":", "break", "else", ":", "assert", "last_ex", "is", 
"not", "None", "raise", "last_ex", "# re-raise if couldn't succeed in retries", "if", "not", "room", ".", "listeners", ":", "room", ".", "add_listener", "(", "self", ".", "_handle_message", ",", "'m.room.message'", ")", "# room state may not populated yet, so we populate 'invite_only' from event", "room", ".", "invite_only", "=", "private_room", "self", ".", "_set_room_id_for_address", "(", "address", "=", "peer_address", ",", "room_id", "=", "room_id", ")", "self", ".", "log", ".", "debug", "(", "'Joined from invite'", ",", "room_id", "=", "room_id", ",", "aliases", "=", "room", ".", "aliases", ",", "peer", "=", "to_checksum_address", "(", "peer_address", ")", ",", ")" ]
37.28125
0.00245
def invert(self): """Inverts the layer. """ alpha = self.img.split()[3] self.img = self.img.convert("RGB") self.img = ImageOps.invert(self.img) self.img = self.img.convert("RGBA") self.img.putalpha(alpha)
[ "def", "invert", "(", "self", ")", ":", "alpha", "=", "self", ".", "img", ".", "split", "(", ")", "[", "3", "]", "self", ".", "img", "=", "self", ".", "img", ".", "convert", "(", "\"RGB\"", ")", "self", ".", "img", "=", "ImageOps", ".", "invert", "(", "self", ".", "img", ")", "self", ".", "img", "=", "self", ".", "img", ".", "convert", "(", "\"RGBA\"", ")", "self", ".", "img", ".", "putalpha", "(", "alpha", ")" ]
24.090909
0.018182
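The same alpha-preserving inversion as invert above, as a self-contained PIL snippet (the image contents are made up).

from PIL import Image, ImageOps

img = Image.new("RGBA", (2, 2), (255, 0, 0, 128))  # red, half-transparent
alpha = img.split()[3]                     # keep the alpha band aside
rgb = ImageOps.invert(img.convert("RGB"))  # invert colour channels only
out = rgb.convert("RGBA")
out.putalpha(alpha)                        # restore the original transparency
print(out.getpixel((0, 0)))                # -> (0, 255, 255, 128)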
def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.full_url) # Proxies use full URLs. if p.scheme in self.proxies: return self.full_url path = p.path if not path: path = '/' url.append(path) query = p.query if query: url.append('?') url.append(query) return ''.join(url)
[ "def", "path_url", "(", "self", ")", ":", "url", "=", "[", "]", "p", "=", "urlsplit", "(", "self", ".", "full_url", ")", "# Proxies use full URLs.", "if", "p", ".", "scheme", "in", "self", ".", "proxies", ":", "return", "self", ".", "full_url", "path", "=", "p", ".", "path", "if", "not", "path", ":", "path", "=", "'/'", "url", ".", "append", "(", "path", ")", "query", "=", "p", ".", "query", "if", "query", ":", "url", ".", "append", "(", "'?'", ")", "url", ".", "append", "(", "query", ")", "return", "''", ".", "join", "(", "url", ")" ]
19.090909
0.004535
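The heart of path_url is urlsplit; a standalone illustration with a made-up URL:

from urllib.parse import urlsplit

p = urlsplit('http://example.com/a/b?x=1')
print((p.path or '/') + ('?' + p.query if p.query else ''))  # -> '/a/b?x=1'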
def thread_exists(self, thread_id): """Check if a thread exists or has 404'd. Args: thread_id (int): Thread ID Returns: bool: Whether the given thread exists on this board. """ return self._requests_session.head( self._url.thread_api_url( thread_id=thread_id ) ).ok
[ "def", "thread_exists", "(", "self", ",", "thread_id", ")", ":", "return", "self", ".", "_requests_session", ".", "head", "(", "self", ".", "_url", ".", "thread_api_url", "(", "thread_id", "=", "thread_id", ")", ")", ".", "ok" ]
26.428571
0.005222
def write_numpy_to_dense_tensor(file, array, labels=None): """Writes a numpy array to a dense tensor""" # Validate shape of array and labels, resolve array and label types if not len(array.shape) == 2: raise ValueError("Array must be a Matrix") if labels is not None: if not len(labels.shape) == 1: raise ValueError("Labels must be a Vector") if labels.shape[0] not in array.shape: raise ValueError("Label shape {} not compatible with array shape {}".format( labels.shape, array.shape)) resolved_label_type = _resolve_type(labels.dtype) resolved_type = _resolve_type(array.dtype) # Write each vector in array into a Record in the file object record = Record() for index, vector in enumerate(array): record.Clear() _write_feature_tensor(resolved_type, record, vector) if labels is not None: _write_label_tensor(resolved_label_type, record, labels[index]) _write_recordio(file, record.SerializeToString())
[ "def", "write_numpy_to_dense_tensor", "(", "file", ",", "array", ",", "labels", "=", "None", ")", ":", "# Validate shape of array and labels, resolve array and label types", "if", "not", "len", "(", "array", ".", "shape", ")", "==", "2", ":", "raise", "ValueError", "(", "\"Array must be a Matrix\"", ")", "if", "labels", "is", "not", "None", ":", "if", "not", "len", "(", "labels", ".", "shape", ")", "==", "1", ":", "raise", "ValueError", "(", "\"Labels must be a Vector\"", ")", "if", "labels", ".", "shape", "[", "0", "]", "not", "in", "array", ".", "shape", ":", "raise", "ValueError", "(", "\"Label shape {} not compatible with array shape {}\"", ".", "format", "(", "labels", ".", "shape", ",", "array", ".", "shape", ")", ")", "resolved_label_type", "=", "_resolve_type", "(", "labels", ".", "dtype", ")", "resolved_type", "=", "_resolve_type", "(", "array", ".", "dtype", ")", "# Write each vector in array into a Record in the file object", "record", "=", "Record", "(", ")", "for", "index", ",", "vector", "in", "enumerate", "(", "array", ")", ":", "record", ".", "Clear", "(", ")", "_write_feature_tensor", "(", "resolved_type", ",", "record", ",", "vector", ")", "if", "labels", "is", "not", "None", ":", "_write_label_tensor", "(", "resolved_label_type", ",", "record", ",", "labels", "[", "index", "]", ")", "_write_recordio", "(", "file", ",", "record", ".", "SerializeToString", "(", ")", ")" ]
45.391304
0.001876
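A hypothetical call to write_numpy_to_dense_tensor, assuming the surrounding module supplies Record, _resolve_type and the _write_* helpers referenced above:

import io
import numpy as np

buf = io.BytesIO()
features = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
labels = np.array([0.0, 1.0], dtype=np.float32)
write_numpy_to_dense_tensor(buf, features, labels)  # one Record per row
print(len(buf.getvalue()) > 0)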
def to_geojson(products): """Return the products from a query response as a GeoJSON with the values in their appropriate Python types. """ feature_list = [] for i, (product_id, props) in enumerate(products.items()): props = props.copy() props['id'] = product_id poly = geomet.wkt.loads(props['footprint']) del props['footprint'] del props['gmlfootprint'] # Fix "'datetime' is not JSON serializable" for k, v in props.items(): if isinstance(v, (date, datetime)): props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ') feature_list.append( geojson.Feature(geometry=poly, id=i, properties=props) ) return geojson.FeatureCollection(feature_list)
[ "def", "to_geojson", "(", "products", ")", ":", "feature_list", "=", "[", "]", "for", "i", ",", "(", "product_id", ",", "props", ")", "in", "enumerate", "(", "products", ".", "items", "(", ")", ")", ":", "props", "=", "props", ".", "copy", "(", ")", "props", "[", "'id'", "]", "=", "product_id", "poly", "=", "geomet", ".", "wkt", ".", "loads", "(", "props", "[", "'footprint'", "]", ")", "del", "props", "[", "'footprint'", "]", "del", "props", "[", "'gmlfootprint'", "]", "# Fix \"'datetime' is not JSON serializable\"", "for", "k", ",", "v", "in", "props", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "(", "date", ",", "datetime", ")", ")", ":", "props", "[", "k", "]", "=", "v", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", "feature_list", ".", "append", "(", "geojson", ".", "Feature", "(", "geometry", "=", "poly", ",", "id", "=", "i", ",", "properties", "=", "props", ")", ")", "return", "geojson", ".", "FeatureCollection", "(", "feature_list", ")" ]
43.315789
0.003567
def get_loaded_project(self, project_id):
        """
        Returns a project or raises a 404 error.

        If the project has not finished loading, waits for it.
        """
        project = self.get_project(project_id)
        yield from project.wait_loaded()
        return project
[ "def", "get_loaded_project", "(", "self", ",", "project_id", ")", ":", "project", "=", "self", ".", "get_project", "(", "project_id", ")", "yield", "from", "project", ".", "wait_loaded", "(", ")", "return", "project" ]
30.222222
0.007143
def check_modpath_has_init(path, mod_path): """check there are some __init__.py all along the way""" modpath = [] for part in mod_path: modpath.append(part) path = os.path.join(path, part) if not _has_init(path): old_namespace = util.is_namespace(".".join(modpath)) if not old_namespace: return False return True
[ "def", "check_modpath_has_init", "(", "path", ",", "mod_path", ")", ":", "modpath", "=", "[", "]", "for", "part", "in", "mod_path", ":", "modpath", ".", "append", "(", "part", ")", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "part", ")", "if", "not", "_has_init", "(", "path", ")", ":", "old_namespace", "=", "util", ".", "is_namespace", "(", "\".\"", ".", "join", "(", "modpath", ")", ")", "if", "not", "old_namespace", ":", "return", "False", "return", "True" ]
34.727273
0.002551
def info_player(self, pid):
        '''
        Get info about a football player using an ID
        @return: [name, position, team, points, price]
        '''
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", 'Referer': 'http://'+self.domain+'/team_news.phtml', "User-Agent": user_agent}
        req = self.session.get('http://'+self.domain+'/tradableInfo.phtml?tid='+pid, headers=headers).content
        soup = BeautifulSoup(req)
        info = []
        info.append(soup.title.text.strip())
        for i in soup.find('table', cellspacing=1).find_all('tr'):
            info.append(i.find_all('td')[1].text.replace(".", ""))
        return info
[ "def", "info_player", "(", "self", ",", "pid", ")", ":", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"Accept\"", ":", "\"text/plain\"", ",", "'Referer'", ":", "'http://'", "+", "self", ".", "domain", "+", "'/team_news.phtml'", ",", "\"User-Agent\"", ":", "user_agent", "}", "req", "=", "self", ".", "session", ".", "get", "(", "'http://'", "+", "self", ".", "domain", "+", "'/tradableInfo.phtml?tid='", "+", "pid", ",", "headers", "=", "headers", ")", ".", "content", "soup", "=", "BeautifulSoup", "(", "req", ")", "info", "=", "[", "]", "info", ".", "append", "(", "soup", ".", "title", ".", "text", ".", "strip", "(", ")", ")", "for", "i", "in", "soup", ".", "find", "(", "'table'", ",", "cellspacing", "=", "1", ")", ".", "find_all", "(", "'tr'", ")", ":", "info", ".", "append", "(", "i", ".", "find_all", "(", "'td'", ")", "[", "1", "]", ".", "text", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ")", "return", "info" ]
51.076923
0.016272
def language(self, lang): """ Set the language to use; attempts to change the API URL """ lang = lang.lower() if self._lang == lang: return url = self._api_url tmp = url.replace("/{0}.".format(self._lang), "/{0}.".format(lang)) self._api_url = tmp self._lang = lang self.clear_memoized()
[ "def", "language", "(", "self", ",", "lang", ")", ":", "lang", "=", "lang", ".", "lower", "(", ")", "if", "self", ".", "_lang", "==", "lang", ":", "return", "url", "=", "self", ".", "_api_url", "tmp", "=", "url", ".", "replace", "(", "\"/{0}.\"", ".", "format", "(", "self", ".", "_lang", ")", ",", "\"/{0}.\"", ".", "format", "(", "lang", ")", ")", "self", ".", "_api_url", "=", "tmp", "self", ".", "_lang", "=", "lang", "self", ".", "clear_memoized", "(", ")" ]
29.5
0.005479
def convert_svg_transform(self, transform):
        """
        Converts a string representing a SVG transform into AffineTransform fields.

        See https://www.w3.org/TR/SVG/coords.html#TransformAttribute for the
        specification of the transform strings. skewX and skewY are not supported.

        Raises:
            ValueError: If transform is not a valid and supported SVG transform.
        """
        tr, args = transform[:-1].split('(')
        # list() so the values can be indexed below (map returns an iterator on Python 3)
        a = list(map(float, args.split(' ')))

        # Handle the various string transformations
        if tr == 'matrix':
            pass
        elif tr == 'translate':
            a = [1.0, 0.0, 0.0, 1.0, a[0], a[1] if len(a) > 1 else 0.0]
        elif tr == 'scale':
            a = [a[0], 0.0, 0.0, a[-1], 0.0, 0.0]
        elif tr == 'rotate':
            x = a[1] if len(a) > 1 else 0.0
            y = a[2] if len(a) > 1 else 0.0
            rad = radians(a[0])
            s = sin(rad)
            c = cos(rad)
            a = [
                c,
                s,
                -s,
                c,
                x * (1 - c) + y * s,
                -x * s + y * (1 - c),
            ]
        else:
            raise ValueError('Unknown transformation "%s"' % transform)

        self._svg_transform = transform
        self._a00 = a[0]
        self._a10 = a[1]
        self._a01 = a[2]
        self._a11 = a[3]
        self._a02 = a[4]
        self._a12 = a[5]
[ "def", "convert_svg_transform", "(", "self", ",", "transform", ")", ":", "tr", ",", "args", "=", "transform", "[", ":", "-", "1", "]", ".", "split", "(", "'('", ")", "a", "=", "map", "(", "float", ",", "args", ".", "split", "(", "' '", ")", ")", "# Handle various string tranformations", "if", "tr", "==", "'matrix'", ":", "pass", "elif", "tr", "==", "'translate'", ":", "a", "=", "[", "1.0", ",", "0.0", ",", "0.0", ",", "1.0", ",", "a", "[", "0", "]", ",", "a", "[", "1", "]", "if", "len", "(", "a", ")", ">", "1", "else", "0.0", "]", "elif", "tr", "==", "'scale'", ":", "a", "=", "[", "a", "[", "0", "]", ",", "0.0", ",", "0.0", ",", "a", "[", "-", "1", "]", ",", "0.0", ",", "0.0", "]", "elif", "tr", "==", "'rotate'", ":", "x", "=", "a", "[", "1", "]", "if", "len", "(", "a", ")", ">", "1", "else", "0.0", "y", "=", "a", "[", "2", "]", "if", "len", "(", "a", ")", ">", "1", "else", "0.0", "rad", "=", "radians", "(", "a", "[", "0", "]", ")", "s", "=", "sin", "(", "rad", ")", "c", "=", "cos", "(", "rad", ")", "a", "=", "[", "c", ",", "s", ",", "-", "s", ",", "c", ",", "x", "*", "(", "1", "-", "c", ")", "+", "y", "*", "s", ",", "-", "x", "*", "s", "+", "y", "*", "(", "1", "-", "c", ")", ",", "]", "else", ":", "raise", "ValueError", "(", "'Unknown transformation \"%s\"'", "%", "transform", ")", "self", ".", "_svg_transform", "=", "transform", "self", ".", "_a00", "=", "a", "[", "0", "]", "self", ".", "_a10", "=", "a", "[", "1", "]", "self", ".", "_a01", "=", "a", "[", "2", "]", "self", ".", "_a11", "=", "a", "[", "3", "]", "self", ".", "_a02", "=", "a", "[", "4", "]", "self", ".", "_a12", "=", "a", "[", "5", "]" ]
30.673913
0.001374
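A standalone check of the rotate branch's matrix above: a 90-degree rotation about the origin should map (1, 0) to (0, 1). The list layout [a00, a10, a01, a11, a02, a12] mirrors the fields being set.

from math import radians, sin, cos

angle, x, y = 90.0, 0.0, 0.0
rad = radians(angle)
s, c = sin(rad), cos(rad)
a = [c, s, -s, c, x * (1 - c) + y * s, -x * s + y * (1 - c)]
px, py = 1.0, 0.0
print(round(a[0] * px + a[2] * py + a[4], 6),
      round(a[1] * px + a[3] * py + a[5], 6))  # -> 0.0 1.0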
def _get_specifications(specifications): """ Computes the list of strings corresponding to the given specifications :param specifications: A string, a class or a list of specifications :return: A list of strings :raise ValueError: Invalid specification found """ if not specifications or specifications is object: raise ValueError("No specifications given") elif inspect.isclass(specifications): if Provides.USE_MODULE_QUALNAME: if sys.version_info < (3, 3, 0): raise ValueError( "Qualified name capability requires Python 3.3+" ) # Get the name of the class if not specifications.__module__: return [specifications.__qualname__] return [ "{0}.{1}".format( specifications.__module__, specifications.__qualname__ ) ] else: # Legacy behavior return [specifications.__name__] elif is_string(specifications): # Specification name specifications = specifications.strip() if not specifications: raise ValueError("Empty specification given") return [specifications] elif isinstance(specifications, (list, tuple)): # List given: normalize its content results = [] for specification in specifications: results.extend(_get_specifications(specification)) return results else: raise ValueError( "Unhandled specifications type : {0}".format( type(specifications).__name__ ) )
[ "def", "_get_specifications", "(", "specifications", ")", ":", "if", "not", "specifications", "or", "specifications", "is", "object", ":", "raise", "ValueError", "(", "\"No specifications given\"", ")", "elif", "inspect", ".", "isclass", "(", "specifications", ")", ":", "if", "Provides", ".", "USE_MODULE_QUALNAME", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "3", ",", "0", ")", ":", "raise", "ValueError", "(", "\"Qualified name capability requires Python 3.3+\"", ")", "# Get the name of the class", "if", "not", "specifications", ".", "__module__", ":", "return", "[", "specifications", ".", "__qualname__", "]", "return", "[", "\"{0}.{1}\"", ".", "format", "(", "specifications", ".", "__module__", ",", "specifications", ".", "__qualname__", ")", "]", "else", ":", "# Legacy behavior", "return", "[", "specifications", ".", "__name__", "]", "elif", "is_string", "(", "specifications", ")", ":", "# Specification name", "specifications", "=", "specifications", ".", "strip", "(", ")", "if", "not", "specifications", ":", "raise", "ValueError", "(", "\"Empty specification given\"", ")", "return", "[", "specifications", "]", "elif", "isinstance", "(", "specifications", ",", "(", "list", ",", "tuple", ")", ")", ":", "# List given: normalize its content", "results", "=", "[", "]", "for", "specification", "in", "specifications", ":", "results", ".", "extend", "(", "_get_specifications", "(", "specification", ")", ")", "return", "results", "else", ":", "raise", "ValueError", "(", "\"Unhandled specifications type : {0}\"", ".", "format", "(", "type", "(", "specifications", ")", ".", "__name__", ")", ")" ]
34.787234
0.000595
def poly(self, return_coeffs=False):
        """Returns the quadratic as a numpy.poly1d object, or as the tuple
        of its coefficients if return_coeffs is True."""
        p = self.bpoints()
        coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
        if return_coeffs:
            return coeffs
        else:
            return np.poly1d(coeffs)
[ "def", "poly", "(", "self", ",", "return_coeffs", "=", "False", ")", ":", "p", "=", "self", ".", "bpoints", "(", ")", "coeffs", "=", "(", "p", "[", "0", "]", "-", "2", "*", "p", "[", "1", "]", "+", "p", "[", "2", "]", ",", "2", "*", "(", "p", "[", "1", "]", "-", "p", "[", "0", "]", ")", ",", "p", "[", "0", "]", ")", "if", "return_coeffs", ":", "return", "coeffs", "else", ":", "return", "np", ".", "poly1d", "(", "coeffs", ")" ]
35.25
0.00692
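The identity behind poly: a quadratic Bezier B(t) = (1-t)^2*p0 + 2*(1-t)*t*p1 + t^2*p2 expands to (p0 - 2*p1 + p2)*t^2 + 2*(p1 - p0)*t + p0, which is exactly the coefficient tuple above. A quick numeric check with made-up control points:

import numpy as np

p0, p1, p2 = 0.0, 1.0, 4.0  # made-up control points
q = np.poly1d((p0 - 2 * p1 + p2, 2 * (p1 - p0), p0))
t = 0.25
bezier = (1 - t) ** 2 * p0 + 2 * (1 - t) * t * p1 + t ** 2 * p2
print(np.isclose(q(t), bezier))  # -> True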
def get_shebang(tokens): """ Returns the shebang string in *tokens* if it exists. None if not. """ # This (short) loop preserves shebangs and encoding strings: for tok in tokens[0:4]: # Will always be in the first four tokens line = tok[4] # Save the first comment line if it starts with a shebang # (e.g. '#!/usr/bin/env python') if shebang.match(line): # Must be first line return line
[ "def", "get_shebang", "(", "tokens", ")", ":", "# This (short) loop preserves shebangs and encoding strings:", "for", "tok", "in", "tokens", "[", "0", ":", "4", "]", ":", "# Will always be in the first four tokens", "line", "=", "tok", "[", "4", "]", "# Save the first comment line if it starts with a shebang", "# (e.g. '#!/usr/bin/env python')", "if", "shebang", ".", "match", "(", "line", ")", ":", "# Must be first line", "return", "line" ]
40.181818
0.006637
def set_color(self, color, alpha=1):
        """Set the active color.

        You can use hex colors like "#aaa", or you can use normalized RGB
        triplets (where every value is in range 0..1), or you can do the
        same thing in range 0..65535.
        Also consider skipping this operation and specifying the color on
        stroke and fill.
        """
        color = self.colors.parse(color)  # parse whatever we have there into a normalized triplet
        if len(color) == 4 and alpha is None:
            alpha = color[3]
        r, g, b = color[:3]
        self._add_instruction("set_color", r, g, b, alpha)
[ "def", "set_color", "(", "self", ",", "color", ",", "alpha", "=", "1", ")", ":", "color", "=", "self", ".", "colors", ".", "parse", "(", "color", ")", "# parse whatever we have there into a normalized triplet", "if", "len", "(", "color", ")", "==", "4", "and", "alpha", "is", "None", ":", "alpha", "=", "color", "[", "3", "]", "r", ",", "g", ",", "b", "=", "color", "[", ":", "3", "]", "self", ".", "_add_instruction", "(", "\"set_color\"", ",", "r", ",", "g", ",", "b", ",", "alpha", ")" ]
50.166667
0.011419
def data_ptr(self): """ Returns the address of the payload of the chunk. """ raise NotImplementedError("%s not implemented for %s" % (self.data_ptr.__func__.__name__, self.__class__.__name__))
[ "def", "data_ptr", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "\"%s not implemented for %s\"", "%", "(", "self", ".", "data_ptr", ".", "__func__", ".", "__name__", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
47.333333
0.013841
def read_params(filename, asheader=False, verbosity=0) -> Dict[str, Union[int, float, bool, str, None]]: """Read parameter dictionary from text file. Assumes that parameters are specified in the format: par1 = value1 par2 = value2 Comments that start with '#' are allowed. Parameters ---------- filename : str, Path Filename of data file. asheader : bool, optional Read the dictionary from the header (comment section) of a file. Returns ------- Dictionary that stores parameters. """ filename = str(filename) # allow passing pathlib.Path objects from collections import OrderedDict params = OrderedDict([]) for line in open(filename): if '=' in line: if not asheader or line.startswith('#'): line = line[1:] if line.startswith('#') else line key, val = line.split('=') key = key.strip() val = val.strip() params[key] = convert_string(val) return params
[ "def", "read_params", "(", "filename", ",", "asheader", "=", "False", ",", "verbosity", "=", "0", ")", "->", "Dict", "[", "str", ",", "Union", "[", "int", ",", "float", ",", "bool", ",", "str", ",", "None", "]", "]", ":", "filename", "=", "str", "(", "filename", ")", "# allow passing pathlib.Path objects", "from", "collections", "import", "OrderedDict", "params", "=", "OrderedDict", "(", "[", "]", ")", "for", "line", "in", "open", "(", "filename", ")", ":", "if", "'='", "in", "line", ":", "if", "not", "asheader", "or", "line", ".", "startswith", "(", "'#'", ")", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", ".", "startswith", "(", "'#'", ")", "else", "line", "key", ",", "val", "=", "line", ".", "split", "(", "'='", ")", "key", "=", "key", ".", "strip", "(", ")", "val", "=", "val", ".", "strip", "(", ")", "params", "[", "key", "]", "=", "convert_string", "(", "val", ")", "return", "params" ]
31.96875
0.001898
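A usage sketch for read_params, assuming the convert_string helper referenced above coerces numeric and boolean strings:

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('# run settings\nn_pcs = 50\nlog = True\n')
params = read_params(f.name)
print(params)  # e.g. OrderedDict([('n_pcs', 50), ('log', True)])
os.remove(f.name)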
def _get_biodata(base_file, args): """Retrieve biodata genome targets customized by install parameters. """ with open(base_file) as in_handle: config = yaml.safe_load(in_handle) config["install_liftover"] = False config["genome_indexes"] = args.aligners ann_groups = config.pop("annotation_groups", {}) config["genomes"] = [_setup_genome_annotations(g, args, ann_groups) for g in config["genomes"] if g["dbkey"] in args.genomes] return config
[ "def", "_get_biodata", "(", "base_file", ",", "args", ")", ":", "with", "open", "(", "base_file", ")", "as", "in_handle", ":", "config", "=", "yaml", ".", "safe_load", "(", "in_handle", ")", "config", "[", "\"install_liftover\"", "]", "=", "False", "config", "[", "\"genome_indexes\"", "]", "=", "args", ".", "aligners", "ann_groups", "=", "config", ".", "pop", "(", "\"annotation_groups\"", ",", "{", "}", ")", "config", "[", "\"genomes\"", "]", "=", "[", "_setup_genome_annotations", "(", "g", ",", "args", ",", "ann_groups", ")", "for", "g", "in", "config", "[", "\"genomes\"", "]", "if", "g", "[", "\"dbkey\"", "]", "in", "args", ".", "genomes", "]", "return", "config" ]
45.181818
0.003945
def cut_selection(self): """ Return a (:class:`.Document`, :class:`.ClipboardData`) tuple, where the document represents the new document when the selection is cut, and the clipboard data, represents whatever has to be put on the clipboard. """ if self.selection: cut_parts = [] remaining_parts = [] new_cursor_position = self.cursor_position last_to = 0 for from_, to in self.selection_ranges(): if last_to == 0: new_cursor_position = from_ remaining_parts.append(self.text[last_to:from_]) cut_parts.append(self.text[from_:to + 1]) last_to = to + 1 remaining_parts.append(self.text[last_to:]) cut_text = '\n'.join(cut_parts) remaining_text = ''.join(remaining_parts) # In case of a LINES selection, don't include the trailing newline. if self.selection.type == SelectionType.LINES and cut_text.endswith('\n'): cut_text = cut_text[:-1] return (Document(text=remaining_text, cursor_position=new_cursor_position), ClipboardData(cut_text, self.selection.type)) else: return self, ClipboardData('')
[ "def", "cut_selection", "(", "self", ")", ":", "if", "self", ".", "selection", ":", "cut_parts", "=", "[", "]", "remaining_parts", "=", "[", "]", "new_cursor_position", "=", "self", ".", "cursor_position", "last_to", "=", "0", "for", "from_", ",", "to", "in", "self", ".", "selection_ranges", "(", ")", ":", "if", "last_to", "==", "0", ":", "new_cursor_position", "=", "from_", "remaining_parts", ".", "append", "(", "self", ".", "text", "[", "last_to", ":", "from_", "]", ")", "cut_parts", ".", "append", "(", "self", ".", "text", "[", "from_", ":", "to", "+", "1", "]", ")", "last_to", "=", "to", "+", "1", "remaining_parts", ".", "append", "(", "self", ".", "text", "[", "last_to", ":", "]", ")", "cut_text", "=", "'\\n'", ".", "join", "(", "cut_parts", ")", "remaining_text", "=", "''", ".", "join", "(", "remaining_parts", ")", "# In case of a LINES selection, don't include the trailing newline.", "if", "self", ".", "selection", ".", "type", "==", "SelectionType", ".", "LINES", "and", "cut_text", ".", "endswith", "(", "'\\n'", ")", ":", "cut_text", "=", "cut_text", "[", ":", "-", "1", "]", "return", "(", "Document", "(", "text", "=", "remaining_text", ",", "cursor_position", "=", "new_cursor_position", ")", ",", "ClipboardData", "(", "cut_text", ",", "self", ".", "selection", ".", "type", ")", ")", "else", ":", "return", "self", ",", "ClipboardData", "(", "''", ")" ]
39.030303
0.00303
def correct(text: str, matches: [Match]) -> str: """Automatically apply suggestions to the text.""" ltext = list(text) matches = [match for match in matches if match.replacements] errors = [ltext[match.offset:match.offset + match.errorlength] for match in matches] correct_offset = 0 for n, match in enumerate(matches): frompos, topos = (correct_offset + match.offset, correct_offset + match.offset + match.errorlength) if ltext[frompos:topos] != errors[n]: continue repl = match.replacements[0] ltext[frompos:topos] = list(repl) correct_offset += len(repl) - len(errors[n]) return ''.join(ltext)
[ "def", "correct", "(", "text", ":", "str", ",", "matches", ":", "[", "Match", "]", ")", "->", "str", ":", "ltext", "=", "list", "(", "text", ")", "matches", "=", "[", "match", "for", "match", "in", "matches", "if", "match", ".", "replacements", "]", "errors", "=", "[", "ltext", "[", "match", ".", "offset", ":", "match", ".", "offset", "+", "match", ".", "errorlength", "]", "for", "match", "in", "matches", "]", "correct_offset", "=", "0", "for", "n", ",", "match", "in", "enumerate", "(", "matches", ")", ":", "frompos", ",", "topos", "=", "(", "correct_offset", "+", "match", ".", "offset", ",", "correct_offset", "+", "match", ".", "offset", "+", "match", ".", "errorlength", ")", "if", "ltext", "[", "frompos", ":", "topos", "]", "!=", "errors", "[", "n", "]", ":", "continue", "repl", "=", "match", ".", "replacements", "[", "0", "]", "ltext", "[", "frompos", ":", "topos", "]", "=", "list", "(", "repl", ")", "correct_offset", "+=", "len", "(", "repl", ")", "-", "len", "(", "errors", "[", "n", "]", ")", "return", "''", ".", "join", "(", "ltext", ")" ]
43.8125
0.001397
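A self-contained demonstration of correct with a minimal stand-in for Match (only the three attributes the function reads are mocked):

from collections import namedtuple

Match = namedtuple('Match', 'offset errorlength replacements')
text = 'helo world'
matches = [Match(offset=0, errorlength=4, replacements=['hello'])]
print(correct(text, matches))  # -> 'hello world'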
def format_endpoint(schema, addr, port, api_version): """Return a formatted keystone endpoint @param schema: http or https @param addr: ipv4/ipv6 host of the keystone service @param port: port of the keystone service @param api_version: 2 or 3 @returns a fully formatted keystone endpoint """ return '{}://{}:{}/{}/'.format(schema, addr, port, get_api_suffix(api_version))
[ "def", "format_endpoint", "(", "schema", ",", "addr", ",", "port", ",", "api_version", ")", ":", "return", "'{}://{}:{}/{}/'", ".", "format", "(", "schema", ",", "addr", ",", "port", ",", "get_api_suffix", "(", "api_version", ")", ")" ]
43
0.002278
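Illustrative only, assuming get_api_suffix(3) returns 'v3':

print(format_endpoint('https', '10.0.0.1', 5000, 3))  # -> 'https://10.0.0.1:5000/v3/'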
def srfcss(code, bodstr, srflen=_default_len_out):
    """
    Translate a surface ID code, together with a body string, to the
    corresponding surface name. If no such surface name exists, return
    a string representation of the surface ID code.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfcss_c.html

    :param code: Integer surface ID code to translate to a string.
    :type code: int
    :param bodstr: Name or ID of body associated with surface.
    :type bodstr: str
    :param srflen: Available space in output string.
    :type srflen: int
    :return: String corresponding to surface ID code, and a flag
            indicating whether the string is a surface name.
    :rtype: tuple
    """
    code = ctypes.c_int(code)
    bodstr = stypes.stringToCharP(bodstr)
    srfstr = stypes.stringToCharP(srflen)
    srflen = ctypes.c_int(srflen)
    isname = ctypes.c_int()
    libspice.srfcss_c(code, bodstr, srflen, srfstr, ctypes.byref(isname))
    return stypes.toPythonString(srfstr), bool(isname.value)
[ "def", "srfcss", "(", "code", ",", "bodstr", ",", "srflen", "=", "_default_len_out", ")", ":", "code", "=", "ctypes", ".", "c_int", "(", "code", ")", "bodstr", "=", "stypes", ".", "stringToCharP", "(", "bodstr", ")", "srfstr", "=", "stypes", ".", "stringToCharP", "(", "srflen", ")", "srflen", "=", "ctypes", ".", "c_int", "(", "srflen", ")", "isname", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "srfcss_c", "(", "code", ",", "bodstr", ",", "srflen", ",", "srfstr", ",", "ctypes", ".", "byref", "(", "isname", ")", ")", "return", "stypes", ".", "toPythonString", "(", "srfstr", ")", ",", "bool", "(", "isname", ".", "value", ")" ]
39.708333
0.007172
def segwit_addr_encode(witprog_bin, hrp=bech32_prefix, witver=bech32_witver):
    """
    Encode a segwit script hash to a bech32 address.
    Returns the bech32-encoded string on success
    """
    witprog_bytes = [ord(c) for c in witprog_bin]
    ret = bech32_encode(hrp, [int(witver)] + convertbits(witprog_bytes, 8, 5))
    # `is not (None, None)` was always True (identity test against a fresh
    # tuple); an equality test is what was intended here.
    assert segwit_addr_decode(hrp, ret) != (None, None)
    return ret
[ "def", "segwit_addr_encode", "(", "witprog_bin", ",", "hrp", "=", "bech32_prefix", ",", "witver", "=", "bech32_witver", ")", ":", "witprog_bytes", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "witprog_bin", "]", "ret", "=", "bech32_encode", "(", "hrp", ",", "[", "int", "(", "witver", ")", "]", "+", "convertbits", "(", "witprog_bytes", ",", "8", ",", "5", ")", ")", "assert", "segwit_addr_decode", "(", "hrp", ",", "ret", ")", "is", "not", "(", "None", ",", "None", ")", "return", "ret" ]
43.444444
0.002506
def remove_row(self, row_num=None):
        """
        Remove a row from the grid
        """
        # DeleteRows(self, pos, numRows, updateLabels)
        if not row_num and row_num != 0:
            row_num = self.GetNumberRows() - 1
        label = self.GetCellValue(row_num, 0)
        self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
        # remove label from row_labels
        try:
            self.row_labels.remove(label)
        except ValueError:
            # if label name hasn't been saved yet, simply truncate row_labels
            self.row_labels = self.row_labels[:-1]
        self.row_items.pop(row_num)
        if not self.changes:
            self.changes = set()
        self.changes.add(-1)
        # fix #s for rows edited:
        self.update_changes_after_row_delete(row_num)
[ "def", "remove_row", "(", "self", ",", "row_num", "=", "None", ")", ":", "#DeleteRows(self, pos, numRows, updateLabel", "if", "not", "row_num", "and", "row_num", "!=", "0", ":", "row_num", "=", "self", ".", "GetNumberRows", "(", ")", "-", "1", "label", "=", "self", ".", "GetCellValue", "(", "row_num", ",", "0", ")", "self", ".", "DeleteRows", "(", "pos", "=", "row_num", ",", "numRows", "=", "1", ",", "updateLabels", "=", "True", ")", "# remove label from row_labels", "try", ":", "self", ".", "row_labels", ".", "remove", "(", "label", ")", "except", "ValueError", ":", "# if label name hasn't been saved yet, simply truncate row_labels", "self", ".", "row_labels", "=", "self", ".", "row_labels", "[", ":", "-", "1", "]", "self", ".", "row_items", ".", "pop", "(", "row_num", ")", "if", "not", "self", ".", "changes", ":", "self", ".", "changes", "=", "set", "(", ")", "self", ".", "changes", ".", "add", "(", "-", "1", ")", "# fix #s for rows edited:", "self", ".", "update_changes_after_row_delete", "(", "row_num", ")" ]
36
0.00369
def assertFileTypeNotEqual(self, filename, extension, msg=None): '''Fail if ``filename`` has the given ``extension`` as determined by the ``!=`` operator. Parameters ---------- filename : str, bytes, file-like extension : str, bytes msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``filename`` is not a str or bytes object and is not file-like. ''' ftype = self._get_file_type(filename) self.assertNotEqual(ftype, extension, msg=msg)
[ "def", "assertFileTypeNotEqual", "(", "self", ",", "filename", ",", "extension", ",", "msg", "=", "None", ")", ":", "ftype", "=", "self", ".", "_get_file_type", "(", "filename", ")", "self", ".", "assertNotEqual", "(", "ftype", ",", "extension", ",", "msg", "=", "msg", ")" ]
32.5
0.00299
def set_value(self, visual_property, value): """Set a single Visual Property Value :param visual_property: Visual Property ID :param value: New value for the VP :return: None """ if visual_property is None or value is None: raise ValueError('Both VP and value are required.') new_value = [ { 'visualProperty': visual_property, "value": value } ] requests.put(self.url, data=json.dumps(new_value), headers=HEADERS)
[ "def", "set_value", "(", "self", ",", "visual_property", ",", "value", ")", ":", "if", "visual_property", "is", "None", "or", "value", "is", "None", ":", "raise", "ValueError", "(", "'Both VP and value are required.'", ")", "new_value", "=", "[", "{", "'visualProperty'", ":", "visual_property", ",", "\"value\"", ":", "value", "}", "]", "requests", ".", "put", "(", "self", ".", "url", ",", "data", "=", "json", ".", "dumps", "(", "new_value", ")", ",", "headers", "=", "HEADERS", ")" ]
31.705882
0.003604
def to_gnuplot_datafile(self, datafilepath):
        """Dumps the TimeSeries into a gnuplot compatible data file.

        :param string datafilepath: Path used to create the file.
            If that file already exists, it will be overwritten!

        :return: Returns :py:const:`True` if the data could be written,
            :py:const:`False` otherwise.
        :rtype: boolean
        """
        try:
            # open() instead of the Python 2-only file() builtin; text mode,
            # since formatted strings are written below
            datafile = open(datafilepath, "w")
        except Exception:
            return False

        if self._timestampFormat is None:
            self._timestampFormat = _STR_EPOCHS

        datafile.write("# time_as_<%s> value\n" % self._timestampFormat)

        convert = TimeSeries.convert_epoch_to_timestamp
        for datapoint in self._timeseriesData:
            timestamp, value = datapoint
            if self._timestampFormat is not None:
                timestamp = convert(timestamp, self._timestampFormat)

            datafile.write("%s %s\n" % (timestamp, value))

        datafile.close()
        return True
[ "def", "to_gnuplot_datafile", "(", "self", ",", "datafilepath", ")", ":", "try", ":", "datafile", "=", "file", "(", "datafilepath", ",", "\"wb\"", ")", "except", "Exception", ":", "return", "False", "if", "self", ".", "_timestampFormat", "is", "None", ":", "self", ".", "_timestampFormat", "=", "_STR_EPOCHS", "datafile", ".", "write", "(", "\"# time_as_<%s> value\\n\"", "%", "self", ".", "_timestampFormat", ")", "convert", "=", "TimeSeries", ".", "convert_epoch_to_timestamp", "for", "datapoint", "in", "self", ".", "_timeseriesData", ":", "timestamp", ",", "value", "=", "datapoint", "if", "self", ".", "_timestampFormat", "is", "not", "None", ":", "timestamp", "=", "convert", "(", "timestamp", ",", "self", ".", "_timestampFormat", ")", "datafile", ".", "write", "(", "\"%s %s\\n\"", "%", "(", "timestamp", ",", "value", ")", ")", "datafile", ".", "close", "(", ")", "return", "True" ]
34.862069
0.00385
def get_artists(self, limit=50, cacheable=True):
        """
        Returns a sequence of Artist objects
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(
            limit, self, self.ws_prefix + ".getArtists", cacheable
        ):
            name = _extract(node, "name")
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Artist(name, self.network), playcount, tagcount))
        return seq
[ "def", "get_artists", "(", "self", ",", "limit", "=", "50", ",", "cacheable", "=", "True", ")", ":", "seq", "=", "[", "]", "for", "node", "in", "_collect_nodes", "(", "limit", ",", "self", ",", "self", ".", "ws_prefix", "+", "\".getArtists\"", ",", "cacheable", ")", ":", "name", "=", "_extract", "(", "node", ",", "\"name\"", ")", "playcount", "=", "_number", "(", "_extract", "(", "node", ",", "\"playcount\"", ")", ")", "tagcount", "=", "_number", "(", "_extract", "(", "node", ",", "\"tagcount\"", ")", ")", "seq", ".", "append", "(", "LibraryItem", "(", "Artist", "(", "name", ",", "self", ".", "network", ")", ",", "playcount", ",", "tagcount", ")", ")", "return", "seq" ]
31.166667
0.00519
def confirmation_pdf(self, confirmation_id): """ Opens a pdf of a confirmation :param confirmation_id: the confirmation id :return: dict """ return self._create_get_request(resource=CONFIRMATIONS, billomat_id=confirmation_id, command=PDF)
[ "def", "confirmation_pdf", "(", "self", ",", "confirmation_id", ")", ":", "return", "self", ".", "_create_get_request", "(", "resource", "=", "CONFIRMATIONS", ",", "billomat_id", "=", "confirmation_id", ",", "command", "=", "PDF", ")" ]
35
0.010453
def write_layout(_path):
    """ Write a valid gentoo layout file to :_path:.

    Args:
        _path - The output path of the layout.conf
    """
    path.mkdir_uchroot("/etc/portage/metadata")
    path.mkfile_uchroot("/etc/portage/metadata/layout.conf")

    with open(_path, 'w') as layoutconf:
        lines = '''masters = gentoo'''
        layoutconf.write(lines)
[ "def", "write_layout", "(", "_path", ")", ":", "path", ".", "mkdir_uchroot", "(", "\"/etc/portage/metadata\"", ")", "path", ".", "mkfile_uchroot", "(", "\"/etc/portage/metadata/layout.conf\"", ")", "with", "open", "(", "_path", ",", "'w'", ")", "as", "layoutconf", ":", "lines", "=", "'''masters = gentoo'''", "layoutconf", ".", "write", "(", "lines", ")" ]
27.615385
0.002695
def return_markers(self):
        """Return all the markers (also called triggers or events).

        Returns
        -------
        list of dict
            where each dict contains 'name' as str, 'start' and 'end' as float
            in seconds from the start of the recordings, and 'chan' as list of
            str with the channels involved (if not of relevance, it's None).
        """
        markers = []
        for v in self.mrk['Marker Infos'].values():
            if v[0] == 'New Segment':
                continue
            markers.append({
                'name': v[1],
                'start': float(v[2]) / self.s_freq,
                'end': (float(v[2]) + float(v[3])) / self.s_freq,
                })
        return markers
[ "def", "return_markers", "(", "self", ")", ":", "markers", "=", "[", "]", "for", "v", "in", "self", ".", "mrk", "[", "'Marker Infos'", "]", ".", "values", "(", ")", ":", "if", "v", "[", "0", "]", "==", "'New Segment'", ":", "continue", "markers", ".", "append", "(", "{", "'name'", ":", "v", "[", "1", "]", ",", "'start'", ":", "float", "(", "v", "[", "2", "]", ")", "/", "self", ".", "s_freq", ",", "'end'", ":", "(", "float", "(", "v", "[", "2", "]", ")", "+", "float", "(", "v", "[", "3", "]", ")", ")", "/", "self", ".", "s_freq", ",", "}", ")", "return", "markers" ]
33.181818
0.002663
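A rough sketch of the transformation, assuming (as the indexing above implies) that each marker value is a (type, name, start_sample, duration_samples, ...) tuple; the sample values are invented:

# Hypothetical marker table mimicking self.mrk['Marker Infos'].values()
s_freq = 250.0  # assumed sampling frequency in Hz
marker_infos = {
    'Mk1': ('New Segment', '', '1', '0'),      # skipped by the method
    'Mk2': ('Stimulus', 'S  1', '500', '25'),  # starts at sample 500, lasts 25 samples
}
markers = []
for v in marker_infos.values():
    if v[0] == 'New Segment':
        continue
    markers.append({
        'name': v[1],
        'start': float(v[2]) / s_freq,                # 2.0 s
        'end': (float(v[2]) + float(v[3])) / s_freq,  # 2.1 s
    })
print(markers)  # [{'name': 'S  1', 'start': 2.0, 'end': 2.1}]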
def _ixor(self, other):
        """Set self to the symmetric difference between the sets.

        if isinstance(other, _basebag):
            This runs in O(other.num_unique_elements())
        else:
            This runs in O(len(other))
        """
        if isinstance(other, _basebag):
            for elem, other_count in other.counts():
                count = abs(self.count(elem) - other_count)
                self._set_count(elem, count)
        else:
            # Let a = self.count(elem) and b = other.count(elem)
            # if a >= b then elem is removed from self b times leaving a - b
            # if a < b then elem is removed from self a times then added (b - a)
            # times leaving a - a + (b - a) = b - a
            for elem in other:
                try:
                    self._increment_count(elem, -1)
                except ValueError:
                    self._increment_count(elem, 1)
        return self
[ "def", "_ixor", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "_basebag", ")", ":", "for", "elem", ",", "other_count", "in", "other", ".", "counts", "(", ")", ":", "count", "=", "abs", "(", "self", ".", "count", "(", "elem", ")", "-", "other_count", ")", "self", ".", "_set_count", "(", "elem", ",", "count", ")", "else", ":", "# Let a = self.count(elem) and b = other.count(elem)", "# if a >= b then elem is removed from self b times leaving a - b", "# if a < b then elem is removed from self a times then added (b - a)", "# times leaving a - a + (b - a) = b - a", "for", "elem", "in", "other", ":", "try", ":", "self", ".", "_increment_count", "(", "elem", ",", "-", "1", ")", "except", "ValueError", ":", "self", ".", "_increment_count", "(", "elem", ",", "1", ")", "return", "self" ]
31.913043
0.030423
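The |a - b| rule in the first branch is easy to check against collections.Counter; a minimal sketch, with Counter standing in for the bag type:

from collections import Counter

# Symmetric difference of two multisets: each element keeps |a - b| copies,
# where a and b are its counts on the two sides.
self_bag = Counter('aab')    # a: 2, b: 1
other = Counter('abbb')      # a: 1, b: 3
result = Counter()
for elem in set(self_bag) | set(other):
    count = abs(self_bag[elem] - other[elem])
    if count:
        result[elem] = count
print(result)  # Counter({'b': 2, 'a': 1})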
def remix(self, remix_dictionary=None, num_output_channels=None):
        '''Remix the channels of an audio file.

        Note: volume options are not yet implemented

        Parameters
        ----------
        remix_dictionary : dict or None
            Dictionary mapping output channel to list of input channel(s).
            Empty lists indicate the corresponding output channel should be
            empty. If None, mixes all channels down to a single mono file.
        num_output_channels : int or None
            The number of channels in the output file. If None, the number of
            output channels is equal to the largest key in remix_dictionary.
            If remix_dictionary is None, this variable is ignored.

        Examples
        --------
        Remix a 4-channel input file. The output file will have
        input channel 2 in channel 1, a mixdown of input channels 1 and 3 in
        channel 2, an empty channel 3, and a copy of input channel 4 in
        channel 4.

        >>> import sox
        >>> tfm = sox.Transformer()
        >>> remix_dictionary = {1: [2], 2: [1, 3], 4: [4]}
        >>> tfm.remix(remix_dictionary)

        '''
        if not (isinstance(remix_dictionary, dict) or
                remix_dictionary is None):
            raise ValueError("remix_dictionary must be a dictionary or None.")

        if remix_dictionary is not None:

            if not all([isinstance(i, int) and i > 0
                        for i in remix_dictionary.keys()]):
                raise ValueError(
                    "remix dictionary must have positive integer keys."
                )

            if not all([isinstance(v, list)
                        for v in remix_dictionary.values()]):
                raise ValueError("remix dictionary values must be lists.")

            for v_list in remix_dictionary.values():
                if not all([isinstance(v, int) and v > 0 for v in v_list]):
                    raise ValueError(
                        "elements of remix dictionary values must "
                        "be positive integers"
                    )

        if not ((isinstance(num_output_channels, int) and
                 num_output_channels > 0) or num_output_channels is None):
            raise ValueError(
                "num_output_channels must be a positive integer or None."
            )

        effect_args = ['remix']
        if remix_dictionary is None:
            effect_args.append('-')
        else:
            if num_output_channels is None:
                num_output_channels = max(remix_dictionary.keys())

            for channel in range(1, num_output_channels + 1):
                if channel in remix_dictionary.keys():
                    out_channel = ','.join(
                        [str(i) for i in remix_dictionary[channel]]
                    )
                else:
                    out_channel = '0'

                effect_args.append(out_channel)

        self.effects.extend(effect_args)
        self.effects_log.append('remix')

        return self
[ "def", "remix", "(", "self", ",", "remix_dictionary", "=", "None", ",", "num_output_channels", "=", "None", ")", ":", "if", "not", "(", "isinstance", "(", "remix_dictionary", ",", "dict", ")", "or", "remix_dictionary", "is", "None", ")", ":", "raise", "ValueError", "(", "\"remix_dictionary must be a dictionary or None.\"", ")", "if", "remix_dictionary", "is", "not", "None", ":", "if", "not", "all", "(", "[", "isinstance", "(", "i", ",", "int", ")", "and", "i", ">", "0", "for", "i", "in", "remix_dictionary", ".", "keys", "(", ")", "]", ")", ":", "raise", "ValueError", "(", "\"remix dictionary must have positive integer keys.\"", ")", "if", "not", "all", "(", "[", "isinstance", "(", "v", ",", "list", ")", "for", "v", "in", "remix_dictionary", ".", "values", "(", ")", "]", ")", ":", "raise", "ValueError", "(", "\"remix dictionary values must be lists.\"", ")", "for", "v_list", "in", "remix_dictionary", ".", "values", "(", ")", ":", "if", "not", "all", "(", "[", "isinstance", "(", "v", ",", "int", ")", "and", "v", ">", "0", "for", "v", "in", "v_list", "]", ")", ":", "raise", "ValueError", "(", "\"elements of remix dictionary values must \"", "\"be positive integers\"", ")", "if", "not", "(", "(", "isinstance", "(", "num_output_channels", ",", "int", ")", "and", "num_output_channels", ">", "0", ")", "or", "num_output_channels", "is", "None", ")", ":", "raise", "ValueError", "(", "\"num_output_channels must be a positive integer or None.\"", ")", "effect_args", "=", "[", "'remix'", "]", "if", "remix_dictionary", "is", "None", ":", "effect_args", ".", "append", "(", "'-'", ")", "else", ":", "if", "num_output_channels", "is", "None", ":", "num_output_channels", "=", "max", "(", "remix_dictionary", ".", "keys", "(", ")", ")", "for", "channel", "in", "range", "(", "1", ",", "num_output_channels", "+", "1", ")", ":", "if", "channel", "in", "remix_dictionary", ".", "keys", "(", ")", ":", "out_channel", "=", "','", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "remix_dictionary", "[", "channel", "]", "]", ")", "else", ":", "out_channel", "=", "'0'", "effect_args", ".", "append", "(", "out_channel", ")", "self", ".", "effects", ".", "extend", "(", "effect_args", ")", "self", ".", "effects_log", ".", "append", "(", "'remix'", ")", "return", "self" ]
37.810127
0.000653
def create(self, name, **kwds):
        """
        Endpoint: /album/create.json

        Creates a new album and returns it.
        """
        result = self._client.post("/album/create.json",
                                   name=name, **kwds)["result"]
        return Album(self._client, result)
[ "def", "create", "(", "self", ",", "name", ",", "*", "*", "kwds", ")", ":", "result", "=", "self", ".", "_client", ".", "post", "(", "\"/album/create.json\"", ",", "name", "=", "name", ",", "*", "*", "kwds", ")", "[", "\"result\"", "]", "return", "Album", "(", "self", ".", "_client", ",", "result", ")" ]
32.555556
0.006645
def program_unitary(program, n_qubits):
    """
    Return the unitary of a pyQuil program.

    :param program: A program consisting only of :py:class:`Gate`.
    :param n_qubits: The number of qubits the unitary acts on.
    :return: a unitary corresponding to the composition of the program's gates.
    """
    umat = np.eye(2 ** n_qubits)
    for instruction in program:
        if isinstance(instruction, Gate):
            unitary = lifted_gate(gate=instruction, n_qubits=n_qubits)
            umat = unitary.dot(umat)
        else:
            raise ValueError("Can only compute program unitary for programs composed of `Gate`s")
    return umat
[ "def", "program_unitary", "(", "program", ",", "n_qubits", ")", ":", "umat", "=", "np", ".", "eye", "(", "2", "**", "n_qubits", ")", "for", "instruction", "in", "program", ":", "if", "isinstance", "(", "instruction", ",", "Gate", ")", ":", "unitary", "=", "lifted_gate", "(", "gate", "=", "instruction", ",", "n_qubits", "=", "n_qubits", ")", "umat", "=", "unitary", ".", "dot", "(", "umat", ")", "else", ":", "raise", "ValueError", "(", "\"Can only compute program unitary for programs composed of `Gate`s\"", ")", "return", "umat" ]
38.466667
0.003384
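The composition order matters: each new gate is applied on the left, so a program [G1, G2] yields U = U2 · U1. A self-contained numpy sketch of the same accumulation, with explicit 2x2 matrices standing in for lifted_gate:

import numpy as np

# Single-qubit X then H; the program's unitary is H @ X, not X @ H.
X = np.array([[0, 1], [1, 0]], dtype=complex)
H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)

umat = np.eye(2, dtype=complex)
for unitary in (X, H):          # program order: X first, then H
    umat = unitary.dot(umat)    # left-multiply, as in the loop above

assert np.allclose(umat, H @ X)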
def rgb_to_hsl(r, g=None, b=None):
    """Convert the color from RGB coordinates to HSL.

    Parameters:
      :r:
        The Red component value [0...1]
      :g:
        The Green component value [0...1]
      :b:
        The Blue component value [0...1]

    Returns:
      The color as an (h, s, l) tuple in the range:
      h[0...360], s[0...1], l[0...1]

    >>> rgb_to_hsl(1, 0.5, 0)
    (30.0, 1.0, 0.5)
    """
    if type(r) in [list, tuple]:
        r, g, b = r

    minVal = min(r, g, b)  # min RGB value
    maxVal = max(r, g, b)  # max RGB value

    l = (maxVal + minVal) / 2.0
    if minVal == maxVal:
        return (0.0, 0.0, l)  # achromatic (gray)

    d = maxVal - minVal  # delta RGB value
    if l < 0.5:
        s = d / (maxVal + minVal)
    else:
        s = d / (2.0 - maxVal - minVal)

    dr, dg, db = [(maxVal - val) / d for val in (r, g, b)]

    if r == maxVal:
        h = db - dg
    elif g == maxVal:
        h = 2.0 + dr - db
    else:
        h = 4.0 + dg - dr

    h = (h * 60.0) % 360.0
    return (h, s, l)
[ "def", "rgb_to_hsl", "(", "r", ",", "g", "=", "None", ",", "b", "=", "None", ")", ":", "if", "type", "(", "r", ")", "in", "[", "list", ",", "tuple", "]", ":", "r", ",", "g", ",", "b", "=", "r", "minVal", "=", "min", "(", "r", ",", "g", ",", "b", ")", "# min RGB value", "maxVal", "=", "max", "(", "r", ",", "g", ",", "b", ")", "# max RGB value", "l", "=", "(", "maxVal", "+", "minVal", ")", "/", "2.0", "if", "minVal", "==", "maxVal", ":", "return", "(", "0.0", ",", "0.0", ",", "l", ")", "# achromatic (gray)", "d", "=", "maxVal", "-", "minVal", "# delta RGB value", "if", "l", "<", "0.5", ":", "s", "=", "d", "/", "(", "maxVal", "+", "minVal", ")", "else", ":", "s", "=", "d", "/", "(", "2.0", "-", "maxVal", "-", "minVal", ")", "dr", ",", "dg", ",", "db", "=", "[", "(", "maxVal", "-", "val", ")", "/", "d", "for", "val", "in", "(", "r", ",", "g", ",", "b", ")", "]", "if", "r", "==", "maxVal", ":", "h", "=", "db", "-", "dg", "elif", "g", "==", "maxVal", ":", "h", "=", "2.0", "+", "dr", "-", "db", "else", ":", "h", "=", "4.0", "+", "dg", "-", "dr", "h", "=", "(", "h", "*", "60.0", ")", "%", "360.0", "return", "(", "h", ",", "s", ",", "l", ")" ]
19.723404
0.023638
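Tracing the code on pure blue confirms the branch logic: min=0 and max=1 give l=0.5 and s=1.0, and since b is the max, h = (4.0 + dg - dr) * 60 = 240:

r, g, b = 0.0, 0.0, 1.0
minVal, maxVal = min(r, g, b), max(r, g, b)
l = (maxVal + minVal) / 2.0          # 0.5
d = maxVal - minVal                  # 1.0
s = d / (maxVal + minVal)            # 1.0, since l < 0.5
dr, dg, db = [(maxVal - v) / d for v in (r, g, b)]
h = (4.0 + dg - dr) * 60.0 % 360.0   # b == maxVal branch
print(h, s, l)                       # 240.0 1.0 0.5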
def _check_vpcs_version(self):
        """
        Checks that the VPCS executable version is >= 0.6.1.
        """
        try:
            output = yield from subprocess_check_output(self._vpcs_path(), "-v", cwd=self.working_dir)
            match = re.search("Welcome to Virtual PC Simulator, version ([0-9a-z\.]+)", output)
            if match:
                version = match.group(1)
                self._vpcs_version = parse_version(version)
                if self._vpcs_version < parse_version("0.6.1"):
                    raise VPCSError("VPCS executable version must be >= 0.6.1 but not a 0.8")
            else:
                raise VPCSError("Could not determine the VPCS version for {}".format(self._vpcs_path()))
        except (OSError, subprocess.SubprocessError) as e:
            raise VPCSError("Error while looking for the VPCS version: {}".format(e))
[ "def", "_check_vpcs_version", "(", "self", ")", ":", "try", ":", "output", "=", "yield", "from", "subprocess_check_output", "(", "self", ".", "_vpcs_path", "(", ")", ",", "\"-v\"", ",", "cwd", "=", "self", ".", "working_dir", ")", "match", "=", "re", ".", "search", "(", "\"Welcome to Virtual PC Simulator, version ([0-9a-z\\.]+)\"", ",", "output", ")", "if", "match", ":", "version", "=", "match", ".", "group", "(", "1", ")", "self", ".", "_vpcs_version", "=", "parse_version", "(", "version", ")", "if", "self", ".", "_vpcs_version", "<", "parse_version", "(", "\"0.6.1\"", ")", ":", "raise", "VPCSError", "(", "\"VPCS executable version must be >= 0.6.1 but not a 0.8\"", ")", "else", ":", "raise", "VPCSError", "(", "\"Could not determine the VPCS version for {}\"", ".", "format", "(", "self", ".", "_vpcs_path", "(", ")", ")", ")", "except", "(", "OSError", ",", "subprocess", ".", "SubprocessError", ")", "as", "e", ":", "raise", "VPCSError", "(", "\"Error while looking for the VPCS version: {}\"", ".", "format", "(", "e", ")", ")" ]
54.375
0.00904
def turn_on_switch(self, device_id, name):
        """Create the message to turn the switch on."""
        msg = "!%sF1|Turn On|%s" % (device_id, name)
        self._send_message(msg)
[ "def", "turn_on_switch", "(", "self", ",", "device_id", ",", "name", ")", ":", "msg", "=", "\"!%sF1|Turn On|%s\"", "%", "(", "device_id", ",", "name", ")", "self", ".", "_send_message", "(", "msg", ")" ]
44
0.011173
def values(self):
        """
        Returns an `np.array` of type `self.dtype` containing some values
        from the domain. For domains where ``is_finite`` is ``True``, all
        elements of the domain will be yielded exactly once.

        :rtype: `np.ndarray`
        """
        # This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565
        partition_array = np.empty((self.n_members, self.n_elements), dtype=int)

        masks = np.identity(self.n_elements, dtype=int)
        for i, c in enumerate(combinations_with_replacement(masks, self.n_meas)):
            partition_array[i, :] = sum(c)

        # Convert to dtype before returning
        return self.from_regular_array(partition_array)
[ "def", "values", "(", "self", ")", ":", "# This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565", "partition_array", "=", "np", ".", "empty", "(", "(", "self", ".", "n_members", ",", "self", ".", "n_elements", ")", ",", "dtype", "=", "int", ")", "masks", "=", "np", ".", "identity", "(", "self", ".", "n_elements", ",", "dtype", "=", "int", ")", "for", "i", ",", "c", "in", "enumerate", "(", "combinations_with_replacement", "(", "masks", ",", "self", ".", "n_meas", ")", ")", ":", "partition_array", "[", "i", ",", ":", "]", "=", "sum", "(", "c", ")", "# Convert to dtype before returning", "return", "self", ".", "from_regular_array", "(", "partition_array", ")" ]
40.277778
0.008086
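A small standalone demo of the combinations-with-replacement trick, assuming three elements and two measurements; summing the chosen identity rows yields every occupation vector, and each row sums to n_meas:

import numpy as np
from math import comb  # Python 3.8+; number of multisets is C(n+k-1, k)
from itertools import combinations_with_replacement

n_elements, n_meas = 3, 2
n_members = comb(n_elements + n_meas - 1, n_meas)  # 6

masks = np.identity(n_elements, dtype=int)
partition_array = np.empty((n_members, n_elements), dtype=int)
for i, c in enumerate(combinations_with_replacement(masks, n_meas)):
    partition_array[i, :] = sum(c)

print(partition_array)  # rows like [2 0 0], [1 1 0], ... each summing to 2
assert (partition_array.sum(axis=1) == n_meas).all()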
def dotplot(args):
    """
    %prog dotplot map.csv ref.fasta

    Make dotplot between chromosomes and linkage maps.
    The input map is csv formatted, for example:

    ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
    scaffold_2707,11508,1,0
    scaffold_2707,11525,1,1.2
    """
    from jcvi.assembly.allmaps import CSVMapLine
    from jcvi.formats.sizes import Sizes
    from jcvi.utils.natsort import natsorted
    from jcvi.graphics.base import shorten
    from jcvi.graphics.dotplot import plt, savefig, markup, normalize_axes, \
        downsample, plot_breaks_and_labels, thousands

    p = OptionParser(dotplot.__doc__)
    p.set_outfile(outfile=None)
    opts, args, iopts = p.set_image_options(args, figsize="8x8",
                                            style="dark", dpi=90, cmap="copper")

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, fastafile = args
    sizes = natsorted(Sizes(fastafile).mapping.items())
    seen = set()
    raw_data = []

    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])  # the whole canvas
    ax = fig.add_axes([.1, .1, .8, .8])  # the dot plot

    fp = must_open(csvfile)
    for row in fp:
        m = CSVMapLine(row)
        seen.add(m.seqid)
        raw_data.append(m)

    # X-axis is the genome assembly
    ctgs, ctg_sizes = zip(*sizes)
    xsize = sum(ctg_sizes)
    qb = list(np.cumsum(ctg_sizes))
    qbreaks = list(zip(ctgs, [0] + qb, qb))
    qstarts = dict(zip(ctgs, [0] + qb))

    # Y-axis is the map
    key = lambda x: x.lg
    raw_data.sort(key=key)
    ssizes = {}
    for lg, d in groupby(raw_data, key=key):
        ssizes[lg] = max([x.cm for x in d])
    ssizes = natsorted(ssizes.items())
    lgs, lg_sizes = zip(*ssizes)
    ysize = sum(lg_sizes)
    sb = list(np.cumsum(lg_sizes))
    sbreaks = list(zip([("LG" + x) for x in lgs], [0] + sb, sb))
    sstarts = dict(zip(lgs, [0] + sb))

    # Re-code all the scatter dots
    data = [(qstarts[x.seqid] + x.pos, sstarts[x.lg] + x.cm, 'g') \
            for x in raw_data if (x.seqid in qstarts)]
    npairs = downsample(data)
    x, y, c = zip(*data)
    ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)

    # Flip X-Y label
    gy, gx = op.basename(csvfile).split(".")[:2]
    gx, gy = shorten(gx, maxchar=30), shorten(gy, maxchar=30)
    xlim, ylim = plot_breaks_and_labels(fig, root, ax, gx, gy, xsize,
                                        ysize, qbreaks, sbreaks)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    title = "Alignment: {} vs {}".format(gx, gy)
    title += " ({} markers)".format(thousands(npairs))
    root.set_title(markup(title), x=.5, y=.96, color="k")
    logging.debug(title)
    normalize_axes(root)

    image_name = opts.outfile or \
        (csvfile.rsplit(".", 1)[0] + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    fig.clear()
[ "def", "dotplot", "(", "args", ")", ":", "from", "jcvi", ".", "assembly", ".", "allmaps", "import", "CSVMapLine", "from", "jcvi", ".", "formats", ".", "sizes", "import", "Sizes", "from", "jcvi", ".", "utils", ".", "natsort", "import", "natsorted", "from", "jcvi", ".", "graphics", ".", "base", "import", "shorten", "from", "jcvi", ".", "graphics", ".", "dotplot", "import", "plt", ",", "savefig", ",", "markup", ",", "normalize_axes", ",", "downsample", ",", "plot_breaks_and_labels", ",", "thousands", "p", "=", "OptionParser", "(", "dotplot", ".", "__doc__", ")", "p", ".", "set_outfile", "(", "outfile", "=", "None", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"8x8\"", ",", "style", "=", "\"dark\"", ",", "dpi", "=", "90", ",", "cmap", "=", "\"copper\"", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "csvfile", ",", "fastafile", "=", "args", "sizes", "=", "natsorted", "(", "Sizes", "(", "fastafile", ")", ".", "mapping", ".", "items", "(", ")", ")", "seen", "=", "set", "(", ")", "raw_data", "=", "[", "]", "fig", "=", "plt", ".", "figure", "(", "1", ",", "(", "iopts", ".", "w", ",", "iopts", ".", "h", ")", ")", "root", "=", "fig", ".", "add_axes", "(", "[", "0", ",", "0", ",", "1", ",", "1", "]", ")", "# the whole canvas", "ax", "=", "fig", ".", "add_axes", "(", "[", ".1", ",", ".1", ",", ".8", ",", ".8", "]", ")", "# the dot plot", "fp", "=", "must_open", "(", "csvfile", ")", "for", "row", "in", "fp", ":", "m", "=", "CSVMapLine", "(", "row", ")", "seen", ".", "add", "(", "m", ".", "seqid", ")", "raw_data", ".", "append", "(", "m", ")", "# X-axis is the genome assembly", "ctgs", ",", "ctg_sizes", "=", "zip", "(", "*", "sizes", ")", "xsize", "=", "sum", "(", "ctg_sizes", ")", "qb", "=", "list", "(", "np", ".", "cumsum", "(", "ctg_sizes", ")", ")", "qbreaks", "=", "list", "(", "zip", "(", "ctgs", ",", "[", "0", "]", "+", "qb", ",", "qb", ")", ")", "qstarts", "=", "dict", "(", "zip", "(", "ctgs", ",", "[", "0", "]", "+", "qb", ")", ")", "# Y-axis is the map", "key", "=", "lambda", "x", ":", "x", ".", "lg", "raw_data", ".", "sort", "(", "key", "=", "key", ")", "ssizes", "=", "{", "}", "for", "lg", ",", "d", "in", "groupby", "(", "raw_data", ",", "key", "=", "key", ")", ":", "ssizes", "[", "lg", "]", "=", "max", "(", "[", "x", ".", "cm", "for", "x", "in", "d", "]", ")", "ssizes", "=", "natsorted", "(", "ssizes", ".", "items", "(", ")", ")", "lgs", ",", "lg_sizes", "=", "zip", "(", "*", "ssizes", ")", "ysize", "=", "sum", "(", "lg_sizes", ")", "sb", "=", "list", "(", "np", ".", "cumsum", "(", "lg_sizes", ")", ")", "sbreaks", "=", "list", "(", "zip", "(", "[", "(", "\"LG\"", "+", "x", ")", "for", "x", "in", "lgs", "]", ",", "[", "0", "]", "+", "sb", ",", "sb", ")", ")", "sstarts", "=", "dict", "(", "zip", "(", "lgs", ",", "[", "0", "]", "+", "sb", ")", ")", "# Re-code all the scatter dots", "data", "=", "[", "(", "qstarts", "[", "x", ".", "seqid", "]", "+", "x", ".", "pos", ",", "sstarts", "[", "x", ".", "lg", "]", "+", "x", ".", "cm", ",", "'g'", ")", "for", "x", "in", "raw_data", "if", "(", "x", ".", "seqid", "in", "qstarts", ")", "]", "npairs", "=", "downsample", "(", "data", ")", "x", ",", "y", ",", "c", "=", "zip", "(", "*", "data", ")", "ax", ".", "scatter", "(", "x", ",", "y", ",", "c", "=", "c", ",", "edgecolors", "=", "\"none\"", ",", "s", "=", "2", ",", "lw", "=", "0", ")", "# Flip X-Y label", "gy", ",", "gx", "=", "op", ".", "basename", "(", 
"csvfile", ")", ".", "split", "(", "\".\"", ")", "[", ":", "2", "]", "gx", ",", "gy", "=", "shorten", "(", "gx", ",", "maxchar", "=", "30", ")", ",", "shorten", "(", "gy", ",", "maxchar", "=", "30", ")", "xlim", ",", "ylim", "=", "plot_breaks_and_labels", "(", "fig", ",", "root", ",", "ax", ",", "gx", ",", "gy", ",", "xsize", ",", "ysize", ",", "qbreaks", ",", "sbreaks", ")", "ax", ".", "set_xlim", "(", "xlim", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "title", "=", "\"Alignment: {} vs {}\"", ".", "format", "(", "gx", ",", "gy", ")", "title", "+=", "\" ({} markers)\"", ".", "format", "(", "thousands", "(", "npairs", ")", ")", "root", ".", "set_title", "(", "markup", "(", "title", ")", ",", "x", "=", ".5", ",", "y", "=", ".96", ",", "color", "=", "\"k\"", ")", "logging", ".", "debug", "(", "title", ")", "normalize_axes", "(", "root", ")", "image_name", "=", "opts", ".", "outfile", "or", "(", "csvfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".\"", "+", "iopts", ".", "format", ")", "savefig", "(", "image_name", ",", "dpi", "=", "iopts", ".", "dpi", ",", "iopts", "=", "iopts", ")", "fig", ".", "clear", "(", ")" ]
32.37931
0.002756
def query(self, expr):
        """
        Query the data with a boolean expression.

        :param expr: the query string; you can use the '@' character to refer
            to environment variables.
        :return: new collection
        :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
        """
        from .query import CollectionVisitor

        if not isinstance(expr, six.string_types):
            raise ValueError('expr must be a string')

        frame = sys._getframe(2).f_locals
        try:
            env = frame.copy()
        finally:
            del frame

        visitor = CollectionVisitor(self, env)
        predicate = visitor.eval(expr)
        return self.filter(predicate)
[ "def", "query", "(", "self", ",", "expr", ")", ":", "from", ".", "query", "import", "CollectionVisitor", "if", "not", "isinstance", "(", "expr", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "'expr must be a string'", ")", "frame", "=", "sys", ".", "_getframe", "(", "2", ")", ".", "f_locals", "try", ":", "env", "=", "frame", ".", "copy", "(", ")", "finally", ":", "del", "frame", "visitor", "=", "CollectionVisitor", "(", "self", ",", "env", ")", "predicate", "=", "visitor", ".", "eval", "(", "expr", ")", "return", "self", ".", "filter", "(", "predicate", ")" ]
29.217391
0.004323
def migrate_non_shared(vm_, target, ssh=False):
    '''
    Attempt to execute non-shared storage "all" migration

    :param vm_: domain name
    :param target: target libvirt host name
    :param ssh: True to connect over ssh

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate_non_shared <vm name> <target hypervisor>

    A tunnel data migration can be performed by setting this in the
    configuration:

    .. code-block:: yaml

        virt:
            tunnel: True

    For more details on tunnelled data migrations, refer to
    https://libvirt.org/migration.html#transporttunnel
    '''
    cmd = _get_migrate_command() + ' --copy-storage-all ' + vm_\
        + _get_target(target, ssh)

    stdout = subprocess.Popen(cmd,
                              shell=True,
                              stdout=subprocess.PIPE).communicate()[0]
    return salt.utils.stringutils.to_str(stdout)
[ "def", "migrate_non_shared", "(", "vm_", ",", "target", ",", "ssh", "=", "False", ")", ":", "cmd", "=", "_get_migrate_command", "(", ")", "+", "' --copy-storage-all '", "+", "vm_", "+", "_get_target", "(", "target", ",", "ssh", ")", "stdout", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "return", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "stdout", ")" ]
27.78125
0.001087
def get_next_of_type(self, processor_type):
        """Get the next available processor of a particular type and
        increment its occupancy counter.

        Args:
            processor_type (ProcessorType): The processor type associated
                with a zmq identity.

        Returns:
            (Processor): Information about the transaction processor
        """
        with self._condition:
            if processor_type not in self:
                self.wait_for_registration(processor_type)
            try:
                processor = self[processor_type].next_processor()
            except NoProcessorVacancyError:
                processor = self.wait_for_vacancy(processor_type)
            processor.inc_occupancy()
            return processor
[ "def", "get_next_of_type", "(", "self", ",", "processor_type", ")", ":", "with", "self", ".", "_condition", ":", "if", "processor_type", "not", "in", "self", ":", "self", ".", "wait_for_registration", "(", "processor_type", ")", "try", ":", "processor", "=", "self", "[", "processor_type", "]", ".", "next_processor", "(", ")", "except", "NoProcessorVacancyError", ":", "processor", "=", "self", ".", "wait_for_vacancy", "(", "processor_type", ")", "processor", ".", "inc_occupancy", "(", ")", "return", "processor" ]
37.55
0.002597
def uptime():
    """Returns uptime in seconds if even remotely possible, or None if not."""
    if __boottime is not None:
        return time.time() - __boottime

    return {'amiga': _uptime_amiga,
            'aros12': _uptime_amiga,
            'beos5': _uptime_beos,
            'cygwin': _uptime_linux,
            'darwin': _uptime_osx,
            'haiku1': _uptime_beos,
            'linux': _uptime_linux,
            'linux-armv71': _uptime_linux,
            'linux2': _uptime_linux,
            'mac': _uptime_mac,
            'minix3': _uptime_minix,
            'riscos': _uptime_riscos,
            'sunos5': _uptime_solaris,
            'syllable': _uptime_syllable,
            'win32': _uptime_windows,
            'wince': _uptime_windows}.get(sys.platform, _uptime_bsd)() or \
        _uptime_bsd() or _uptime_plan9() or _uptime_linux() or \
        _uptime_windows() or _uptime_solaris() or _uptime_beos() or \
        _uptime_amiga() or _uptime_riscos() or _uptime_posix() or \
        _uptime_syllable() or _uptime_mac() or _uptime_osx()
[ "def", "uptime", "(", ")", ":", "if", "__boottime", "is", "not", "None", ":", "return", "time", ".", "time", "(", ")", "-", "__boottime", "return", "{", "'amiga'", ":", "_uptime_amiga", ",", "'aros12'", ":", "_uptime_amiga", ",", "'beos5'", ":", "_uptime_beos", ",", "'cygwin'", ":", "_uptime_linux", ",", "'darwin'", ":", "_uptime_osx", ",", "'haiku1'", ":", "_uptime_beos", ",", "'linux'", ":", "_uptime_linux", ",", "'linux-armv71'", ":", "_uptime_linux", ",", "'linux2'", ":", "_uptime_linux", ",", "'mac'", ":", "_uptime_mac", ",", "'minix3'", ":", "_uptime_minix", ",", "'riscos'", ":", "_uptime_riscos", ",", "'sunos5'", ":", "_uptime_solaris", ",", "'syllable'", ":", "_uptime_syllable", ",", "'win32'", ":", "_uptime_windows", ",", "'wince'", ":", "_uptime_windows", "}", ".", "get", "(", "sys", ".", "platform", ",", "_uptime_bsd", ")", "(", ")", "or", "_uptime_bsd", "(", ")", "or", "_uptime_plan9", "(", ")", "or", "_uptime_linux", "(", ")", "or", "_uptime_windows", "(", ")", "or", "_uptime_solaris", "(", ")", "or", "_uptime_beos", "(", ")", "or", "_uptime_amiga", "(", ")", "or", "_uptime_riscos", "(", ")", "or", "_uptime_posix", "(", ")", "or", "_uptime_syllable", "(", ")", "or", "_uptime_mac", "(", ")", "or", "_uptime_osx", "(", ")" ]
42
0.004655
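The return expression combines two idioms: dict.get(key, default) as a platform dispatch table, and an `or` chain that keeps probing because each helper returns None (falsy) on failure. A reduced sketch of the same pattern, with illustrative probe names:

import sys

def _probe_a():
    return None      # pretend this strategy failed

def _probe_b():
    return 12345.6   # first strategy that succeeds wins

def lookup():
    # Dispatch on platform, fall back to a default probe, then chain
    # alternatives: `or` short-circuits on the first truthy result.
    # (A genuine uptime of exactly 0 would also read as falsy, a known
    # quirk of this style.)
    return {'linux': _probe_a,
            'win32': _probe_a}.get(sys.platform, _probe_a)() or \
        _probe_b() or _probe_a()

print(lookup())  # 12345.6 on any platform, since _probe_a returns None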
def depth(args):
    """
    %prog depth DP.tsv

    Plot read depths across all TREDs.
    """
    import seaborn as sns

    p = OptionParser(depth.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="14x14")

    if len(args) != 1:
        sys.exit(not p.print_help())

    tsvfile, = args
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2,
                                                 figsize=(iopts.w, iopts.h))
    plt.tight_layout(pad=6)

    data = pd.read_csv(tsvfile, sep="\t", low_memory=False)

    ids, treds = read_treds()
    for (dp, ax, title) in zip(("FDP", "PDP", "RDP", "PEDP"),
                               (ax1, ax2, ax3, ax4),
                               ("Spanning reads", "Partial reads",
                                "Repeat-only reads", "Paired-end reads")):
        logging.debug("Build {}".format(title))
        # Construct related data structure
        xd = []   # (tred, dp)
        mdp = []  # (tred, median_dp)
        for tred, motif in zip(treds["abbreviation"], treds["motif"]):
            if tred in ignore:
                logging.debug("Ignore {}".format(tred))
                continue

            if len(motif) > 4:
                if "/" in motif:  # CTG/CAG
                    motif = motif.split("/")[0]
                else:
                    motif = motif[:4] + ".."
            xtred = "{} {}".format(tred, motif)
            md = [x for x in data[tred + '.' + dp] if x >= 0]

            subsample = 10000 if dp == "RDP" else 1000
            md = sample(md, subsample)
            pmd = [x for x in md if x > 0]
            median = np.median(pmd) if pmd else 0
            mdp.append((xtred, median))
            for d in md:
                xd.append((xtred, d))

        # Determine order
        mdp.sort(key=lambda x: x[1])
        order, mdp = zip(*mdp)

        # OK, now plot
        xt, xd = zip(*xd)
        sns.boxplot(xt, xd, ax=ax, order=order, fliersize=2)
        xticklabels = ax.get_xticklabels()
        ax.set_xticklabels(xticklabels, rotation=45, ha="right")
        ax.set_title("Number of {} per locus".format(title), size=18)
        ylim = 30 if dp == "RDP" else 100
        ax.set_ylim(0, ylim)
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_yticklabels(yticklabels, family='Helvetica', size=14)

    root = fig.add_axes([0, 0, 1, 1])
    pad = .04
    panel_labels(root, ((pad, 1 - pad, "A"),
                        (1 / 2. + pad / 2, 1 - pad, "B"),
                        (pad, .5 - pad / 2, "C"),
                        (1 / 2. + pad / 2, .5 - pad / 2, "D")))
    normalize_axes(root)

    image_name = "depth." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "depth", "(", "args", ")", ":", "import", "seaborn", "as", "sns", "p", "=", "OptionParser", "(", "depth", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"14x14\"", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "tsvfile", ",", "=", "args", "fig", ",", "(", "(", "ax1", ",", "ax2", ")", ",", "(", "ax3", ",", "ax4", ")", ")", "=", "plt", ".", "subplots", "(", "ncols", "=", "2", ",", "nrows", "=", "2", ",", "figsize", "=", "(", "iopts", ".", "w", ",", "iopts", ".", "h", ")", ")", "plt", ".", "tight_layout", "(", "pad", "=", "6", ")", "data", "=", "pd", ".", "read_csv", "(", "tsvfile", ",", "sep", "=", "\"\\t\"", ",", "low_memory", "=", "False", ")", "ids", ",", "treds", "=", "read_treds", "(", ")", "for", "(", "dp", ",", "ax", ",", "title", ")", "in", "zip", "(", "(", "\"FDP\"", ",", "\"PDP\"", ",", "\"RDP\"", ",", "\"PEDP\"", ")", ",", "(", "ax1", ",", "ax2", ",", "ax3", ",", "ax4", ")", ",", "(", "\"Spanning reads\"", ",", "\"Partial reads\"", ",", "\"Repeat-only reads\"", ",", "\"Paired-end reads\"", ")", ")", ":", "logging", ".", "debug", "(", "\"Build {}\"", ".", "format", "(", "title", ")", ")", "# Construct related data structure", "xd", "=", "[", "]", "# (tred, dp)", "mdp", "=", "[", "]", "# (tred, median_dp)", "for", "tred", ",", "motif", "in", "zip", "(", "treds", "[", "\"abbreviation\"", "]", ",", "treds", "[", "\"motif\"", "]", ")", ":", "if", "tred", "in", "ignore", ":", "logging", ".", "debug", "(", "\"Ignore {}\"", ".", "format", "(", "tred", ")", ")", "continue", "if", "len", "(", "motif", ")", ">", "4", ":", "if", "\"/\"", "in", "motif", ":", "# CTG/CAG", "motif", "=", "motif", ".", "split", "(", "\"/\"", ")", "[", "0", "]", "else", ":", "motif", "=", "motif", "[", ":", "4", "]", "+", "\"..\"", "xtred", "=", "\"{} {}\"", ".", "format", "(", "tred", ",", "motif", ")", "md", "=", "[", "x", "for", "x", "in", "data", "[", "tred", "+", "'.'", "+", "dp", "]", "if", "x", ">=", "0", "]", "subsample", "=", "10000", "if", "dp", "==", "\"RDP\"", "else", "1000", "md", "=", "sample", "(", "md", ",", "subsample", ")", "pmd", "=", "[", "x", "for", "x", "in", "md", "if", "x", ">", "0", "]", "median", "=", "np", ".", "median", "(", "pmd", ")", "if", "pmd", "else", "0", "mdp", ".", "append", "(", "(", "xtred", ",", "median", ")", ")", "for", "d", "in", "md", ":", "xd", ".", "append", "(", "(", "xtred", ",", "d", ")", ")", "# Determine order", "mdp", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "order", ",", "mdp", "=", "zip", "(", "*", "mdp", ")", "# OK, now plot", "xt", ",", "xd", "=", "zip", "(", "*", "xd", ")", "sns", ".", "boxplot", "(", "xt", ",", "xd", ",", "ax", "=", "ax", ",", "order", "=", "order", ",", "fliersize", "=", "2", ")", "xticklabels", "=", "ax", ".", "get_xticklabels", "(", ")", "ax", ".", "set_xticklabels", "(", "xticklabels", ",", "rotation", "=", "45", ",", "ha", "=", "\"right\"", ")", "ax", ".", "set_title", "(", "\"Number of {} per locus\"", ".", "format", "(", "title", ")", ",", "size", "=", "18", ")", "ylim", "=", "30", "if", "dp", "==", "\"RDP\"", "else", "100", "ax", ".", "set_ylim", "(", "0", ",", "ylim", ")", "yticklabels", "=", "[", "int", "(", "x", ")", "for", "x", "in", "ax", ".", "get_yticks", "(", ")", "]", "ax", ".", "set_yticklabels", "(", "yticklabels", ",", "family", "=", "'Helvetica'", ",", "size", "=", "14", ")", "root", "=", "fig", ".", "add_axes", "(", "[", 
"0", ",", "0", ",", "1", ",", "1", "]", ")", "pad", "=", ".04", "panel_labels", "(", "root", ",", "(", "(", "pad", ",", "1", "-", "pad", ",", "\"A\"", ")", ",", "(", "1", "/", "2.", "+", "pad", "/", "2", ",", "1", "-", "pad", ",", "\"B\"", ")", ",", "(", "pad", ",", ".5", "-", "pad", "/", "2", ",", "\"C\"", ")", ",", "(", "1", "/", "2.", "+", "pad", "/", "2", ",", ".5", "-", "pad", "/", "2", ",", "\"D\"", ")", ")", ")", "normalize_axes", "(", "root", ")", "image_name", "=", "\"depth.\"", "+", "iopts", ".", "format", "savefig", "(", "image_name", ",", "dpi", "=", "iopts", ".", "dpi", ",", "iopts", "=", "iopts", ")" ]
35.383562
0.001883
def normalize_name(name, overrides=None):
    '''Normalize the key name to title case.

    For example, ``normalize_name('content-id')`` will become ``Content-Id``

    Args:
        name (str): The name to normalize.
        overrides (set, sequence): A set or sequence containing keys that
            should be cased to themselves. For example, passing
            ``{'WARC-Type'}`` will normalize any key named "warc-type" to
            ``WARC-Type`` instead of the default ``Warc-Type``.

    Returns:
        str
    '''
    normalized_name = name.title()

    if overrides:
        override_map = dict([(name.title(), name) for name in overrides])
        return override_map.get(normalized_name, normalized_name)
    else:
        return normalized_name
[ "def", "normalize_name", "(", "name", ",", "overrides", "=", "None", ")", ":", "normalized_name", "=", "name", ".", "title", "(", ")", "if", "overrides", ":", "override_map", "=", "dict", "(", "[", "(", "name", ".", "title", "(", ")", ",", "name", ")", "for", "name", "in", "overrides", "]", ")", "return", "override_map", ".", "get", "(", "normalized_name", ",", "normalized_name", ")", "else", ":", "return", "normalized_name" ]
31.041667
0.001302
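Since the function above is complete, it can be exercised directly; a quick usage sketch:

print(normalize_name('content-id'))                          # 'Content-Id'
print(normalize_name('warc-type'))                           # 'Warc-Type'
print(normalize_name('warc-type', overrides={'WARC-Type'}))  # 'WARC-Type'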
def _get_basin_depth_term(self, C, sites, period):
        """
        In the case of the base model the basin depth term is switched off.
        Therefore we return an array of zeros.
        """
        return np.zeros(len(sites.vs30), dtype=float)
[ "def", "_get_basin_depth_term", "(", "self", ",", "C", ",", "sites", ",", "period", ")", ":", "return", "np", ".", "zeros", "(", "len", "(", "sites", ".", "vs30", ")", ",", "dtype", "=", "float", ")" ]
41
0.007968
def p_global_var(p):
    '''global_var : VARIABLE
                  | DOLLAR variable
                  | DOLLAR LBRACE expr RBRACE'''
    if len(p) == 2:
        p[0] = ast.Variable(p[1], lineno=p.lineno(1))
    elif len(p) == 3:
        p[0] = ast.Variable(p[2], lineno=p.lineno(1))
    else:
        p[0] = ast.Variable(p[3], lineno=p.lineno(1))
[ "def", "p_global_var", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "ast", ".", "Variable", "(", "p", "[", "1", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "elif", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "ast", ".", "Variable", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "else", ":", "p", "[", "0", "]", "=", "ast", ".", "Variable", "(", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
33.9
0.002874
def check_for_cancelled_events(self, d):
        """Check if any events are cancelled on the given date 'd'."""
        for event in self.events:
            for cn in event.cancellations.all():
                if cn.date == d:
                    event.title += ' (CANCELLED)'
[ "def", "check_for_cancelled_events", "(", "self", ",", "d", ")", ":", "for", "event", "in", "self", ".", "events", ":", "for", "cn", "in", "event", ".", "cancellations", ".", "all", "(", ")", ":", "if", "cn", ".", "date", "==", "d", ":", "event", ".", "title", "+=", "' (CANCELLED)'" ]
45.333333
0.00722
def _aws_encode_changebatch(o):
    '''
    Helper method to process a Route 53 change batch and encode the bits
    which need encoding: record set names, resource record values, and
    alias target DNS names.
    '''
    change_idx = 0
    while change_idx < len(o['Changes']):
        o['Changes'][change_idx]['ResourceRecordSet']['Name'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['Name'])
        if 'ResourceRecords' in o['Changes'][change_idx]['ResourceRecordSet']:
            rr_idx = 0
            while rr_idx < len(o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords']):
                o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords'][rr_idx]['Value'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords'][rr_idx]['Value'])
                rr_idx += 1
        if 'AliasTarget' in o['Changes'][change_idx]['ResourceRecordSet']:
            o['Changes'][change_idx]['ResourceRecordSet']['AliasTarget']['DNSName'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['AliasTarget']['DNSName'])
        change_idx += 1
    return o
[ "def", "_aws_encode_changebatch", "(", "o", ")", ":", "change_idx", "=", "0", "while", "change_idx", "<", "len", "(", "o", "[", "'Changes'", "]", ")", ":", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'Name'", "]", "=", "aws_encode", "(", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'Name'", "]", ")", "if", "'ResourceRecords'", "in", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", ":", "rr_idx", "=", "0", "while", "rr_idx", "<", "len", "(", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'ResourceRecords'", "]", ")", ":", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'ResourceRecords'", "]", "[", "rr_idx", "]", "[", "'Value'", "]", "=", "aws_encode", "(", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'ResourceRecords'", "]", "[", "rr_idx", "]", "[", "'Value'", "]", ")", "rr_idx", "+=", "1", "if", "'AliasTarget'", "in", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", ":", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'AliasTarget'", "]", "[", "'DNSName'", "]", "=", "aws_encode", "(", "o", "[", "'Changes'", "]", "[", "change_idx", "]", "[", "'ResourceRecordSet'", "]", "[", "'AliasTarget'", "]", "[", "'DNSName'", "]", ")", "change_idx", "+=", "1", "return", "o" ]
63.125
0.005854
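For reference, the nested shape the walker above expects; a hypothetical Route 53 change batch with invented values (only the keys matter for the traversal):

change_batch = {
    'Changes': [
        {
            'Action': 'UPSERT',
            'ResourceRecordSet': {
                'Name': 'wildcard.*.example.com.',  # gets aws_encode()d
                'Type': 'TXT',
                'ResourceRecords': [
                    {'Value': 'some *value*'},      # each Value gets aws_encode()d
                ],
            },
        },
        {
            'Action': 'UPSERT',
            'ResourceRecordSet': {
                'Name': 'alias.example.com.',
                'Type': 'A',
                'AliasTarget': {'DNSName': 'target.*.example.com.'},  # DNSName gets aws_encode()d
            },
        },
    ],
}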
def visitObjectExpr(self, ctx: jsgParser.ObjectExprContext):
        """ objectExpr: OBRACE membersDef? CBRACE
                        OBRACE (LEXER_ID_REF | ANY)? MAPSTO valueType ebnfSuffix? CBRACE
        """
        if not self._name:
            self._name = self._context.anon_id()
        if ctx.membersDef():
            self.visitChildren(ctx)
        elif ctx.MAPSTO():
            if ctx.LEXER_ID_REF():
                self._map_name_type = as_token(ctx)
            # Any and absent mean the same thing
            self._map_valuetype = JSGValueType(self._context, ctx.valueType())
            if ctx.ebnfSuffix():
                self._map_ebnf = JSGEbnf(self._context, ctx.ebnfSuffix())
[ "def", "visitObjectExpr", "(", "self", ",", "ctx", ":", "jsgParser", ".", "ObjectExprContext", ")", ":", "if", "not", "self", ".", "_name", ":", "self", ".", "_name", "=", "self", ".", "_context", ".", "anon_id", "(", ")", "if", "ctx", ".", "membersDef", "(", ")", ":", "self", ".", "visitChildren", "(", "ctx", ")", "elif", "ctx", ".", "MAPSTO", "(", ")", ":", "if", "ctx", ".", "LEXER_ID_REF", "(", ")", ":", "self", ".", "_map_name_type", "=", "as_token", "(", "ctx", ")", "# Any and absent mean the same thing", "self", ".", "_map_valuetype", "=", "JSGValueType", "(", "self", ".", "_context", ",", "ctx", ".", "valueType", "(", ")", ")", "if", "ctx", ".", "ebnfSuffix", "(", ")", ":", "self", ".", "_map_ebnf", "=", "JSGEbnf", "(", "self", ".", "_context", ",", "ctx", ".", "ebnfSuffix", "(", ")", ")" ]
45.866667
0.005698
def get_column_for_modelfield(model_field):
    """ Return the built-in Column class for a model field class. """

    # If the field points to another model, we want to get the pk field of that other model and use
    # that as the real field.  It is possible that a ForeignKey points to a model with table
    # inheritance, however, so we need to traverse the internal OneToOneField as well, so this will
    # climb the 'pk' field chain until we have something real.
    while model_field.related_model:
        model_field = model_field.related_model._meta.pk
    for ColumnClass, modelfield_classes in COLUMN_CLASSES:
        if isinstance(model_field, tuple(modelfield_classes)):
            return ColumnClass
[ "def", "get_column_for_modelfield", "(", "model_field", ")", ":", "# If the field points to another model, we want to get the pk field of that other model and use", "# that as the real field. It is possible that a ForeignKey points to a model with table", "# inheritance, however, so we need to traverse the internal OneToOneField as well, so this will", "# climb the 'pk' field chain until we have something real.", "while", "model_field", ".", "related_model", ":", "model_field", "=", "model_field", ".", "related_model", ".", "_meta", ".", "pk", "for", "ColumnClass", ",", "modelfield_classes", "in", "COLUMN_CLASSES", ":", "if", "isinstance", "(", "model_field", ",", "tuple", "(", "modelfield_classes", ")", ")", ":", "return", "ColumnClass" ]
58.833333
0.005579
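A toy sketch of the pk-climbing loop with stand-in classes (no Django required, all names hypothetical), showing how a ForeignKey to a child of a multi-table-inheritance model resolves to the root pk:

class FakeField:
    def __init__(self, name, related_model=None):
        self.name = name
        self.related_model = related_model

class FakeMeta:
    def __init__(self, pk):
        self.pk = pk

class FakeModel:
    def __init__(self, pk):
        self._meta = FakeMeta(pk)

# Parent's pk is a plain AutoField-alike; child's pk is a OneToOneField to parent.
parent = FakeModel(FakeField('id'))
child = FakeModel(FakeField('parent_ptr', related_model=parent))

model_field = FakeField('fk_to_child', related_model=child)
while model_field.related_model:
    model_field = model_field.related_model._meta.pk
print(model_field.name)  # 'id', after climbing fk -> child.pk -> parent.pk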