code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def minmax(self, minimum=None, maximum=None): if minimum is None and maximum is None: return {"minimum": self._minimum, "maximum": self._maximum}; if minimum != None: if self._type in [, , , , ]: if not isinstance(minimum, basestring) \ or not _typeToRegex[self._type].match(minimum): raise ValueError() elif self._type in [, , , ]: if not isinstance(minimum, (int, long)): if minimum < 0: raise ValueError() elif self._type == : try: minimum = float(minimum) except ValueError: raise ValueError() elif self._type == : else: raise TypeError( + self._type) self._minimum = minimum if maximum != None: if self._type in [, , , ]: if not isinstance(maximum, basestring) \ or not _typeToRegex[self._type].match(maximum): raise ValueError() elif self._type in [, , , ]: if not isinstance(maximum, (int, long)): if maximum < 0: raise ValueError() elif self._type == : try: minimum = float(minimum) except ValueError: raise ValueError() elif self._type == : else: raise TypeError( + self._type) if self._minimum is not None: if self._type == : if self.__compare_ips(self._minimum, maximum) == 1: raise ValueError() else: if self._minimum > maximum: raise ValueError() self._maximum = maximum
Min/Max Sets or gets the minimum and/or maximum values for the Node. For getting, returns {"minimum":mixed,"maximum":mixed} Arguments: minimum {mixed} -- The minimum value maximum {mixed} -- The maximum value Raises: TypeError, ValueError Returns: None | dict
def set_properties(self, properties, recursive=True):
    """Add new or modify existing properties on this item.

    Args:
        properties: dict of property names to values; values may be a
            list/tuple to set multiple values for one key.
        recursive: on folders, property attachment is recursive by
            default; set False to disable.
    """
    if not properties:
        return
    max_batch = 50
    if len(properties) <= max_batch:
        self._accessor.set_properties(self, properties, recursive)
        return
    # Large property sets are sent in batches to keep requests small.
    for batch in chunks(properties, max_batch):
        self._accessor.set_properties(self, batch, recursive)
Adds new or modifies existing properties listed in properties properties - is a dict which contains the property names and values to set. Property values can be a list or tuple to set multiple values for a key. recursive - on folders property attachment is recursive by default. It is possible to force recursive behavior.
def remove(self, element):
    """Return a new PSet with element removed.

    Raises KeyError if element is not present.

    >>> s1 = s(1, 2)
    >>> s1.remove(2)
    pset([1])
    """
    if element in self._map:
        return self.evolver().remove(element).persistent()
    # Bug fix: the original message had no format placeholder, so the
    # "%" operator raised TypeError instead of the intended KeyError.
    raise KeyError("Element '%s' not present in PSet" % repr(element))
Return a new PSet with element removed. Raises KeyError if element is not present. >>> s1 = s(1, 2) >>> s1.remove(2) pset([1])
def expected_counts_stationary(T, n, mu=None):
    r"""Expected transition counts for Markov chain in equilibrium.

    Since mu is stationary for T we have

    .. math::

        E(C^{(n)}) = n \, \mathrm{diag}(\mu) \, T.

    Parameters
    ----------
    T : (M, M) sparse matrix
        Transition matrix.
    n : int
        Number of steps for chain.
    mu : (M,) ndarray (optional)
        Stationary distribution for T. If mu is not specified it will be
        computed via diagonalization of T.

    Returns
    -------
    EC : (M, M) sparse matrix
        Expected value for transition counts after n steps.
    """
    # Bug fix: a stray `r` expression (the residue of the raw-docstring
    # prefix) raised NameError at runtime; the docstring is restored.
    if n <= 0:
        # No steps taken: the expected count matrix is all zeros.
        return coo_matrix(T.shape, dtype=float)
    if mu is None:
        mu = stationary_distribution(T)
    D_mu = diags(mu, 0)
    return n * D_mu.dot(T)
r"""Expected transition counts for Markov chain in equilibrium. Since mu is stationary for T we have .. math:: E(C^{(n)})=n diag(mu)*T. Parameters ---------- T : (M, M) sparse matrix Transition matrix. n : int Number of steps for chain. mu : (M,) ndarray (optional) Stationary distribution for T. If mu is not specified it will be computed via diagonalization of T. Returns ------- EC : (M, M) sparse matrix Expected value for transition counts after N steps.
def arcs_missing(self):
    """Return a sorted list of the arcs in the code not executed."""
    executed = self.arcs_executed()
    missing = []
    for arc in self.arc_possibilities():
        if arc in executed:
            continue
        # Arcs starting on a line excluded from branch analysis are ignored.
        if arc[0] in self.no_branch:
            continue
        missing.append(arc)
    missing.sort()
    return missing
Returns a sorted list of the arcs in the code not executed.
def make_perfect_cd(wcs):
    """Create a perfect (square, orthogonal, undistorted) CD matrix from
    the input WCS.
    """
    # Pixel scale in degrees per pixel; orientation in radians.
    scale = wcs.pscale / 3600.
    theta = np.deg2rad(wcs.orientat)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rotation = np.array([[-cos_t, sin_t],
                         [sin_t, cos_t]])
    return scale * rotation
Create a perfect (square, orthogonal, undistorted) CD matrix from the input WCS.
def load(path=None, **kwargs): device_namesalt://production/network/routers/config.setdevice_namesalt://templates/replace_config.confdevice_namesalt://my_new_configuration.confdevice_namesalt://syslog_template.conf{"syslog_host": "10.180.222.7"} conn = __proxy__[]() ret = {} ret[] = True if path is None: ret[] = \ ret[] = False return ret op = {} if in kwargs: if kwargs[]: if isinstance(kwargs[][-1], dict): op.update(kwargs[][-1]) else: op.update(kwargs) template_vars = {} if "template_vars" in op: template_vars = op["template_vars"] template_cached_path = salt.utils.files.mkstemp() __salt__[]( path, template_cached_path, template_vars=template_vars) if not os.path.isfile(template_cached_path): ret[] = ret[] = False return ret if os.path.getsize(template_cached_path) == 0: ret[] = ret[] = False return ret op[] = template_cached_path if not in op: if path.endswith(): template_format = elif path.endswith(): template_format = else: template_format = op[] = template_format if in op and op[]: op[] = False del op[] elif in op and op[]: op[] = True elif in op and not op[]: op[] = True del op[] try: conn.cu.load(**op) ret[] = "Successfully loaded the configuration." except Exception as exception: ret[] = .format( exception) ret[] = op[] ret[] = False return ret finally: salt.utils.files.safe_rm(template_cached_path) return ret
Loads the configuration from the file provided onto the device. path (required) Path where the configuration/template file is present. If the file has a ``.conf`` extension, the content is treated as text format. If the file has a ``.xml`` extension, the content is treated as XML format. If the file has a ``.set`` extension, the content is treated as Junos OS ``set`` commands. overwrite : False Set to ``True`` if you want this file is to completely replace the configuration file. replace : False Specify whether the configuration file uses ``replace:`` statements. If ``True``, only those statements under the ``replace`` tag will be changed. format Determines the format of the contents update : False Compare a complete loaded configuration against the candidate configuration. For each hierarchy level or configuration object that is different in the two configurations, the version in the loaded configuration replaces the version in the candidate configuration. When the configuration is later committed, only system processes that are affected by the changed configuration elements parse the new configuration. This action is supported from PyEZ 2.1. template_vars Variables to be passed into the template processing engine in addition to those present in pillar, the minion configuration, grains, etc. You may reference these variables in your template like so: .. code-block:: jinja {{ template_vars["var_name"] }} CLI Examples: .. code-block:: bash salt 'device_name' junos.load 'salt://production/network/routers/config.set' salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
def graph_structure(self, x, standalone=True): if standalone: x = tf.concat(tf.split(x, 2, axis=0), axis=1) with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding=, strides=2, kernel_size=3, data_format=), \ argscope([tf.layers.conv2d_transpose], padding=, activation=tf.identity, data_format=, strides=2, kernel_size=4): x = tf.layers.conv2d(pad(x, 3), 64, kernel_size=7, name=) conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name=) x = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name=) conv3 = tf.layers.conv2d(pad(x, 1), 256, name=, strides=1) x = tf.layers.conv2d(pad(conv3, 1), 512, name=) conv4 = tf.layers.conv2d(pad(x, 1), 512, name=, strides=1) x = tf.layers.conv2d(pad(conv4, 1), 512, name=) conv5 = tf.layers.conv2d(pad(x, 1), 512, name=, strides=1) x = tf.layers.conv2d(pad(conv5, 1), 1024, name=) conv6 = tf.layers.conv2d(pad(x, 1), 1024, name=, strides=1) flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name=, strides=1, activation=tf.identity) flow6_up = tf.layers.conv2d_transpose(flow6, 2, name=, use_bias=False) x = tf.layers.conv2d_transpose(conv6, 512, name=, activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat5 = tf.concat([conv5, x, flow6_up], axis=1, name=) flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name=, strides=1, activation=tf.identity) flow5_up = tf.layers.conv2d_transpose(flow5, 2, name=, use_bias=False) x = tf.layers.conv2d_transpose(concat5, 256, name=, activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat4 = tf.concat([conv4, x, flow5_up], axis=1, name=) flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name=, strides=1, activation=tf.identity) flow4_up = tf.layers.conv2d_transpose(flow4, 2, name=, use_bias=False) x = tf.layers.conv2d_transpose(concat4, 128, name=, activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat3 = tf.concat([conv3, x, flow4_up], axis=1, name=) flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name=, strides=1, activation=tf.identity) flow3_up = tf.layers.conv2d_transpose(flow3, 2, 
name=, use_bias=False) x = tf.layers.conv2d_transpose(concat3, 64, name=, activation=lambda x: tf.nn.leaky_relu(x, 0.1)) concat2 = tf.concat([conv2, x, flow3_up], axis=1, name=) flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name=, strides=1, activation=tf.identity) return tf.identity(flow2, name=)
Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0. Args: x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation of 5 tensors of [3, 3, 3, 2, 1] channels. standalone: If True, this model is used to predict flow from two inputs. If False, this model is used as part of the FlowNet2.
def handle_request(self):
    """Handle a single HTTP request.

    The actual HTTP request is handled using a different thread.
    """
    # Effective timeout: the stricter of the socket timeout and
    # self.timeout when both are set.
    timeout = self.socket.gettimeout()
    if timeout is None:
        timeout = self.timeout
    elif self.timeout is not None:
        timeout = min(timeout, self.timeout)
    ctime = get_time()
    done_req = False
    # Wake from select() at least every shutdown_latency seconds so a
    # shutdown request (self.done) is noticed promptly.
    shutdown_latency = self.shutdown_latency
    if timeout is not None:
        shutdown_latency = min(shutdown_latency, timeout) \
            if shutdown_latency is not None else timeout
    # Loop until shutdown, a request was handled, or the timeout elapsed.
    # NOTE(review): timeout == 0 appears to mean "poll once" — the loop
    # breaks after the first select round; confirm against callers.
    while not (self.done or done_req) and (timeout is None or timeout == 0 or
                                           (get_time() - ctime) < timeout):
        try:
            fd_sets = select.select([self], [], [], shutdown_latency)
        except (OSError, select.error) as e:
            if e.args[0] != errno.EINTR:
                raise
            # Interrupted system call: retry with empty fd sets.
            fd_sets = [[], [], []]
        for _fd in fd_sets[0]:
            done_req = True
            self._handle_request_noblock()
        if timeout == 0:
            break
    if not (self.done or done_req):
        # No request arrived within the timeout window.
        self.handle_timeout()
Handles an HTTP request. The actual HTTP request is handled using a different thread.
def paragraphs(self):
    """Immutable sequence of |_Paragraph| instances corresponding to the
    paragraphs in this text frame.

    A text frame always contains at least one paragraph.
    """
    # Feed the generator to tuple() directly; the intermediate list in the
    # original was a needless allocation.
    return tuple(_Paragraph(p, self) for p in self._txBody.p_lst)
Immutable sequence of |_Paragraph| instances corresponding to the paragraphs in this text frame. A text frame always contains at least one paragraph.
def addDraftThingType(self, thingTypeId, name = None, description = None, schemaId = None, metadata = None): draftThingTypesUrl = ApiClient.draftThingTypesUrl % (self.host) payload = { : thingTypeId, : name, : description, : schemaId, : metadata} r = requests.post(draftThingTypesUrl, auth=self.credentials, data=json.dumps(payload), headers = {: }, verify=self.verify) status = r.status_code if status == 201: self.logger.debug("The draft thing Type is created") return r.json() elif status == 400: raise ibmiotf.APIException(400, "Invalid request (No body, invalid JSON, unexpected key, bad value)", r.json()) elif status == 401: raise ibmiotf.APIException(401, "The authentication token is empty or invalid", None) elif status == 403: raise ibmiotf.APIException(403, "The authentication method is invalid or the api key used does not exist", None) elif status == 409: raise ibmiotf.APIException(409, "The draft thing type already exists", r.json()) elif status == 500: raise ibmiotf.APIException(500, "Unexpected error", None) else: raise ibmiotf.APIException(None, "Unexpected error", None)
Creates a thing type. It accepts thingTypeId (string), name (string), description (string), schemaId(string) and metadata(dict) as parameter In case of failure it throws APIException
def _parse_ldap(ldap_filter):
    """Parses the given LDAP filter string.

    :param ldap_filter: An LDAP filter string
    :return: An LDAPFilter object, None if the filter was empty
    :raise ValueError: The LDAP filter string is invalid
    """
    if ldap_filter is None:
        return None
    assert is_string(ldap_filter)
    ldap_filter = ldap_filter.strip()
    if not ldap_filter:
        return None
    escaped = False
    filter_len = len(ldap_filter)
    root = None
    # Stack of LDAPFilter operator nodes currently open.
    stack = []
    # Stack of start indices of criteria (comparisons) currently open.
    subfilter_stack = []
    idx = 0
    while idx < filter_len:
        if not escaped:
            if ldap_filter[idx] == "(":
                # Opening parenthesis: either an operator or a plain
                # comparison criterion follows.
                idx = _skip_spaces(ldap_filter, idx + 1)
                if idx == -1:
                    raise ValueError(
                        "Missing filter operator: {0}".format(ldap_filter)
                    )
                operator = _compute_operation(ldap_filter, idx)
                if operator is not None:
                    stack.append(LDAPFilter(operator))
                else:
                    # Not an operator: remember where the criterion starts.
                    subfilter_stack.append(idx)
            elif ldap_filter[idx] == ")":
                if subfilter_stack:
                    # End of a criterion: parse it and attach it to the
                    # enclosing operator, or wrap it in an implicit AND.
                    start_idx = subfilter_stack.pop()
                    criterion = _parse_ldap_criteria(
                        ldap_filter, start_idx, idx
                    )
                    if stack:
                        top = stack.pop()
                        top.append(criterion)
                        stack.append(top)
                    else:
                        root = LDAPFilter(AND)
                        root.append(criterion)
                elif stack:
                    # End of an operator sub-filter: pop it and attach it to
                    # its parent, or make it the root if it is outermost.
                    ended_filter = stack.pop()
                    if stack:
                        top = stack.pop()
                        top.append(ended_filter)
                        stack.append(top)
                    else:
                        root = ended_filter
                else:
                    raise ValueError(
                        "Too many end of parenthesis:{0}: {1}".format(
                            idx, ldap_filter[idx:]
                        )
                    )
            elif ldap_filter[idx] == "\\":
                # Next character is escaped: skip special handling.
                escaped = True
        else:
            escaped = False
        idx += 1
    if root is None:
        # No parenthesized expression was completed at all.
        raise ValueError("Invalid filter string: {0}".format(ldap_filter))
    return root.normalize()
Parses the given LDAP filter string :param ldap_filter: An LDAP filter string :return: An LDAPFilter object, None if the filter was empty :raise ValueError: The LDAP filter string is invalid
def list_firewall_rules(self, server_name):
    """Retrieves the set of firewall rules for an Azure SQL Database Server.

    server_name: Name of the server.
    """
    # Restored the stripped parameter-name literal: _validate_not_none
    # takes the argument name to build its error message.
    _validate_not_none('server_name', server_name)
    response = self._perform_get(
        self._get_firewall_rules_path(server_name), None)
    return _MinidomXmlToObject.parse_service_resources_response(
        response, FirewallRule)
Retrieves the set of firewall rules for an Azure SQL Database Server. server_name: Name of the server.
def orient_import2(self, event):
    """Open the dialog that imports an AzDip format file into the
    working directory.
    """
    parent_frame = self.parent
    pmag_menu_dialogs.ImportAzDipFile(parent_frame, parent_frame.WD)
initialize window to import an AzDip format file into the working directory
def parse_case_snake_to_camel(snake, upper_first=True):
    """Convert a string from snake_case to CamelCase.

    :param str snake: The snake_case string to convert.
    :param bool upper_first: Whether or not to capitalize the first
        character of the string.
    :return: The CamelCase version of string.
    :rtype: str
    """
    # Restored the stripped literals: split on underscores, join with ''.
    parts = snake.split('_')
    first = parts[0]
    if upper_first:
        first = first.title()
    return first + ''.join(word.title() for word in parts[1:])
Convert a string from snake_case to CamelCase. :param str snake: The snake_case string to convert. :param bool upper_first: Whether or not to capitalize the first character of the string. :return: The CamelCase version of string. :rtype: str
def solve_minimize(
        self, func, weights, constraints, lower_bound=0.0,
        upper_bound=1.0, func_deriv=False):
    """Returns the solution to a minimization problem.

    Args:
        func: objective function to minimize.
        weights: initial guess (one weight per supported coin).
        constraints: scipy-style constraint dicts.
        lower_bound / upper_bound: per-weight bounds.
        func_deriv: Jacobian of func, or False for numeric approximation.
    """
    bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS)
    # NOTE(review): the method/options literals were stripped during
    # extraction; 'SLSQP' is the scipy method supporting both bounds and
    # constraints, and disp=False suppresses convergence output — confirm
    # against upstream.
    return minimize(
        fun=func, x0=weights, jac=func_deriv, bounds=bounds,
        constraints=constraints, method='SLSQP',
        options={'disp': False}
    )
Returns the solution to a minimization problem.
def create(self, input=None, live_stream=False, outputs=None, options=None):
    """Creates a transcoding job. Here are some examples::

        job.create('s3://zencodertesting/test.mov')
        job.create(live_stream=True)
        job.create(input='http://example.com/input.mov',
                   outputs=({'label': 'test output'},))

    https://app.zencoder.com/docs/api/jobs/create
    """
    # NOTE(review): the dict-key literals were stripped during extraction;
    # 'outputs' and 'live_stream' are reconstructed from the parameter
    # names and the Zencoder job API — confirm against upstream.
    data = {"input": input, "test": self.test}
    if outputs:
        data["outputs"] = outputs
    if options:
        data.update(options)
    if live_stream:
        data["live_stream"] = live_stream
    return self.post(self.base_url, body=json.dumps(data))
Creates a transcoding job. Here are some examples:: job.create('s3://zencodertesting/test.mov') job.create(live_stream=True) job.create(input='http://example.com/input.mov', outputs=({'label': 'test output'},)) https://app.zencoder.com/docs/api/jobs/create
def rdkitmol_Hs(self):
    r'''RDKit object of the chemical, with hydrogen. If RDKit is not
    available, holds None.

    For examples of what can be done with RDKit, see
    `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
    '''
    # Bug fix: a stray `r` expression (residue of the raw-docstring prefix)
    # raised NameError; the docstring is restored. The bare `except:` is
    # narrowed to Exception so KeyboardInterrupt/SystemExit propagate.
    if self.__rdkitmol_Hs:
        return self.__rdkitmol_Hs
    try:
        self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
        return self.__rdkitmol_Hs
    except Exception:
        # RDKit unavailable or the molecule could not be processed.
        return None
r'''RDKit object of the chemical, with hydrogen. If RDKit is not available, holds None. For examples of what can be done with RDKit, see `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
def makeObjectFeed( paginator, objectToXMLFunction, feedId, title, webRoot, idAttr="id", nameAttr="name", dateAttr=None, request=None, page=1, count=20, author=APP_AUTHOR): listSize = paginator.count if listSize: object_list = paginator.page(page).object_list else: object_list = [] count = int(count) originalId = feedId idParts = feedId.split("?", 1) if len(idParts) == 2: feedId = idParts[0] if request: GETStruct = request.GET else: GETStruct = False feedTag = etree.Element(ATOM + "feed", nsmap=ATOM_NSMAP) idTag = etree.SubElement(feedTag, ATOM + "id") idTag.text = "%s/%s" % (webRoot, feedId) titleTag = etree.SubElement(feedTag, ATOM + "title") titleTag.text = title if author: authorTag = etree.SubElement(feedTag, ATOM + "author") nameTag = etree.SubElement(authorTag, ATOM + "name") urlTag = etree.SubElement(authorTag, ATOM + "uri") nameTag.text = author.get(, ) urlTag.text = author.get(, ) updatedTag = etree.SubElement(feedTag, ATOM + "updated") updatedTag.text = xsDateTime_format(localize_datetime(datetime.now())) linkTag = etree.SubElement(feedTag, ATOM + "link") linkTag.set("rel", "self") if not request or not request.META[]: linkTag.set("href", "%s/%s" % (webRoot, feedId)) else: linkTag.set( "href", "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(request.GET, doseq=True) ) ) endLink = etree.SubElement(feedTag, ATOM + "link") endLink.set("rel", "last") if GETStruct: endLinkGS = GETStruct.copy() else: endLinkGS = {} endLinkGS.update({"page": paginator.num_pages}) endLink.set( "href", "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(endLinkGS, doseq=True) ) ) startLink = etree.SubElement(feedTag, ATOM + "link") startLink.set("rel", "first") if GETStruct: startLinkGS = GETStruct.copy() else: startLinkGS = {} startLinkGS.update({"page": paginator.page_range[0]}) startLink.set( "href", "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(startLinkGS, doseq=True) ) ) if paginator.page(page).has_next(): nextLink = etree.SubElement(feedTag, ATOM + "link") 
nextLink.set("rel", "next") if GETStruct: nextLinkGS = GETStruct.copy() else: nextLinkGS = {} nextLinkGS.update({"page": paginator.page(page).next_page_number()}) nextLinkText = "%s/%s?%s" % ( webRoot, feedId, urllib.urlencode(nextLinkGS, doseq=True) ) nextLink.set("href", nextLinkText) for o in object_list: objectXML = objectToXMLFunction(o) if dateAttr: dateStamp = getattr(o, dateAttr) else: dateStamp = None althref = feedId.strip().split()[-1] althref = % ( webRoot, althref, getattr(o, idAttr) ) objectEntry = wrapAtom( xml=objectXML, id= % (webRoot, originalId, getattr(o, idAttr)), title=getattr(o, nameAttr), updated=dateStamp, alt=althref ) feedTag.append(objectEntry) return feedTag
Take a list of some kind of object, a conversion function, an id and a title Return XML representing an ATOM feed
def canonical_order(self):
    """The vertices in a canonical or normalized order.

    Returns a list of vertices in an order that does not depend on the
    initial vertex order, only on the connectivity and the return values of
    self.get_vertex_string. Only vertices involved in edges are included.
    The result can be given as first argument to self.get_subgraph (with
    reduce=True) to obtain a complete canonical graph.
    """
    # Choose a canonical starting vertex: max() over tuples favours vertices
    # with few equivalents (hence the negated length), then breaks ties via
    # the vertex string and fingerprint; the vertex index is the last-resort
    # tie-breaker and is what is extracted with [-1].
    starting_vertex = max(
        (
            -len(self.equivalent_vertices[vertex]),
            self.get_vertex_string(vertex),
            self.vertex_fingerprints[vertex].tobytes(),
            vertex
        )
        for vertex in self.central_vertices
    )[-1]
    # Order vertices by breadth-first distance from the start, pushing
    # vertices with few equivalents to the front; vertices with no
    # neighbors (not involved in any edge) are excluded.
    l = [
        [
            -distance,
            -len(self.equivalent_vertices[vertex]),
            self.get_vertex_string(vertex),
            self.vertex_fingerprints[vertex].tobytes(),
            vertex
        ]
        for vertex, distance in self.iter_breadth_first(starting_vertex)
        if len(self.neighbors[vertex]) > 0
    ]
    l.sort(reverse=True)
    # A tie in everything but the vertex index would make the order depend
    # on the input ordering, i.e. not canonical — bail out explicitly.
    for i in range(1, len(l)):
        if l[i][:-1] == l[i-1][:-1]:
            raise NotImplementedError
    return [record[-1] for record in l]
The vertices in a canonical or normalized order. This routine will return a list of vertices in an order that does not depend on the initial order, but only depends on the connectivity and the return values of the function self.get_vertex_string. Only the vertices that are involved in edges will be included. The result can be given as first argument to self.get_subgraph, with reduce=True as second argument. This will return a complete canonical graph. The routine is designed not to use symmetry relations that are obtained with the GraphSearch routine. We also tried to create an ordering that feels like natural, i.e. starting in the center and pushing vertices with few equivalents to the front. If necessary, the nature of the vertices and their bonds to atoms closer to the center will also play a role, but only as a last resort.
def _get_upper_bound(self):
    r"""Return an upper bound on the eigenvalues of the Laplacian."""
    # NOTE(review): the string literals in this method were stripped during
    # extraction; 'normalized'/'combinatorial', method='average', and the
    # error message are reconstructed from the PyGSP graph API — confirm
    # against upstream.
    if self.lap_type == 'normalized':
        # Eigenvalues of the normalized Laplacian lie in [0, 2].
        return 2
    elif self.lap_type == 'combinatorial':
        bounds = []
        bounds += [self.n_vertices * np.max(self.W)]
        bounds += [2 * np.max(self.dw)]
        if self.n_edges > 0:
            sources, targets, _ = self.get_edge_list()
            bounds += [np.max(self.dw[sources] + self.dw[targets])]
            if not self.is_directed():
                W = self.W
            else:
                W = utils.symmetrize(self.W, method='average')
            m = W.dot(self.dw) / self.dw
            bounds += [np.max(self.dw + m)]
        return min(bounds)
    else:
        raise ValueError(
            'Unknown Laplacian type {}'.format(self.lap_type))
r"""Return an upper bound on the eigenvalues of the Laplacian.
def connect(self, f, mode=None):
    """Connect an object `f` to the signal.

    `mode` is a decorator applied to `f`; its result is what gets stored
    and called on emission. Defaults to :attr:`STRONG`. Returns an opaque
    token usable with :meth:`disconnect`.
    """
    if not mode:
        mode = self.STRONG
    self.logger.debug("connecting %r with mode %r", f, mode)
    wrapped = mode(f)
    return self._connect(wrapped)
Connect an object `f` to the signal. The type the object needs to have depends on `mode`, but usually it needs to be a callable. :meth:`connect` returns an opaque token which can be used with :meth:`disconnect` to disconnect the object from the signal. The default value for `mode` is :attr:`STRONG`. Any decorator can be used as argument for `mode` and it is applied to `f`. The result is stored internally and is what will be called when the signal is being emitted. If the result of `mode` returns a false value during emission, the connection is removed. .. note:: The return values required by the callable returned by `mode` and the one required by a callable passed to `f` using the predefined modes are complementary! A callable `f` needs to return true to be removed from the connections, while a callable returned by the `mode` decorator needs to return false. Existing modes are listed below.
def accountSummary(self, account: str = '') -> 'List[AccountValue]':
    """List of account values for the given account,
    or of all accounts if account is left blank.

    This method is blocking on first run, non-blocking after that.

    Args:
        account: If specified, filter for this account name.
    """
    # Restored the stripped default: '' selects all accounts (see
    # docstring). The return annotation is quoted so the module can be
    # imported without the project's type definitions.
    if not self.wrapper.acctSummary:
        # First call: fetch the summary synchronously.
        self.reqAccountSummary()
    if account:
        return [v for v in self.wrapper.acctSummary.values()
                if v.account == account]
    return list(self.wrapper.acctSummary.values())
List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name.
def lpush(self, key, *values):
    """Insert all the specified values at the head of the list stored
    at key.

    :param key: The list's key
    :param values: One or more values to insert at the beginning of the
        list; elements are inserted one after the other, leftmost to
        rightmost.
    :returns: the length of the list after the push operations
    :rtype: int
    """
    # Restored the stripped command literal: the Redis command is LPUSH.
    return self._execute([b'LPUSH', key] + list(values))
Insert all the specified values at the head of the list stored at key. :param key: The list's key :type key: :class:`str`, :class:`bytes` :param values: One or more positional arguments to insert at the beginning of the list. Each value is inserted at the beginning of the list individually (see discussion below). :returns: the length of the list after push operations :rtype: int :raises: :exc:`~tredis.exceptions.TRedisException` If `key` does not exist, it is created as empty list before performing the push operations. When key holds a value that is not a list, an error is returned. It is possible to push multiple elements using a single command call just specifying multiple arguments at the end of the command. Elements are inserted one after the other to the head of the list, from the leftmost element to the rightmost element. So for instance ``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list containing ``c`` as first element, ``b`` as second element and ``a`` as third element. .. note:: **Time complexity**: ``O(1)``
def verify(path):
    """Verify that `path` has the qpimage series file format."""
    try:
        h5 = h5py.File(path, mode="r")
        qpi0 = h5["qpi_0"]
    except (OSError, KeyError):
        return False
    # NOTE(review): the HDF5 handle is not closed here, matching the
    # original behavior — confirm whether callers rely on that.
    has_required = ("qpimage version" in qpi0.attrs
                    and "phase" in qpi0
                    and "amplitude" in qpi0
                    and "bg_data" in qpi0["phase"]
                    and "bg_data" in qpi0["amplitude"])
    return has_required
Verify that `path` has the qpimage series file format
def _pfp__build(self, stream=None, save_offset=False):
    """Build the field and write the result into the stream.

    :stream: An IO stream that can be written to
    :save_offset: If True, record the stream position before writing
    :returns: When a stream is given, the number of bytes written;
        otherwise the packed data (bytes for whole-byte fields, a bit
        list for bitfields).
    """
    if stream is not None and save_offset:
        self._pfp__offset = stream.tell()
    if self.bitsize is None:
        # Whole-byte field: pack with the field's own endianness.
        data = struct.pack(
            "{}{}".format(self.endian, self.format),
            self._pfp__value
        )
        if stream is not None:
            stream.write(data)
            return len(data)
        else:
            return data
    else:
        # Bitfield: pack big-endian so the value's bits end up in the
        # low-order bytes, then slice out exactly `bitsize` bits.
        data = struct.pack(
            "{}{}".format(BIG_ENDIAN, self.format),
            self._pfp__value
        )
        num_bytes = int(math.ceil(self.bitsize / 8.0))
        bit_data = data[-num_bytes:]
        raw_bits = bitwrap.bytes_to_bits(bit_data)
        bits = raw_bits[-self.bitsize:]
        if stream is not None:
            self.bitfield_rw.write_bits(stream, bits, self.bitfield_padded,
                                        self.bitfield_left_right,
                                        self.endian)
            # NOTE(review): integer division counts whole bytes only, so
            # bits not filling a full byte are not reflected in the return
            # value — confirm this is intended upstream.
            return len(bits) // 8
        else:
            return bits
Build the field and write the result into the stream :stream: An IO stream that can be written to :returns: None
def clear_nonexistent_import_errors(self, session):
    """Clears import errors for files that no longer exist.

    :param session: session for ORM operations
    :type session: sqlalchemy.orm.session.Session
    """
    query = session.query(errors.ImportError)
    if self._file_paths:
        query = query.filter(
            ~errors.ImportError.filename.in_(self._file_paths)
        )
    # NOTE(review): the stripped literal was reconstructed as 'fetch',
    # matching Airflow's bulk-delete usage so in-session objects stay
    # consistent after the delete — confirm against upstream.
    query.delete(synchronize_session='fetch')
    session.commit()
Clears import errors for files that no longer exist. :param session: session for ORM operations :type session: sqlalchemy.orm.session.Session
def regex_in(pl,regex): b1c3dxab15cxx1xy2b1c3dxab15cxx1xy2 def cond_func(ele,regex): m = regex.search(ele) if(m == None): return(False) else: return(True) cond = some(pl,cond_func,regex)[] return(cond)
regex = re.compile("^[a-z]+$") pl = ['b1c3d','xab15cxx','1x','y2'] regex_in(pl,regex) regex = re.compile("^[0-9a-z]+$") pl = ['b1c3d','xab15cxx','1x','y2'] regex_in(pl,regex)
def get_dashboard_info(adapter, institute_id=None, slice_query=None): LOG.debug("General query with institute_id {}.".format(institute_id)) if institute_id == : institute_id = None general_sliced_info = get_general_case_info(adapter, institute_id=institute_id, slice_query=slice_query) total_sliced_cases = general_sliced_info[] data = {: total_sliced_cases} if total_sliced_cases == 0: return data data[] = [] for ped_info in general_sliced_info[].values(): ped_info[] = ped_info[] / total_sliced_cases data[].append(ped_info) data[] = get_case_groups(adapter, total_sliced_cases, institute_id=institute_id, slice_query=slice_query) data[] = get_analysis_types(adapter, total_sliced_cases, institute_id=institute_id, slice_query=slice_query) overview = [ { : , : general_sliced_info[], : general_sliced_info[] / total_sliced_cases, }, { : , : general_sliced_info[], : general_sliced_info[] / total_sliced_cases, }, { : , : general_sliced_info[], : general_sliced_info[] / total_sliced_cases, }, { : , : general_sliced_info[], : general_sliced_info[] / total_sliced_cases, } ] general_info = get_general_case_info(adapter, institute_id=institute_id) total_cases = general_info[] sliced_case_ids = general_sliced_info[] verified_query = { : , } if institute_id: verified_query[] = institute_id sliced_validation_cases = set() sliced_validated_cases = set() validated_tp = set() validated_fp = set() var_valid_orders = 0 validate_events = adapter.event_collection.find(verified_query) for validate_event in list(validate_events): case_id = validate_event.get() var_obj = adapter.variant(case_id=case_id, document_id=validate_event[]) if var_obj: return data
Returns cases with phenotype If phenotypes are provided search for only those Args: adapter(adapter.MongoAdapter) institute_id(str): an institute _id slice_query(str): query to filter cases to obtain statistics for. Returns: data(dict): Dictionary with relevant information
def fdf(self, x):
    """Calculate the value of the functional for the specified arguments,
    and the derivatives with respect to the parameters (taking any
    specified mask into account).

    :param x: the value(s) to evaluate at
    """
    x = self._flatten(x)
    # Number of flattened evaluation values (1 for a scalar argument).
    n = 1
    if hasattr(x, "__len__"):
        n = len(x)
    # Dispatch on dtype: 0 selects the real implementation, anything else
    # the complex one.
    if self._dtype == 0:
        retval = _functional._fdf(self, x)
    else:
        retval = _functional._fdfc(self, x)
    if len(retval) == n:
        return numpy.array(retval)
    # Otherwise reshape so each row holds the function value followed by
    # the parameter derivatives for one evaluation point.
    return numpy.array(retval).reshape(self.npar() + 1,
                                       n // self.ndim()).transpose()
Calculate the value of the functional for the specified arguments, and the derivatives with respect to the parameters (taking any specified mask into account). :param x: the value(s) to evaluate at
def darken(self, amount):
    """Darken (reduce the luminance) of this color.

    Args:
        amount (float): Amount to reduce the luminance by
            (clamped above zero)

    Returns:
        Color
    """
    hsl_color = self.to_hsl()
    reduced = hsl_color.l - amount
    hsl_color.l = self.clamp(reduced)
    return self.from_hsl(hsl_color)
Darken (reduce the luminance) of this color. Args: amount (float) : Amount to reduce the luminance by (clamped above zero) Returns: Color
def copy_to(source, dest, engine_or_conn, **flags): dialect = postgresql.dialect() statement = getattr(source, , source) compiled = statement.compile(dialect=dialect) conn, autoclose = raw_connection_from(engine_or_conn) cursor = conn.cursor() query = cursor.mogrify(compiled.string, compiled.params).decode() formatted_flags = .format(format_flags(flags)) if flags else copy = .format(query, formatted_flags) cursor.copy_expert(copy, dest) if autoclose: conn.close()
Export a query or select to a file. For flags, see the PostgreSQL documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html. Examples: :: select = MyTable.select() with open('/path/to/file.tsv', 'w') as fp: copy_to(select, fp, conn) query = session.query(MyModel) with open('/path/to/file/csv', 'w') as fp: copy_to(query, fp, engine, format='csv', null='.') :param source: SQLAlchemy query or select :param dest: Destination file pointer, in write mode :param engine_or_conn: SQLAlchemy engine, connection, or raw_connection :param **flags: Options passed through to COPY If an existing connection is passed to `engine_or_conn`, it is the caller's responsibility to commit and close.
def hasattrs(object, *names):
    """Check whether `object` has every one of the named attributes.

    :param object: an object that may or may not contain the listed
        attributes
    :param names: attribute names to check for
    :return: True if the object has each named attribute, False otherwise
    """
    return all(hasattr(object, name) for name in names)
Takes in an object and a variable length amount of named attributes, and checks to see if the object has each property. If any of the attributes are missing, this returns false. :param object: an object that may or may not contain the listed attributes :param names: a variable amount of attribute names to check for :return: True if the object contains each named attribute, false otherwise
def get_core(self):
    """Get an unsatisfiable core if the formula was previously
    unsatisfied.

    Returns None (implicitly) when the solver handle is missing or the
    last result was not UNSAT.
    """
    # `is False` instead of `== False`: status may be None for "unknown",
    # and identity makes the UNSAT check explicit (avoids 0 == False).
    if self.maplesat and self.status is False:
        return pysolvers.maplesat_core(self.maplesat)
Get an unsatisfiable core if the formula was previously unsatisfied.
def insert(cls, cur, table: str, values: dict):
    """Creates an insert statement with only chosen fields.

    This is a generator-based coroutine (pre-async/await style): it
    delegates to the cursor's execute/fetch coroutines with `yield from`.

    Args:
        cur: a database cursor
        table: a string indicating the name of the table
        values: a dict of fields and values to be inserted

    Returns:
        A 'Record' object with table columns as properties
    """
    keys = cls._COMMA.join(values.keys())
    # One placeholder per value; the trailing separator is trimmed below.
    value_place_holder = cls._PLACEHOLDER * len(values)
    query = cls._insert_string.format(table, keys, value_place_holder[:-1])
    yield from cur.execute(query, tuple(values.values()))
    # NOTE(review): fetchone() after the insert presumably relies on the
    # statement returning the inserted row (e.g. RETURNING) — confirm
    # against cls._insert_string.
    return (yield from cur.fetchone())
Creates an insert statement with only chosen fields Args: table: a string indicating the name of the table values: a dict of fields and values to be inserted Returns: A 'Record' object with table columns as properties
def constraint_matches(self, c, m):
    """Return dict noting the substitution values (or False for no
    match).
    """
    if not isinstance(m, tuple):
        # Leaf pattern: delegate to its own match method.
        return m.match(c)
    if not (isinstance(c, Operator) and c._op_name == m[0]):
        return False
    # Operator pattern: every argument must match its sub-pattern; merge
    # the substitutions from each.
    subs = {}
    for child, pattern in zip(c._args, m[1:]):
        result = self.constraint_matches(child, pattern)
        if result is False:
            return result
        subs.update(result)
    return subs
Return dict noting the substitution values (or False for no match)
def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE):
    """Paginate harvest sources."""
    queryset = _sources_queryset(owner=owner)
    # Normalize the page number: None/0/negative all become page 1.
    current_page = max(page or 1, 1)
    return queryset.paginate(current_page, page_size)
Paginate harvest sources
def sort_sam(sam, sort): tempdir = % (os.path.abspath(sam).rsplit(, 1)[0]) if sort is True: mapping = % (sam.rsplit(, 1)[0]) if sam != : if os.path.exists(mapping) is False: os.system("\ sort -k1 --buffer-size=%sG -T %s -o %s %s\ " % (sbuffer, tempdir, mapping, sam)) else: mapping = p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \ % (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True) p.communicate() mapping = open(mapping) else: if sam == : mapping = sys.stdin else: mapping = open(sam) return mapping
sort sam file
def get_gateway_info(self):
    """Return the gateway info.

    Returns a Command.
    """
    def process_result(result):
        return GatewayInfo(result)
    # NOTE(review): the stripped method literal was reconstructed as 'get'
    # (gateway info is fetched, matching the pytradfri Command API) —
    # confirm against upstream.
    return Command('get', [ROOT_GATEWAY, ATTR_GATEWAY_INFO],
                   process_result=process_result)
Return the gateway info. Returns a Command.
def _locateConvergencePoint(stats, minOverlap, maxOverlap): for i, v in enumerate(stats[::-1]): if not (v >= minOverlap and v <= maxOverlap): return len(stats) - i + 1 return 1
Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point.
def getall(self, table):
    """Get all rows values for a table.

    Returns a pandas DataFrame, or None when the database is unreachable,
    the table is missing, or retrieval fails (errors are reported through
    self.err / self.warning).
    """
    try:
        self._check_db()
    except Exception as e:
        self.err(e, "Can not connect to database")
        return
    if table not in self.db.tables:
        self.warning("The table " + table + " does not exists")
        return
    try:
        rows = self.db[table].all()
        return pd.DataFrame(list(rows))
    except Exception as e:
        self.err(e, "Error retrieving data in table")
Get all rows values for a table
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]:
    """Parse a leaf-list value or list keys from the input buffer."""
    try:
        keys = self.up_to("/")          # consume input up to the next path separator
    except EndOfInput:
        keys = self.remaining()         # no separator left: take the rest
    if not keys:
        raise UnexpectedInput(self, "entry value or keys")
    if isinstance(sn, LeafListNode):
        # Leaf-list entries are addressed by value, not by keys.
        return EntryValue(unquote(keys))
    ks = keys.split(",")
    try:
        if len(ks) != len(sn.keys):
            raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys")
    except AttributeError:
        # sn has no `.keys`, i.e. it is not a list node.
        raise BadSchemaNodeType(sn.qual_name, "list")
    sel = {}
    for j in range(len(ks)):
        knod = sn.get_data_child(*sn.keys[j])
        val = unquote(ks[j])
        # Namespace is None when the key shares the list's namespace.
        sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val
    return EntryKeys(sel)
Parse leaf-list value or list keys.
def set_sig_figs(n=4):
    """Set the number of significant figures used to print Pint and
    Pandas quantities.

    # NOTE(review): the format-string fragments surrounding str(n) were
    # stripped from this source — restore from VCS.

    Args:
        n (int): Number of significant figures to display.
    """
    u.default_format =  + str(n) + 
    pd.options.display.float_format = ( + str(n) + ).format
Set the number of significant figures used to print Pint, Pandas, and NumPy quantities. Args: n (int): Number of significant figures to display.
def vectorize(density_matrix, method=):
    """Flatten a density matrix to a vector in the requested basis.

    # NOTE(review): the method-name string literals ('col', 'row',
    # 'pauli', 'pauli_weights' per the original docstring) and the
    # flatten/case arguments were stripped from this source — restore
    # from VCS before use.
    """
    density_matrix = np.array(density_matrix)
    if method == :
        return density_matrix.flatten(order=)   # column-major
    elif method == :
        return density_matrix.flatten(order=)   # row-major
    elif method in [, ]:
        num = int(np.log2(len(density_matrix)))  # number of qubits
        if len(density_matrix) != 2**num:
            raise Exception()
        if method == :
            pgroup = pauli_group(num, case=)
        else:
            pgroup = pauli_group(num, case=)
        # Project onto each Pauli-group element via the trace inner product.
        vals = [np.trace(np.dot(p.to_matrix(), density_matrix))
                for p in pgroup]
        return np.array(vals)
    return None
Flatten an operator to a vector in a specified basis. Args: density_matrix (ndarray): a density matrix. method (str): the method of vectorization. Allowed values are - 'col' (default) flattens to column-major vector. - 'row' flattens to row-major vector. - 'pauli'flattens in the n-qubit Pauli basis. - 'pauli-weights': flattens in the n-qubit Pauli basis ordered by weight. Returns: ndarray: the resulting vector. Raises: Exception: if input state is not a n-qubit state
def add(self, years=0, months=0, weeks=0, days=0):
    """Return a new instance of the same class shifted forward by the
    given duration (years/months/weeks/days).
    """
    shifted = add_duration(
        date(self.year, self.month, self.day),
        years=years,
        months=months,
        weeks=weeks,
        days=days,
    )
    return self.__class__(shifted.year, shifted.month, shifted.day)
Add duration to the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :rtype: Date
def MultiDelete(self, urns, token=None):
    """Recursively delete the given URNs and everything under them.

    Raises ValueError when any URN is the root ("/") as a safety check.
    """
    urns = [rdfvalue.RDFURN(urn) for urn in urns]
    if token is None:
        token = data_store.default_token
    for urn in urns:
        if urn.Path() == "/":
            raise ValueError("Can't delete root URN. Please enter a valid URN")
    # Collect everything that must go, including children, before
    # touching the data store.
    deletion_pool = DeletionPool(token=token)
    deletion_pool.MultiMarkForDeletion(urns)
    marked_root_urns = deletion_pool.root_urns_for_deletion
    marked_urns = deletion_pool.urns_for_deletion
    logging.debug(u"Found %d objects to remove when removing %s",
                  len(marked_urns), urns)
    logging.debug(u"Removing %d root objects when removing %s: %s",
                  len(marked_root_urns), urns, marked_root_urns)
    pool = data_store.DB.GetMutationPool()
    for root in marked_root_urns:
        # Unlink each deleted root from its parent's child index.
        self._DeleteChildFromIndex(root, mutation_pool=pool)
    for urn_to_delete in marked_urns:
        try:
            self.intermediate_cache.ExpireObject(urn_to_delete.Path())
        except KeyError:
            pass  # not cached: nothing to expire
    pool.DeleteSubjects(marked_urns)
    pool.Flush()
    self.Flush()
    logging.debug("Removed %d objects", len(marked_urns))
Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: ValueError: If one of the urns is too short. This is a safety check to ensure the root is not removed.
def fetch_fieldnames(self, sql: str, *args) -> List[str]:
    """Execute SQL and return just the output field names.

    :param sql: SQL statement to execute
    :param args: bind parameters for the statement
    :return: list of column names from the cursor description
    :raises: re-raises anything the description read raises, after logging
    """
    self.ensure_db_open()
    cursor = self.db.cursor()
    self.db_exec_with_cursor(cursor, sql, *args)
    try:
        return [i[0] for i in cursor.description]
    except Exception:
        # Was a bare `except:`, which also intercepted SystemExit and
        # KeyboardInterrupt before re-raising; narrowed to Exception.
        log.exception("fetch_fieldnames: SQL was: " + sql)
        raise
Executes SQL; returns just the output fieldnames.
def header(self, array):
    """Specify the header row of the table; returns self for chaining."""
    self._check_row_size(array)
    self._header = [obj2unicode(cell) for cell in array]
    return self
Specify the header of the table
def rst_to_notebook(infile, outfile):
    """Convert an rst file to a Jupyter notebook file.

    # NOTE(review): the open() mode, the pandoc format strings, the
    # re.sub patterns, and a post-processing assignment were stripped
    # from this source — restore from VCS.
    """
    with open(infile, ) as fin:
        rststr = fin.read()
    mdfmt = 
    mdstr = pypandoc.convert_text(rststr, mdfmt, format=, extra_args=[])
    mdstr = re.sub(r, r, mdstr)
    mdstr = 
    nb = py2jn.py_string_to_notebook(mdstr)
    py2jn.tools.write_notebook(nb, outfile, nbver=4)
Convert an rst file to a notebook file.
def status(self):
    """Status of this SMS: ENROUTE until a status report arrives, then
    DELIVERED or FAILED depending on the report's delivery status.

    The actual status report object is available via ``self.report``.
    """
    # `== None` replaced with the identity check `is None` (PEP 8);
    # behavior is unchanged.
    if self.report is None:
        return SentSms.ENROUTE
    if self.report.deliveryStatus == StatusReport.DELIVERED:
        return SentSms.DELIVERED
    return SentSms.FAILED
Status of this SMS. Can be ENROUTE, DELIVERED or FAILED The actual status report object may be accessed via the 'report' attribute if status is 'DELIVERED' or 'FAILED'
def configuration(self, plugin):
    """Get plugin configuration as a tuple of (on|off|default, args).

    # NOTE(review): the split() separator (presumably ';' given the
    # "default;" fallback) and the append() argument were stripped from
    # this source — restore from VCS.
    """
    conf = self.config.get(plugin, "default;").split()
    if len(conf) == 1:
        # No args configured: pad with an empty args entry.
        conf.append()
    return tuple(conf)
Get plugin configuration. Return a tuple of (on|off|default, args)
def _to_pandas(ob): if isinstance(ob, (pd.Series, pd.DataFrame)): return ob if ob.ndim == 1: return pd.Series(ob) elif ob.ndim == 2: return pd.DataFrame(ob) else: raise ValueError( , )
Convert an array-like to a pandas object. Parameters ---------- ob : array-like The object to convert. Returns ------- pandas_structure : pd.Series or pd.DataFrame The correct structure based on the dimensionality of the data.
def get_rate_limits(response):
    """Return a list of rate-limit info dicts parsed from response headers.

    # NOTE(review): the rate-limit header names and the split()
    # separators were stripped from this source — restore from VCS.
    """
    periods = response.headers[]
    if not periods:
        return []
    rate_limits = []
    # Header values are parallel lists: one entry per rate-limit period.
    periods = periods.split()
    limits = response.headers[].split()
    remaining = response.headers[].split()
    reset = response.headers[].split()
    for idx, period in enumerate(periods):
        rate_limit = {}
        limit_period = get_readable_time_string(period)
        rate_limit["period"] = limit_period
        rate_limit["period_seconds"] = period
        rate_limit["request_limit"] = limits[idx]
        rate_limit["requests_remaining"] = remaining[idx]
        reset_datetime = get_datetime_from_timestamp(reset[idx])
        rate_limit["reset"] = reset_datetime
        right_now = datetime.now()
        if (reset_datetime is not None) and (right_now < reset_datetime):
            # +1 rounds the sub-second remainder up.
            seconds_remaining = (reset_datetime - right_now).seconds + 1
        else:
            seconds_remaining = 0
        rate_limit["reset_in_seconds"] = seconds_remaining
        rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
        rate_limits.append(rate_limit)
    return rate_limits
Returns a list of rate limit information from a given response's headers.
def get_all_not_wh_regions(db_connection):
    """Return all non-wormhole regions, memoized on the function object
    so the query runs only once per process.

    # NOTE(review): the hasattr attribute name (presumably '_results')
    # and the SQL statement were stripped from this source — restore
    # from VCS.
    """
    if not hasattr(get_all_not_wh_regions, ):
        sql = 
        results = execute_sql(sql, db_connection)
        # Cache on the function object itself.
        get_all_not_wh_regions._results = results
    return get_all_not_wh_regions._results
Gets a list of all regions that are not WH regions. :return: A list of all regions not including wormhole regions. Results have regionID and regionName. :rtype: list
def rescan_images(registry):
    """Update the kernel image metadata from all configured docker
    registries.

    # NOTE(review): the result-dict keys used below were stripped from
    # this source — restore from VCS.
    """
    with Session() as session:
        try:
            result = session.Image.rescanImages(registry)
        except Exception as e:
            print_error(e)
            sys.exit(1)  # abort the CLI on any rescan failure
        if result[]:
            print("kernel image metadata updated")
        else:
            print("rescanning failed: {0}".format(result[]))
Update the kernel image metadata from all configured docker registries.
def convert_to_file(file, ndarr):
    """Write the contents of numpy.ndarray ``ndarr`` to ``file`` in IDX
    format.

    :param file: a file-like object (with a ``write()`` method) or a
        file name
    :param ndarr: the array to serialize
    """
    if isinstance(file, six_string_types):
        # IDX is a binary format, so the file must be opened in binary
        # write mode; the mode argument was missing (`open(file, )`),
        # which silently opened the file read-only in text mode.
        with open(file, 'wb') as fp:
            _internal_write(fp, ndarr)
    else:
        _internal_write(file, ndarr)
Writes the contents of the numpy.ndarray ndarr to file in IDX format. file is a file-like object (with write() method) or a file name.
def conference_undeaf(self, call_params):
    """REST Conference Undeaf helper.

    # NOTE(review): the endpoint path fragments and the HTTP method
    # string were stripped from this source — restore from VCS.
    """
    path =  + self.api_version + 
    method = 
    return self.request(path, method, call_params)
REST Conference Undeaf helper
def requestPdpContextActivation(AccessPointName_presence=0):
    """REQUEST PDP CONTEXT ACTIVATION Section 9.5.4.

    Builds the message packet, optionally appending the Access Point
    Name IE when its presence flag is set.

    :param AccessPointName_presence: 1 to include the APN IE, 0 to omit
    :return: the assembled packet
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x44)
    c = PacketDataProtocolAddress()
    packet = a / b / c
    # `is 1` compared by identity, relying on CPython small-int caching;
    # use equality instead.
    if AccessPointName_presence == 1:
        d = AccessPointName(ieiAPN=0x28)
        packet = packet / d
    return packet
REQUEST PDP CONTEXT ACTIVATION Section 9.5.4
def smart_account(app):
    """Try to set up Flask-Security accounts using the built-in
    mechanism, resolving account/role classes from environment values.

    # NOTE(review): every os.environ key, the rsplit separators and the
    # comparison strings in this block were stripped from this source —
    # restore from VCS before use.
    """
    if os.environ[] == :
        return
    from flask_security import SQLAlchemyUserDatastore, Security
    # Resolve the account class from a dotted "module.Class" env value.
    account_module_name, account_class_name = os.environ[
        ].rsplit(, 1)
    account_module = importlib.import_module(account_module_name)
    account_class = getattr(account_module, account_class_name)
    # Resolve the role class the same way.
    role_module_name, role_class_name = os.environ[
        ].rsplit(, 1)
    role_module = importlib.import_module(role_module_name)
    role_class = getattr(role_module, role_class_name)
    # Whether to register the security blueprint.
    r = True if os.environ[
        ] !=  else False
    Security(app, SQLAlchemyUserDatastore(
        app.db, account_class, role_class), register_blueprint=r)
    pass
尝试使用内置方式构建账户
def log_url(self, url_data):
    """Write url checking info, part by part, honoring has_part() filters.

    # NOTE(review): the part-name string arguments to has_part() were
    # stripped from this source — restore from VCS.
    """
    self.writeln()
    if self.has_part():
        self.write_url(url_data)
    if url_data.name and self.has_part():
        self.write_name(url_data)
    if url_data.parent_url and self.has_part():
        self.write_parent(url_data)
    if url_data.base_ref and self.has_part():
        self.write_base(url_data)
    if url_data.url and self.has_part():
        self.write_real(url_data)
    if url_data.checktime and self.has_part():
        self.write_checktime(url_data)
    if url_data.dltime >= 0 and self.has_part():
        self.write_dltime(url_data)
    if url_data.size >= 0 and self.has_part():
        self.write_size(url_data)
    if url_data.info and self.has_part():
        self.write_info(url_data)
    if url_data.modified and self.has_part():
        self.write_modified(url_data)
    if url_data.warnings and self.has_part():
        self.write_warning(url_data)
    if self.has_part():
        self.write_result(url_data)
    self.flush()
Write url checking info.
def create(callback=None, path=None, method=Method.POST, resource=None,
           tags=None, summary="Create a new resource", middleware=None):
    """Decorator to configure an operation that creates a resource.

    Usable both bare (``@create``) and with arguments (``@create(...)``).
    """
    def decorate(func):
        operation = ResourceOperation(func, path or NoPath, method,
                                      resource, tags, summary, middleware)
        operation.responses.add(
            Response(HTTPStatus.CREATED, "{name} has been created"))
        operation.responses.add(
            Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
        return operation
    return decorate(callback) if callback else decorate
Decorator to configure an operation that creates a resource.
def _create_affine_multiframe(multiframe_dicom):
    """Create the affine matrix for a Siemens mosaic dataset.

    Works for Siemens DTI and 4D data in mosaic format.
    """
    # First and last frames of the per-frame functional groups sequence.
    first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
    last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1]
    # Row/column direction cosines from ImageOrientationPatient.
    image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float)
    image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float)
    normal = numpy.cross(image_orient1, image_orient2)
    # In-plane pixel spacing, taken from the private (0x2005, 0x140f) sequence.
    delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0])
    delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1])
    image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
    last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
    number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
    # Slice spacing from the first-to-last slice distance.
    delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1)
    # Sign flips on the first two rows look like a DICOM LPS -> RAS
    # conversion — presumably; TODO confirm against project conventions.
    return numpy.array(
        [[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]],
         [-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]],
         [image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]],
         [0, 0, 0, 1]])
Function to create the affine matrix for a siemens mosaic dataset This will work for siemens dti and 4D if in mosaic format
def spectra(i, **kwargs):
    """Define colours by number; returns an (r, g, b) tuple (or its
    gray-scale equivalent when gray output is requested).

    # NOTE(review): every colour-name key (CD[...]), the kwargs keys,
    # and the entries of the DtoL/Best/Dots ordering lists were stripped
    # from this source — restore from VCS.

    :param i: the index (int/float) or colour name (str) to access
    """
    ordered = kwargs.get(, False)
    options = kwargs.get(, )
    gray = kwargs.get(, False)
    # Colour dictionary: name -> (r, g, b).
    CD = {}
    CD[] = (1.0, 0.0, 0.55)
    CD[] = (0.15, 0.35, 0.0)
    CD[] = (0.73, 0.0, 0.0)
    CD[] = (0.8, 0.0, 0.8)
    CD[] = (0.49, 0.64, 0.0)
    CD[] = (1.0, 0.5, 0.0)
    CD[] = (0.5, 0.85, 1.0)
    CD[] = (1.0, 0.8, 0.8)
    CD[] = (0.5, 0.3, 0.0)
    CD[] = (0.9, 0.0, 0.0)
    CD[] = (0.12, .8, .8)
    CD[] = (0.8, 0.85, 1.0)
    CD[] = (1.0, 1.0, 0.0)
    CD[] = (0.25, 0.25, 0.25)
    CD[] = (0.5, 0.5, 0.5)
    CD[] = (0.75, 0.75, 0.75)
    CD[] = (0.05, 0.05, 0.05)
    CD[] = (0.0, 0.0, 0.0)
    CD[] = (1.0, 1.0, 1.0)
    if isinstance(i, int):
        i = i
    elif isinstance(i, float):
        i = int(i)
    elif isinstance(i, str):
        # A colour requested by name is returned directly.
        dat = CD[i]
        return dat
    # Orderings: dark-to-light, best gray contrast, and dots.
    DtoL = [, , , , , , , , , , , , , , , ]
    Best = [, , , , , , , , , , , , , , , ]
    Dots = [, , , , , , , , , , , , , , , ]
    ind = i % len(Best)
    dat = CD[Best[ind]]
    col = Best[ind]
    if ordered:
        ind = i % len(DtoL)
        dat = CD[DtoL[ind]]
        col = DtoL[ind]
    if options == "dots":
        ind = i % len(Dots)
        dat = CD[Dots[ind]]
        col = Dots[ind]
    if options == "ordered":
        ind = i % len(DtoL)
        dat = CD[DtoL[ind]]
        col = DtoL[ind]
    # Luma-weighted gray-scale conversion.
    gray_value = 0.299 * dat[0] + 0.587 * dat[1] + 0.114 * dat[2]
    if gray:
        return gray_value, gray_value, gray_value
    return dat
Define colours by number. Can be plotted either in order of gray scale or in the 'best' order for having a strong gray contrast for only three or four lines :param i: the index to access a colour
def clear(self, *args, **kwargs):
    """Clear only drafts (status required: ``'draft'``).

    Meta information inside `_deposit` is preserved by the parent
    implementation, to which this simply delegates.
    """
    parent = super(Deposit, self)
    parent.clear(*args, **kwargs)
Clear only drafts. Status required: ``'draft'``. Meta information inside `_deposit` are preserved.
def str_to_num(i, exact_match=True):
    """Attempt to convert a str to an int or float.

    With ``exact_match`` the conversion is accepted only when converting
    back to str reproduces the input; otherwise the input is returned
    unchanged. Non-strings pass through untouched.
    """
    if not isinstance(i, str):
        return i
    try:
        as_int = int(i)
        if not exact_match:
            return as_int
        if str(as_int) == i:
            return as_int
        as_float = float(i)
        if str(as_float) == i:
            return as_float
    except ValueError:
        pass
    return i
Attempts to convert a str to either an int or float
def _customized_loader(container, loader=Loader, mapping_tag=_MAPPING_TAG):
    """Create or update a loader that uses ``container`` to construct
    mapping objects (dict, OrderedDict, ...) from YAML mapping nodes.

    :param container: callable used internally to create mappings
    :param loader: the loader class to customize
    :param mapping_tag: the YAML tag for mapping nodes
    :return: the customized loader
    """
    def construct_mapping(loader, node, deep=False):
        # Build a `container` mapping from a YAML mapping node.
        loader.flatten_mapping(node)
        if not isinstance(node, yaml.MappingNode):
            msg = "expected a mapping node, but found %s" % node.id
            raise yaml.constructor.ConstructorError(None, None, msg,
                                                    node.start_mark)
        mapping = container()
        for key_node, value_node in node.value:
            key = loader.construct_object(key_node, deep=deep)
            try:
                hash(key)  # mapping keys must be hashable
            except TypeError as exc:
                eargs = ("while constructing a mapping",
                         node.start_mark,
                         "found unacceptable key (%s)" % exc,
                         key_node.start_mark)
                raise yaml.constructor.ConstructorError(*eargs)
            value = loader.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    tag = "tag:yaml.org,2002:python/unicode"

    def construct_ustr(loader, node):
        # Construct a plain (unicode) scalar.
        return loader.construct_scalar(node)

    try:
        loader.add_constructor(tag, construct_ustr)
    except NameError:
        pass  # tolerated on interpreters without the unicode machinery

    # BUG FIX: the original tested `type(container) != dict`, which is
    # always true (container is a class, so its type is `type`), making
    # the guard useless.  The intent is to skip the custom mapping
    # constructor only for the plain builtin dict.
    if container is not dict:
        loader.add_constructor(mapping_tag, construct_mapping)
    return loader
Create or update loader with making given callble 'container' to make mapping objects such as dict and OrderedDict, used to construct python object from yaml mapping node internally. :param container: Set container used internally
def create_widget(self):
    """Create the underlying widget.

    A dialog is not a subclass of view, hence we don't set the name as
    `widget`, or children would try to use it as their parent.
    """
    decl = self.declaration
    context = self.get_context()
    self.dialog = BottomSheetDialog(context, decl.style)
Create the underlying widget. A dialog is not a subclass of view, hence we don't set name as widget or children will try to use it as their parent.
def euler_trans_matrix(etheta, elongan, eincl):
    """Return the 3x3 transformation matrix used to translate/rotate a
    mesh according to euler angles:
    R(long, incl, theta) = Rz(pi).Rz(long).Rx(incl).Rz(theta).

    :parameter float etheta: euler theta angle
    :parameter float elongan: euler longitude of ascending node angle
    :parameter float eincl: euler inclination angle
    :return: 3x3 ndarray
    """
    sin_i, cos_i = sin(eincl), cos(eincl)
    sin_l, cos_l = sin(elongan), cos(elongan)
    sin_t, cos_t = sin(etheta), cos(etheta)
    # Products that appear twice below.
    ci_st = cos_i * sin_t
    ci_ct = cos_i * cos_t
    row0 = [-cos_l * cos_t + sin_l * ci_st,
            cos_l * sin_t + sin_l * ci_ct,
            -sin_l * sin_i]
    row1 = [-sin_l * cos_t - cos_l * ci_st,
            sin_l * sin_t - cos_l * ci_ct,
            cos_l * sin_i]
    row2 = [sin_i * sin_t, sin_i * cos_t, cos_i]
    return np.array([row0, row1, row2])
Get the transformation matrix R to translate/rotate a mesh according to euler angles. The matrix is R(long,incl,theta) = Rz(pi).Rz(long).Rx(incl).Rz(theta) Rz(long).Rx(-incl).Rz(theta).Rz(pi) where Rx(u) = 1, 0, 0 0, cos(u), -sin(u) 0, sin(u), cos(u) Ry(u) = cos(u), 0, sin(u) 0, 1, 0 -sin(u), 0, cos(u) Rz(u) = cos(u), -sin(u), 0 sin(u), cos(u), 0 0, 0, 1 Rz(pi) = reflection across z-axis Note: R(0,0,0) = -1, 0, 0 0, -1, 0 0, 0, 1 :parameter float etheta: euler theta angle :parameter float elongan: euler long of asc node angle :parameter float eincl: euler inclination angle :return: matrix with size 3x3
def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize,
                                   tagger, lemmatizer, lemmatize, stopset,
                                   first_cap_re, all_cap_re,
                                   digits_punctuation_whitespace_re, pos_set):
    """Extract a bag-of-words for a corpus of Twitter lists pertaining
    to a Twitter user.

    Returns the reduced bag-of-words plus an aggregated mapping from
    each lemma to the multiplicities of the original keywords it came
    from.
    """
    keyword_sets, lemma_maps = clean_list_of_twitter_list(
        twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger,
        lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re,
        digits_punctuation_whitespace_re, pos_set)
    bag_of_words = reduce_list_of_bags_of_words(keyword_sets)
    # Aggregate keyword multiplicities per lemma across all lists.
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_map in lemma_maps:
        for lemma, keyword_bag in lemma_map.items():
            for keyword, multiplicity in keyword_bag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity
    return bag_of_words, lemma_to_keywordbag_total
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user. Inputs: - twitter_list_corpus: A python list of Twitter lists in json format. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Output: - bag_of_words: A bag-of-words in python dictionary format. - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
def _populate_inputs(self, total):
    """Request the names of all active, configured inputs on the device.

    # NOTE(review): the query-command prefix string before
    # str(input_number) was stripped from this source — restore from
    # VCS (as written the unary `+` on a str raises TypeError).
    """
    total = total + 1  # make the range upper bound inclusive of `total`
    for input_number in range(1, total):
        # Input numbers are zero-padded to two digits.
        self.query( + str(input_number).zfill(2))
Request the names for all active, configured inputs on the device. Once we learn how many inputs are configured, this function is called which will ask for the name of each active input.
def service_create(auth=None, **kwargs):
    """Create a keystone service.

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.service_create name=glance type=image
        salt '*' keystoneng.service_create name=glance type=image description="Image"
    """
    # A stray `**` token (residue of a mangled docstring) was removed
    # from the first line of the body.
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(keep_name=True, **kwargs)
    return cloud.create_service(**kwargs)
Create a service CLI Example: .. code-block:: bash salt '*' keystoneng.service_create name=glance type=image salt '*' keystoneng.service_create name=glance type=image description="Image"
def path_locations(home_dir):
    """Return the path locations for the environment (where libraries
    are, where scripts go, etc).

    # NOTE(review): the body that computed lib_dir, inc_dir and bin_dir
    # was stripped from this source; as written those names are
    # undefined — restore from VCS.
    """
    return home_dir, lib_dir, inc_dir, bin_dir
Return the path locations for the environment (where libraries are, where scripts go, etc)
def extend_expiration_date(self, days=KEY_EXPIRATION_DELTA):
    """Extend the expiration date by the given number of days and
    persist the change."""
    self.expiration_date += timedelta_days(days)
    self.save()
Extend expiration date by a given number of days
def from_dict(data, ctx):
    """Instantiate a new AccountChangesState from a dict (generally from
    loading a JSON response).

    A shallow copy of ``data`` is taken; decimal-number fields are
    converted via the context, and the order/trade/position state lists
    are instantiated to their proper types.

    # NOTE(review): every data.get()/data[] key in this block was
    # stripped from this source — restore from VCS.
    """
    data = data.copy()
    # Convert each present decimal-number field via the context.
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    if data.get() is not None:
        data[] = ctx.convert_decimal_number(
            data.get()
        )
    # Instantiate the nested state lists to their proper types.
    if data.get() is not None:
        data[] = [
            ctx.order.DynamicOrderState.from_dict(d, ctx)
            for d in data.get()
        ]
    if data.get() is not None:
        data[] = [
            ctx.trade.CalculatedTradeState.from_dict(d, ctx)
            for d in data.get()
        ]
    if data.get() is not None:
        data[] = [
            ctx.position.CalculatedPositionState.from_dict(d, ctx)
            for d in data.get()
        ]
    return AccountChangesState(**data)
Instantiate a new AccountChangesState from a dict (generally from loading a JSON response). The data used to instantiate the AccountChangesState is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
def _run_web_container(self, port, command, address, log_syslog=False,
                       datapusher=True, interactive=False):
    """Start the web container on ``port`` with ``command``.

    # NOTE(review): nearly every string literal (mount points, link
    # aliases, container/image names, docker CLI switches and format
    # strings) was stripped from this source — restore from VCS before
    # relying on this.
    """
    if is_boot2docker():
        ro = {}
        volumes_from = self._get_container_name()
    else:
        ro = {self.datadir + : }
        volumes_from = None
    # Containers this web container links against.
    links = {
        self._get_container_name(): ,
        self._get_container_name(): 
    }
    links.update({self._get_container_name(container): container
        for container in self.extra_containers})
    if datapusher:
        if  not in self.containers_running():
            raise DatacatsError(container_logs(
                self._get_container_name(), "all", False, False))
        links[self._get_container_name()] = 
    # Read-only mounts: project target plus helper scripts.
    ro = dict({
        self.target: ,
        scripts.get_script_path(): ,
        scripts.get_script_path(): 
    }, **ro)
    # Read-write mounts inside the site directory.
    rw = {
        self.sitedir + : ,
        self.sitedir + : 
    }
    try:
        if not interactive:
            run_container(
                name=self._get_container_name(),
                image=,
                rw=rw,
                ro=ro,
                links=links,
                volumes_from=volumes_from,
                command=command,
                port_bindings={
                    5000: port if is_boot2docker() else (address, port)},
                log_syslog=log_syslog
                )
        else:
            # Interactive mode shells out to the docker CLI directly.
            if is_boot2docker():
                switches = [, self._get_container_name(),
                    , self._get_container_name()]
            else:
                switches = []
            switches += [.format(vol, ro[vol]) for vol in ro]
            switches += [.format(vol, rw[vol]) for vol in rw]
            links = [.format(link, links[link]) for link in links]
            args = [, , , , self._get_container_name(), ,
                .format(port) if is_boot2docker()
                else .format(address, port)] + \
                switches + links + [, ] + command
            subprocess.call(args)
    except APIError as e:
        if  in str(e):
            raise DatacatsError()
        else:
            raise
Start web container on port with command
def ensure_dir(self, *path_parts):
    """Ensure a subdirectory of the working directory exists and return
    its path.

    Parameters
    ----------
    path_parts : iterable[str]
        The parts of the path after the working directory.
    """
    full_path = self.getpath(*path_parts)
    ensure_directory(full_path)
    return full_path
Ensures a subdirectory of the working directory. Parameters ---------- path_parts : iterable[str] The parts of the path after the working directory.
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate):
    """Build a POSITION_TARGET_GLOBAL_INT message reporting the current
    commanded vehicle position, velocity, and acceleration as specified
    by the autopilot (matching SET_POSITION_TARGET_GLOBAL_INT when the
    vehicle is controlled this way).
    """
    fields = (time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int,
              alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
    return MAVLink_position_target_global_int_message(*fields)
Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
def inception_v3(pretrained=False, **kwargs):
    r"""Inception v3 model architecture from "Rethinking the Inception
    Architecture for Computer Vision".

    # NOTE(review): the kwargs key strings and the model_urls key were
    # stripped from this source — restore from VCS (the stray `r` before
    # this docstring was the original raw-docstring prefix).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        if  not in kwargs:
            kwargs[] = True
        if  in kwargs:
            # Remember the caller's aux_logits choice; pretrained weights
            # require it enabled during load.
            original_aux_logits = kwargs[]
            kwargs[] = True
        else:
            original_aux_logits = True
        model = Inception3(**kwargs)
        model.load_state_dict(model_zoo.load_url(model_urls[]))
        if not original_aux_logits:
            # Drop the auxiliary classifier the caller did not ask for.
            model.aux_logits = False
            del model.AuxLogits
        return model
    return Inception3(**kwargs)
r"""Inception v3 model architecture from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. .. note:: **Important**: In contrast to the other models the inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet aux_logits (bool): If True, add an auxiliary branch that can improve training. Default: *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: *False*
def get_load_balancer(self, id):
    """Return a Load Balancer object by its ID.

    Args:
        id (str): Load Balancer ID
    """
    # `id` shadows the builtin but is part of the public signature.
    token = self.token
    return LoadBalancer.get_object(api_token=token, id=id)
Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID
def enable_audit_device(self, device_type, description=None, options=None, path=None):
    """Enable a new audit device at the supplied path
    (PUT/POST sys/audit/{path}).

    # NOTE(review): the params-dict key strings and the URL format
    # string were stripped from this source — restore from VCS.
    """
    if path is None:
        path = device_type  # default the mount path to the device type
    params = {
        : device_type,
        : description,
        : options,
    }
    api_path = .format(path=path)
    return self._adapter.post(
        url=api_path,
        json=params
    )
Enable a new audit device at the supplied path. The path can be a single word name or a more complex, nested path. Supported methods: PUT: /sys/audit/{path}. Produces: 204 (empty body) :param device_type: Specifies the type of the audit device. :type device_type: str | unicode :param description: Human-friendly description of the audit device. :type description: str | unicode :param options: Configuration options to pass to the audit device itself. This is dependent on the audit device type. :type options: str | unicode :param path: Specifies the path in which to enable the audit device. This is part of the request URL. :type path: str | unicode :return: The response of the request. :rtype: requests.Response
def sort_ranges(inranges):
    """Return a new list of GenomicRange data sorted by chromosome,
    start, end, then direction.

    :param inranges: list of GenomicRange data
    :returns: a new sorted GenomicRange list
    """
    def sort_key(rng):
        return (rng.chr, rng.start, rng.end, rng.direction)
    return sorted(inranges, key=sort_key)
from an array of ranges, make a sorted array of ranges :param inranges: List of GenomicRange data :type inranges: GenomicRange[] :returns: a new sorted GenomicRange list :rtype: GenomicRange[]
def main(cls):
    """Main entry point of Laniakea.

    # NOTE(review): the logging format strings, the userdata default,
    # the provider-class suffix and several message strings were
    # stripped from this source — restore from VCS.
    """
    args = cls.parse_args()
    if args.focus:
        Focus.init()
    else:
        Focus.disable()
    logging.basicConfig(format=, level=args.verbosity * 10, datefmt=)
    logger.info(, Focus.data(args.settings.name))
    try:
        settings = json.loads(args.settings.read())
    except ValueError as msg:
        logger.error(, args.settings.name, msg)
        return 1
    userdata = 
    if args.userdata:
        logger.info(, Focus.info(args.userdata.name))
        try:
            # Resolve @import@ tags relative to the userdata file.
            userdata = UserData.handle_import_tags(args.userdata.read(),
                os.path.dirname(args.userdata.name))
        except UserDataException as msg:
            logging.error(msg)
            return 1
    if args.list_userdata_macros:
        UserData.list_tags(userdata)
        return 0
    if args.userdata_macros:
        # key=value pairs become a substitution dict.
        args.userdata_macros = UserData.convert_pair_to_dict(
            args.userdata_macros or )
        userdata = UserData.handle_tags(userdata, args.userdata_macros)
    if args.print_userdata:
        logger.info(, userdata)
        return 0
    if args.provider:
        # Look up the provider class by capitalized module name.
        provider = getattr(globals()[args.provider],
                           args.provider.title() + )
        provider().main(args, settings, userdata)
    return 0
Main entry point of Laniakea.
def process(self, context, internal_response):
    """Manage consent and attribute filtering.

    :param context: response context
    :param internal_response: the response
    :return: a satosa response
    """
    consent_state = context.state[STATE_KEY]
    internal_response.attributes = self._filter_attributes(
        internal_response.attributes, consent_state["filter"])
    id_hash = self._get_consent_id(internal_response.requester,
                                   internal_response.subject_id,
                                   internal_response.attributes)
    try:
        consent_attributes = self._verify_consent(id_hash)
    except requests.exceptions.ConnectionError:
        # Fail closed: release no attributes when the service is down.
        satosa_logging(logger, logging.ERROR,
                       "Consent service is not reachable, no consent given.",
                       context.state)
        internal_response.attributes = {}
        return self._end_consent(context, internal_response)
    if consent_attributes is None:
        # No prior consent on record: ask the user.
        return self._approve_new_consent(context, internal_response, id_hash)
    satosa_logging(logger, logging.DEBUG, "Previous consent was given",
                   context.state)
    internal_response.attributes = self._filter_attributes(
        internal_response.attributes, consent_attributes)
    return self._end_consent(context, internal_response)
Manage consent and attribute filtering :type context: satosa.context.Context :type internal_response: satosa.internal.InternalData :rtype: satosa.response.Response :param context: response context :param internal_response: the response :return: response
def gtr(self, value):
    """Set a new GTR object.

    Parameters
    -----------
    value : GTR
        the new GTR object
    """
    if not isinstance(value, (GTR, GTR_site_specific)):
        raise TypeError(" GTR instance expected")
    self._gtr = value
Set a new GTR object Parameters ----------- value : GTR the new GTR object
def send_eth_to(self, private_key: str, to: str, gas_price: int, value: int,
                gas: int=22000, retry: bool = False, block_identifier=None,
                max_eth_to_send: int = 0) -> bytes:
    """Send ether using the configured account; returns the tx hash.

    # NOTE(review): the toWei unit string, the EtherLimitExceeded
    # message format string, and the tx-dict keys were stripped from
    # this source — restore from VCS.
    """
    assert check_checksum(to)
    # Safety cap: refuse to send more than max_eth_to_send (when set).
    if max_eth_to_send and value > self.w3.toWei(max_eth_to_send, ):
        raise EtherLimitExceeded( % (value, max_eth_to_send))
    tx = {
        : to,
        : value,
        : gas,
        : gas_price,
    }
    return self.send_unsigned_transaction(tx, private_key=private_key,
                                          retry=retry,
                                          block_identifier=block_identifier)
Send ether using configured account :param to: to :param gas_price: gas_price :param value: value(wei) :param gas: gas, defaults to 22000 :param retry: Retry if a problem is found :param block_identifier: None default, 'pending' not confirmed txs :return: tx_hash
def in_unit_of(self, unit, as_quantity=False):
    """Return the current value converted to the given units.

    :param unit: an astropy.Unit instance, or a string convertible to
        one, like "1 / (erg cm**2 s)"
    :param as_quantity: if True return an astropy.Quantity, otherwise a
        plain floating point number (default False)
    """
    target_unit = u.Unit(unit)
    converted = self.as_quantity.to(target_unit)
    return converted if as_quantity else converted.value
Return the current value transformed to the new units :param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit instance, like "1 / (erg cm**2 s)" :param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number. Default is False :return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
def serializeTransform(transformObj):
    """Reserialize the transform data with some cleanups.

    # NOTE(review): the join separators and the literal fragments around
    # the command/number pieces were stripped from this source — restore
    # from VCS.
    """
    return .join([command +  + .join([scourUnitlessLength(number)
        for number in numbers]) +  for command, numbers in transformObj])
Reserializes the transform data with some cleanups.
def department_update(self, department_id, data, **kwargs):
    """Update a department by id.

    https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id

    :param department_id: id of the department to update
    :param data: body of the update request
    :return: the API call result
    """
    # The original had its docstring fused into the first statement,
    # corrupting the api_path assignment; reconstructed here.
    api_path = "/api/v2/departments/{department_id}"
    api_path = api_path.format(department_id=department_id)
    return self.call(api_path, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id
def _mine_flush(self, load, skip_verify=False):
    """Allow the minion to delete all of its own mine contents.

    # NOTE(review): the load key name, the opts keys, and the cache-bank
    # format string were stripped from this source — restore from VCS.
    """
    if not skip_verify and  not in load:
        return False
    if self.opts.get(, False) or self.opts.get(, False):
        return self.cache.flush(.format(load[]), )
    return True
Allow the minion to delete all of its own mine contents
def validate(self, dct):
    """Choose a schema for the client-request operation and validate the
    operation field; validation is skipped when no schema is registered
    for the txn type.

    # NOTE(review): the string arguments to _raise_invalid_fields were
    # stripped from this source — restore from VCS.
    """
    if not isinstance(dct, dict):
        self._raise_invalid_fields(, dct, )
    txn_type = dct.get(TXN_TYPE)
    if txn_type is None:
        self._raise_missed_fields(TXN_TYPE)
    if txn_type in self.operations:
        op = self.operations[txn_type]
        op.validate(dct)
Choose a schema for client request operation and validate the operation field. If the schema is not found skips validation. :param dct: an operation field from client request :return: raises exception if invalid request
def outLineReceived(self, line):
    """Handle a line of data received via stdout by logging it.

    Subclasses may override to also treat the line as a protocol line;
    upcalling this implementation is safe.

    # NOTE(review): the log_debug message/key argument was stripped from
    # this source — restore from VCS.
    """
    log_debug(, name=self.name, line=self.outFilter(line))
Handle data via stdout linewise. This is useful if you turned off buffering. In your subclass, override this if you want to handle the line as a protocol line in addition to logging it. (You may upcall this function safely.)
def validate_authentication(self, username, password, handler):
    """Authenticate a user with a password; raise AuthenticationFailed
    when either the credential check or the account lookup fails.

    # NOTE(review): the password keyword key in the authenticate() call
    # was stripped from this source — restore from VCS.
    """
    user = authenticate(
        **{self.username_field: username, : password}
    )
    account = self.get_account(username)
    if not (user and account):
        raise AuthenticationFailed("Authentication failed.")
authenticate user with password
def get_patient_expression(job, patient_dict):
    """
    Convenience function to get the gene and isoform expression from the
    patient dict.

    :param job: the Toil job (provides the fileStore)
    :param dict patient_dict: dict of patient info
    :return: dict mapping the rsem result file names to their FileIDs
    :rtype: dict
    """
    # NOTE(review): string literals were stripped in this chunk; the dict
    # key and the rsem file names below are reconstructed -- verify.
    expression_archive = job.fileStore.readGlobalFile(patient_dict['expression_file'])
    expression_archive = untargz(expression_archive, os.getcwd())
    output_dict = {}
    for filename in 'rsem.genes.results', 'rsem.isoforms.results':
        output_dict[filename] = job.fileStore.writeGlobalFile(
            os.path.join(expression_archive, filename))
    return output_dict
Convenience function to get the expression from the patient dict :param dict patient_dict: dict of patient info :return: dict mapping the gene and isoform expression file names to their file IDs :rtype: dict(str, toil.fileStore.FileID)
def run(self, cmd, timeout=None, key=None):
    """
    Run a command on the phablet device using ssh.

    :param cmd: a list of strings to execute as a command
    :param timeout: a timeout (in seconds) for device discovery
    :param key: a path to a public ssh key to use for connection
    :returns: the exit code of the command
    :raises TypeError: if ``cmd`` is not a list of strings

    This method will not allow you to capture stdout/stderr from the
    target process.  If you wish to do that, consider one of the
    subprocess functions together with :meth:`cmdline()`.
    """
    # validate the command shape before touching the device
    if not isinstance(cmd, list):
        raise TypeError("cmd needs to be a list")
    for element in cmd:
        if not isinstance(element, str):
            raise TypeError("cmd needs to be a list of strings")
    self.connect(timeout, key)
    return self._run_ssh(cmd)
Run a command on the phablet device using ssh :param cmd: a list of strings to execute as a command :param timeout: a timeout (in seconds) for device discovery :param key: a path to a public ssh key to use for connection :returns: the exit code of the command This method will not allow you to capture stdout/stderr from the target process. If you wish to do that please consider switching to one of the subprocess functions along with :meth:`cmdline()`.
def result(self, psd_state):
    """
    Return freqs and averaged PSD for the given center frequency.

    :param dict psd_state: accumulated PSD state holding the frequency
        array, the (summed) power array and the number of repeats
    :return: tuple of (freq_array, pwr_array)
    """
    # NOTE(review): the dict-key literals were stripped in this chunk; the
    # keys below are reconstructed -- verify against the accumulator.
    freq_array = numpy.fft.fftshift(psd_state['freq_array'])
    pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])

    if self._crop_factor:
        # drop crop_factor of the bins, split evenly between both edges
        crop_bins_half = round((self._crop_factor * self._bins) / 2)
        freq_array = freq_array[crop_bins_half:-crop_bins_half]
        pwr_array = pwr_array[crop_bins_half:-crop_bins_half]

    if psd_state['repeats'] > 1:
        # power was accumulated over multiple repeats -> average it
        pwr_array = pwr_array / psd_state['repeats']

    if self._log_scale:
        pwr_array = 10 * numpy.log10(pwr_array)

    return (freq_array, pwr_array)
Return freqs and averaged PSD for given center frequency
def validate(self):
    """
    Start validation of the tree and record the validation time.

    @see TreeNode.validate
    """
    # only the child nodes are needed here; the timeperiod keys were unused
    for child in self.root.children.values():
        child.validate()
    self.validation_timestamp = datetime.utcnow()
method starts validation of the tree. @see TreeNode.validate
def mismatches(s1, s2, context=0, eq=operator.eq):
    """Extract mismatched segments from aligned strings.

    ``s1`` and ``s2`` must have equal length (i.e. already be aligned).
    ``context`` is either a single int used on both sides or a
    ``(left, right)`` pair of context lengths; ``eq`` decides whether two
    aligned elements match.

    >>> list(mismatches('abcd', 'abxd'))
    [('c', 'x')]
    >>> list(mismatches('abcd', 'abxd', context=1))
    [('bcd', 'bxd')]
    """
    n = len(s1)
    assert(len(s2) == n)
    # BUG FIX: the original `lct, rct = context, context if ... else context`
    # bound the conditional only to the second element, so tuple contexts
    # broke; parenthesize so both forms unpack correctly.
    lct, rct = (context, context) if isinstance(context, int) else context
    i = None  # start index of the current mismatch run, or None
    for j in range(n):
        if eq(s1[j], s2[j]):
            if i is not None:
                # mismatch run just ended -- emit it with its context
                p, q = max(0, i - lct), min(j + rct, n)
                yield s1[p:q], s2[p:q]
                i = None
        elif i is None:
            i = j
    if i is not None:
        # trailing mismatch run reaches the end of the strings
        p = max(i - lct, 0)
        yield s1[p:], s2[p:]
extract mismatched segments from aligned strings >>> list(mismatches(*align('pharmacy', 'farmácia'), context=1)) [('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')] >>> list(mismatches(*align('constitution', 'constituição'), context=1)) [('ution', 'uição')] >>> list(mismatches(*align('idea', 'ideia'), context=1)) [('e a', 'eia')] >>> list(mismatches(*align('instructed', 'instruído'), context=1)) [('ucted', 'u ído')] >>> list(mismatches(*align('concluded', 'concluído'), context=1)) [('uded', 'uído')]
def inferMainPropertyType(uriref):
    """
    Attempt to reduce the many property types to 4 main ones
    (DatatypeProperty, AnnotationProperty, rdf:Property, ObjectProperty)
    without loading the OWL ontology.

    Returns None when no uriref is given; any type other than the three
    recognized ones is treated as an ``owl:ObjectProperty``.
    """
    if not uriref:
        return None
    # these three pass through unchanged; everything else collapses to
    # owl:ObjectProperty
    if uriref in (rdflib.OWL.DatatypeProperty,
                  rdflib.OWL.AnnotationProperty,
                  rdflib.RDF.Property):
        return uriref
    return rdflib.OWL.ObjectProperty
Attempt to reduce the property types to 4 main types (without the OWL ontology - which would be the propert way) In [3]: for x in g.all_properties: ...: print x.rdftype ...: http://www.w3.org/2002/07/owl#FunctionalProperty http://www.w3.org/2002/07/owl#FunctionalProperty http://www.w3.org/2002/07/owl#InverseFunctionalProperty http://www.w3.org/2002/07/owl#ObjectProperty http://www.w3.org/2002/07/owl#ObjectProperty http://www.w3.org/2002/07/owl#TransitiveProperty http://www.w3.org/2002/07/owl#TransitiveProperty etc.....
def expose(dists):
    """Exposes vendored code in isolated chroots.

    Any vendored distributions listed in ``dists`` will be unpacked to
    individual chroots for addition to the ``sys.path``.

    :param dists: A list of vendored distribution names to expose.
    :type dists: list of str
    :raise: :class:`ValueError` if any distributions to expose cannot be found.
    :returns: An iterator of exposed vendored distribution chroot paths.
    """
    from pex.common import safe_delete

    for path in VendorImporter.expose(dists, root=isolated()):
        # NOTE(review): the file-name literal was stripped in this chunk;
        # removing the chroot's __init__.py (so it imports as a plain
        # sys.path entry) is reconstructed from pex -- verify.
        safe_delete(os.path.join(path, '__init__.py'))
        yield path
Exposes vendored code in isolated chroots. Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for addition to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these vendored distributions and yield the two chroot paths they were unpacked to. :param dists: A list of vendored distribution names to expose. :type dists: list of str :raise: :class:`ValueError` if any distributions to expose cannot be found. :returns: An iterator of exposed vendored distribution chroot paths.
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
    """
    Return info whether the URL should be fetched, including checking
    robots.txt.

    Coroutine.
    """
    verdict = yield from self._fetch_rule.check_initial_web_request(
        self._item_session, request)
    return verdict
Return info whether the URL should be fetched including checking robots.txt. Coroutine.