code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def _getrsyncoptions(self): ignores = list(self.DEFAULT_IGNORES) ignores += self.config.option.rsyncignore ignores += self.config.getini("rsyncignore") return {"ignores": ignores, "verbose": self.config.option.verbose}
Get options to be passed for rsync.
def available(): builder_json = h2o.api("GET /3/ModelBuilders", data={"algo": "deepwater"}) visibility = builder_json["model_builders"]["deepwater"]["visibility"] if visibility == "Experimental": print("Cannot build a Deep Water model - no backend found.") return False else: return True
Returns True if a deep water model can be built, or False otherwise.
def describe(self): description = { : self._description, : self.name, } description.update(self.extra_params) return description
Provide a dictionary with information describing itself.
def read_locked(*args, **kwargs): def decorator(f): attr_name = kwargs.get(, ) @six.wraps(f) def wrapper(self, *args, **kwargs): rw_lock = getattr(self, attr_name) with rw_lock.read_lock(): return f(self, *args, **kwargs) return wrapper if kwargs or not args: return decorator else: if len(args) == 1: return decorator(args[0]) else: return decorator
Acquires & releases a read lock around call into decorated method. NOTE(harlowja): if no attribute name is provided then by default the attribute named '_lock' is looked for (this attribute is expected to be a :py:class:`.ReaderWriterLock`) in the instance object this decorator is attached to.
def autosave_all(self): for index in range(self.stack.get_stack_count()): self.autosave(index)
Autosave all opened files.
def access_required(config=None): def _access_required(http_method_handler): def secure_http_method_handler(self, *args, **kwargs): if not self.__provider_config__.authentication: _message = "Service available to authenticated users only, no auth context provider set in handler" authentication_error = prestans.exception.AuthenticationError(_message) authentication_error.request = self.request raise authentication_error if not self.__provider_config__.authentication.is_authorized_user(config): _message = "Service available to authorized users only" authorization_error = prestans.exception.AuthorizationError(_message) authorization_error.request = self.request raise authorization_error http_method_handler(self, *args, **kwargs) return wraps(http_method_handler)(secure_http_method_handler) return _access_required
Authenticates a HTTP method handler based on a custom set of arguments
def connection_id_to_public_key(self, connection_id): with self._connections_lock: try: connection_info = self._connections[connection_id] return connection_info.public_key except KeyError: return None
Get stored public key for a connection.
def get_pages(url): while True: yield url doc = html.parse(url).find("body") links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")] if not links: break url = urljoin(url, links[0].get())
Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat
def debug(self, i: int=None) -> str: head = "[" + colors.yellow("debug") + "]" if i is not None: head = str(i) + " " + head return head
Returns a debug message
def _insert_additionals(self, fmtos, seen=None): def get_dependencies(fmto): if fmto is None: return [] return fmto.dependencies + list(chain(*map( lambda key: get_dependencies(getattr(self, key, None)), fmto.dependencies))) seen = seen or {fmto.key for fmto in fmtos} keys = {fmto.key for fmto in fmtos} self.replot = self.replot or any( fmto.requires_replot for fmto in fmtos) if self.replot or any(fmto.priority >= START for fmto in fmtos): self.replot = True self.plot_data = self.data new_fmtos = dict((f.key, f) for f in self._fmtos if ((f not in fmtos and is_data_dependent( f, self.data)))) seen.update(new_fmtos) keys.update(new_fmtos) fmtos += list(new_fmtos.values()) if any(fmto.priority >= BEFOREPLOTTING for fmto in fmtos): new_fmtos = dict((f.key, f) for f in self._fmtos if ((f not in fmtos and f.update_after_plot))) fmtos += list(new_fmtos.values()) for fmto in set(self._fmtos).difference(fmtos): all_dependencies = get_dependencies(fmto) if keys.intersection(all_dependencies): fmtos.append(fmto) if any(fmto.requires_clearing for fmto in fmtos): self.cleared = True return list(self._fmtos) return fmtos
Insert additional formatoptions into `fmtos`. This method inserts those formatoptions into `fmtos` that are required because one of the following criteria is fullfilled: 1. The :attr:`replot` attribute is True 2. Any formatoption with START priority is in `fmtos` 3. A dependency of one formatoption is in `fmtos` Parameters ---------- fmtos: list The list of formatoptions that shall be updated seen: set The formatoption keys that shall not be included. If None, all formatoptions in `fmtos` are used Returns ------- fmtos The initial `fmtos` plus further formatoptions Notes ----- `fmtos` and `seen` are modified in place (except that any formatoption in the initial `fmtos` has :attr:`~Formatoption.requires_clearing` attribute set to True)
def _add_graph_level(graph, level, parent_ids, names, scores, normalized_scores, include_pad): for i, parent_id in enumerate(parent_ids): if not include_pad and names[i] == PAD_TOKEN: continue new_node = (level, i) parent_node = (level - 1, parent_id) raw_score = % float(scores[i]) if scores[i] is not None else norm_score = % float(normalized_scores[i]) if normalized_scores[i] is not None else graph.add_node(new_node) graph.node[new_node]["name"] = names[i] graph.node[new_node]["score"] = "[RAW] {}".format(raw_score) graph.node[new_node]["norm_score"] = "[NORM] {}".format(norm_score) graph.node[new_node]["size"] = 100 graph.add_edge(parent_node, new_node)
Adds a level to the passed graph
def set_property(self, key, value): value_type = type(value) if value_type not in [str, int, bool]: raise NotImplementedError( ) key_object = self.properties.findChild(name=, text=key) if key_object is None: key_object = self.soup.new_tag() key_object.string = key self.properties.append(key_object) value_object = self.soup.new_tag( {str: , int: , bool: str(value).lower()}[ value_type]) if value_type is not bool: value_object.string = str(value) self.properties.append(value_object) return value_object = key_object.find_next_sibling() key_object.decompose() value_object.decompose() self.set_property(key, value)
Set a new (or updating existing) key value pair. Args: key: A string containing the key namespace value: A str, int, or bool value Raises: NotImplementedError: an unsupported value-type was provided
def columnOptions( self, tableType ): if ( not tableType ): return [] schema = tableType.schema() return map(lambda x: x.name(), schema.columns())
Returns the column options for the inputed table type. :param tableType | <subclass of orb.Table> :return [<str>, ..]
def _convert_priority(p_priority): result = 0 prio_map = { : 1, : 5, : 6, : 7, : 8, : 9, } try: result = prio_map[p_priority] except KeyError: if p_priority: result = 9 return result
Converts todo.txt priority to an iCalendar priority (RFC 2445). Priority A gets priority 1, priority B gets priority 5 and priority C-F get priorities 6-9. This scheme makes sure that clients that use "high", "medium" and "low" show the correct priority.
def getOffsetFromRva(self, rva): offset = -1 s = self.getSectionByRva(rva) if s != offset: offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value else: offset = rva return offset
Converts an offset to an RVA. @type rva: int @param rva: The RVA to be converted. @rtype: int @return: An integer value representing an offset in the PE file.
def extract_data(self, page): response_keys = set(page.keys()) uncommon_keys = response_keys - self.common_keys for possible_data_key in uncommon_keys: element = page[possible_data_key] if isinstance(element, dict): return [self.representation(self.client, self.service_name, element)] if isinstance(element, list): return [self.representation(self.client, self.service_name, x) for x in element]
Extract the AppNexus object or list of objects from the response
def dumps(post, handler=None, **kwargs): if handler is None: handler = getattr(post, , None) or YAMLHandler() start_delimiter = kwargs.pop(, handler.START_DELIMITER) end_delimiter = kwargs.pop(, handler.END_DELIMITER) metadata = handler.export(post.metadata, **kwargs) return POST_TEMPLATE.format( metadata=metadata, content=post.content, start_delimiter=start_delimiter, end_delimiter=end_delimiter).strip()
Serialize a :py:class:`post <frontmatter.Post>` to a string and return text. This always returns unicode text, which can then be encoded. Passing ``handler`` will change how metadata is turned into text. A handler passed as an argument will override ``post.handler``, with :py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used as a default. :: >>> print(frontmatter.dumps(post)) --- excerpt: tl;dr layout: post title: Hello, world! --- Well, hello there, world.
def get_document_length(self, document): if document in self._documents: return self._documents[document] else: raise IndexError(DOCUMENT_DOES_NOT_EXIST)
Returns the number of terms found within the specified document.
def hicexplorer_basic_statistics(self): data = {} for file in self.mod_data: max_distance_key = total_pairs = self.mod_data[file][][0] try: self.mod_data[file][max_distance_key][0] except KeyError: max_distance_key = data_ = { : self.mod_data[file][][0], : self.mod_data[file][][0] / total_pairs, : self.mod_data[file][][0] / total_pairs, : self.mod_data[file][][0], max_distance_key: self.mod_data[file][max_distance_key][0], } data[self.mod_data[file][][0]] = data_ headers = OrderedDict() headers[] = { : .format(config.read_count_prefix), : .format(config.read_count_desc), : } headers[] = { : , : 100, : 0, : lambda x: x * 100, : } headers[] = { : , : 100, : 0, : lambda x: (1 - x) * 100, : , : } headers[] = { : , : , : , : } headers[max_distance_key] = { : , : max_distance_key + , : , : } self.general_stats_addcols(data, headers)
Create the general statistics for HiCExplorer.
def pad_length(x, d): try: x[0] except TypeError: x = d * [x] return np.array(x)
Return a vector appropriate to a dimensional space, using an input vector as a prompt depending on its type: - If the input is a vector, return that vector. - If the input is a scalar, return a vector filled with that value. Useful when a function expects an array specifying values along each axis, but wants to also accept a scalar value in case the length is the same in all directions. Parameters ---------- x: float or array-like The input parameter that may need padding. d: int The dimensional space to make `x` appropriate for. Returns ------- x_pad: array-like, shape (d,) The padded parameter.
def doLog(self, level, where, format, *args, **kwargs): if _canShortcutLogging(self.logCategory, level): return {} args = self.logFunction(*args) return doLog(level, self.logObjectName(), self.logCategory, format, args, where=where, **kwargs)
Log a message at the given level, with the possibility of going higher up in the stack. @param level: log level @type level: int @param where: how many frames to go back from the last log frame; or a function (to log for a future call) @type where: int (negative), or function @param kwargs: a dict of pre-calculated values from a previous doLog call @return: a dict of calculated variables, to be reused in a call to doLog that should show the same location @rtype: dict
def get_all(cls, include_disabled=True): if cls == BaseAccount: raise InquisitorError() account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name) if not include_disabled: qry = qry.filter(Account.enabled == 1) accounts = qry.find(Account.account_type_id == account_type_id) return {res.account_id: cls(res) for res in accounts}
Returns a list of all accounts of a given type Args: include_disabled (`bool`): Include disabled accounts. Default: `True` Returns: list of account objects
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, asynchronous=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): if not asynchronous: return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout) else: thread = self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout)) return thread
Makes the HTTP request (synchronous) and return the deserialized data. To make an async request, set the asynchronous parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. :param asynchronous bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: If asynchronous parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter asynchronous is False or missing, then the method will return the response directly.
def _empathy_status(status, message): ACCT_IFACE = DBUS_PROP_IFACE = ACCT_MAN_IFACE = ACCT_MAN_PATH = SP_IFACE = ( ) am_iface = _dbus_get_interface(ACCT_MAN_IFACE, ACCT_MAN_PATH, DBUS_PROP_IFACE) if am_iface: account_paths = am_iface.Get(ACCT_MAN_IFACE, ) for account_path in account_paths: try: account = _dbus_get_object(ACCT_MAN_IFACE, account_path) if account.Get(ACCT_IFACE, ) != 0: continue conn_path = account.Get(ACCT_IFACE, ) conn_iface = conn_path.replace("/", ".")[1:] sp_iface = _dbus_get_interface(conn_iface, conn_path, SP_IFACE) except dbus.exceptions.DBusException: continue for code in EMPATHY_CODE_MAP[status]: try: sp_iface.SetPresence(code, message) except dbus.exceptions.DBusException: pass else: break
Updates status and message for Empathy IM application. `status` Status type. `message` Status message.
def get_api_date(self): s date if between midnight and 10am Eastern time. Override this function in a subclass to change how the API date is calculated. %Y-%m-%dInvalid date \US/Easterns before api_date -= timedelta(days=1) self.date = api_date
Figure out the date to use for API requests. Assumes yesterday's date if between midnight and 10am Eastern time. Override this function in a subclass to change how the API date is calculated.
def _item_list(profile=None): * g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) return ret
Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list
def installed(name, default=False, user=None, opts=None, env=None): ret = {: name, : None, : , : {}} if __opts__[]: ret[] = .format(name) return ret ret = _check_rvm(ret, user) if ret[] is False: if not __salt__[](runas=user): ret[] = return ret else: return _check_and_install_ruby(ret, name, default, user=user, opts=opts, env=env) else: return _check_and_install_ruby(ret, name, default, user=user, opts=opts, env=env)
Verify that the specified ruby is installed with RVM. RVM is installed when necessary. name The version of ruby to install default : False Whether to make this ruby the default. user: None The user to run rvm as. env: None A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS) opts: None A list of option flags to pass to RVM (ie -C, --patch) .. versionadded:: 0.17.0
def deleteSettings(self, groupName=None): groupName = groupName if groupName else self.settingsGroupName settings = QtCore.QSettings() logger.info("Deleting {} from: {}".format(groupName, settings.fileName())) removeSettingsGroup(groupName)
Deletes registry items from the persistent store.
def assoc(_d, key, value): d = deepcopy(_d) d[key] = value return d
Associate a key with a value in a dictionary :param _d: a dictionary :param key: a key in the dictionary :param value: a value for the key :returns: a new dictionary >>> data = {} >>> new_data = assoc(data, 'name', 'Holy Grail') >>> new_data {'name': 'Holy Grail'} >>> data {} .. note:: the original dictionary is not modified
def get_supply_voltage(self, dest_addr_long=None): value = self._get_parameter(b"%V", dest_addr_long=dest_addr_long) return (hex_to_int(value) * (1200/1024.0)) / 1000
Fetches the value of %V and returns it as volts.
def cell_arrays(self): cdata = self.GetCellData() narr = cdata.GetNumberOfArrays() if hasattr(self, ): keys = list(self._cell_arrays.keys()) if narr == len(keys): if keys: if self._cell_arrays[keys[0]].size == self.n_cells: return self._cell_arrays else: return self._cell_arrays self._cell_arrays = CellScalarsDict(self) for i in range(narr): name = cdata.GetArrayName(i) self._cell_arrays[name] = self._cell_scalar(name) self._cell_arrays.enable_callback() return self._cell_arrays
Returns the all cell arrays
def init(image, root=None): * nbd = connect(image) if not nbd: return return mount(nbd, root)
Mount the named image via qemu-nbd and return the mounted roots CLI Example: .. code-block:: bash salt '*' qemu_nbd.init /srv/image.qcow2
def can_create_catalog_with_record_types(self, catalog_record_types): if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=catalog_record_types) return True
Tests if this user can create a single ``Catalog`` using the desired record types. While ``CatalogingManager.getCatalogRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Catalog``. Providing an empty array tests if a ``Catalog`` can be created with no records. arg: catalog_record_types (osid.type.Type[]): array of catalog record types return: (boolean) - ``true`` if ``Catalog`` creation using the specified record ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``catalog_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def is_point_layer(layer): try: return (layer.type() == QgsMapLayer.VectorLayer) and ( layer.geometryType() == QgsWkbTypes.PointGeometry) except AttributeError: return False
Check if a QGIS layer is vector and its geometries are points. :param layer: A vector layer. :type layer: QgsVectorLayer, QgsMapLayer :returns: True if the layer contains points, otherwise False. :rtype: bool
def normalize_ext_rename(filepath): logger.debug( + str(filepath)) new_file_path = normalize_ext(filepath) logger.debug( + str(new_file_path)) filepath = rename_file(filepath, new_file_path) logger.debug( + str(filepath)) return filepath
normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True
async def connect(self, retry=2): url = API_ENDPOINT_1 + headers = { "Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", } payload = {"account": self._username, "password": self._password} try: with async_timeout.timeout(self._timeout): resp = await self.websession.post(url, data=json.dumps(payload), headers=headers) except (asyncio.TimeoutError, aiohttp.ClientError): if retry < 1: _LOGGER.error("Error connecting to Mill", exc_info=True) return False return await self.connect(retry - 1) result = await resp.text() if in result: _LOGGER.error() return False if in result: _LOGGER.error() return False data = json.loads(result) token = data.get() if token is None: _LOGGER.error() return False user_id = data.get() if user_id is None: _LOGGER.error() return False self._token = token self._user_id = user_id return True
Connect to Mill.
def tabulate(lol, headers, eol=): yield % .join(headers) + eol yield % .join([ * len(w) for w in headers]) + eol for row in lol: yield % .join(str(c) for c in row) + eol
Use the pypi tabulate package instead!
def get_range(self, ignore_blank_lines=True): ref_lvl = self.trigger_level first_line = self._trigger.blockNumber() block = self._trigger.next() last_line = block.blockNumber() lvl = self.scope_level if ref_lvl == lvl: ref_lvl -= 1 while (block.isValid() and TextBlockHelper.get_fold_lvl(block) > ref_lvl): last_line = block.blockNumber() block = block.next() if ignore_blank_lines and last_line: block = block.document().findBlockByNumber(last_line) while block.blockNumber() and block.text().strip() == : block = block.previous() last_line = block.blockNumber() return first_line, last_line
Gets the fold region range (start and end line). .. note:: Start line do no encompass the trigger line. :param ignore_blank_lines: True to ignore blank lines at the end of the scope (the method will rewind to find that last meaningful block that is part of the fold scope). :returns: tuple(int, int)
def get_body(self, msg): body = "" charset = "" if msg.is_multipart(): for part in msg.walk(): ctype = part.get_content_type() cdispo = str(part.get()) if ctype == and not in cdispo: body = part.get_payload(decode=True) charset = part.get_content_charset() break else: body = msg.get_payload(decode=True) charset = msg.get_content_charset() return body.decode(charset)
Extracts and returns the decoded body from an EmailMessage object
def shutdown(self): self._shuttingDown = {key: Deferred() for key in self.cachedConnections.keys()} return DeferredList( [maybeDeferred(p.transport.loseConnection) for p in self.cachedConnections.values()] + self._shuttingDown.values())
Disconnect all cached connections. @returns: a deferred that fires once all connection are disconnected. @rtype: L{Deferred}
def select_larva(self): action = sc_pb.Action() action.action_ui.select_larva.SetInParent() return action
Select all larva.
def backoff( max_tries=constants.BACKOFF_DEFAULT_MAXTRIES, delay=constants.BACKOFF_DEFAULT_DELAY, factor=constants.BACKOFF_DEFAULT_FACTOR, exceptions=None): if max_tries <= 0: raise ValueError(.format(max_tries)) if delay <= 0: raise ValueError(.format(delay)) if factor <= 1: raise ValueError(.format(factor)) def outter(f): def inner(*args, **kwargs): m_max_tries, m_delay = max_tries, delay while m_max_tries > 0: try: retval = f(*args, **kwargs) except exceptions: logger.exception( , f, max_tries, delay, factor, exceptions) m_max_tries -= 1 if m_max_tries <= 0: raise time.sleep(m_delay) m_delay *= factor else: return retval return inner return outter
Implements an exponential backoff decorator which will retry decorated function upon given exceptions. This implementation is based on `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from the *Python Decorator Library*. :param int max_tries: Number of tries before give up. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`. :param int delay: Delay between retries (in seconds). Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`. :param int factor: Multiply factor in which delay will be increased for the next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`. :param exceptions: Tuple of exception types to catch that triggers retry. Any exception not listed will break the decorator and retry routines will not run. :type exceptions: tuple[Exception]
def list(self, request, *args, **kwargs): return super(ProjectViewSet, self).list(request, *args, **kwargs)
To get a list of projects, run **GET** against */api/projects/* as authenticated user. Here you can also check actual value for project quotas and project usage Note that a user can only see connected projects: - projects that the user owns as a customer - projects where user has any role Supported logic filters: - ?can_manage - return a list of projects where current user is manager or a customer owner; - ?can_admin - return a list of projects where current user is admin;
def by_current_session(cls): session = Session.current_session() if session is None: return None return cls.where_id(session.user_id)
Returns current user session
def generate_raml_resource_types(module): from pale import extract_endpoints, extract_resources, is_pale_module if not is_pale_module(module): raise ValueError( ) module_resource_types = extract_resources(module) raml_resource_types_unsorted = {} for resource in module_resource_types: resource_name = resource.__name__ raml_resource_types_unsorted[resource_name] = document_resource(resource) if hasattr(resource, "_description"): modified_description = clean_description(resource._description) raml_resource_types_unsorted[resource_name]["description"] = modified_description raml_resource_types_doc = OrderedDict(sorted(raml_resource_types_unsorted.items(), key=lambda t: t[0])) output = StringIO() indent = " " ignored_resources = [] for resource_type in raml_resource_types_doc: this_resource_type = raml_resource_types_doc[resource_type] if resource_type not in ignored_resources: output.write(indent + resource_type + ":\n") indent += " " if this_resource_type.get("description") != None: modified_description = clean_description(this_resource_type["description"]) output.write(indent + "description: " + modified_description + "\n") if len(this_resource_type["fields"]) == 0: this_type = "object" if this_resource_type.get("_underlying_model") != None: if this_resource_type["_underlying_model"] != object: if hasattr(this_resource_type._underlying_model, "_value_type") \ and this_resource_type["_underlying_model"]._value_type not in ignored_resources: this_type = this_resource_type["_underlying_model"]._value_type output.write(indent + "type: " + this_type + "\n") indent = indent[:-2] else: output.write(indent + "properties:\n") indent += " " sorted_fields = OrderedDict(sorted(this_resource_type["fields"].items(), key=lambda t: t[0])) for field in sorted_fields: output.write(indent + field + ":\n") properties = sorted_fields[field] indent += " " if "_underlying_model" in this_resource_type and this_resource_type["_underlying_model"] == object: output.write(indent + "type: 
base\n") elif "item_type" in properties: output.write(indent + "type: array\n") output.write(indent + "items: " + properties["item_type"] + "\n") elif "type" in properties: output.write(indent + "type: " + properties["type"].replace(" ", "_") + "\n") if properties.get("extended_description") != None: modified_description = clean_description(properties["extended_description"]) output.write(indent + "description: " + modified_description + "\n") elif properties.get("description") != None: modified_description = clean_description(properties["description"]) output.write(indent + "description: " + modified_description + "\n") if properties.get("default_fields") != None: output.write(indent + "properties:\n") indent += " " for field_name in sorted(properties["default_fields"]): output.write(indent + field_name + ": string\n") indent = indent[:-2] indent = indent[:-2] indent = indent[:-4] raml_resource_types = output.getvalue() output.close() return raml_resource_types
Compile a Pale module's resource documentation into RAML format. RAML calls Pale resources 'resourceTypes'. This function converts Pale resources into the RAML resourceType format. The returned string should be appended to the RAML documentation string before it is returned.
def request_stop(self, message=, exit_code=0): if self.is_master: self.daemons_stop(timeout=self.conf.daemons_stop_timeout) super(Arbiter, self).request_stop(message, exit_code)
Stop the Arbiter daemon :return: None
def qemu_rebase(target, backing_file, safe=True, fail_on_error=True): cmd = [, , , backing_file, target] if not safe: cmd.insert(2, ) return run_command_with_validation( cmd, fail_on_error, msg=.format( target=target, backing_file=backing_file ) )
changes the backing file of 'source' to 'backing_file' If backing_file is specified as "" (the empty string), then the image is rebased onto no backing file (i.e. it will exist independently of any backing file). (Taken from qemu-img man page) Args: target(str): Path to the source disk backing_file(str): path to the base disk safe(bool): if false, allow unsafe rebase (check qemu-img docs for more info)
def to_canstrat(self, key, log, lith_field, filename=None, as_text=False): if (filename is None): if (not as_text): m = "You must provide a filename or set as_text to True." raise WellError(m) strip = self.data[key] strip = strip.fill() record = {1: [well_to_card_1(self)], 2: [well_to_card_2(self, key)], 8: [], 7: [interval_to_card_7(iv, lith_field) for iv in strip] } result = for c in [1, 2, 8, 7]: for d in record[c]: result += write_row(d, card=c, log=log) if as_text: return result else: with open(filename, ) as f: f.write(result) return None
Make a Canstrat DAT (aka ASCII) file. TODO: The data part should probably belong to striplog, and only the header should be written by the well. Args: filename (str) key (str) log (str): the log name, should be 6 characters. lith_field (str) the name of the lithology field in the striplog's Primary component. Must match the Canstrat definitions. filename (str) as_text (bool): if you don't want to write a file.
def tau_reduction(ms, rate, n_per_decade): ms = np.int64(ms) keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) - np.rint(n_per_decade*np.log10(ms[:-1]))) ms = ms[:-1] assert len(ms) == len(keep) ms = ms[keep] taus = ms/float(rate) return ms, taus
Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values
def clean_build(self): import shutil if self.build_fs.exists: try: shutil.rmtree(self.build_fs.getsyspath()) except NoSysPathError: pass
Delete the build directory and all ingested files
async def main():
    """Demo entry point: log in to the Tile API and print all tiles.

    NOTE(review): the Client() credential arguments (presumably username
    and password string literals) and the print() header strings were
    lost in extraction — restore before running.
    """
    async with ClientSession() as websession:
        try:
            client = Client(, , websession)
            await client.async_init()

            print()
            print(await client.tiles.all())

            print()
            print(await client.tiles.all(show_inactive=True))
        except TileError as err:
            print(err)
Run.
def plan_results(self, project_key, plan_key, expand=None, favourite=False,
                 clover_enabled=False, label=None, issue_key=None,
                 start_index=0, max_results=25):
    """Fetch build results for one plan.

    Thin delegate to :meth:`results`, pinning the project/plan pair and
    forwarding all filtering options unchanged.

    :param project_key: project to query.
    :param plan_key: plan within the project.
    :param expand: optional expansion spec.
    :param favourite: restrict to favourite plans.
    :param clover_enabled: restrict to Clover-enabled builds.
    :param label: optional label filter.
    :param issue_key: optional issue filter.
    :param start_index: paging offset.
    :param max_results: paging size.
    :return: whatever :meth:`results` returns.
    """
    options = dict(expand=expand,
                   favourite=favourite,
                   clover_enabled=clover_enabled,
                   label=label,
                   issue_key=issue_key,
                   start_index=start_index,
                   max_results=max_results)
    return self.results(project_key, plan_key, **options)
Get Plan results :param project_key: :param plan_key: :param expand: :param favourite: :param clover_enabled: :param label: :param issue_key: :param start_index: :param max_results: :return:
def port_tag_details(cls, tags):
    """Scan *tags* for the first port-info tag and decode it.

    Args:
        tags: iterable of tag strings to check.

    Returns:
        None when no tag matches, otherwise a tuple
        (is_source, port, extra) where ``port`` is a ``cls`` instance.
    """
    for candidate in tags:
        m = port_tag_re.match(candidate)
        if m is None:
            continue
        source_sink, port_name, extra = m.groups()
        return source_sink == "source", cls(port_name), extra
Search tags for port info, returning it Args: tags: A list of tags to check Returns: None or (is_source, port, connected_value|disconnected_value) where port is one of the Enum entries of Port
def _configure(cls, **defaults): for attr in defaults: setattr(cls, attr, defaults[attr])
Updates class-level defaults for :class:`_Options` container.
def moduli_to_velocities(rho, K_s, G):
    """Convert elastic moduli to seismic velocities (Burnman support).

    :param rho: density in kg/m^3
    :param K_s: adiabatic bulk modulus in Pa
    :param G: shear modulus in Pa
    :return: (bulk sound speed, shear velocity)
    """
    bulk_sound_speed = np.sqrt(K_s / rho)
    shear_velocity = np.sqrt(G / rho)
    return bulk_sound_speed, shear_velocity
convert moduli to velocities

mainly to support Burnman operations

:param rho: density in kg/m^3
:param K_s: adiabatic bulk modulus in Pa
:param G: shear modulus in Pa
:return: bulk sound speed and shear velocity
def segment_content_handler():
    """Build a SAX `ContentHandler` that reads only segment-related
    tables (segment, segment_definer, segment_summary) from LIGO_LW XML.
    """
    from ligo.lw.lsctables import (SegmentTable, SegmentDefTable,
                                   SegmentSumTable)
    from ligo.lw.ligolw import PartialLIGOLWContentHandler

    def _filter(name, attrs):
        # Accept an element if it matches any of the three segment tables.
        return reduce(
            operator.or_,
            [table_.CheckProperties(name, attrs)
             for table_ in (SegmentTable, SegmentDefTable, SegmentSumTable)])

    return build_content_handler(PartialLIGOLWContentHandler, _filter)
Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
def group_join(
        self,
        inner_enumerable,
        outer_key=lambda x: x,
        inner_key=lambda x: x,
        result_func=lambda x: x
):
    """Group-join this enumerable with *inner_enumerable*.

    :param inner_enumerable: inner Enumerable to join to self.
    :param outer_key: key selector for the outer (self) elements.
    :param inner_key: key selector for the inner elements.
    :param result_func: transform applied to each (outer, group) result.
    :return: new Enumerable of the group-join result.
    :raises TypeError: if inner_enumerable is not an Enumerable.
    """
    if not isinstance(inner_enumerable, Enumerable):
        raise TypeError(
            u"inner enumerable parameter must be an instance of Enumerable"
        )
    # Cartesian product of outer and (empty-padded) inner elements, then
    # group by the outer key and filter each group on the inner key.
    # NOTE(review): key_names=[] looks like a stripped string literal —
    # g.key.id is referenced below, suggesting key_names=['id'].  Confirm.
    return Enumerable(
        itertools.product(
            self,
            inner_enumerable.default_if_empty()
        )
    ).group_by(
        key_names=[],
        key=lambda x: outer_key(x[0]),
        result_func=lambda g: (
            g.first()[0],
            g.where(
                lambda x: inner_key(x[1]) == g.key.id).select(
                lambda x: x[1]
            )
        )
    ).select(result_func)
Return enumerable of group join between two enumerables :param inner_enumerable: inner enumerable to join to self :param outer_key: key selector of outer enumerable as lambda expression :param inner_key: key selector of inner enumerable as lambda expression :param result_func: lambda expression to transform the result of group join :return: new Enumerable object
def _unicode(self):
    """Return the screen as one printable unicode string, one
    newline-terminated line per screen row.

    NOTE(review): the separator string literals (presumably u'\\n' for
    the row join and u'' for the cell join) were lost in extraction —
    as written, the bare ``u`` names are NameErrors.  TODO restore.
    """
    str
    return u.join ([ u.join(c) for c in self.w ])
This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.
def term_with_coeff(term, coeff):
    """Return a copy of *term* with its coefficient replaced.

    :param PauliTerm term: the term to duplicate.
    :param Number coeff: new coefficient value.
    :returns: a new PauliTerm equal to *term* but with coefficient
        ``complex(coeff)``.
    :rtype: PauliTerm
    :raises ValueError: if *coeff* is not a Number.
    """
    if not isinstance(coeff, Number):
        raise ValueError("coeff must be a Number")
    duplicate = term.copy()
    duplicate.coefficient = complex(coeff)
    return duplicate
Change the coefficient of a PauliTerm. :param PauliTerm term: A PauliTerm object :param Number coeff: The coefficient to set on the PauliTerm :returns: A new PauliTerm that duplicates term but sets coeff :rtype: PauliTerm
def _assert_explicit_vr(dicom_input):
    """Raise ConversionError if the multiframe DICOM input does not use
    an explicit VR transfer syntax.

    NOTE(review): the transfer-syntax UID being compared against and the
    ConversionError message were lost in extraction — restore (the
    implicit-VR little-endian UID is '1.2.840.10008.1.2').
    """
    if settings.validate_multiframe_implicit:
        header = dicom_input[0]
        # (0002,0010) is the TransferSyntaxUID element of the file meta.
        if header.file_meta[0x0002, 0x0010].value == :
            raise ConversionError()
Assert that explicit vr is used
def is_equivalent(self, other):
    """Return ``True`` if this IPA character is equivalent to *other*.

    *other* may be:

    1. a Unicode string containing the character's representation,
    2. a Unicode string of space-separated descriptors,
    3. a list of descriptor strings, or
    4. another IPAChar.

    :rtype: bool
    """
    # Fast path: direct match against our Unicode representation.
    if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other):
        return True
    if isinstance(other, IPAChar):
        return self.canonical_representation == other.canonical_representation
    try:
        # Treat `other` as descriptors; any failure to build an IPAChar
        # from it means "not equivalent".
        return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descriptors, 3. a list of Unicode strings, containing descriptors, and 4. another IPAChar. :rtype: bool
def _find_spelling_errors_in_chunks(chunks,
                                    contents,
                                    valid_words_dictionary=None,
                                    technical_words_dictionary=None,
                                    user_dictionary_words=None):
    """Yield a spelling-error record for every misspelling found in the
    given chunks, checked against the valid/technical/user word sets.
    """
    for chunk in chunks:
        for error in spellcheck_region(chunk.data,
                                       valid_words_dictionary,
                                       technical_words_dictionary,
                                       user_dictionary_words):
            # Translate the chunk-relative column to a file-relative one.
            col_offset = _determine_character_offset(error.line_offset,
                                                     error.column_offset,
                                                     chunk.column)
            msg = _SPELLCHECK_MESSAGES[error.error_type].format(error.word)
            # Line offsets are likewise shifted by the chunk's start line.
            yield _populate_spelling_error(error.word,
                                           error.suggestions,
                                           contents,
                                           error.line_offset +
                                           chunk.line,
                                           col_offset,
                                           msg)
For each chunk and a set of valid and technical words, find errors.
def set_as_error(self, color=Qt.red):
    """Mark the text as a syntax error with a wavy underline.

    :param color: underline colour (QtGui.QColor); defaults to red.
    """
    fmt = self.format
    fmt.setUnderlineStyle(QTextCharFormat.WaveUnderline)
    fmt.setUnderlineColor(color)
Highlights text as a syntax error. :param color: Underline color :type color: QtGui.QColor
def choose_parent_view(self, request):
    """Serve a view that lets the user pick a parent page for a new
    Page-based object (used when more than one parent is possible).

    The view class can be overridden via ``choose_parent_view_class``.

    NOTE(review): the kwargs key (a string literal, presumably
    'model_admin') was lost in extraction — restore.
    """
    kwargs = {: self}
    view_class = self.choose_parent_view_class
    return view_class.as_view(**kwargs)(request)
Instantiates a class-based view to provide a view that allows a parent page to be chosen for a new object, where the assigned model extends Wagtail's Page model, and there is more than one potential parent for new instances. The view class used can be overridden by changing the 'choose_parent_view_class' attribute.
def order_by(self, order_attribute):
    """Return the items sorted ascending by the named attribute.

    :param order_attribute: name of the attribute to sort on.
    :return: a new sorted list (the underlying collection is untouched).
    """
    # sorted() already returns a fresh list; the original copy loop
    # (build-and-append over the sorted iterable) was redundant.
    return sorted(self.items, key=lambda item: getattr(item, order_attribute))
Return the list of items in a certain order
def MakeSuiteFromHist(hist, name=None):
    """Build a normalized Suite from a Hist object.

    Args:
        hist: Hist object.
        name: optional string name; defaults to the hist's own name.

    Returns:
        Suite object.
    """
    if name is None:
        name = hist.name
    # Copy the mapping so the new suite cannot mutate the source hist.
    copied = dict(hist.GetDict())
    return MakeSuiteFromDict(copied, name)
Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object
def clean(args):
    """%prog clean

    Removes all symlinks from current folder
    """
    parser = OptionParser(clean.__doc__)
    opts, args = parser.parse_args(args)

    # Only symlinks are removed; regular files/dirs are left alone.
    for link_name in os.listdir(os.getcwd()):
        if op.islink(link_name):
            logging.debug("remove symlink `{0}`".format(link_name))
            os.unlink(link_name)
%prog clean Removes all symlinks from current folder
def prefix_items(self, prefix, strip_prefix=False):
    """Yield all (key, value) pairs whose key begins with *prefix*.

    :param prefix: lexical prefix for keys to search.
    :param strip_prefix: when True, strip the prefix from yielded keys.
    :yields: (key, value) pairs, in store order, stopping at the first
        key that no longer carries the prefix.
    """
    cut = len(prefix) if strip_prefix else 0
    for key, value in self.items(key_from=prefix):
        if not key.startswith(prefix):
            break
        yield key[cut:], value
Get all (key, value) pairs with keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All (key, value) pairs in the store where the keys begin with the ``prefix``.
def action(args):
    """Roll back the last *args.n* operations on a refpkg.

    First verifies that n rollbacks are possible (returning exit status 1
    if not), then performs them.  Returns 0 on success.

    NOTE(review): several string literals (log messages and the rollback
    history key walked through `q`) were lost in extraction — restore.
    """
    log.info()
    r = refpkg.Refpkg(args.refpkg, create=False)
    # Walk the rollback chain first so nothing is mutated unless all
    # n rollbacks are actually available.
    q = r.contents
    for i in range(args.n):
        if q[] is None:
            log.error(
                .format(args.n, i))
            return 1
        else:
            q = q[]
    for i in range(args.n):
        r.rollback()
    return 0
Roll back commands on a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and n (giving the number of operations to roll back).
def on_created(self, event, dry_run=False, remove_uploaded=True):
    """Handle a file/directory creation event: delegate to the base
    handler, then log the event.

    NOTE(review): `dry_run` and `remove_uploaded` are accepted but unused
    here — presumably consumed by subclasses or later revisions; confirm.
    """
    super(ArchiveEventHandler, self).on_created(event)
    log.info("created: %s", event)
Called when a file (or directory) is created.
def mouseUp(x=None, y=None, button=, duration=0.0, tween=linear, pause=None, _pause=True):
    """Release a mouse button at (x, y), without pressing it first.

    x/y default to the current mouse position.  `button` selects which
    button to release: 'left', 'middle', 'right' or 1/2/3; a ValueError
    is raised otherwise.

    NOTE(review): every button-name string literal (including the default
    for `button`, presumably 'left') was lost in extraction — as written
    this is a syntax error.  TODO restore.
    """
    if button not in (, , , 1, 2, 3):
        raise ValueError("button argument must be one of (, , , 1, 2, 3), not %s" % button)

    _failSafeCheck()
    x, y = _unpackXY(x, y)
    # Move to the target point first so the release happens there.
    _mouseMoveDrag(, x, y, 0, 0, duration=0, tween=None)

    x, y = platformModule._position()
    if button == 1 or str(button).lower() == :
        platformModule._mouseUp(x, y, )
    elif button == 2 or str(button).lower() == :
        platformModule._mouseUp(x, y, )
    elif button == 3 or str(button).lower() == :
        platformModule._mouseUp(x, y, )

    _autoPause(pause, _pause)
Performs releasing a mouse button up (but not down beforehand). The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): The x position on the screen where the mouse up happens. None by default. If tuple, this is used for x and y. If x is a str, it's considered a filename of an image to find on the screen with locateOnScreen() and click the center of. y (int, float, None, optional): The y position on the screen where the mouse up happens. None by default. button (str, int, optional): The mouse button released. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. Returns: None Raises: ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
def value(self):
    """Return this simple symbol definition as a plain dictionary."""
    payload = {
        "type": "simple",
        "symbol": self.symbol.value,
        "label": self.label,
        "description": self.description,
        "rotationType": self.rotationType,
        "rotationExpression": self.rotationExpression,
    }
    return payload
returns object as dictionary
async def get_box_ids_json(self) -> str:
    """Return a JSON object listing the unique schema ids, cred def ids
    and rev reg ids over all credentials in the wallet.

    NOTE(review): the dict-key string literals (presumably 'schema_id',
    'cred_def_id', 'rev_reg_id') and the logger messages were lost in
    extraction — restore.

    :return: JSON string with id lists under the three keys.
    """
    LOGGER.debug()

    s_ids = set()
    cd_ids = set()
    rr_ids = set()
    for cred in json.loads(await self.get_creds_display_coarse()):
        s_ids.add(cred[])
        cd_ids.add(cred[])
        if cred[]:  # rev reg id may be absent/None for non-revocable creds
            rr_ids.add(cred[])

    rv = json.dumps({
        : list(s_ids),
        : list(cd_ids),
        : list(rr_ids)
    })
    LOGGER.debug(, rv)
    return rv
Return json object on lists of all unique box identifiers for credentials in wallet: schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g., :: { "schema_id": [ "R17v42T4pk...:2:tombstone:1.2", "9cHbp54C8n...:2:business:2.0", ... ], "cred_def_id": [ "R17v42T4pk...:3:CL:19:0", "9cHbp54C8n...:3:CL:37:0", ... ] "rev_reg_id": [ "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0", "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2", ... ] } :return: tuple of sets for schema ids, cred def ids, rev reg ids
def move(src, dest, user=None):
    """Move or rename HDFS path *src* to *dest*.

    Paths may live on different HDFS instances; host/port are parsed out
    of each path.  Both filesystem handles are closed afterwards.
    """
    src_host, src_port, src_path = path.split(src, user)
    dest_host, dest_port, dest_path = path.split(dest, user)
    src_fs = hdfs(src_host, src_port, user)
    dest_fs = hdfs(dest_host, dest_port, user)
    try:
        retval = src_fs.move(src_path, dest_fs, dest_path)
        return retval
    finally:
        # NOTE(review): if src_fs.close() raises, dest_fs leaks — confirm
        # whether close() can raise here.
        src_fs.close()
        dest_fs.close()
Move or rename src to dest.
def do_flipper(parser, token):
    """Template tag handler for {% flipper user_key feature %} ...
    {% end... %}: parse up to the closing tag and return a FlipperNode
    bound to the user-key and feature arguments.

    NOTE(review): the end-tag string literal passed to parser.parse()
    (presumably 'endflipper') was lost in extraction — restore.
    """
    nodelist = parser.parse((,))
    tag_name, user_key, feature = token.split_contents()
    parser.delete_first_token()
    return FlipperNode(nodelist, user_key, feature)
The flipper tag takes two arguments: the user to look up and the feature to compare against.
def validate_values(self, definition):
    """Validate the field values of a scalar *definition* dict.

    Checks the scalar kind, collection policy, cpp_guard,
    record_in_processes and expiration; raises ParserError on any
    unexpected value.  Only runs under strict type checks.

    NOTE(review): the definition-key literals, error-message strings and
    the valid collection-policy list members were lost in extraction —
    restore before relying on these checks.

    :param definition: dictionary of scalar properties.
    :raises ParserError: if a field contains an unexpected value.
    """
    if not self._strict_type_checks:
        return

    scalar_kind = definition.get()
    if scalar_kind not in SCALAR_TYPES_MAP.keys():
        raise ParserError(self._name + + scalar_kind +
                          .format(BASE_DOC_URL))

    collection_policy = definition.get(, None)
    if collection_policy and collection_policy not in [, ]:
        raise ParserError(self._name + + collection_policy +
                          .format(BASE_DOC_URL))

    cpp_guard = definition.get()
    if cpp_guard and re.match(r, cpp_guard):
        raise ParserError(self._name + + cpp_guard +
                          .format(BASE_DOC_URL))

    record_in_processes = definition.get(, [])
    for proc in record_in_processes:
        if not utils.is_valid_process_name(proc):
            raise ParserError(self._name + + proc +
                              .format(BASE_DOC_URL))

    expires = definition.get()
    if not utils.validate_expiration_version(expires) and self._strict_type_checks:
        raise ParserError(
            .format(self._name, expires, BASE_DOC_URL))
This function checks that the fields have the correct values. :param definition: the dictionary containing the scalar properties. :raises ParserError: if a scalar definition field contains an unexpected value.
def draw_noisy_time_series(self, SNR=1.0, red_noise_ratio=0.25, outlier_ratio=0.0):
    """Draw a noisy realization of the clean series:
    y_noisy = A*y_clean + white noise + red noise (+ optional outliers),
    with A chosen so the result matches the requested SNR (in dB).

    :param SNR: signal-to-noise ratio in dB, 10*log10(var_signal/var_noise).
    :param red_noise_ratio: red-noise variance as a fraction of the mean
        white-noise variance; must be >= 0.
    :param outlier_ratio: fraction of points given outliers, in [0, 1].
    :return: (t, y_noisy, s) — times, contaminated signal, uncertainties.
    :raises ValueError: on out-of-range ratio arguments.
    """
    if outlier_ratio < 0.0 or outlier_ratio > 1.0:
        raise ValueError("Outlier ratio must be in [0, 1]")
    if red_noise_ratio < 0.0:
        raise ValueError("Red noise ratio must be positive")
    np.random.seed(self.rseed)  # reproducible draws
    t = self.t
    y_clean = self.y_clean
    N = len(t)
    # White noise with per-sample std s; red noise is a first-order
    # Markov process with the given variance and unit time-scale.
    s, mean_s_squared = generate_uncertainties(N, rseed=self.rseed)
    white_noise = np.random.multivariate_normal(np.zeros(N,), np.diag(s**2))
    red_noise_variance = mean_s_squared*red_noise_ratio
    red_noise = first_order_markov_process(t, red_noise_variance, 1.0, rseed=self.rseed)
    noise = white_noise + red_noise
    var_noise = mean_s_squared + red_noise_variance
    SNR_unitless = 10.0**(SNR/10.0)
    # Scale amplitude so var(A*y_clean)/var(noise) hits the target SNR.
    # NOTE(review): this assumes y_clean has unit variance — confirm.
    self.A = np.sqrt(SNR_unitless*var_noise)
    y = self.A*y_clean
    y_noisy = y + noise
    # Inject positive outliers (5-10 sigma of the signal) at a random
    # subset of points of expected size outlier_ratio*N.
    rperm = np.where(np.random.uniform(size=N) < outlier_ratio)[0]
    outlier = np.random.uniform(5.0*np.std(y), 10.0*np.std(y), size=len(rperm))
    y_noisy[rperm] += outlier
    return t, y_noisy, s
A function to draw a noisy time series based on the clean model such that y_noisy = y + yw + yr, where yw is white noise, yr is red noise and y will be rescaled so that y_noisy complies with the specified signal-to-noise ratio (SNR). Parameters --------- SNR: float Signal-to-noise ratio of the resulting contaminated signal in decibels [dB]. SNR is defined as SNR = 10*log(var_signal/var_noise), hence NR var_signal/var_noise 10 10 7 5 3 2 0 1 -3 0.5 -7 0.2 -10 0.1 red_noise_variance: float in [0, 1] The variance of the red noise component is set according to Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties that explain the noise perfectly outlier_ratio: float in [0, 1] Percentage of outlier data points Returns ------- t: ndarray Vector containing the time instants y_noisy: ndarray Vector containing the contaminated signal s: ndarray Vector containing the uncertainties associated to the white noise component
def remove_task_db(self, fid, force=False):
    """Remove a task (and its slices) from the database.

    (Original docstring: 将任务从数据库中删除 — "delete the task from the
    database".)

    NOTE(review): the SQL string literal was lost in extraction — as
    written, `sql` is referenced before assignment.  TODO restore the
    DELETE statement (and check the second execute() parameter, which
    also appears truncated).
    """
    self.remove_slice_db(fid)
    sql = 
    self.cursor.execute(sql, [fid, ])
    self.check_commit(force=force)
将任务从数据库中删除
def migrate_abci_chain(self):
    """Generate and record a new ABCI chain id from the current chain and
    latest block height, e.g. `chain-X` -> `chain-X-migrated-at-height-5`.

    No-op at genesis (no known chain).  New blocks are not accepted until
    an InitChain request matches the new chain id and validator set.

    NOTE(review): the suffix string literal (presumably
    '-migrated-at-height-') and the dict-key literals were lost in
    extraction — restore.
    """
    latest_chain = self.get_latest_abci_chain()
    if latest_chain is None:
        return

    block = self.get_latest_block()

    suffix = 
    chain_id = latest_chain[]
    block_height_str = str(block[])
    # Strip any previous migration suffix before appending the new one.
    new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str

    self.store_abci_chain(block[] + 1, new_chain_id, False)
Generate and record a new ABCI chain ID. New blocks are not accepted until we receive an InitChain ABCI request with the matching chain ID and validator set. Chain ID is generated based on the current chain and height. `chain-X` => `chain-X-migrated-at-height-5`. `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`. If there is no known chain (we are at genesis), the function returns.
def dict_to_source(dict):
    """Turn a dict carrying a citation into a Source; if the argument is
    already a Source, return it unchanged.

    NOTE(review): the parameter shadows the builtin ``dict``, the bare
    ``citation`` expression looks like docstring residue, and the key
    string literals (presumably 'citation' plus an optional second key)
    were lost in extraction — restore.
    """
    citation
    if isinstance(dict, Source):
        return dict
    return Source(
        dict[],
        dict.get()
    )
Transform a dict with key 'citation' into a :class:`Source`. If the argument passed is already a :class:`Source`, this method just returns the argument.
def GetFileSystems():
    """Return mounted filesystems via the getfsstat(2) syscall.

    Chooses the 32/64-bit statfs struct layout based on the OS X version
    (10.5 and earlier use the 32-bit layout).

    Returns:
        A list of parsed Struct objects (empty on syscall failure).
    """
    version = OSXVersion()
    major, minor = version.VersionAsMajorMinor()

    libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))

    if major <= 10 and minor <= 5:
        use_64 = False
        fs_struct = StatFSStruct
    else:
        use_64 = True
        fs_struct = StatFS64Struct

    # Buffer sized for up to 20 filesystem entries.
    struct_size = fs_struct.GetSize()
    buf_size = struct_size * 20

    cbuf = ctypes.create_string_buffer(buf_size)

    if use_64:
        # Final argument 2 is presumably MNT_NOWAIT (don't block on
        # unresponsive filesystems) — confirm against sys/mount.h.
        ret = libc.getfsstat64(ctypes.byref(cbuf), buf_size, 2)
    else:
        ret = libc.getfsstat(ctypes.byref(cbuf), buf_size, 2)

    if ret == 0:
        # NOTE(review): getfsstat signals error with -1, not 0 — confirm
        # this error check; ret == 0 means "no filesystems".
        logging.debug("getfsstat failed err: %s", ret)
        return []
    return ParseFileSystemsStruct(fs_struct, ret, cbuf)
Make syscalls to get the mounted filesystems. Returns: A list of Struct objects. Based on the information for getfsstat http://developer.apple.com/library/mac/#documentation/Darwin/ Reference/ManPages/man2/getfsstat.2.html
def get_parent_until(path):
    """Given a file path, return the dotted module path, e.g.
    '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' ->
    'numpy.core'.

    NOTE(review): the string literals for the package-marker module name
    (presumably '__init__') and the final join separator ('.') were lost
    in extraction — restore.
    """
    dirname = osp.dirname(path)
    try:
        # Confirm the file itself is importable from its directory.
        mod = osp.basename(path)
        mod = osp.splitext(mod)[0]
        imp.find_module(mod, [dirname])
    except ImportError:
        return
    items = [mod]
    while 1:
        # Walk upward while each parent directory is itself a package.
        items.append(osp.basename(dirname))
        try:
            dirname = osp.dirname(dirname)
            imp.find_module(, [dirname + os.sep])
        except ImportError:
            break
    return .join(reversed(items))
Given a file path, determine the full module path. e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields 'numpy.core'
def report_idle_after(seconds):
    """Decorator: if the wrapped call runs longer than *seconds*, send an
    "idle experiment" notification via the configured messenger.

    Uses SIGALRM, so it only works on the main thread of Unix systems;
    the pending alarm is always cancelled in the finally block.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            def _handle_timeout(signum, frame):
                # Runs inside the signal handler once the alarm fires.
                config = get_config()
                if not config.ready:
                    config.load()
                message = {
                    "subject": "Idle Experiment.",
                    "body": idle_template.format(
                        app_id=config.get("id"),
                        minutes_so_far=round(seconds / 60)
                    ),
                }
                log("Reporting problem with idle experiment...")
                get_messenger(config).send(message)

            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                signal.alarm(0)  # always cancel the pending alarm
            return result
        return wraps(func)(wrapper)
    return decorator
Report_idle_after after certain number of seconds.
def union(self, other, left_name="LEFT", right_name="RIGHT"):
    """Wrapper of GMQL UNION: merge the samples of two datasets into one.

    Sample metadata and regions are preserved; the schema of the left
    (self) dataset is used, and missing region attributes are null.

    :param other: a GMQLDataset.
    :param left_name: non-empty label for the left dataset.
    :param right_name: non-empty label for the right dataset.
    :return: a new GMQLDataset.
    :raises TypeError: on non-string names or non-GMQLDataset other.
    :raises ValueError: on empty names.
    """
    if not isinstance(left_name, str) or \
            not isinstance(right_name, str):
        raise TypeError("left_name and right_name must be strings. "
                        "{} - {} was provided".format(type(left_name), type(right_name)))

    # `__index` is name-mangled; this only works inside the defining class.
    if isinstance(other, GMQLDataset):
        other_idx = other.__index
    else:
        raise TypeError("other must be a GMQLDataset. "
                        "{} was provided".format(type(other)))

    if len(left_name) == 0 or len(right_name) == 0:
        raise ValueError("left_name and right_name must not be empty")

    new_index = self.opmng.union(self.__index, other_idx, left_name, right_name)
    new_local_sources, new_remote_sources = self.__combine_sources(self, other)
    new_location = self.__combine_locations(self, other)
    return GMQLDataset(index=new_index, location=new_location,
                       local_sources=new_local_sources,
                       remote_sources=new_remote_sources,
                       meta_profile=self.meta_profile)
*Wrapper of* ``UNION`` The UNION operation is used to integrate homogeneous or heterogeneous samples of two datasets within a single dataset; for each sample of either one of the input datasets, a sample is created in the result as follows: * its metadata are the same as in the original sample; * its schema is the schema of the first (left) input dataset; new identifiers are assigned to each output sample; * its regions are the same (in coordinates and attribute values) as in the original sample. Region attributes which are missing in an input dataset sample (w.r.t. the merged schema) are set to null. :param other: a GMQLDataset :param left_name: name that you want to assign to the left dataset :param right_name: name tha t you want to assign to the right dataset :return: a new GMQLDataset Example of usage:: import gmql as gl d1 = gl.get_example_dataset("Example_Dataset_1") d2 = gl.get_example_dataset("Example_Dataset_2") result = d1.union(other=d2, left_name="D1", right_name="D2")
def get_uncompleted_tasks(self):
    """Return all tasks in this project that are not completed.

    .. warning:: Requires Todoist premium.

    :return: list of uncompleted tasks.
    """
    completed = self.get_completed_tasks()
    return [task for task in self.get_tasks() if task not in completed]
Return a list of all uncompleted tasks in this project. .. warning:: Requires Todoist premium. :return: A list of all uncompleted tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> project = user.get_project('PyTodoist') >>> project.add_task('Install PyTodoist') >>> uncompleted_tasks = project.get_uncompleted_tasks() >>> for task in uncompleted_tasks: ... task.complete()
def concretize_load_idx(self, idx, strategies=None):
    """Concretize a load index expression to a list of concrete indexes.

    Plain ints pass straight through; non-symbolic expressions are
    evaluated once; symbolic ones go through the (possibly overridden)
    load concretization strategies.

    NOTE(review): the final string argument to
    _apply_concretization_strategies (presumably an action label such as
    'load') was lost in extraction — restore.
    """
    if isinstance(idx, int):
        return [idx]
    elif not self.state.solver.symbolic(idx):
        return [self.state.solver.eval(idx)]

    strategies = self.load_strategies if strategies is None else strategies
    return self._apply_concretization_strategies(idx, strategies, )
Concretizes a load index. :param idx: An expression for the index. :param strategies: A list of concretization strategies (to override the default). :param min_idx: Minimum value for a concretized index (inclusive). :param max_idx: Maximum value for a concretized index (exclusive). :returns: A list of concrete indexes.
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
    """Read (and optionally subset) the data matrix from an HDF5 dataset
    into a DataFrame indexed by the row/column metadata.

    -data_dset (h5py dset): dataset holding the matrix (stored transposed
        relative to the returned frame).
    -ridx / cidx (list): row / column index positions to keep (may be all).
    -row_meta / col_meta (pandas DataFrame): parsed metadata frames.
    """
    if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index):
        # No subsetting: one bulk read is fastest.
        data_array = np.empty(data_dset.shape, dtype=np.float32)
        data_dset.read_direct(data_array)
        data_array = data_array.transpose()
    elif len(ridx) <= len(cidx):
        # Subset along the smaller axis first to minimise the h5py read.
        first_subset = data_dset[:, ridx].astype(np.float32)
        data_array = first_subset[cidx, :].transpose()
    elif len(cidx) < len(ridx):
        first_subset = data_dset[cidx, :].astype(np.float32)
        data_array = first_subset[:, ridx].transpose()
    data_df = pd.DataFrame(data_array,
                           index=row_meta.index[ridx],
                           columns=col_meta.index[cidx])
    return data_df
Parses in data_df from hdf5, subsetting if specified. Input: -data_dset (h5py dset): HDF5 dataset from which to read data_df -ridx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -cidx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -row_meta (pandas DataFrame): the parsed in row metadata -col_meta (pandas DataFrame): the parsed in col metadata
def ms_panset(self, viewer, event, data_x, data_y, msg=True):
    """Mouse handler: interactively set the pan position so that
    (data_x, data_y) becomes the window centre.

    NOTE(review): the event.state value compared against (a string
    literal, e.g. 'down') was lost in extraction — restore.
    """
    if self.canpan and (event.state == ):
        self._panset(viewer, data_x, data_y, msg=msg)
    return True
An interactive way to set the pan position. The location (data_x, data_y) will be centered in the window.
def capture_output_from_running_process(context: RunContext) -> None:
    """Drain and buffer output from a running sub-process.

    Reads the capture line by line (non-blocking), decodes and filters
    each line, appends kept lines to ``context.process_output_chunks``
    and, unless ``context.mute`` is set, echoes them to the logger in
    real time.  Returns None when no more output is available.

    :param context: run context holding the capture and output buffer.
    """
    # Iterative rather than tail-recursive: a chatty process can emit far
    # more lines than Python's recursion limit allows frames.
    while True:
        raw = context.capture.readline(block=False)
        if not raw:
            return None
        line = decode_and_filter(raw, context)
        if line:
            if not context.mute:
                _LOGGER_PROCESS.debug(line)
            context.process_output_chunks.append(line)
Parses output from a running sub-process Decodes and filters the process output line by line, buffering it If "mute" is False, sends the output back in real time :param context: run context :type context: _RunContext
def compare_last_two_snapshots(obj, raw=False):
    """Compare the two most recent snapshots of *obj* directly.

    Returns an empty dict when fewer than two snapshots exist.
    """
    if get_snapshot_count(obj) < 2:
        return {}
    current = get_version(obj)
    previous_snap = get_snapshot_by_version(obj, current - 1)
    current_snap = get_snapshot_by_version(obj, current)
    return compare_snapshots(previous_snap, current_snap, raw=raw)
Helper to compare the last two snapshots directly
def get_scalingip(context, id, fields=None):
    """Retrieve one scaling IP by UUID.

    :param context: neutron api request context.
    :param id: UUID of the scaling IP.
    :param fields: optional list of result keys to include.
    :returns: dict describing the scaling IP.
    :raises ScalingIpNotFound: when no matching scaling IP exists.

    NOTE(review): the log format string and the filter-dict keys
    (presumably 'address_type' and 'deallocated') were lost in
    extraction — restore.
    """
    LOG.info( % (id, context.tenant_id))
    filters = {: ip_types.SCALING, : False}
    scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                         **filters)
    if not scaling_ip:
        raise q_exc.ScalingIpNotFound(id=id)
    return v._make_scaling_ip_dict(scaling_ip)
Retrieve a scaling IP. :param context: neutron api request context. :param id: The UUID of the scaling IP. :param fields: a list of strings that are valid keys in a scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: Dictionary containing details for the scaling IP. If values are declared in the fields parameter, then only those keys will be present.
def conservtion_profile_pid(region, genome_alignment,
                            mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
                            species=None):
    """Build a conservation profile for *region* from *genome_alignment*,
    scoring each locus as the percent of bases identical to the
    reference (PID).

    Loci are walked in strand order (reversed for negative-strand
    regions).  Columns with no usable alignment data yield None.

    :param mi_seqs: how to treat sequences with no data for a column.
    :return: list of the same length as the region with the PID (or
        None) at each locus.
    """
    res = []
    # Walk coordinates 5'->3' relative to the region's strand.
    s = region.start if region.isPositiveStrand() else region.end - 1
    e = region.end if region.isPositiveStrand() else region.start - 1
    step = 1 if region.isPositiveStrand() else -1
    for i in range(s, e, step):
        try:
            col = genome_alignment.get_column(region.chrom, i, mi_seqs, species)
            res.append(pid(col))
        except (NoSuchAlignmentColumnError, NoUniqueColumnError):
            # "No column" and "ambiguous column" both mean no PID here;
            # the two previously-duplicated handlers are merged.
            res.append(None)
    return res
build a conservation profile for the given region using the genome alignment. The scores in the profile will be the percent of bases identical to the reference sequence. :param miss_seqs: how to treat sequence with no actual sequence data for the column. :return: a list of the same length as the region where each entry is the PID at the corresponding locus.
def parse_attr_signature(sig):
    """Parse an attribute signature into (name, params).

    NOTE(review): the RuntimeError message prefix and the empty-params
    comparison literal (presumably '') were lost in extraction — restore.
    """
    match = ATTR_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError( + sig)
    name, _, params = match.groups()
    if params is not None and params.strip() != :
        # Split the parameter list and parse each entry individually.
        params = split_sig(params)
        params = [parse_param_signature(x) for x in params]
    else:
        params = []
    return (name, params)
Parse an attribute signature
def TriToBin(self, x, y, z):
    """Turn an x-y-z triangular coord into an a-b binary coord.

    If z is negative, the result is computed with abs(z) and returned as
    (a, -b).  A degenerate coordinate summing to zero maps to (0, 0).

    :param x,y,z: the three numbers of the triangular coord (any number
        type).
    :return: tuple (a, b).

    The previous implementation duplicated the whole computation across
    the z >= 0 and z < 0 branches; they differed only in the sign of b,
    so the two branches are merged here.
    """
    flip = z < 0
    z = abs(z)
    total = x + y + z
    if total == 0:
        return (0, 0)
    X = 100.0 * x / total
    Y = 100.0 * y / total
    Z = 100.0 * z / total
    if X + Y != 0:
        a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
    else:
        # Degenerate case: all weight on z.
        a = Z / 2.0
    b = Z / 2.0 * (np.sqrt(3))
    return (a, -b) if flip else (a, b)
Turn an x-y-z triangular coord to an a-b coord. if z is negative, calc with its abs then return (a, -b). :param x,y,z: the three numbers of the triangular coord :type x,y,z: float or double are both OK, just numbers :return: the corresponding a-b coord :rtype: a tuple consist of a and b
def validate_oath_hotp(self, params):
    """Validate an OATH-HOTP code using the YubiHSM (HMAC-SHA1 with
    token keys sealed in AEADs stored in an SQLite3 database).

    NOTE(review): this block is Python 2 syntax (print statement,
    ``except Exception, e``).  Several names (`entry`, `new_counter`) are
    undefined here, and the unconditional ``return "ERR Could not
    validate OATH-HOTP OTP"`` makes the following try-block unreachable —
    the actual validation code appears to have been lost in extraction.
    TODO reconstruct before use.
    """
    from_key = params["hotp"][0]
    if not re.match(hotp_valid_input, from_key):
        self.log_error("IN: %s, Invalid OATH-HOTP OTP" % (params))
        return "ERR Invalid OATH-HOTP OTP"
    uid, otp, = get_oath_hotp_bits(params)
    if not uid or not otp:
        self.log_error("IN: %s, could not get UID/OTP (/)" % (params, uid, otp))
        return "ERR Invalid OATH-HOTP input"
    if args.debug:
        print "OATH-HOTP uid %s, OTP %s" % (uid, otp)
    return "ERR Could not validate OATH-HOTP OTP"
    try:
        if db.update_oath_hotp_c(entry, new_counter):
            return "OK counter=%04x" % (new_counter)
        else:
            return "ERR replayed OATH-HOTP"
    except Exception, e:
        self.log_error("IN: %s, database error updating counter : %s" % (params, e))
        return "ERR Internal error"
Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys secured in AEAD's that we have stored in an SQLite3 database.
def smooth_n_point(scalar_grid, n=5, passes=1):
    """Smooth a 2D grid with a 5- or 9-point weighted filter.

    Close replication of GEMPAK's SM5S/SM9S.  Only interior points are
    smoothed; boundary points keep their original values.  NaN or masked
    values propagate to every point whose stencil touches them.

    Parameters
    ----------
    scalar_grid : array-like or `pint.Quantity`
        2D scalar grid to smooth.
    n : int
        Stencil size; must be 5 or 9.  Defaults to 5.
    passes : int
        Number of filter applications.  Defaults to 1.

    Returns
    -------
    array-like or `pint.Quantity`
        The filtered 2D grid.

    Raises
    ------
    ValueError
        If ``n`` is neither 5 nor 9.
    """
    if n == 9:
        p = 0.25
        q = 0.125
        r = 0.0625
    elif n == 5:
        p = 0.5
        q = 0.125
        r = 0.0
    else:
        # The original error message was lost in extraction; restored.
        raise ValueError('The number of points to use in the smoothing '
                         'calculation must be either 5 or 9.')

    smooth_grid = scalar_grid[:].copy()
    for _ in range(passes):
        # Weighted average of centre (p), edge neighbours (q) and corner
        # neighbours (r).  A stray no-op unary '+' in the original corner
        # term has been removed.
        smooth_grid[1:-1, 1:-1] = (
            p * smooth_grid[1:-1, 1:-1]
            + q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
                   + smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
            + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
                   + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
    return smooth_grid
Filter with normal distribution of weights. Parameters ---------- scalar_grid : array-like or `pint.Quantity` Some 2D scalar grid to be smoothed. n: int The number of points to use in smoothing, only valid inputs are 5 and 9. Defaults to 5. passes : int The number of times to apply the filter to the grid. Defaults to 1. Returns ------- array-like or `pint.Quantity` The filtered 2D scalar grid. Notes ----- This function is a close replication of the GEMPAK function SM5S and SM9S depending on the choice of the number of points to use for smoothing. This function can be applied multiple times to create a more smoothed field and will only smooth the interior points, leaving the end points with their original values. If a masked value or NaN values exists in the array, it will propagate to any point that uses that particular grid point in the smoothing calculation. Applying the smoothing function multiple times will propogate NaNs further throughout the domain.
def credits(self, **kwargs):
    """Fetch the TV episode credits for this season/episode combination.

    Returns:
        dict parsed from the API's JSON response; the response keys are
        also set as attributes on this object.
    """
    endpoint = self._get_series_id_season_number_episode_number_path()
    payload = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(payload)
    return payload
Get the TV episode credits by combination of season and episode number. Returns: A dict respresentation of the JSON returned from the API.
def _totals(self, query): self.add_parameters(limit=1) query = self._build_query(query) self._retrieve_data(query) self.url_params = None return int(self.request.headers["Total-Results"])
General method for returning total counts
def manual_dir(self):
    """Return the directory containing the manually extracted data.

    Raises AssertionError when the directory does not exist.

    NOTE(review): the AssertionError message format string was lost in
    extraction — restore.
    """
    if not tf.io.gfile.exists(self._manual_dir):
        raise AssertionError(
            .format(self._manual_dir))
    return self._manual_dir
Returns the directory containing the manually extracted data.
def is_mutating(status):
    """Return True if *status* describes a mutating SQL statement.

    The first whitespace-delimited word of *status* is lower-cased and
    checked against a set of mutating verbs.

    NOTE(review): the members of the `mutating` set (string literals such
    as the insert/update/delete family) were lost in extraction —
    restore.
    """
    if not status:
        return False

    mutating = set([, , , , , , , , ])
    return status.split(None, 1)[0].lower() in mutating
Determines if the statement is mutating based on the status.