def read_dotenv(dotenv=None, override=False):
    if dotenv is None:
        frame_filename = sys._getframe().f_back.f_code.co_filename
        dotenv = os.path.join(os.path.dirname(frame_filename), '.env')
    if os.path.isdir(dotenv) and os.path.isfile(os.path.join(dotenv, '.env')):
        dotenv = os.path.join(dotenv, '.env')
    if os.path.exists(dotenv):
        with open(dotenv) as f:
            for k, v in parse_dotenv(f.read()).items():
                if override:
                    os.environ[k] = v
                else:
                    os.environ.setdefault(k, v)
    else:
        warnings.warn("Not reading {0} - it doesn't exist.".format(dotenv),
                      stacklevel=2)
Read a .env file into os.environ. If not given a path to a dotenv file, does filthy magic stack backtracking to find manage.py and then find the dotenv. If tests rely on .env files, setting the override flag to True is a safe way to ensure tests run consistently across all environments. :param override: True if values in .env should override system variables.
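If this helper ships the way django-dotenv's read_dotenv does, a minimal usage sketch looks like this (the module name is an assumption):

import dotenv

dotenv.read_dotenv()               # resolves .env next to the calling file
dotenv.read_dotenv(override=True)  # let .env values win over existing env vars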
@classmethod
def _remap_tag_to_tag_id(cls, tag, new_data):
    try:
        value = new_data[tag]
    except KeyError:
        return
    tag_id = tag + '_id'  # target key inferred from the docstring's tag -> tag_id remapping
    try:
        new_data[tag_id] = value.id
    except AttributeError:
        new_data[tag_id] = value
    del new_data[tag]
Remaps a given changed field from tag to tag_id.
def _get_css_classes(self, ttype):
    cls = self._get_css_class(ttype)
    while ttype not in STANDARD_TYPES:
        ttype = ttype.parent
        cls = self._get_css_class(ttype) + ' ' + cls
    return cls
Return the css classes of this token type prefixed with the classprefix option.
def buscar_por_id(self, id_direito):
    if not is_valid_int_param(id_direito):
        raise InvalidParameterError(
            u'The equipment group right identifier is invalid or was not informed.')  # message inferred
    url = 'direitosgrupoequipamento/' + str(id_direito) + '/'  # endpoint inferred from the method name
    code, map = self.submit(None, 'GET', url)
    return self.response(code, map)
Gets the rights of a user group and an equipment group.

:param id_direito: Identifier of the equipment group right.
:return: Dictionary with the following structure:

    {'direito_grupo_equipamento': {'id_grupo_equipamento': < id_grupo_equipamento >,
                                   'exclusao': < exclusao >,
                                   'alterar_config': < alterar_config >,
                                   'nome_grupo_equipamento': < nome_grupo_equipamento >,
                                   'id_grupo_usuario': < id_grupo_usuario >,
                                   'escrita': < escrita >,
                                   'nome_grupo_usuario': < nome_grupo_usuario >,
                                   'id': < id >,
                                   'leitura': < leitura >}}

:raise InvalidParameterError: The equipment group right identifier is null or invalid.
:raise DireitoGrupoEquipamentoNaoExisteError: Equipment group right not registered.
:raise DataBaseError: networkapi failed while accessing the database.
:raise XMLError: networkapi failed while generating the XML response.
def __calculate_bu_dfs_recursively(u, b, dfs_data):
    first_time = True
    for v in dfs_data['adj'][u]:  # adjacency-list key inferred
        if a(v, dfs_data) == u:
            if first_time:
                b[v] = b[u]
            else:
                b[v] = D(u, dfs_data)
            __calculate_bu_dfs_recursively(v, b, dfs_data)
            first_time = False
Calculates the b(u) lookup table with a recursive DFS.
def get_cli_version():
    directory = os.path.dirname(os.path.abspath(__file__))
    version_path = os.path.join(directory, 'VERSION')
    if os.path.exists(version_path):
        with open(version_path) as f:
            ver = f.read()
        return ver
    return get_setup_version()
Get the CLI version number. If a VERSION file exists, use the version recorded in it; otherwise fall back to :meth:`.get_setup_version`. :return: CLI version number :rtype: str
def _depaginate_all(self, url):
    items = []
    for x in self._depagination_generator(url):
        items += x
    return items
GETs the url provided and traverses the 'next' url that's returned while storing the data in a list. Returns a single list of all items.
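The method above relies on a _depagination_generator that is not shown here; a hedged sketch of what such a 'next'-following generator could look like (the self._session attribute and the 'results'/'next' JSON keys are assumptions):

def _depagination_generator(self, url):
    # Hypothetical companion generator: yields one page of items at a time,
    # following the 'next' link until the API stops providing one.
    while url:
        payload = self._session.get(url).json()  # self._session is assumed
        yield payload['results']                 # 'results'/'next' keys assumed
        url = payload.get('next')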
def minute(self, symbol=None):
    # subdir/extension values inferred from the TDX file layout for 1-minute bars
    symbol = self.find_path(symbol, subdir='minline', ext=['.lc1', '.1'])
    reader = TdxLCMinBarReader()
    if symbol is not None:
        return reader.get_df(symbol)
    return None
Get 1-minute bars. :param symbol: :return: pd.DataFrame or None
def analyze_insertions(fa, threads=6):
    safe, sequences, id2name, names, insertions = analyze_fa(fa)
    seqs = seq_info(names, id2name, insertions, sequences)
    seqs, orfs = find_orfs(safe, seqs)
    seqs = find_introns(safe, seqs, sequences, threads)
    seqs = seqs2bool(seqs)
    seqs = annotate_orfs(orfs, seqs, threads)
    return seqs, id2name
- find ORFs using Prodigal
- find introns using cmscan (vs Rfam intron database)
- check that ORFs and introns overlap with the insertion region
- plot insertion length versus model position for each insertion (based on insertion type)
def drop_incomplete_days(dataframe, shift=0):
    dropped = 0
    if shift > 23 or shift < 0:
        print("Invalid shift parameter setting! Using defaults.")
        shift = 0
    first = shift
    last = first - 1
    if last < 0:
        last += 24
    try:
        n = len(dataframe.index)
    except Exception:
        print("Error: dataframe index is not accessible.")  # message inferred
        return dataframe
    delete = list()
    # drop leading hours until the first full day starts
    for i in range(0, n):
        if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
            break
        else:
            delete.append(i)
            dropped += 1
    # drop trailing hours after the last full day ends
    for i in range(n - 1, 0, -1):
        if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
            break
        else:
            delete.append(i)
            dropped += 1
    return dataframe.drop(dataframe.index[delete])
truncates a given dataframe to full days only This function truncates a given pandas dataframe (time series) to full days only, thus dropping leading and trailing hours of incomplete days. Please note that this methodology only applies to hourly time series. Args: dataframe: A pandas dataframe object with index defined as datetime shift (unsigned int, opt): First hour of daily recordings. For daily recordings of precipitation gages, 8 would be the first hour of the subsequent day of recordings since daily totals are usually recorded at 7. Omit this parameter if you intend to keep recordings aligned to 0-23h. Returns: A dataframe with full days only.
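A quick usage sketch with a synthetic hourly series (pandas assumed; drop_incomplete_days as defined above):

import pandas as pd

# Hourly series starting mid-day: the leading partial day gets dropped.
idx = pd.date_range('2020-01-01 15:00', periods=60, freq='H')
df = pd.DataFrame({'precip': range(60)}, index=idx)
full_days = drop_incomplete_days(df)         # keeps only complete 0-23h days
shifted = drop_incomplete_days(df, shift=8)  # days running 8:00-7:00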
async def get_tracks(self, query):
    log.debug('Requesting tracks for query {}'.format(query))  # message inferred
    async with self.http.get(self.rest_uri + quote(query),
                             headers={'Authorization': self.password}) as res:
        return await res.json(content_type=None)
Returns a Dictionary containing search results for a given query.
def _check_index_in_compilations(context: BaseContext, index: str):
    compilations = 'compilations'  # shared-data key inferred from the function name
    if compilations not in context.shared_data:
        return False
    return index in context.shared_data[compilations]
Check whether a compilation flag is stored at the specified index in the context's shared data.
def handle_ipopo_event(self, event):
    kind = event.get_kind()
    if kind == IPopoEvent.REGISTERED:
        try:
            with use_ipopo(self.__context) as ipopo:
                factory = event.get_factory_name()
                with self.__lock:
                    components = self.__queue[factory].copy()
                    for component in components:
                        self._try_instantiate(ipopo, factory, component)
        except BundleException:
            pass
        except KeyError:
            pass
Handles an iPOPO event :param event: iPOPO event bean
def session_path(self, path=None):
    if path is not None:
        if path.startswith(self.directory_sep()) is True:
            self.__session_path = self.normalize_path(path)
        else:
            self.__session_path = self.join_path(self.__session_path, path)
    return self.__session_path
Set and/or get the current session path :param path: if defined, then set this value as the current session directory. If this value starts with a directory separator, then it is treated as an absolute path and replaces the current one; otherwise this value is appended to the current one. :return: str
def PushItem(self, item, block=True):
    try:
        self._queue.put(item, block=block)
    except Queue.Full as exception:
        raise errors.QueueFull(exception)
Pushes an item onto the queue. Args: item (object): item to add. block (Optional[bool]): True to block the process when the queue is full. Raises: QueueFull: if the item could not be pushed onto the queue because it is full.
def Failed(self):
    interval = self._current_interval_sec
    self._current_interval_sec = min(
        self.max_interval_sec,
        self._current_interval_sec * self.multiplier)
    return interval
Indicates that a request has failed. Returns: Time interval to wait before retrying (in seconds).
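A sketch of how such a backoff object is typically driven; the constructor arguments shown are assumptions, only Failed() comes from the code above:

import time

# Hypothetical construction; only Failed() is taken from the snippet above.
backoff = Backoff(initial_interval_sec=1.0, max_interval_sec=60.0, multiplier=2.0)
while not try_request():          # try_request() is a placeholder
    time.sleep(backoff.Failed())  # sleeps 1s, 2s, 4s, ... capped at max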
def template(filename, **locals):
    # The stripped string literals below are inferred; the template
    # directory, helper-variable names and result key are assumptions.
    path = 'templates/{0}'.format(filename)
    with open(path) as source:
        code = compile(source.read(), path, 'exec')
    global_vars = {'t': t, 'f': f, 'e': e,
                   'escape': html.escape, 'ctime': time.ctime}
    exec(code, global_vars, locals)
    return locals['html']
Returns
def set_printer(self, file_name, basename):
    self.graph_file = open(file_name, "w+")
    self.printer = VCGPrinter(self.graph_file)
    self.printer.open_graph(
        title=basename,
        layoutalgorithm="dfs",
        late_edge_labels="yes",
        port_sharing="no",
        manhattan_edges="yes",
    )
    self.printer.emit_node = self.printer.node
    self.printer.emit_edge = self.printer.edge
initialize VCGWriter for a UML graph
def search(self, remote_path, keyword, recurrent='0', **kwargs):
    # parameter keys and default follow the Baidu PCS search API; inferred
    params = {
        'path': remote_path,
        'wd': keyword,
        're': recurrent,
    }
    return self._request('file', 'search', extra_params=params, **kwargs)
Search for files by file name (searching for directories is not supported). :param remote_path: Directory path to search; the path must start with /apps/. .. warning:: * The path length is limited to 1000; * the path must not contain any of the characters ``\\\\ ? | " > < : *``; * a file or path name must not start or end with ``.`` or whitespace, where whitespace includes ``\\r, \\n, \\t, space, \\0, \\x0B``. :type remote_path: str :param keyword: Keyword to search for. :type keyword: str :param recurrent: Whether to search recursively. * "0" means non-recursive * "1" means recursive :type recurrent: str :return: Response object
def get_root_dir(self):
    if os.path.isdir(self.root_path):
        return self.root_path
    else:
        return os.path.dirname(self.root_path)
Retrieve the absolute path to the root directory of this data source. Returns: str: absolute path to the root directory of this data source.
def add_resources(self, resources):
    new_resources = self._build_resource_dictionary(resources)
    for key in new_resources:
        self._resources[key] = new_resources[key]
    self._dirty_attributes.add(u'resources')
Adds new resources to the event. *resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects.
def get_system_by_name(self, name):
    for elem in self.systems:
        if elem.name == name:
            return elem
    return None
Return a system with that name or None.
def configure(self, kubernetes_host, kubernetes_ca_cert='', token_reviewer_jwt='',
              pem_keys=None, mount_point=DEFAULT_MOUNT_POINT):
    if pem_keys is None:
        pem_keys = []
    list_of_pem_params = {
        'kubernetes_ca_cert': kubernetes_ca_cert,
        'pem_keys': pem_keys,
    }
    for param_name, param_argument in list_of_pem_params.items():
        validate_pem_format(
            param_name=param_name,
            param_argument=param_argument,
        )
    params = {
        'kubernetes_host': kubernetes_host,
        'kubernetes_ca_cert': kubernetes_ca_cert,
        'token_reviewer_jwt': token_reviewer_jwt,
        'pem_keys': pem_keys,
    }
    api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(
        url=api_path,
        json=params,
    )
Configure the connection parameters for Kubernetes.

This path honors the distinction between the create and update capabilities inside ACL policies.

Supported methods:
    POST: /auth/{mount_point}/config. Produces: 204 (empty body)

:param kubernetes_host: Host must be a host string, a host:port pair, or a URL to the base of the
    Kubernetes API server. Example: https://k8s.example.com:443
:type kubernetes_host: str | unicode
:param kubernetes_ca_cert: PEM encoded CA cert for use by the TLS client used to talk with the
    Kubernetes API. NOTE: Every line must end with a newline: \n
:type kubernetes_ca_cert: str | unicode
:param token_reviewer_jwt: A service account JWT used to access the TokenReview API to validate
    other JWTs during login. If not set the JWT used for login will be used to access the API.
:type token_reviewer_jwt: str | unicode
:param pem_keys: Optional list of PEM-formatted public keys or certificates used to verify the
    signatures of Kubernetes service account JWTs. If a certificate is given, its public key will
    be extracted. Not every installation of Kubernetes exposes these keys.
:type pem_keys: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response
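If this is the hvac Kubernetes auth method it appears to be, a usage sketch looks like this (the client wiring and file paths are assumptions):

import hvac

client = hvac.Client(url='https://vault.example.com:8200')
client.auth.kubernetes.configure(
    kubernetes_host='https://k8s.example.com:443',
    kubernetes_ca_cert=open('/var/run/secrets/kubernetes.io/serviceaccount/ca.crt').read(),
)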
def guess_lineno(file):
    offset = file.tell()
    file.seek(0)
    startpos = 0
    lineno = 1
    while True:
        line = file.readline()
        if not line:
            break
        endpos = file.tell()
        if startpos <= offset < endpos:
            break
        lineno += 1
    file.seek(offset)
    return lineno
Guess current line number in a file. Guessing is done in a very crude way - scanning file from beginning until current offset and counting newlines. Only meant to be used in exceptional cases - generating line number for error message.
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_system_description(self, **kwargs):
    config = ET.Element("config")
    get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
    config = get_lldp_neighbor_detail
    output = ET.SubElement(get_lldp_neighbor_detail, "output")
    lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
    local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
    local_interface_name_key.text = kwargs.pop('local_interface_name')
    remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
    remote_interface_name_key.text = kwargs.pop('remote_interface_name')
    remote_system_description = ET.SubElement(lldp_neighbor_detail, "remote-system-description")
    remote_system_description.text = kwargs.pop('remote_system_description')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def serialize_on_post_delete(sender, instance, using, **kwargs):
    try:
        wrapped_instance = site_offline_models.get_wrapped_instance(instance)
    except ModelNotRegistered:
        pass
    else:
        wrapped_instance.to_outgoing_transaction(using, created=False, deleted=True)
Creates a serialized OutgoingTransaction when a model instance is deleted. Skip those not registered.
def _set_request_user_agent_metrics(self, request):
    if 'HTTP_USER_AGENT' in request.META and request.META['HTTP_USER_AGENT']:
        user_agent = request.META['HTTP_USER_AGENT']
        monitoring.set_custom_metric('request_user_agent', user_agent)
        if user_agent:
            user_agent_parts = user_agent.split()
            if len(user_agent_parts) == 3 and user_agent_parts[1].startswith('edx-rest-api-client'):
                monitoring.set_custom_metric('request_client_name', user_agent_parts[2])
Add metrics for the request's user agent. Metrics: request_user_agent request_client_name: The client name from edx-rest-api-client calls.
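A concrete walk-through of the parsing above, using a made-up but representative user-agent string:

ua = 'python-requests/2.9.1 edx-rest-api-client/1.7.1 ecommerce'
parts = ua.split()
# len(parts) == 3 and parts[1].startswith('edx-rest-api-client') both hold,
# so 'request_client_name' would be recorded as parts[2], i.e. 'ecommerce'.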
@classmethod
def from_dict(cls, d):
    # dictionary key names are inferred; adjust to the actual serialized schema
    return cls(
        d['name'],
        d['version'],
        list(d.get('parameters', {}).items()),
        d.get('optional', False)
    )
Instantiate a Role from a dictionary representation.
def getfieldindex(data, commdct, objkey, fname):
    objindex = data.dtls.index(objkey)
    objcomm = commdct[objindex]
    for i_index, item in enumerate(objcomm):
        try:
            if item['field'] == [fname]:  # 'field' key inferred from the IDD comment structure
                break
        except KeyError as err:
            pass
    return i_index
given objkey and fieldname, return its index
def frontal_get_update_factor(F, r, nn, na):
    N = len(r)
    Li = matrix(0.0, (N, N))
    # partition r into indices below nn (N1 of them) and the rest (N2)
    N1 = 0
    for i in range(len(r)):
        if r[i] >= nn:
            break
        else:
            N1 += 1
    N2 = N - N1
    # copy [A; B] into the first N1 columns of Li
    Li[:, :N1] = F[r, r[:N1]]
    if N2 > 0:
        C = F[nn:, r[N1:]]
        # reduce C with a series of Householder transformations
        for ii in range(N2):
            col = r[N - 1 - ii] - nn
            t = (na - col) - (ii + 1)
            if t == 0:
                continue
            tau = lapack.larfg(C, C, n=t + 1,
                               offseta=na * (N2 - ii) - 1 - ii,
                               offsetx=na * (N2 - ii) - 1 - ii - t)
            tmp = C[na * (N2 - ii) - 1 - ii]
            C[na * (N2 - ii) - 1 - ii] = 1.0
            lapack.larfx(C, tau, C, m=t + 1, n=N2 - 1 - ii, ldC=na,
                         offsetv=na * (N2 - ii) - 1 - ii - t,
                         offsetC=na - 1 - ii - t)
            C[na * (N2 - ii) - 1 - ii] = tmp
        # copy the triangular factor Li_{22} into place
        # (uplo/trans values below are inferred from the factorization structure)
        lapack.lacpy(C, Li, uplo='L', m=N2, n=N2,
                     offsetA=na - N2, ldA=na, offsetB=(N + 1) * N1, ldB=N)
        # form A - Li_{21}'*Li_{21} in the 1,1 block
        blas.syrk(Li, Li, trans='T', n=N1, k=N2,
                  ldA=N, ldC=N, offsetA=N1, alpha=-1.0, beta=1.0)
    # factor the reversed 1,1 block so that A - Li_{21}'*Li_{21} = Li_{11}'*Li_{11}
    At = matrix(Li[:N1, :N1][::-1], (N1, N1))
    lapack.potrf(At, uplo='U')
    Li[:N1, :N1] = matrix(At[::-1], (N1, N1))
    return Li
Computes the factorization Vi = Li'*Li, given

    F = [ S_{Nk,Nk}   S_{Ak,Nk}' ]
        [ S_{Ak,Nk}   Lk'*Lk     ]

where Vk = Lk'*Lk. Note that the 2,2 block of the argument 'F' stores Lk and not Lk'*Lk.

The argument 'r' is a vector of relative indices r = [ i1, i2, ..., iN ], and the arguments
'nn' and 'na' are equal to |Nk| and |Ak|, respectively.

Partition r as r = [r1;r2] such that indices in r1 are less than |Nk| and indices in r2 are
greater than or equal to |Nk|. Then

    Vi = F[r,r] = [ A  B'   ]
                  [ B  C'*C ]

                = [ Li_{11}'  Li_{21}' ] [ Li_{11}  0       ]
                  [ 0         Li_{22}' ] [ Li_{21}  Li_{22} ]

                = Li'*Li

where A = F[r1,r1], B = F[r2,r1], C = F[nn:,r2]. The matrix Li_{22} is obtained from C by means
of a series of Householder transformations, Li_{21} can be computed by solving
Li_{22}'*Li_{21} = B, and Li_{11} can be computed by factoring the matrix
A - Li_{21}'*Li_{21} = Li_{11}'*Li_{11}.

ARGUMENTS
    F    square matrix
    r    vector of indices
    nn   integer
    na   integer

RETURNS
    Li   matrix with lower triangular Cholesky factor
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    if self.params_initialized and not force_init:
        warnings.warn("Parameters already initialized and force_init=False. "
                      "init_params call ignored.", stacklevel=2)
        return
    assert self.binded, 'call bind before initializing the parameters'

    def _impl(name, arr, cache):
        """Internal helper for parameter initialization."""
        if cache is not None:
            if name in cache:
                cache_arr = cache[name]
                # just in case the cached array is just the target itself
                if cache_arr is not arr:
                    cache_arr.copyto(arr)
            else:
                if not allow_missing:
                    raise RuntimeError("%s is not presented" % name)
                if initializer is not None:
                    initializer(name, arr)
        else:
            initializer(name, arr)

    attrs = self._symbol.attr_dict()
    for name, arr in sorted(self._arg_params.items()):
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, arg_params)
    for name, arr in sorted(self._aux_params.items()):
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, aux_params)

    self.params_initialized = True
    self._params_dirty = False

    # copy the initialized parameters to the executors
    self._exec_group.set_params(self._arg_params, self._aux_params,
                                allow_extra=allow_extra)
Initializes the parameters and auxiliary states.

Parameters
----------
initializer : Initializer
    Called to initialize parameters if needed.
arg_params : dict
    If not ``None``, should be a dictionary of existing arg_params. Initialization
    will be copied from that.
aux_params : dict
    If not ``None``, should be a dictionary of existing aux_params. Initialization
    will be copied from that.
allow_missing : bool
    If ``True``, params could contain missing values, and the initializer will be
    called to fill those missing params.
force_init : bool
    If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
    Whether allow extra parameters that are not needed by symbol. If this is True,
    no error will be thrown when arg_params or aux_params contain extra parameters
    that is not needed by the executor.
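The usual call order around init_params for an MXNet-style Module, sketched (the network symbol and shapes are placeholders):

import mxnet as mx

net = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=10)
mod = mx.mod.Module(symbol=net, data_names=['data'], label_names=None)
mod.bind(data_shapes=[('data', (32, 784))])        # bind must come first
mod.init_params(initializer=mx.init.Uniform(0.01))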
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except Exception:
        raise sa.exc.DisconnectionError()
    cursor.close()
Ensure connections are valid. From: `http://docs.sqlalchemy.org/en/rel_0_8/core/pooling.html` In case db has been restarted pool may return invalid connections.
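The listener only takes effect once attached to the connection pool; with SQLAlchemy's event API that looks like:

from sqlalchemy import event
from sqlalchemy.pool import Pool

# Run ping_connection on every connection checkout from any pool.
event.listen(Pool, 'checkout', ping_connection)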
def _read_coefficients(self):
    # temperature and pressure calibration: 12 values starting at DIG_T1
    coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)
    coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))
    coeff = [float(i) for i in coeff]
    self._temp_calib = coeff[:3]
    self._pressure_calib = coeff[3:]
    # humidity calibration
    self._humidity_calib = [0] * 6
    self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)
    coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)
    coeff = list(struct.unpack('<hBbBbb', bytes(coeff)))
    self._humidity_calib[1] = float(coeff[0])
    self._humidity_calib[2] = float(coeff[1])
    self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))
    self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))
    self._humidity_calib[5] = float(coeff[5])
Read & save the calibration coefficients
def value_from_datadict(self, data, files, name):
    y = data.get(self.year_field % name)
    m = data.get(self.month_field % name)
    d = data.get(self.day_field % name)
    # The "no selection" sentinel and the '-' separators are inferred from
    # the W3C date format (YYYY, YYYY-MM, YYYY-MM-DD) named in the docstring.
    if y == '0':
        y = ''
    if m == '0':
        m = ''
    if d == '0':
        d = ''
    date = y
    if m:
        date += '-%s' % m
    if d:
        date += '-%s' % d
    return date
Generate a single value from multi-part form data. Constructs a W3C date based on values that are set, leaving out day and month if they are not present. :param data: dictionary of data submitted by the form :param files: - unused :param name: base name of the form field :returns: string value
@contextlib.contextmanager
def lockfile(path):
    # decorator restored: the function yields and is documented as a with-block helper
    with genfile(path) as fd:
        fcntl.lockf(fd, fcntl.LOCK_EX)
        yield None
A file lock with-block helper.

Args:
    path (str): A path to a lock file.

Examples:
    Get the lock on a file and dostuff while having the lock::

        path = '/hehe/haha.lock'
        with lockfile(path):
            dostuff()

Notes:
    This is currently based on fcntl.lockf(), and as such, it is purely advisory locking. If multiple processes are attempting to obtain a lock on the same file, this will block until the process which has the current lock releases it.

Yields:
    None
def inject_context(self):
    navbar = filter(lambda entry: entry.visible, self.navbar_entries)
    return {'navbar': navbar}  # context key inferred
Return a dict used for a template context.
def _copy_if_necessary(self, local_path, overwrite):
    local_path = abspath(local_path)
    if not exists(local_path):
        raise MissingLocalFile(local_path)
    elif not self.copy_local_files_to_cache:
        return local_path
    else:
        cached_path = self.cached_path(local_path)
        if exists(cached_path) and not overwrite:
            return cached_path
        copy2(local_path, cached_path)
        return cached_path
Return cached path to local file, copying it to the cache if necessary.
def _init_net_specs(conf):
    # the 'nets'/'name'/'mapping'/'type'/'nat' literals are inferred
    for net_name, net_spec in conf.get('nets', {}).items():
        net_spec['name'] = net_name
        net_spec['mapping'] = {}
        net_spec.setdefault('type', 'nat')
    return conf
Given a configuration specification, initializes all the net definitions in it so they can be used comfortably Args: conf (dict): Configuration specification Returns: dict: the adapted new conf
def close(self):
    if self.filepath is not None:
        if path.isfile(self.filepath + '.lock'):
            remove(self.filepath + '.lock')
        self.filepath = None
        self.read_only = False
        self.lock()
        return True
    else:
        raise KPError('Can\'t close a not opened file')
This method closes the database correctly.
def load_yaml(self, _yaml: YamlDocument) -> AugmentedDict:
    return self.augment(self._load_plain_yaml(_yaml), document=_yaml)
Loads a partial yaml and augments it. A partial yaml in this context is a yaml that is syntactically correct, but is not yet complete in terms of content. The yaml will be completed by augmenting it with some external resources and/or executing so-called extensions. Args: _yaml: Either a stream (e.g. file pointer), the file name of an existing file, or a string containing yaml data. Returns: Returns the yaml data as an augmented python dictionary.
def get_uint_info(self, field):
    length = ctypes.c_ulong()
    ret = ctypes.POINTER(ctypes.c_uint)()
    _check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,
                                          c_str(field),
                                          ctypes.byref(length),
                                          ctypes.byref(ret)))
    return ctypes2numpy(ret, length.value, np.uint32)
Get unsigned integer property from the DMatrix. Parameters ---------- field: str The field name of the information Returns ------- info : array a numpy array of unsigned integer information of the data
def add_listener(self, include_value=False, item_added_func=None, item_removed_func=None):
    request = queue_add_listener_codec.encode_request(self.name, include_value, False)

    def handle_event_item(item, uuid, event_type):
        item = item if include_value else None
        member = self._client.cluster.get_member_by_uuid(uuid)
        item_event = ItemEvent(self.name, item, event_type, member, self._to_object)
        if event_type == ItemEventType.added:
            if item_added_func:
                item_added_func(item_event)
        else:
            if item_removed_func:
                item_removed_func(item_event)

    return self._start_listening(request,
                                 lambda m: queue_add_listener_codec.handle(m, handle_event_item),
                                 lambda r: queue_add_listener_codec.decode_response(r)['response'],
                                 self.partition_key)
Adds an item listener for this queue. Listener will be notified for all queue add/remove events. :param include_value: (bool), whether received events include the updated item or not (optional). :param item_added_func: Function to be called when an item is added to this set (optional). :param item_removed_func: Function to be called when an item is deleted from this set (optional). :return: (str), a registration id which is used as a key to remove the listener.
def getService(self, serviceIdentifier):
    if serviceIdentifier in self._services:
        return self._services[serviceIdentifier]
    else:
        message = "Application - getService() - " \
                  "Service with identifier {} does not exist." \
                  .format(serviceIdentifier)
        raise Exception(message)
Return the requested service instance. :param serviceIdentifier: <str> service identifier :return: <object> service instance
def clear(self, domain=None, path=None, name=None):
    if name is not None:
        if (domain is None) or (path is None):
            raise ValueError(
                "domain and path must be given to remove a cookie by name")
        del self._cookies[domain][path][name]
    elif path is not None:
        if domain is None:
            raise ValueError(
                "domain must be given to remove cookies by path")
        del self._cookies[domain][path]
    elif domain is not None:
        del self._cookies[domain]
    else:
        self._cookies = {}
Clear some cookies. Invoking this method without arguments will clear all cookies. If given a single argument, only cookies belonging to that domain will be removed. If given two arguments, cookies belonging to the specified path within that domain are removed. If given three arguments, then the cookie with the specified name, path and domain is removed. Raises KeyError if no matching cookie exists.
def encode(val):
    if isinstance(val, (list, tuple)):
        return [encode(v) for v in val]
    elif isinstance(val, str):
        return val.encode()
    else:
        return val
Encode a string assuming the encoding is UTF-8. :param val: a unicode or bytes object, or a list/tuple of them :returns: bytes
def enforce_type(self, attr, val):
    if attr not in self.types:
        return utfstr(val)
    elif self.types[attr] == 'int':
        return int(float(val))
    elif self.types[attr] == 'float':
        return float(val)
    else:
        return utfstr(val)
converts a value to the attribute's type
def info(self, path="/", namespaces=None, **kwargs):
    walker = self._make_walker(**kwargs)
    return walker.info(self.fs, path=path, namespaces=namespaces)
Walk a filesystem, yielding path and `Info` of resources.

Arguments:
    path (str): A path to a directory.
    namespaces (list, optional): A list of namespaces to include in the resource
        information, e.g. ``['basic', 'access']`` (defaults to ``['basic']``).

Keyword Arguments:
    ignore_errors (bool): If `True`, any errors reading a directory will be
        ignored, otherwise exceptions will be raised.
    on_error (callable): If ``ignore_errors`` is `False`, then this callable
        will be invoked with a path and the exception object. It should return
        `True` to ignore the error, or `False` to re-raise it.
    search (str): If ``'breadth'`` then the directory will be walked *top down*.
        Set to ``'depth'`` to walk *bottom up*.
    filter (list): If supplied, this parameter should be a list of file name
        patterns, e.g. ``['*.py']``. Files will only be returned if the final
        component matches one of the patterns.
    exclude (list, optional): If supplied, this parameter should be a list of
        filename patterns, e.g. ``['~*', '.*']``. Files matching any of these
        patterns will be removed from the walk.
    filter_dirs (list, optional): A list of patterns that will be used to match
        directories paths. The walk will only open directories that match at
        least one of these patterns.
    exclude_dirs (list): A list of patterns that will be used to filter out
        directories from the walk, e.g. ``['*.svn', '*.git']``.
    max_depth (int, optional): Maximum directory depth to walk.

Returns:
    ~collections.Iterable: an iterable yielding tuples of
    ``(<absolute path>, <resource info>)``.

This method invokes `Walker.info` with the bound `FS` object.
def headers(headerDict={}, **headerskwargs):
    _headerDict = headerDict.copy()
    _headerDict.update(headerskwargs)

    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            resp = make_response(f(*args, **kwargs))
            h = resp.headers
            for header, value in _headerDict.items():
                h[str(header)] = str(value)
            return resp
        return decorated_function
    return decorator
This function is the decorator which is used to wrap a Flask route with. Either pass a dictionary of headers to be set as the headerDict keyword argument, or pass header values as keyword arguments. Or, do both :-) The key and value of items in a dictionary will be converted to strings using the `str` method, ensure both keys and values are serializable thusly. :param headerDict: A dictionary of headers to be injected into the response headers. Note, the supplied dictionary is first copied then mutated. :type origins: dict :param headerskwargs: The headers to be injected into the response headers. :type headerskwargs: identical to the `dict` constructor.
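A usage sketch wrapping a Flask route with the decorator, mixing both calling styles:

from flask import Flask

app = Flask(__name__)

@app.route('/')
@headers({'X-Frame-Options': 'DENY'}, Server='demo')
def index():
    return 'ok'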
def set_reason(self, reason):
    if reason is None:
        self._delete_reason()
    elif not isinstance(reason, bytes):
        raise TypeError("reason must be None or a byte string")
    else:
        reason = reason.lower().replace(b' ', b'')
        reason_code = [r.lower() for r in self._crl_reasons].index(reason)

        new_reason_ext = _lib.ASN1_ENUMERATED_new()
        _openssl_assert(new_reason_ext != _ffi.NULL)
        new_reason_ext = _ffi.gc(new_reason_ext, _lib.ASN1_ENUMERATED_free)

        set_result = _lib.ASN1_ENUMERATED_set(new_reason_ext, reason_code)
        _openssl_assert(set_result != _ffi.NULL)

        self._delete_reason()
        add_result = _lib.X509_REVOKED_add1_ext_i2d(
            self._revoked, _lib.NID_crl_reason, new_reason_ext, 0, 0)
        _openssl_assert(add_result == 1)
Set the reason of this revocation. If :data:`reason` is ``None``, delete the reason instead. :param reason: The reason string. :type reason: :class:`bytes` or :class:`NoneType` :return: ``None`` .. seealso:: :meth:`all_reasons`, which gives you a list of all supported reasons which you might pass to this method.
### Input: Set the reason of this revocation. If :data:`reason` is ``None``, delete the reason instead. :param reason: The reason string. :type reason: :class:`bytes` or :class:`NoneType` :return: ``None`` .. seealso:: :meth:`all_reasons`, which gives you a list of all supported reasons which you might pass to this method. ### Response: def set_reason(self, reason):
    if reason is None:
        self._delete_reason()
    elif not isinstance(reason, bytes):
        raise TypeError("reason must be None or a byte string")
    else:
        reason = reason.lower().replace(b' ', b'')
        reason_code = [r.lower() for r in self._crl_reasons].index(reason)

        new_reason_ext = _lib.ASN1_ENUMERATED_new()
        _openssl_assert(new_reason_ext != _ffi.NULL)
        new_reason_ext = _ffi.gc(new_reason_ext, _lib.ASN1_ENUMERATED_free)

        set_result = _lib.ASN1_ENUMERATED_set(new_reason_ext, reason_code)
        _openssl_assert(set_result != _ffi.NULL)

        self._delete_reason()
        add_result = _lib.X509_REVOKED_add1_ext_i2d(
            self._revoked, _lib.NID_crl_reason, new_reason_ext, 0, 0)
        _openssl_assert(add_result == 1)
def scale(val, src, dst): if val < src[0]: return dst[0] if val > src[1]: return dst[1] return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
Scale value from src range to dst range. If value outside bounds, it is clipped and set to the low or high bound of dst. Ex: scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0 scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0
### Input: Scale value from src range to dst range. If value outside bounds, it is clipped and set to the low or high bound of dst. Ex: scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0 scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0 ### Response: def scale(val, src, dst): if val < src[0]: return dst[0] if val > src[1]: return dst[1] return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
def _handle_reduce(self, ufunc, input_args, outputs):
    assert len(input_args) == 1
    if outputs:
        output = outputs[0]
    else:
        output = None
    if ufunc.__name__ in BINARY_OPS:
        return self._reduce_op(BINARY_OPS[ufunc.__name__], result=output)
TODO: describe. TODO: For multi-dimensional case, might need extra work here if the reduction is being performed only along a certain axis. We force evaluation at the end of the reduction - weird errors if we don't. This seems safer anyway. np supports reduce only for binary ops.
### Input: TODO: describe. TODO: For multi-dimensional case, might need extra work here if the reduction is being performed only along a certain axis. We force evaluation at the end of the reduction - weird errors if we don't. This seems safer anyway. np supports reduce only for binary ops. ### Response: def _handle_reduce(self, ufunc, input_args, outputs):
    assert len(input_args) == 1
    if outputs:
        output = outputs[0]
    else:
        output = None
    if ufunc.__name__ in BINARY_OPS:
        return self._reduce_op(BINARY_OPS[ufunc.__name__], result=output)
def get_pkg_version(pkg_name, parse=False): import pkg_resources try: version = pkg_resources.require(pkg_name)[0].version return pkg_resources.parse_version(version) if parse else version except pkg_resources.DistributionNotFound: return None
Verify and get installed python package version. :param pkg_name: python package name :param parse: parse version number with the pkg_resources.parse_version function :return: None if pkg is not installed, otherwise version as a string or parsed version when parse=True
### Input: Verify and get installed python package version. :param pkg_name: python package name :param parse: parse version number with the pkg_resources.parse_version function :return: None if pkg is not installed, otherwise version as a string or parsed version when parse=True ### Response: def get_pkg_version(pkg_name, parse=False):
    import pkg_resources
    try:
        version = pkg_resources.require(pkg_name)[0].version
        return pkg_resources.parse_version(version) if parse else version
    except pkg_resources.DistributionNotFound:
        return None
def getUTC(self): timeUTC = self.time.getUTC(self.utcoffset) dateUTC = Date(round(self.jd)) return Datetime(dateUTC, timeUTC)
Returns this Datetime localized for UTC.
### Input: Returns this Datetime localized for UTC. ### Response: def getUTC(self): timeUTC = self.time.getUTC(self.utcoffset) dateUTC = Date(round(self.jd)) return Datetime(dateUTC, timeUTC)
def Page_setDeviceOrientationOverride(self, alpha, beta, gamma):
    assert isinstance(alpha, (float, int)), (
        "Argument 'alpha' must be of type float or int. Received type: %s" % type(alpha))
    assert isinstance(beta, (float, int)), (
        "Argument 'beta' must be of type float or int. Received type: %s" % type(beta))
    assert isinstance(gamma, (float, int)), (
        "Argument 'gamma' must be of type float or int. Received type: %s" % type(gamma))
    subdom_funcs = self.synchronous_command('Page.setDeviceOrientationOverride',
        alpha=alpha, beta=beta, gamma=gamma)
    return subdom_funcs
Function path: Page.setDeviceOrientationOverride Domain: Page Method name: setDeviceOrientationOverride WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'alpha' (type: number) -> Mock alpha 'beta' (type: number) -> Mock beta 'gamma' (type: number) -> Mock gamma No return value. Description: Overrides the Device Orientation.
### Input: Function path: Page.setDeviceOrientationOverride Domain: Page Method name: setDeviceOrientationOverride WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'alpha' (type: number) -> Mock alpha 'beta' (type: number) -> Mock beta 'gamma' (type: number) -> Mock gamma No return value. Description: Overrides the Device Orientation. ### Response: def Page_setDeviceOrientationOverride(self, alpha, beta, gamma):
    assert isinstance(alpha, (float, int)), (
        "Argument 'alpha' must be of type float or int. Received type: %s" % type(alpha))
    assert isinstance(beta, (float, int)), (
        "Argument 'beta' must be of type float or int. Received type: %s" % type(beta))
    assert isinstance(gamma, (float, int)), (
        "Argument 'gamma' must be of type float or int. Received type: %s" % type(gamma))
    subdom_funcs = self.synchronous_command('Page.setDeviceOrientationOverride',
        alpha=alpha, beta=beta, gamma=gamma)
    return subdom_funcs
def diff(self): done = set(self.done) return [name for name in self.todo if name not in done]
Calculate difference between fs and db.
### Input: Calculate difference between fs and db. ### Response: def diff(self): done = set(self.done) return [name for name in self.todo if name not in done]
def make_signer(self, salt=None): if salt is None: salt = self.salt return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
A method that creates a new instance of the signer to be used. The default implementation uses the :class:`Signer` baseclass.
### Input: A method that creates a new instance of the signer to be used. The default implementation uses the :class:`Signer` baseclass. ### Response: def make_signer(self, salt=None): if salt is None: salt = self.salt return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def _getImpliedEdges(roots):
    nodes = set()
    for root in roots:
        root._dfs(nodes)
    extraEdges = dict([(n, []) for n in nodes])
    for job in nodes:
        if len(job._followOns) > 0:
            reachable = set()
            for child in job._children:
                child._dfs(reachable)
            for descendant in reachable:
                extraEdges[descendant] += job._followOns[:]
    return extraEdges
Gets the set of implied edges. See Job.checkJobGraphAcylic
### Input: Gets the set of implied edges. See Job.checkJobGraphAcylic ### Response: def _getImpliedEdges(roots):
    nodes = set()
    for root in roots:
        root._dfs(nodes)
    extraEdges = dict([(n, []) for n in nodes])
    for job in nodes:
        if len(job._followOns) > 0:
            reachable = set()
            for child in job._children:
                child._dfs(reachable)
            for descendant in reachable:
                extraEdges[descendant] += job._followOns[:]
    return extraEdges
def normalize_text(text: str) -> str:
    return ' '.join([token for token in text.lower().strip(STRIPPED_CHARACTERS).split()
                     if token not in IGNORED_TOKENS])
Performs a normalization that is very similar to that done by the normalization functions in SQuAD and TriviaQA. This involves splitting and rejoining the text, and could be a somewhat expensive operation.
### Input: Performs a normalization that is very similar to that done by the normalization functions in SQuAD and TriviaQA. This involves splitting and rejoining the text, and could be a somewhat expensive operation. ### Response: def normalize_text(text: str) -> str:
    return ' '.join([token for token in text.lower().strip(STRIPPED_CHARACTERS).split()
                     if token not in IGNORED_TOKENS])
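A small sketch of the behaviour, assuming stand-in values for the module-level STRIPPED_CHARACTERS and IGNORED_TOKENS constants (the real ones live in the surrounding module):

STRIPPED_CHARACTERS = '.,!?'           # assumed stand-in
IGNORED_TOKENS = {'a', 'an', 'the'}    # assumed stand-in

normalize_text('The Quick, Brown Fox!')  # -> 'quick, brown fox'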
def _fetch_stock_data(self, stock_list): pool = multiprocessing.pool.ThreadPool(len(stock_list)) try: res = pool.map(self.get_stocks_by_range, stock_list) finally: pool.close() return [d for d in res if d is not None]
Fetch stock information.
### Input: Fetch stock information. ### Response: def _fetch_stock_data(self, stock_list):
    pool = multiprocessing.pool.ThreadPool(len(stock_list))
    try:
        res = pool.map(self.get_stocks_by_range, stock_list)
    finally:
        pool.close()
    return [d for d in res if d is not None]
def docEntity(self, name):
    ret = libxml2mod.xmlGetDocEntity(self._o, name)
    if ret is None: raise treeError('xmlGetDocEntity() failed')
    __tmp = xmlEntity(_obj=ret)
    return __tmp
Do an entity lookup in the document entity hash table and return the corresponding entity, otherwise a lookup is done in the predefined entities too.
### Input: Do an entity lookup in the document entity hash table and return the corresponding entity, otherwise a lookup is done in the predefined entities too. ### Response: def docEntity(self, name):
    ret = libxml2mod.xmlGetDocEntity(self._o, name)
    if ret is None: raise treeError('xmlGetDocEntity() failed')
    __tmp = xmlEntity(_obj=ret)
    return __tmp
def handle(self): while True: try: callable = None callable = self._Q.get(True, 1.0) except Empty: continue try: if callable is self._stopit: break callable() except: _log.exception("Error from WorkQueue") finally: self._Q.task_done()
Process queued work until interrupt() is called
### Input: Process queued work until interrupt() is called ### Response: def handle(self): while True: try: callable = None callable = self._Q.get(True, 1.0) except Empty: continue try: if callable is self._stopit: break callable() except: _log.exception("Error from WorkQueue") finally: self._Q.task_done()
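A self-contained sketch of the same sentinel-stop pattern using only the standard library; the names and queue contents are illustrative:

import queue
import threading

def worker(q, stop):
    # Pull callables until the sentinel arrives, marking each item done.
    while True:
        item = q.get()
        try:
            if item is stop:
                break
            item()
        finally:
            q.task_done()

q = queue.Queue()
stop = object()
threading.Thread(target=worker, args=(q, stop), daemon=True).start()
q.put(lambda: print('work'))
q.put(stop)
q.join()  # returns once both queued items have been processed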
def _registerInterface(self, iName, intf, isPrivate=False): nameAvailabilityCheck(self, iName, intf) assert intf._parent is None intf._parent = self intf._name = iName intf._ctx = self._ctx if isPrivate: self._private_interfaces.append(intf) intf._isExtern = False else: self._interfaces.append(intf) intf._isExtern = True
Register interface object on interface level object
### Input: Register interface object on interface level object ### Response: def _registerInterface(self, iName, intf, isPrivate=False): nameAvailabilityCheck(self, iName, intf) assert intf._parent is None intf._parent = self intf._name = iName intf._ctx = self._ctx if isPrivate: self._private_interfaces.append(intf) intf._isExtern = False else: self._interfaces.append(intf) intf._isExtern = True
def add_bond(self, atom1, atom2, bond): if atom1 == atom2: raise KeyError() if atom1 not in self._node or atom2 not in self._node: raise KeyError() if atom1 in self._adj[atom2]: raise KeyError() attr_dict = self.edge_attr_dict_factory() if isinstance(bond, int): attr_dict.order = bond else: attr_dict.update(bond) self._adj[atom1][atom2] = self._adj[atom2][atom1] = attr_dict self.flush_cache()
implementation of bond addition
### Input: implementation of bond addition ### Response: def add_bond(self, atom1, atom2, bond): if atom1 == atom2: raise KeyError() if atom1 not in self._node or atom2 not in self._node: raise KeyError() if atom1 in self._adj[atom2]: raise KeyError() attr_dict = self.edge_attr_dict_factory() if isinstance(bond, int): attr_dict.order = bond else: attr_dict.update(bond) self._adj[atom1][atom2] = self._adj[atom2][atom1] = attr_dict self.flush_cache()
def send(self, *args, **kwargs): if not self.transport: raise ValueError( "This RecipeWrapper object does not contain " "a reference to a transport object." ) if not self.recipe_step: raise ValueError( "This RecipeWrapper object does not contain " "a recipe with a selected step." ) if "output" not in self.recipe_step: return if isinstance(self.recipe_step["output"], dict): if self.default_channel: self.send_to(self.default_channel, *args, **kwargs) else: self._send_to_destinations(self.recipe_step["output"], *args, **kwargs)
Send messages to another service that is connected to the currently running service via the recipe. The 'send' method will either use a default channel name, set via the set_default_channel method, or an unnamed output definition.
### Input: Send messages to another service that is connected to the currently running service via the recipe. The 'send' method will either use a default channel name, set via the set_default_channel method, or an unnamed output definition. ### Response: def send(self, *args, **kwargs): if not self.transport: raise ValueError( "This RecipeWrapper object does not contain " "a reference to a transport object." ) if not self.recipe_step: raise ValueError( "This RecipeWrapper object does not contain " "a recipe with a selected step." ) if "output" not in self.recipe_step: return if isinstance(self.recipe_step["output"], dict): if self.default_channel: self.send_to(self.default_channel, *args, **kwargs) else: self._send_to_destinations(self.recipe_step["output"], *args, **kwargs)
def set_description(self, value: Union[Literal, Identifier, str], lang: str= None): return self.metadata.add(key=DC.description, value=value, lang=lang)
Set the DC Description literal value :param value: Value of the title node :param lang: Language in which the value is
### Input: Set the DC Description literal value :param value: Value of the title node :param lang: Language in which the value is ### Response: def set_description(self, value: Union[Literal, Identifier, str], lang: str= None): return self.metadata.add(key=DC.description, value=value, lang=lang)
def dup(self): fd = dup(self.fileno()) sock = self.__class__(self.family, self.type, self.proto, fileno=fd) sock.settimeout(self.gettimeout()) return sock
dup() -> socket object Return a new socket object connected to the same system resource.
### Input: dup() -> socket object Return a new socket object connected to the same system resource. ### Response: def dup(self): fd = dup(self.fileno()) sock = self.__class__(self.family, self.type, self.proto, fileno=fd) sock.settimeout(self.gettimeout()) return sock
def update_db(matches):
    for match in matches:
        try:
            match.save()
        except IntegrityError:
            args = (match.text_log_error_id, match.matcher_name, match.classified_failure_id)
            logger.warning(
                "Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i",
                *args,
            )
Save TextLogErrorMatch instances to the DB We loop each Match instance instead of calling bulk_create() so we can catch any potential IntegrityErrors and continue.
### Input: Save TextLogErrorMatch instances to the DB We loop each Match instance instead of calling bulk_create() so we can catch any potential IntegrityErrors and continue. ### Response: def update_db(matches):
    for match in matches:
        try:
            match.save()
        except IntegrityError:
            args = (match.text_log_error_id, match.matcher_name, match.classified_failure_id)
            logger.warning(
                "Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i",
                *args,
            )
def selected(self, ids): result = copy(self.choices) result = filter(lambda x: x[0] in ids, result) return list(result)
Return the selected options as a list of tuples
### Input: Return the selected options as a list of tuples ### Response: def selected(self, ids): result = copy(self.choices) result = filter(lambda x: x[0] in ids, result) return list(result)
def run(self, hql, parameters=None): return super().run(self._strip_sql(hql), parameters)
Execute the statement against Presto. Can be used to create views.
### Input: Execute the statement against Presto. Can be used to create views. ### Response: def run(self, hql, parameters=None): return super().run(self._strip_sql(hql), parameters)
def ethnicities_clean():
    eths_clean = {}
    fname = pkg_resources.resource_filename(__name__, )
    with open(fname, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        first = []
        for row in reader:
            if first:
                for i in range(len(first)):
                    if first[i] and row[i]:
                        eths_clean[first[i]] = row[i]
                first = []
            else:
                first = deepcopy(row)
    return eths_clean
Get dictionary of unformatted ethnicity types mapped to clean corresponding ethnicity strings
### Input: Get dictionary of unformatted ethnicity types mapped to clean corresponding ethnicity strings ### Response: def ethnicities_clean():
    eths_clean = {}
    fname = pkg_resources.resource_filename(__name__, )
    with open(fname, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        first = []
        for row in reader:
            if first:
                for i in range(len(first)):
                    if first[i] and row[i]:
                        eths_clean[first[i]] = row[i]
                first = []
            else:
                first = deepcopy(row)
    return eths_clean
def _imported_module(self, node, mod_path, relative): module = node.root() context_name = module.name if relative: mod_path = "%s.%s" % (".".join(context_name.split(".")[:-1]), mod_path) if self.compute_module(context_name, mod_path): if not hasattr(module, "depends"): module.depends = [] mod_paths = module.depends if mod_path not in mod_paths: mod_paths.append(mod_path)
Notify an imported module, used to analyze dependencies
### Input: Notify an imported module, used to analyze dependencies ### Response: def _imported_module(self, node, mod_path, relative): module = node.root() context_name = module.name if relative: mod_path = "%s.%s" % (".".join(context_name.split(".")[:-1]), mod_path) if self.compute_module(context_name, mod_path): if not hasattr(module, "depends"): module.depends = [] mod_paths = module.depends if mod_path not in mod_paths: mod_paths.append(mod_path)
def get_request_id(self, renew=False): if not AppRequest.__request_id or renew: self.set_request_id(uuid.uuid1()) return AppRequest.__request_id
:Brief: This method is used everywhere to get the already generated request ID, or to generate a new request ID and send it off
### Input: :Brief: This method is used everywhere to get the already generated request ID, or to generate a new request ID and send it off ### Response: def get_request_id(self, renew=False):
    if not AppRequest.__request_id or renew:
        self.set_request_id(uuid.uuid1())
    return AppRequest.__request_id
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug(, task_id) family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {: task_id} serialized[task_id] = { : [], : UNKNOWN, : [], : UNKNOWN, : params, : family, : task_id, : 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id][] if len(serialized) >= self._config.max_graph_nodes: break return serialized
Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node
### Input: Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node ### Response: def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug(, task_id) family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {: task_id} serialized[task_id] = { : [], : UNKNOWN, : [], : UNKNOWN, : params, : family, : task_id, : 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id][] if len(serialized) >= self._config.max_graph_nodes: break return serialized
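A generic sketch of the capped breadth-first traversal the docstring describes; `deps_of` and the node cap stand in for the scheduler's task store and its max_graph_nodes setting:

import collections

def bfs_capped(root, deps_of, max_nodes=50):
    seen = {root}
    out = {}
    queue = collections.deque([root])
    while queue:
        node = queue.popleft()
        out[node] = deps_of(node)
        for dep in out[node]:
            if dep not in seen:
                seen.add(dep)
                queue.append(dep)
        if len(out) >= max_nodes:
            break  # nodes closest to the root are kept first
    return out

bfs_capped('A', {'A': ['B', 'C'], 'B': [], 'C': ['D'], 'D': []}.get)
# -> {'A': ['B', 'C'], 'B': [], 'C': ['D'], 'D': []}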
def _harmonize_subset_types(set_, subset_tuples): if is_tuple(set_): return subset_tuples else: subset_class = set_.__class__ if is_set(set_) else tuple return imap(subset_class, subset_tuples)
Possibly convert an iterable of tuples with subsets of given "set", to an iterable of :class:`set` objects if original "set" was so too.
### Input: Possibly convert an iterable of tuples with subsets of given "set", to an iterable of :class:`set` objects if original "set" was so too. ### Response: def _harmonize_subset_types(set_, subset_tuples): if is_tuple(set_): return subset_tuples else: subset_class = set_.__class__ if is_set(set_) else tuple return imap(subset_class, subset_tuples)
def package_name(package): if not package: return "" lastdot = package.rfind(".") if lastdot == -1: return package return package[:lastdot]
Returns the package name of the given module name
### Input: Returns the package name of the given module name ### Response: def package_name(package): if not package: return "" lastdot = package.rfind(".") if lastdot == -1: return package return package[:lastdot]
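A few illustrative calls:

package_name('pkg.sub.mod')  # -> 'pkg.sub'
package_name('toplevel')     # -> 'toplevel'
package_name('')             # -> ''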
def connectionLostForKey(self, key): if key in self.cachedConnections: del self.cachedConnections[key] if self._shuttingDown and self._shuttingDown.get(key): d, self._shuttingDown[key] = self._shuttingDown[key], None d.callback(None)
Remove lost connection from cache. @param key: key of connection that was lost @type key: L{tuple} of L{IAddress} and C{extraHash}
### Input: Remove lost connection from cache. @param key: key of connection that was lost @type key: L{tuple} of L{IAddress} and C{extraHash} ### Response: def connectionLostForKey(self, key): if key in self.cachedConnections: del self.cachedConnections[key] if self._shuttingDown and self._shuttingDown.get(key): d, self._shuttingDown[key] = self._shuttingDown[key], None d.callback(None)
def _process_diff(self, yes_work, maybe_work, work_dir, ym_results_path, yn_results_path, stats): distinct_results_path = os.path.join( work_dir, .format(maybe_work)) results = [yn_results_path, ym_results_path] labels = [self._no_label, self._maybe_label] self._run_query(distinct_results_path, self._store.diff_supplied, [results, labels, self._tokenizer]) return self._update_stats(, work_dir, distinct_results_path, yes_work, maybe_work, stats, SHARED, COMMON)
Returns statistics on the difference between the intersection of `yes_work` and `maybe_work` and the intersection of `yes_work` and "no" works. :param yes_work: name of work for which stats are collected :type yes_work: `str` :param maybe_work: name of work being compared with `yes_work` :type maybe_work: `str` :param work_dir: directory where generated files are saved :type work_dir: `str` :param ym_results_path: path to results intersecting `yes_work` with `maybe_work` :type ym_results_path: `str` :param yn_results_path: path to results intersecting `yes_work` with "no" works :type yn_results_path: `str` :param stats: data structure to hold the statistical data :type stats: `dict` :rtype: `dict`
### Input: Returns statistics on the difference between the intersection of `yes_work` and `maybe_work` and the intersection of `yes_work` and "no" works. :param yes_work: name of work for which stats are collected :type yes_work: `str` :param maybe_work: name of work being compared with `yes_work` :type maybe_work: `str` :param work_dir: directory where generated files are saved :type work_dir: `str` :param ym_results_path: path to results intersecting `yes_work` with `maybe_work` :type ym_results_path: `str` :param yn_results_path: path to results intersecting `yes_work` with "no" works :type yn_results_path: `str` :param stats: data structure to hold the statistical data :type stats: `dict` :rtype: `dict` ### Response: def _process_diff(self, yes_work, maybe_work, work_dir, ym_results_path,
                  yn_results_path, stats):
    distinct_results_path = os.path.join(
        work_dir, .format(maybe_work))
    results = [yn_results_path, ym_results_path]
    labels = [self._no_label, self._maybe_label]
    self._run_query(distinct_results_path, self._store.diff_supplied,
                    [results, labels, self._tokenizer])
    return self._update_stats(, work_dir, distinct_results_path,
                              yes_work, maybe_work, stats, SHARED, COMMON)
def _parse(self, date_str, format='%Y-%m-%d'):
    rv = pd.to_datetime(date_str, format=format)
    if hasattr(rv, 'to_pydatetime'):
        rv = rv.to_pydatetime()
    return rv
helper function for parsing FRED date string into datetime
### Input: helper function for parsing FRED date string into datetime ### Response: def _parse(self, date_str, format='%Y-%m-%d'):
    rv = pd.to_datetime(date_str, format=format)
    if hasattr(rv, 'to_pydatetime'):
        rv = rv.to_pydatetime()
    return rv
def encode(self, word, version=2): word = word.lower() word = .join(c for c in word if c in self._lc_set) def _squeeze_replace(word, char): while char * 2 in word: word = word.replace(char * 2, char) return word.replace(char, char.upper()) if version != 1 and word[-1:] == : word = word[:-1] if word: if word[:5] == : word = + word[5:] if word[:5] == : word = + word[5:] if word[:5] == : word = + word[5:] if word[:6] == : word = + word[6:] if version != 1 and word[:6] == : word = + word[6:] if word[:2] == : word = + word[2:] if word[-2:] == : word = word[:-1] + for src, tar in ( (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), ): word = word.replace(src, tar) if word[0] in self._lc_v_set: word = + word[1:] for vowel in : word = word.replace(vowel, ) if version != 1: word = word.replace(, ) if word[:2] == : word = + word[2:] if word[:1] == : word = + word[1:] word = word.replace(, ) for src, tar in ((, ), (, ), (, )): word = word.replace(src, tar) for char in : word = _squeeze_replace(word, char) word = word.replace(, ) if version == 1: word = word.replace(, ) word = word.replace(, ) if version == 1: word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) if word[:1] == : word = + word[1:] word = word.replace(, ) word = word.replace(, ) if version == 1: word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) word = word.replace(, ) if version == 1: word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) if version == 1: word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) word += * 10 if version != 1: word = word[:10] else: word = word[:6] return word
Return the Caverphone code for a word. Parameters ---------- word : str The word to transform version : int The version of Caverphone to employ for encoding (defaults to 2) Returns ------- str The Caverphone value Examples -------- >>> pe = Caverphone() >>> pe.encode('Christopher') 'KRSTFA1111' >>> pe.encode('Niall') 'NA11111111' >>> pe.encode('Smith') 'SMT1111111' >>> pe.encode('Schmidt') 'SKMT111111' >>> pe.encode('Christopher', 1) 'KRSTF1' >>> pe.encode('Niall', 1) 'N11111' >>> pe.encode('Smith', 1) 'SMT111' >>> pe.encode('Schmidt', 1) 'SKMT11'
### Input: Return the Caverphone code for a word. Parameters ---------- word : str The word to transform version : int The version of Caverphone to employ for encoding (defaults to 2) Returns ------- str The Caverphone value Examples -------- >>> pe = Caverphone() >>> pe.encode('Christopher') 'KRSTFA1111' >>> pe.encode('Niall') 'NA11111111' >>> pe.encode('Smith') 'SMT1111111' >>> pe.encode('Schmidt') 'SKMT111111' >>> pe.encode('Christopher', 1) 'KRSTF1' >>> pe.encode('Niall', 1) 'N11111' >>> pe.encode('Smith', 1) 'SMT111' >>> pe.encode('Schmidt', 1) 'SKMT11' ### Response: def encode(self, word, version=2): word = word.lower() word = .join(c for c in word if c in self._lc_set) def _squeeze_replace(word, char): while char * 2 in word: word = word.replace(char * 2, char) return word.replace(char, char.upper()) if version != 1 and word[-1:] == : word = word[:-1] if word: if word[:5] == : word = + word[5:] if word[:5] == : word = + word[5:] if word[:5] == : word = + word[5:] if word[:6] == : word = + word[6:] if version != 1 and word[:6] == : word = + word[6:] if word[:2] == : word = + word[2:] if word[-2:] == : word = word[:-1] + for src, tar in ( (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), (, ), ): word = word.replace(src, tar) if word[0] in self._lc_v_set: word = + word[1:] for vowel in : word = word.replace(vowel, ) if version != 1: word = word.replace(, ) if word[:2] == : word = + word[2:] if word[:1] == : word = + word[1:] word = word.replace(, ) for src, tar in ((, ), (, ), (, )): word = word.replace(src, tar) for char in : word = _squeeze_replace(word, char) word = word.replace(, ) if version == 1: word = word.replace(, ) word = word.replace(, ) if version == 1: word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) if word[:1] == : word = + word[1:] word = word.replace(, ) word = word.replace(, ) if version == 1: word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) word = word.replace(, ) if version == 1: word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) if version == 1: word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) word = word.replace(, ) if version != 1 and word[-1:] == : word = word[:-1] + word = word.replace(, ) word += * 10 if version != 1: word = word[:10] else: word = word[:6] return word
def find_all_headers( pipeline_files=[], label_rules=None): log.info("find_all_headers - START") headers = ["src_file"] headers_dict = {"src_file": None} if label_rules: headers = ["src_file", "label_value", "label_name"] headers_dict = {"src_file": None, "label_value": None, "label_name": None} for c in pipeline_files: df = pd.read_csv(c) for h in df.columns.values: if h not in headers_dict: headers_dict[h] = "{}_{}".format( c, h) headers.append(h) log.info(("headers={}") .format(len(headers))) log.info("find_all_headers - END") return headers, headers_dict
find_all_headers :param pipeline_files: files to process :param label_rules: labeling rules
### Input: find_all_headers :param pipeline_files: files to process :param label_rules: labeling rules ### Response: def find_all_headers( pipeline_files=[], label_rules=None): log.info("find_all_headers - START") headers = ["src_file"] headers_dict = {"src_file": None} if label_rules: headers = ["src_file", "label_value", "label_name"] headers_dict = {"src_file": None, "label_value": None, "label_name": None} for c in pipeline_files: df = pd.read_csv(c) for h in df.columns.values: if h not in headers_dict: headers_dict[h] = "{}_{}".format( c, h) headers.append(h) log.info(("headers={}") .format(len(headers))) log.info("find_all_headers - END") return headers, headers_dict
def scan_for_spec(keyword): keyword = keyword.lstrip().rstrip() matches = release_line_re.findall(keyword) if matches: return Spec(">={}".format(matches[0])) try: return Spec(keyword) except ValueError: return None
Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived.
### Input: Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived. ### Response: def scan_for_spec(keyword): keyword = keyword.lstrip().rstrip() matches = release_line_re.findall(keyword) if matches: return Spec(">={}".format(matches[0])) try: return Spec(keyword) except ValueError: return None
def operate_selected_rows(self, operator): model, tree_paths = self.selection.get_selected_rows() if not tree_paths: return fs_ids = [] for tree_path in tree_paths: fs_ids.append(model[tree_path][FSID_COL]) for fs_id in fs_ids: row = self.get_row_by_fsid(fs_id) if not row: return operator(row, scan=False) self.check_commit(force=True) self.scan_tasks(ignore_shutdown=True)
Apply an operation to the selected rows. operator - the handler function
### Input: Apply an operation to the selected rows. operator - the handler function ### Response: def operate_selected_rows(self, operator):
    model, tree_paths = self.selection.get_selected_rows()
    if not tree_paths:
        return
    fs_ids = []
    for tree_path in tree_paths:
        fs_ids.append(model[tree_path][FSID_COL])
    for fs_id in fs_ids:
        row = self.get_row_by_fsid(fs_id)
        if not row:
            return
        operator(row, scan=False)
    self.check_commit(force=True)
    self.scan_tasks(ignore_shutdown=True)
def commit(self, commit): c = self.commit if c: if not commit: commit = c[0] if commit in c: self._checkout(treeish=commit)
.. seealso:: :attr:`commit`
### Input: .. seealso:: :attr:`commit` ### Response: def commit(self, commit): c = self.commit if c: if not commit: commit = c[0] if commit in c: self._checkout(treeish=commit)
def get_hash160data(self, rawtx, output_index): tx = deserialize.unsignedtx(rawtx) output_index = deserialize.positive_integer(output_index) data = control.get_hash160_data(tx, output_index) return serialize.data(data)
TODO doc string
### Input: TODO doc string ### Response: def get_hash160data(self, rawtx, output_index): tx = deserialize.unsignedtx(rawtx) output_index = deserialize.positive_integer(output_index) data = control.get_hash160_data(tx, output_index) return serialize.data(data)
def _remove_debug_only(self): LOGGER.debug() for handler in self.config[self.HANDLERS]: if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]: del self.config[self.HANDLERS][handler][self.DEBUG_ONLY]
Iterate through each handler removing the invalid dictConfig key of debug_only.
### Input: Iterate through each handler removing the invalid dictConfig key of debug_only. ### Response: def _remove_debug_only(self): LOGGER.debug() for handler in self.config[self.HANDLERS]: if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]: del self.config[self.HANDLERS][handler][self.DEBUG_ONLY]
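The underlying idea in miniature: strip a custom key so the dict is safe for logging.config.dictConfig (the handler name and config are illustrative):

config = {'handlers': {'console': {'class': 'logging.StreamHandler',
                                   'debug_only': True}}}
for handler_cfg in config['handlers'].values():
    # Unknown keys would be passed to the handler class and raise TypeError.
    handler_cfg.pop('debug_only', None)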
def _fix_mutation_df(mutation_df, only_unique=False): orig_len = len(mutation_df) mutation_df = mutation_df[mutation_df.Variant_Classification.isin(variant_snv)] type_len = len(mutation_df) log_msg = ( .format(num_dropped=orig_len-type_len, mut_types=.join(variant_snv))) logger.info(log_msg) valid_nuc_flag = (mutation_df[].apply(is_valid_nuc) & \ mutation_df[].apply(is_valid_nuc)) mutation_df = mutation_df[valid_nuc_flag] mutation_df = mutation_df[mutation_df[].apply(lambda x: len(x)==1)] mutation_df = mutation_df[mutation_df[].apply(lambda x: len(x)==1)] valid_len = len(mutation_df) log_msg = ( .format(num_dropped=type_len-valid_len)) logger.info(log_msg) if only_unique: dup_cols = [, , , , , ] mutation_df = mutation_df.drop_duplicates(subset=dup_cols) dedup_len = len(mutation_df) log_msg = ( .format(num_dropped=valid_len-dedup_len)) logger.info(log_msg) if not in mutation_df.columns: mutation_df[] = if not in mutation_df.columns: mutation_df[] = mutation_df[] = mutation_df[].astype(int) - 1 return mutation_df
Drops invalid mutations and corrects for 1-based coordinates. TODO: Be smarter about what coordinate system is put in the provided mutations. Parameters ---------- mutation_df : pd.DataFrame user provided mutations only_unique : bool flag indicating whether only unique mutations for each tumor sample should be kept. This avoids issues when the same mutation has duplicate reportings. Returns ------- mutation_df : pd.DataFrame mutations filtered for being valid and correct mutation type. Also converted 1-base coordinates to 0-based.
### Input: Drops invalid mutations and corrects for 1-based coordinates. TODO: Be smarter about what coordinate system is put in the provided mutations. Parameters ---------- mutation_df : pd.DataFrame user provided mutations only_unique : bool flag indicating whether only unique mutations for each tumor sample should be kept. This avoids issues when the same mutation has duplicate reportings. Returns ------- mutation_df : pd.DataFrame mutations filtered for being valid and correct mutation type. Also converted 1-base coordinates to 0-based. ### Response: def _fix_mutation_df(mutation_df, only_unique=False): orig_len = len(mutation_df) mutation_df = mutation_df[mutation_df.Variant_Classification.isin(variant_snv)] type_len = len(mutation_df) log_msg = ( .format(num_dropped=orig_len-type_len, mut_types=.join(variant_snv))) logger.info(log_msg) valid_nuc_flag = (mutation_df[].apply(is_valid_nuc) & \ mutation_df[].apply(is_valid_nuc)) mutation_df = mutation_df[valid_nuc_flag] mutation_df = mutation_df[mutation_df[].apply(lambda x: len(x)==1)] mutation_df = mutation_df[mutation_df[].apply(lambda x: len(x)==1)] valid_len = len(mutation_df) log_msg = ( .format(num_dropped=type_len-valid_len)) logger.info(log_msg) if only_unique: dup_cols = [, , , , , ] mutation_df = mutation_df.drop_duplicates(subset=dup_cols) dedup_len = len(mutation_df) log_msg = ( .format(num_dropped=valid_len-dedup_len)) logger.info(log_msg) if not in mutation_df.columns: mutation_df[] = if not in mutation_df.columns: mutation_df[] = mutation_df[] = mutation_df[].astype(int) - 1 return mutation_df
def save_mets(self):
    log.info("Saving mets '%s'" % self.mets_target)
    if self.automatic_backup:
        WorkspaceBackupManager(self).add()
    with open(self.mets_target, 'wb') as f:
        f.write(self.mets.to_xml(xmllint=True))
Write out the current state of the METS file.
### Input: Write out the current state of the METS file. ### Response: def save_mets(self):
    log.info("Saving mets '%s'" % self.mets_target)
    if self.automatic_backup:
        WorkspaceBackupManager(self).add()
    with open(self.mets_target, 'wb') as f:
        f.write(self.mets.to_xml(xmllint=True))
def podcast_episodes_iter(self, *, device_id=None, page_size=250):
    if device_id is None:
        device_id = self.device_id

    start_token = None
    prev_items = None

    while True:
        response = self._call(
            mc_calls.PodcastEpisode,
            device_id,
            max_results=page_size,
            start_token=start_token
        )
        items = response.body.get('data', {}).get('items', [])

        if items != prev_items:
            yield items
            prev_items = items
        else:
            break

        start_token = response.body.get('nextPageToken')
        if start_token is None:
            break
Get a paged iterator of podcast episodes for all subscribed podcasts. Parameters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Podcast episode dicts.
### Input: Get a paged iterator of podcast episodes for all subscribed podcasts. Parameters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Podcast episode dicts. ### Response: def podcast_episodes_iter(self, *, device_id=None, page_size=250):
    if device_id is None:
        device_id = self.device_id

    start_token = None
    prev_items = None

    while True:
        response = self._call(
            mc_calls.PodcastEpisode,
            device_id,
            max_results=page_size,
            start_token=start_token
        )
        items = response.body.get('data', {}).get('items', [])

        if items != prev_items:
            yield items
            prev_items = items
        else:
            break

        start_token = response.body.get('nextPageToken')
        if start_token is None:
            break
def find(self, target, relation): query = for i in self._execute(query, (relation, self.serialize(target))): yield self.deserialize(i[0])
returns back all elements the target has a relation to
### Input: returns back all elements the target has a relation to ### Response: def find(self, target, relation): query = for i in self._execute(query, (relation, self.serialize(target))): yield self.deserialize(i[0])
def incoordination_score(self, data_frame): diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0 duration = math.ceil(data_frame.td[-1]) return inc_s, duration
This method calculates the variance of the time interval in msec between taps :param data_frame: the data frame :type data_frame: pandas.DataFrame :return is: incoordination score :rtype is: float
### Input: This method calculates the variance of the time interval in msec between taps :param data_frame: the data frame :type data_frame: pandas.DataFrame :return is: incoordination score :rtype is: float ### Response: def incoordination_score(self, data_frame): diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0 duration = math.ceil(data_frame.td[-1]) return inc_s, duration
def load_elements(self):
    atomicData.setDataFile()
    self.S2_atom = Atom('S', 2)
    self.S3_atom = Atom('S', 3)
    self.Ar3_atom = Atom('Ar', 3)
    self.Ar4_atom = Atom('Ar', 4)
    self.N2_atom = Atom('N', 2)
    self.O2_atom = Atom('O', 2)
    self.O3_atom = Atom('O', 3)
    self.H1_atom = RecAtom('H', 1)
    self.He1_atom = RecAtom('He', 1)
    self.He2_atom = RecAtom('He', 2)
    self.diags = Diagnostics()
    self.logSI_OI_Gradient = random.normal(-1.53, 0.05, size = self.MC_array_len)
    self.OI_SI = power(10, -self.logSI_OI_Gradient)
    self.S3_ratio = self.S3_atom.getEmissivity(10000, 100, wave = 9531) / self.S3_atom.getEmissivity(10000, 100, wave = 9069)
    self.S3_9000_ratio = random.normal(self.S3_atom.getEmissivity(10000, 100, wave = 9531) / self.S3_atom.getEmissivity(10000, 100, wave = 9069), 0.01, size = self.MC_array_len)
    self.N2_6000_ratio = self.N2_atom.getEmissivity(10000, 100, wave = 6584) / self.N2_atom.getEmissivity(10000, 100, wave = 6548)
    self.O3_5000_ratio = self.O3_atom.getEmissivity(10000, 100, wave = 5007) / self.O3_atom.getEmissivity(10000, 100, wave = 4959)
    self.lines_factors = {}
    self.lines_factors[] = 1 + self.S3_ratio
    self.lines_factors[] = 1 + 1/self.S3_ratio
    self.m_SIV_correction = random.normal(1.1628, 0.00559, size = self.MC_array_len)
    self.n_SIV_correction = random.normal(0.0470, 0.0097, size = self.MC_array_len)
    self.m_TNII_correction = random.normal(0.762, 0.044, size = self.MC_array_len)
    self.n_TNII_correction = random.normal(0.239, 0.046, size = self.MC_array_len)
    lower_trunc, upper_trunc = (1.0 - 50.0) / 25.0, (100 - 50) / 25.0
    self.Truncated_gaussian = truncnorm(lower_trunc, upper_trunc, loc = 50, scale = 25)
    print
    return
S3: All energy and A values: Podobedova, Kelleher, and Wiese 2009, J. Phys. Chem. Ref. Data, Vol.
### Input: S3: All energy and A values: Podobedova, Kelleher, and Wiese 2009, J. Phys. Chem. Ref. Data, Vol. ### Response: def load_elements(self):
    atomicData.setDataFile()
    self.S2_atom = Atom('S', 2)
    self.S3_atom = Atom('S', 3)
    self.Ar3_atom = Atom('Ar', 3)
    self.Ar4_atom = Atom('Ar', 4)
    self.N2_atom = Atom('N', 2)
    self.O2_atom = Atom('O', 2)
    self.O3_atom = Atom('O', 3)
    self.H1_atom = RecAtom('H', 1)
    self.He1_atom = RecAtom('He', 1)
    self.He2_atom = RecAtom('He', 2)
    self.diags = Diagnostics()
    self.logSI_OI_Gradient = random.normal(-1.53, 0.05, size = self.MC_array_len)
    self.OI_SI = power(10, -self.logSI_OI_Gradient)
    self.S3_ratio = self.S3_atom.getEmissivity(10000, 100, wave = 9531) / self.S3_atom.getEmissivity(10000, 100, wave = 9069)
    self.S3_9000_ratio = random.normal(self.S3_atom.getEmissivity(10000, 100, wave = 9531) / self.S3_atom.getEmissivity(10000, 100, wave = 9069), 0.01, size = self.MC_array_len)
    self.N2_6000_ratio = self.N2_atom.getEmissivity(10000, 100, wave = 6584) / self.N2_atom.getEmissivity(10000, 100, wave = 6548)
    self.O3_5000_ratio = self.O3_atom.getEmissivity(10000, 100, wave = 5007) / self.O3_atom.getEmissivity(10000, 100, wave = 4959)
    self.lines_factors = {}
    self.lines_factors[] = 1 + self.S3_ratio
    self.lines_factors[] = 1 + 1/self.S3_ratio
    self.m_SIV_correction = random.normal(1.1628, 0.00559, size = self.MC_array_len)
    self.n_SIV_correction = random.normal(0.0470, 0.0097, size = self.MC_array_len)
    self.m_TNII_correction = random.normal(0.762, 0.044, size = self.MC_array_len)
    self.n_TNII_correction = random.normal(0.239, 0.046, size = self.MC_array_len)
    lower_trunc, upper_trunc = (1.0 - 50.0) / 25.0, (100 - 50) / 25.0
    self.Truncated_gaussian = truncnorm(lower_trunc, upper_trunc, loc = 50, scale = 25)
    print
    return
def k_ion(self, E): return self.n_p * _np.power(_spc.e, 2) / (2*_sltr.GeV2joule(E) * _spc.epsilon_0)
Geometric focusing force due to ion column for given plasma density as a function of *E*
### Input: Geometric focusing force due to ion column for given plasma density as a function of *E* ### Response: def k_ion(self, E): return self.n_p * _np.power(_spc.e, 2) / (2*_sltr.GeV2joule(E) * _spc.epsilon_0)
def resize(image, target_size, **kwargs): if isinstance(target_size, int): target_size = (target_size, target_size) if not isinstance(target_size, (list, tuple, np.ndarray)): message = ( "`target_size` should be a single number (width) or a list" "/tuple/ndarray (width, height), not {}.".format(type(target_size)) ) raise ValueError(message) rank = len(image.shape) assert 3 <= rank <= 4 original_size = image.shape[-3:-1] if original_size == target_size: return image ratios = [t / o for t, o in zip(target_size, original_size)] zoom = [1] * rank zoom[-3:-1] = ratios roughly_resized = ndimage.zoom(image, zoom, **kwargs) return roughly_resized[..., : target_size[0], : target_size[1], :]
Resize an ndarray image of rank 3 or 4. target_size can be a tuple `(width, height)` or scalar `width`.
### Input: Resize an ndarray image of rank 3 or 4. target_size can be a tuple `(width, height)` or scalar `width`. ### Response: def resize(image, target_size, **kwargs): if isinstance(target_size, int): target_size = (target_size, target_size) if not isinstance(target_size, (list, tuple, np.ndarray)): message = ( "`target_size` should be a single number (width) or a list" "/tuple/ndarray (width, height), not {}.".format(type(target_size)) ) raise ValueError(message) rank = len(image.shape) assert 3 <= rank <= 4 original_size = image.shape[-3:-1] if original_size == target_size: return image ratios = [t / o for t, o in zip(target_size, original_size)] zoom = [1] * rank zoom[-3:-1] = ratios roughly_resized = ndimage.zoom(image, zoom, **kwargs) return roughly_resized[..., : target_size[0], : target_size[1], :]
def PBToPy(numpb): return PhoneNumber(country_code=numpb.country_code if numpb.HasField("country_code") else None, national_number=numpb.national_number if numpb.HasField("national_number") else None, extension=numpb.extension if numpb.HasField("extension") else None, italian_leading_zero=numpb.italian_leading_zero if numpb.HasField("italian_leading_zero") else None, number_of_leading_zeros=numpb.number_of_leading_zeros if numpb.HasField("number_of_leading_zeros") else None, raw_input=numpb.raw_input if numpb.HasField("raw_input") else None, country_code_source=numpb.country_code_source if numpb.HasField("country_code_source") else CountryCodeSource.UNSPECIFIED, preferred_domestic_carrier_code=numpb.preferred_domestic_carrier_code if numpb.HasField("preferred_domestic_carrier_code") else None)
Convert phonenumber_pb2.PhoneNumber to phonenumber.PhoneNumber
### Input: Convert phonenumber_pb2.PhoneNumber to phonenumber.PhoneNumber ### Response: def PBToPy(numpb): return PhoneNumber(country_code=numpb.country_code if numpb.HasField("country_code") else None, national_number=numpb.national_number if numpb.HasField("national_number") else None, extension=numpb.extension if numpb.HasField("extension") else None, italian_leading_zero=numpb.italian_leading_zero if numpb.HasField("italian_leading_zero") else None, number_of_leading_zeros=numpb.number_of_leading_zeros if numpb.HasField("number_of_leading_zeros") else None, raw_input=numpb.raw_input if numpb.HasField("raw_input") else None, country_code_source=numpb.country_code_source if numpb.HasField("country_code_source") else CountryCodeSource.UNSPECIFIED, preferred_domestic_carrier_code=numpb.preferred_domestic_carrier_code if numpb.HasField("preferred_domestic_carrier_code") else None)
def retries(timeout=30, intervals=1, time=time): start = time() end = start + timeout if isinstance(intervals, Iterable): intervals = iter(intervals) else: intervals = repeat(intervals) return gen_retries(start, end, intervals, time)
Helper for retrying something, sleeping between attempts. Returns a generator that yields ``(elapsed, remaining, wait)`` tuples, giving times in seconds. The last item, `wait`, is the suggested amount of time to sleep before trying again. :param timeout: From now, how long to keep iterating, in seconds. This can be specified as a number, or as an iterable. In the latter case, the iterator is advanced each time an interval is needed. This allows for back-off strategies. :param intervals: The sleep between each iteration, in seconds, or an iterable from which to obtain intervals. :param time: A callable that returns the current time in seconds.
### Input: Helper for retrying something, sleeping between attempts. Returns a generator that yields ``(elapsed, remaining, wait)`` tuples, giving times in seconds. The last item, `wait`, is the suggested amount of time to sleep before trying again. :param timeout: From now, how long to keep iterating, in seconds. This can be specified as a number, or as an iterable. In the latter case, the iterator is advanced each time an interval is needed. This allows for back-off strategies. :param intervals: The sleep between each iteration, in seconds, or an iterable from which to obtain intervals. :param time: A callable that returns the current time in seconds. ### Response: def retries(timeout=30, intervals=1, time=time):
    start = time()
    end = start + timeout
    if isinstance(intervals, Iterable):
        intervals = iter(intervals)
    else:
        intervals = repeat(intervals)
    return gen_retries(start, end, intervals, time)
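A hypothetical polling loop built on the generator above; `service_is_ready` and the timings are illustrative:

from time import sleep

def wait_until_ready(service_is_ready):
    for elapsed, remaining, wait in retries(timeout=10, intervals=2):
        if service_is_ready():
            return True
        sleep(wait)
    return False  # the generator is exhausted once the timeout is reached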
def geometry_from_grid(self, grid, buffer=1e-8): y_min = np.min(grid[:, 0]) - buffer y_max = np.max(grid[:, 0]) + buffer x_min = np.min(grid[:, 1]) - buffer x_max = np.max(grid[:, 1]) + buffer pixel_scales = (float((y_max - y_min) / self.shape[0]), float((x_max - x_min) / self.shape[1])) origin = ((y_max + y_min) / 2.0, (x_max + x_min) / 2.0) pixel_neighbors, pixel_neighbors_size = self.neighbors_from_pixelization() return self.Geometry(shape=self.shape, pixel_scales=pixel_scales, origin=origin, pixel_neighbors=pixel_neighbors, pixel_neighbors_size=pixel_neighbors_size)
Determine the geometry of the rectangular grid, by overlaying it over a grid of coordinates such that its \ outer-most pixels align with the grid's outer most coordinates plus a small buffer. Parameters ----------- grid : ndarray The (y,x) grid of coordinates over which the rectangular pixelization is placed to determine its geometry. buffer : float The size the pixelization is buffered relative to the grid.
### Input: Determine the geometry of the rectangular grid, by overlaying it over a grid of coordinates such that its \ outer-most pixels align with the grid's outer most coordinates plus a small buffer. Parameters ----------- grid : ndarray The (y,x) grid of coordinates over which the rectangular pixelization is placed to determine its geometry. buffer : float The size the pixelization is buffered relative to the grid. ### Response: def geometry_from_grid(self, grid, buffer=1e-8): y_min = np.min(grid[:, 0]) - buffer y_max = np.max(grid[:, 0]) + buffer x_min = np.min(grid[:, 1]) - buffer x_max = np.max(grid[:, 1]) + buffer pixel_scales = (float((y_max - y_min) / self.shape[0]), float((x_max - x_min) / self.shape[1])) origin = ((y_max + y_min) / 2.0, (x_max + x_min) / 2.0) pixel_neighbors, pixel_neighbors_size = self.neighbors_from_pixelization() return self.Geometry(shape=self.shape, pixel_scales=pixel_scales, origin=origin, pixel_neighbors=pixel_neighbors, pixel_neighbors_size=pixel_neighbors_size)
def add_marker_to_qtls(qtlfile, mapfile, outputfile=): qtl_list = read_input_file(qtlfile, ) map_list = read_input_file(mapfile, ) if not qtl_list or not map_list: return qtl_list[0].append() qtls = [] qtls.append(qtl_list[0]) for qtl in qtl_list[1:]: qtl.append(add_marker_to_qtl(qtl, map_list)) qtls.append(qtl) LOG.info( % (len(qtls), qtlfile)) write_matrix(outputfile, qtls)
This function adds, to a list of QTLs, the closest marker to the QTL peak. :arg qtlfile: a CSV list of all the QTLs found. The file should be structured as follows:: Trait, Linkage group, position, other columns The other columns will not matter as long as the first three columns are as such. :arg mapfile: a CSV representation of the map used for the QTL mapping analysis. The file should be structured as follows:: Marker, Linkage group, position :kwarg outputfile: the name of the output file in which the list of QTLs with their closest marker will be written.
### Input: This function adds, to a list of QTLs, the closest marker to the QTL peak. :arg qtlfile: a CSV list of all the QTLs found. The file should be structured as follows:: Trait, Linkage group, position, other columns The other columns will not matter as long as the first three columns are as such. :arg mapfile: a CSV representation of the map used for the QTL mapping analysis. The file should be structured as follows:: Marker, Linkage group, position :kwarg outputfile: the name of the output file in which the list of QTLs with their closest marker will be written. ### Response: def add_marker_to_qtls(qtlfile, mapfile, outputfile=):
    qtl_list = read_input_file(qtlfile, )
    map_list = read_input_file(mapfile, )
    if not qtl_list or not map_list:
        return
    qtl_list[0].append()
    qtls = []
    qtls.append(qtl_list[0])
    for qtl in qtl_list[1:]:
        qtl.append(add_marker_to_qtl(qtl, map_list))
        qtls.append(qtl)
    LOG.info( % (len(qtls), qtlfile))
    write_matrix(outputfile, qtls)
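An illustrative closest-marker lookup on a single linkage group, with the row layout assumed from the docstring (marker, linkage group, position):

def closest_marker_demo(qtl_group, qtl_pos, markers):
    same_group = [m for m in markers if m[1] == qtl_group]
    return min(same_group, key=lambda m: abs(float(m[2]) - qtl_pos))[0]

markers = [('M1', '2', '10.0'), ('M2', '2', '14.5'), ('M3', '3', '1.0')]
closest_marker_demo('2', 13.0, markers)  # -> 'M2'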
def _get_workflow_with_uuid_or_name(uuid_or_name, user_uuid):
    from reana_db.models import Workflow
    if not uuid_or_name:
        raise ValueError('No workflow name or UUID provided.')
    try:
        uuid_or_name.encode('ascii')
    except UnicodeEncodeError:
        raise ValueError('Workflow name {} is not valid.'.format(uuid_or_name))
    try:
        is_uuid = UUID('{' + uuid_or_name + '}', version=4)
    except (TypeError, ValueError):
        is_uuid = None
    if is_uuid:
        return _get_workflow_by_uuid(uuid_or_name)
    else:
        try:
            workflow_name, run_number = uuid_or_name.rsplit('.', maxsplit=1)
        except ValueError:
            return _get_workflow_by_name(uuid_or_name, user_uuid)
        workflow = Workflow.query.filter(
            Workflow.name == workflow_name,
            Workflow.run_number == run_number,
            Workflow.owner_id == user_uuid).\
            one_or_none()
        if not workflow:
            raise ValueError(
                'Workflow {} does not exist with run number {}.'.
                format(workflow_name, run_number))
        return workflow
Get Workflow from database with uuid or name. :param uuid_or_name: String representing a valid UUIDv4 or valid Workflow name. Valid name contains only ASCII alphanumerics. Name might be in format 'reana.workflow.123' with arbitrary number of dot-delimited substrings, where last substring specifies the run number of the workflow this workflow name refers to. If name does not contain a valid run number, but it is a valid name, workflow with latest run number of all the workflows with this name is returned. :type uuid_or_name: String :rtype: reana-db.models.Workflow
### Input: Get Workflow from database with uuid or name. :param uuid_or_name: String representing a valid UUIDv4 or valid Workflow name. Valid name contains only ASCII alphanumerics. Name might be in format 'reana.workflow.123' with arbitrary number of dot-delimited substrings, where last substring specifies the run number of the workflow this workflow name refers to. If name does not contain a valid run number, but it is a valid name, workflow with latest run number of all the workflows with this name is returned. :type uuid_or_name: String :rtype: reana-db.models.Workflow ### Response: def _get_workflow_with_uuid_or_name(uuid_or_name, user_uuid):
    from reana_db.models import Workflow
    if not uuid_or_name:
        raise ValueError('No workflow name or UUID provided.')
    try:
        uuid_or_name.encode('ascii')
    except UnicodeEncodeError:
        raise ValueError('Workflow name {} is not valid.'.format(uuid_or_name))
    try:
        is_uuid = UUID('{' + uuid_or_name + '}', version=4)
    except (TypeError, ValueError):
        is_uuid = None
    if is_uuid:
        return _get_workflow_by_uuid(uuid_or_name)
    else:
        try:
            workflow_name, run_number = uuid_or_name.rsplit('.', maxsplit=1)
        except ValueError:
            return _get_workflow_by_name(uuid_or_name, user_uuid)
        workflow = Workflow.query.filter(
            Workflow.name == workflow_name,
            Workflow.run_number == run_number,
            Workflow.owner_id == user_uuid).\
            one_or_none()
        if not workflow:
            raise ValueError(
                'Workflow {} does not exist with run number {}.'.
                format(workflow_name, run_number))
        return workflow
def smartResizeColumnsToContents( self ): self.blockSignals(True) self.setUpdatesEnabled(False) header = self.header() header.blockSignals(True) columns = range(self.columnCount()) sizes = [self.columnWidth(c) for c in columns] header.resizeSections(header.ResizeToContents) for col in columns: width = self.columnWidth(col) if ( width < sizes[col] ): self.setColumnWidth(col, sizes[col]) header.blockSignals(False) self.setUpdatesEnabled(True) self.blockSignals(False)
Resizes the columns to the contents based on the user preferences.
### Input: Resizes the columns to the contents based on the user preferences. ### Response: def smartResizeColumnsToContents( self ): self.blockSignals(True) self.setUpdatesEnabled(False) header = self.header() header.blockSignals(True) columns = range(self.columnCount()) sizes = [self.columnWidth(c) for c in columns] header.resizeSections(header.ResizeToContents) for col in columns: width = self.columnWidth(col) if ( width < sizes[col] ): self.setColumnWidth(col, sizes[col]) header.blockSignals(False) self.setUpdatesEnabled(True) self.blockSignals(False)
def list(self):
    before, after = self.filename_template.split('%s', 1)
    filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
                                                re.escape(after)))
    result = []
    for filename in os.listdir(self.path):
        if filename.endswith(_fs_transaction_suffix):
            continue
        match = filename_re.match(filename)
        if match is not None:
            result.append(match.group(1))
    return result
Lists all sessions in the store. .. versionadded:: 0.6
### Input: Lists all sessions in the store. .. versionadded:: 0.6 ### Response: def list(self):
    before, after = self.filename_template.split('%s', 1)
    filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
                                                re.escape(after)))
    result = []
    for filename in os.listdir(self.path):
        if filename.endswith(_fs_transaction_suffix):
            continue
        match = filename_re.match(filename)
        if match is not None:
            result.append(match.group(1))
    return result
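The template-to-regex idea in miniature; the template value here is an assumption of the shape such stores use:

import re

before, after = 'werkzeug_%s.sess'.split('%s', 1)
pattern = re.compile(r'%s(.{5,})%s$' % (re.escape(before), re.escape(after)))
pattern.match('werkzeug_abc123xyz.sess').group(1)  # -> 'abc123xyz'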