code
stringlengths
70
11.9k
docstring
stringlengths
4
7.08k
text
stringlengths
128
15k
def _adj_include_only(self): if in self.kws: goids = self._get_goids(self.kws[]) if goids: self.kws[] = goids else: raise Exception("NO GO IDs FOUND IN include_only")
Adjust keywords, if needed.
### Input: Adjust keywords, if needed. ### Response: def _adj_include_only(self): if in self.kws: goids = self._get_goids(self.kws[]) if goids: self.kws[] = goids else: raise Exception("NO GO IDs FOUND IN include_only")
def loadFromURL(self, url): if isfile(url) is True: file = open(url, ) else: file = urlopen(url) try: result = self.loadDocument(file) except Exception, ex: file.close() raise ParseError(( %url,) + ex.args) else: file.close() return result
Load an xml file from a URL and return a DOM document.
### Input: Load an xml file from a URL and return a DOM document. ### Response: def loadFromURL(self, url): if isfile(url) is True: file = open(url, ) else: file = urlopen(url) try: result = self.loadDocument(file) except Exception, ex: file.close() raise ParseError(( %url,) + ex.args) else: file.close() return result
def LogHttpAdminUIAccess(self, request, response): event_id = self.GetNewEventId() api_method = response.headers.get("X-API-Method", "unknown") api_reason = response.headers.get("X-GRR-Reason", "none") log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % ( event_id, api_method, request.user, api_reason, request.full_path, response.status_code) logging.info(log_msg) if response.headers.get("X-No-Log") != "True": if data_store.RelationalDBEnabled(): entry = rdf_objects.APIAuditEntry.FromHttpRequestResponse( request, response) data_store.REL_DB.WriteAPIAuditEntry(entry)
Log an http based api call. Args: request: A WSGI request object. response: A WSGI response object.
### Input: Log an http based api call. Args: request: A WSGI request object. response: A WSGI response object. ### Response: def LogHttpAdminUIAccess(self, request, response): event_id = self.GetNewEventId() api_method = response.headers.get("X-API-Method", "unknown") api_reason = response.headers.get("X-GRR-Reason", "none") log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % ( event_id, api_method, request.user, api_reason, request.full_path, response.status_code) logging.info(log_msg) if response.headers.get("X-No-Log") != "True": if data_store.RelationalDBEnabled(): entry = rdf_objects.APIAuditEntry.FromHttpRequestResponse( request, response) data_store.REL_DB.WriteAPIAuditEntry(entry)
def lookup_class(fully_qualified_name): module_name, class_name = str(fully_qualified_name).rsplit(".", 1) module = __import__(module_name, globals(), locals(), [class_name], 0) Class = getattr(module, class_name) if not inspect.isclass(Class): raise TypeError( "%s is not of type class: %s" % (class_name, type(Class))) return Class
Given its fully qualified name, finds the desired class and imports it. Returns the Class object if found.
### Input: Given its fully qualified name, finds the desired class and imports it. Returns the Class object if found. ### Response: def lookup_class(fully_qualified_name): module_name, class_name = str(fully_qualified_name).rsplit(".", 1) module = __import__(module_name, globals(), locals(), [class_name], 0) Class = getattr(module, class_name) if not inspect.isclass(Class): raise TypeError( "%s is not of type class: %s" % (class_name, type(Class))) return Class
def _convert_template_option(template): option = {} extraction_method = template.get() if extraction_method == : option[] = True elif extraction_method == : option[] = True elif extraction_method == : option[] = True option[] = template.get() option[] = [round(template[], 3), round(template[], 3), round(template[], 3), round(template[], 3)] return option
Convert Tabula app template to tabula-py option Args: template (dict): Tabula app template Returns: `obj`:dict: tabula-py option
### Input: Convert Tabula app template to tabula-py option Args: template (dict): Tabula app template Returns: `obj`:dict: tabula-py option ### Response: def _convert_template_option(template): option = {} extraction_method = template.get() if extraction_method == : option[] = True elif extraction_method == : option[] = True elif extraction_method == : option[] = True option[] = template.get() option[] = [round(template[], 3), round(template[], 3), round(template[], 3), round(template[], 3)] return option
def blurred_image_1d_from_1d_unblurred_and_blurring_images(unblurred_image_1d, blurring_image_1d, convolver): return convolver.convolve_image(image_array=unblurred_image_1d, blurring_array=blurring_image_1d)
For a 1D masked image and 1D blurring image (the regions outside the mask whose light blurs \ into the mask after PSF convolution), use both to compute the blurred image within the mask via PSF convolution. The convolution uses each image's convolver (*See ccd.convolution*). Parameters ---------- unblurred_image_1d : ndarray The 1D masked datas which is blurred. blurring_image_1d : ndarray The 1D masked blurring image which is used for blurring. convolver : ccd.convolution.ConvolverImage The image-convolver which performs the convolution in 1D.
### Input: For a 1D masked image and 1D blurring image (the regions outside the mask whose light blurs \ into the mask after PSF convolution), use both to compute the blurred image within the mask via PSF convolution. The convolution uses each image's convolver (*See ccd.convolution*). Parameters ---------- unblurred_image_1d : ndarray The 1D masked datas which is blurred. blurring_image_1d : ndarray The 1D masked blurring image which is used for blurring. convolver : ccd.convolution.ConvolverImage The image-convolver which performs the convolution in 1D. ### Response: def blurred_image_1d_from_1d_unblurred_and_blurring_images(unblurred_image_1d, blurring_image_1d, convolver): return convolver.convolve_image(image_array=unblurred_image_1d, blurring_array=blurring_image_1d)
def _get_dacl(path, objectType): try: dacl = win32security.GetNamedSecurityInfo( path, objectType, win32security.DACL_SECURITY_INFORMATION ).GetSecurityDescriptorDacl() except Exception: dacl = None return dacl
Gets the DACL of a path
### Input: Gets the DACL of a path ### Response: def _get_dacl(path, objectType): try: dacl = win32security.GetNamedSecurityInfo( path, objectType, win32security.DACL_SECURITY_INFORMATION ).GetSecurityDescriptorDacl() except Exception: dacl = None return dacl
def _check_log_level(self, level): if level not in list(self.level_dict.keys()): self.log_level = self.logger.warn("Unknown log level , defaulting to DEBUG" .format(lev=level))
Ensures a valid log level @param level: the asked for level
### Input: Ensures a valid log level @param level: the asked for level ### Response: def _check_log_level(self, level): if level not in list(self.level_dict.keys()): self.log_level = self.logger.warn("Unknown log level , defaulting to DEBUG" .format(lev=level))
def list_functions(*args, **kwargs): ****sys.list_**module.specific_function if not args: for func in __salt__: if func.startswith(moduledot): names.add(func) return sorted(names)
List the functions for all modules. Optionally, specify a module or modules from which to list. CLI Example: .. code-block:: bash salt '*' sys.list_functions salt '*' sys.list_functions sys salt '*' sys.list_functions sys user Function names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_functions 'sys.list_*' .. versionadded:: ? .. code-block:: bash salt '*' sys.list_functions 'module.specific_function'
### Input: List the functions for all modules. Optionally, specify a module or modules from which to list. CLI Example: .. code-block:: bash salt '*' sys.list_functions salt '*' sys.list_functions sys salt '*' sys.list_functions sys user Function names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_functions 'sys.list_*' .. versionadded:: ? .. code-block:: bash salt '*' sys.list_functions 'module.specific_function' ### Response: def list_functions(*args, **kwargs): ****sys.list_**module.specific_function if not args: for func in __salt__: if func.startswith(moduledot): names.add(func) return sorted(names)
def _get_popularity_baseline(self): response = self.__proxy__.get_popularity_baseline() from .popularity_recommender import PopularityRecommender return PopularityRecommender(response)
Returns a new popularity model matching the data set this model was trained with. Can be used for comparison purposes.
### Input: Returns a new popularity model matching the data set this model was trained with. Can be used for comparison purposes. ### Response: def _get_popularity_baseline(self): response = self.__proxy__.get_popularity_baseline() from .popularity_recommender import PopularityRecommender return PopularityRecommender(response)
def render_vars(self): return { : [ { : record.getMessage(), : dt.datetime.fromtimestamp(record.created).strftime(), } for record in self.handler.records ] }
Template variables.
### Input: Template variables. ### Response: def render_vars(self): return { : [ { : record.getMessage(), : dt.datetime.fromtimestamp(record.created).strftime(), } for record in self.handler.records ] }
def snapshots_get(container, name, remote_addr=None, cert=None, key=None, verify_cert=True): certkeyremote_addr* container = container_get( container, remote_addr, cert, key, verify_cert, _raw=True ) return container.snapshots.get(name)
Get information about snapshot for a container container : The name of the container to get. name : The name of the snapshot. remote_addr : An URL to a remote server. The 'cert' and 'key' fields must also be provided if 'remote_addr' is defined. Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Verify the ssl certificate. Default: True CLI Examples: .. code-block:: bash $ salt '*' lxd.snapshots_get test-container test-snapshot
### Input: Get information about snapshot for a container container : The name of the container to get. name : The name of the snapshot. remote_addr : An URL to a remote server. The 'cert' and 'key' fields must also be provided if 'remote_addr' is defined. Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Verify the ssl certificate. Default: True CLI Examples: .. code-block:: bash $ salt '*' lxd.snapshots_get test-container test-snapshot ### Response: def snapshots_get(container, name, remote_addr=None, cert=None, key=None, verify_cert=True): certkeyremote_addr* container = container_get( container, remote_addr, cert, key, verify_cert, _raw=True ) return container.snapshots.get(name)
def remove_declaration(self, decl): del self.declarations[self.declarations.index(decl)] decl.cache.reset()
Removes declaration from members list. :param decl: declaration to be removed :type decl: :class:`declaration_t`
### Input: Removes declaration from members list. :param decl: declaration to be removed :type decl: :class:`declaration_t` ### Response: def remove_declaration(self, decl): del self.declarations[self.declarations.index(decl)] decl.cache.reset()
def fix_style(style=, ax=None, **kwargs): t be changed directly in matplotlib.rcParams or with styles. Apply this function to every axe you created. Parameters ---------- ax: a matplotlib axe. If None, the last axe generated is used style: string or list of string [, , , ,,] one of the styles previously defined. It should match the style you chose in set_style but nothing forces you to. kwargs: dict edit any of the style_params keys. ex: >>> tight_layout=False Examples -------- plb.set_style() plt.plot(a,np.cos(a)) plb.fix_style(,**{:False}) See Also -------- :func:`~publib.publib.set_style` :func:`~publib.tools.tools.reset_defaults` .mplstyle.mplstyle{0} is not a valid style. Please pick a style from the list available in {0}: {1}'.format(_get_lib(), avail)) _fix_style(style, ax, **kwargs)
Add an extra formatting layer to an axe, that couldn't be changed directly in matplotlib.rcParams or with styles. Apply this function to every axe you created. Parameters ---------- ax: a matplotlib axe. If None, the last axe generated is used style: string or list of string ['basic', 'article', 'poster', 'B&W','talk','origin'] one of the styles previously defined. It should match the style you chose in set_style but nothing forces you to. kwargs: dict edit any of the style_params keys. ex: >>> tight_layout=False Examples -------- plb.set_style('poster') plt.plot(a,np.cos(a)) plb.fix_style('poster',**{'draggable_legend':False}) See Also -------- :func:`~publib.publib.set_style` :func:`~publib.tools.tools.reset_defaults`
### Input: Add an extra formatting layer to an axe, that couldn't be changed directly in matplotlib.rcParams or with styles. Apply this function to every axe you created. Parameters ---------- ax: a matplotlib axe. If None, the last axe generated is used style: string or list of string ['basic', 'article', 'poster', 'B&W','talk','origin'] one of the styles previously defined. It should match the style you chose in set_style but nothing forces you to. kwargs: dict edit any of the style_params keys. ex: >>> tight_layout=False Examples -------- plb.set_style('poster') plt.plot(a,np.cos(a)) plb.fix_style('poster',**{'draggable_legend':False}) See Also -------- :func:`~publib.publib.set_style` :func:`~publib.tools.tools.reset_defaults` ### Response: def fix_style(style=, ax=None, **kwargs): t be changed directly in matplotlib.rcParams or with styles. Apply this function to every axe you created. Parameters ---------- ax: a matplotlib axe. If None, the last axe generated is used style: string or list of string [, , , ,,] one of the styles previously defined. It should match the style you chose in set_style but nothing forces you to. kwargs: dict edit any of the style_params keys. ex: >>> tight_layout=False Examples -------- plb.set_style() plt.plot(a,np.cos(a)) plb.fix_style(,**{:False}) See Also -------- :func:`~publib.publib.set_style` :func:`~publib.tools.tools.reset_defaults` .mplstyle.mplstyle{0} is not a valid style. Please pick a style from the list available in {0}: {1}'.format(_get_lib(), avail)) _fix_style(style, ax, **kwargs)
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs): kwargs[] = True if kwargs.get(): return self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) return data
replace_namespaced_cron_job # noqa: E501 replace the specified CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_cron_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CronJob (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V2alpha1CronJob body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V2alpha1CronJob If the method is called asynchronously, returns the request thread.
### Input: replace_namespaced_cron_job # noqa: E501 replace the specified CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_cron_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CronJob (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V2alpha1CronJob body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V2alpha1CronJob If the method is called asynchronously, returns the request thread. ### Response: def replace_namespaced_cron_job(self, name, namespace, body, **kwargs): kwargs[] = True if kwargs.get(): return self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) return data
def flow_rect_weir(Height, Width): ut.check_range([Height, ">0", "Height"], [Width, ">0", "Width"]) return ((2/3) * con.VC_ORIFICE_RATIO * (np.sqrt(2*gravity.magnitude) * Height**(3/2)) * Width)
Return the flow of a rectangular weir.
### Input: Return the flow of a rectangular weir. ### Response: def flow_rect_weir(Height, Width): ut.check_range([Height, ">0", "Height"], [Width, ">0", "Width"]) return ((2/3) * con.VC_ORIFICE_RATIO * (np.sqrt(2*gravity.magnitude) * Height**(3/2)) * Width)
def disable_stdout_buffering(): stdout_orig = sys.stdout sys.stdout = os.fdopen(sys.stdout.fileno(), , 0) return stdout_orig
This turns off stdout buffering so that outputs are immediately materialized and log messages show up before the program exits
### Input: This turns off stdout buffering so that outputs are immediately materialized and log messages show up before the program exits ### Response: def disable_stdout_buffering(): stdout_orig = sys.stdout sys.stdout = os.fdopen(sys.stdout.fileno(), , 0) return stdout_orig
def read_json(path, default=None, fatal=True, logger=None): path = resolved_path(path) if not path or not os.path.exists(path): if default is None: return abort("No file %s", short(path), fatal=(fatal, default)) return default try: with io.open(path, "rt") as fh: data = json.load(fh) if default is not None and type(data) != type(default): return abort("Wrong type %s for %s, expecting %s", type(data), short(path), type(default), fatal=(fatal, default)) if logger: logger("Read %s", short(path)) return data except Exception as e: return abort("Couldn't read %s: %s", short(path), e, fatal=(fatal, default))
:param str|None path: Path to file to deserialize :param dict|list|None default: Default if file is not present, or if it's not json :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :return dict|list: Deserialized data from file
### Input: :param str|None path: Path to file to deserialize :param dict|list|None default: Default if file is not present, or if it's not json :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :return dict|list: Deserialized data from file ### Response: def read_json(path, default=None, fatal=True, logger=None): path = resolved_path(path) if not path or not os.path.exists(path): if default is None: return abort("No file %s", short(path), fatal=(fatal, default)) return default try: with io.open(path, "rt") as fh: data = json.load(fh) if default is not None and type(data) != type(default): return abort("Wrong type %s for %s, expecting %s", type(data), short(path), type(default), fatal=(fatal, default)) if logger: logger("Read %s", short(path)) return data except Exception as e: return abort("Couldn't read %s: %s", short(path), e, fatal=(fatal, default))
def as4_capability(self, **kwargs): enabled = kwargs.pop(, True) callback = kwargs.pop(, self._callback) if not isinstance(enabled, bool): raise ValueError( % repr(enabled)) as4_capability_args = dict(vrf_name=kwargs.pop(, ), rbridge_id=kwargs.pop(, )) as4_capability = getattr(self._rbridge, ) config = as4_capability(**as4_capability_args) if not enabled: capability = config.find() capability.set(, ) return callback(config)
Set Spanning Tree state. Args: enabled (bool): Is AS4 Capability enabled? (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ValueError: if `enabled` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.local_asn(local_as='65535', ... rbridge_id='225') ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=True) ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=False)
### Input: Set Spanning Tree state. Args: enabled (bool): Is AS4 Capability enabled? (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ValueError: if `enabled` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.local_asn(local_as='65535', ... rbridge_id='225') ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=True) ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=False) ### Response: def as4_capability(self, **kwargs): enabled = kwargs.pop(, True) callback = kwargs.pop(, self._callback) if not isinstance(enabled, bool): raise ValueError( % repr(enabled)) as4_capability_args = dict(vrf_name=kwargs.pop(, ), rbridge_id=kwargs.pop(, )) as4_capability = getattr(self._rbridge, ) config = as4_capability(**as4_capability_args) if not enabled: capability = config.find() capability.set(, ) return callback(config)
def variable(dims=1): if dims == 1: return Poly({(1,): 1}, dim=1, shape=()) return Poly({ tuple(indices): indices for indices in numpy.eye(dims, dtype=int) }, dim=dims, shape=(dims,))
Simple constructor to create single variables to create polynomials. Args: dims (int): Number of dimensions in the array. Returns: (Poly): Polynomial array with unit components in each dimension. Examples: >>> print(variable()) q0 >>> print(variable(3)) [q0, q1, q2]
### Input: Simple constructor to create single variables to create polynomials. Args: dims (int): Number of dimensions in the array. Returns: (Poly): Polynomial array with unit components in each dimension. Examples: >>> print(variable()) q0 >>> print(variable(3)) [q0, q1, q2] ### Response: def variable(dims=1): if dims == 1: return Poly({(1,): 1}, dim=1, shape=()) return Poly({ tuple(indices): indices for indices in numpy.eye(dims, dtype=int) }, dim=dims, shape=(dims,))
def setWritePrivs(fname, makeWritable, ignoreErrors=False): privs = os.stat(fname).st_mode try: if makeWritable: os.chmod(fname, privs | stat.S_IWUSR) else: os.chmod(fname, privs & (~ stat.S_IWUSR)) except OSError: if ignoreErrors: pass else: raise
Set a file named fname to be writable (or not) by user, with the option to ignore errors. There is nothing ground-breaking here, but I was annoyed with having to repeate this little bit of code.
### Input: Set a file named fname to be writable (or not) by user, with the option to ignore errors. There is nothing ground-breaking here, but I was annoyed with having to repeate this little bit of code. ### Response: def setWritePrivs(fname, makeWritable, ignoreErrors=False): privs = os.stat(fname).st_mode try: if makeWritable: os.chmod(fname, privs | stat.S_IWUSR) else: os.chmod(fname, privs & (~ stat.S_IWUSR)) except OSError: if ignoreErrors: pass else: raise
def get_wififirmware(self,callb=None): if self.wifi_firmware_version is None: mypartial=partial(self.resp_set_wififirmware) if callb: mycallb=lambda x,y:(mypartial(y),callb(x,y)) else: mycallb=lambda x,y:mypartial(y) response = self.req_with_resp(GetWifiFirmware, StateWifiFirmware,mycallb ) return (self.wifi_firmware_version,self.wifi_firmware_build_timestamp)
Convenience method to request the wifi firmware info from the device This method will check whether the value has already been retrieved from the device, if so, it will simply return it. If no, it will request the information from the device and request that callb be executed when a response is received. The default callback will simply cache the value. :param callb: Callable to be used when the response is received. If not set, self.resp_set_label will be used. :type callb: callable :returns: The cached value (version, timestamp) :rtype: 2-tuple
### Input: Convenience method to request the wifi firmware info from the device This method will check whether the value has already been retrieved from the device, if so, it will simply return it. If no, it will request the information from the device and request that callb be executed when a response is received. The default callback will simply cache the value. :param callb: Callable to be used when the response is received. If not set, self.resp_set_label will be used. :type callb: callable :returns: The cached value (version, timestamp) :rtype: 2-tuple ### Response: def get_wififirmware(self,callb=None): if self.wifi_firmware_version is None: mypartial=partial(self.resp_set_wififirmware) if callb: mycallb=lambda x,y:(mypartial(y),callb(x,y)) else: mycallb=lambda x,y:mypartial(y) response = self.req_with_resp(GetWifiFirmware, StateWifiFirmware,mycallb ) return (self.wifi_firmware_version,self.wifi_firmware_build_timestamp)
def load(self): mod = import_module(self.module_name) obj = mod if self.object_name: for attr in self.object_name.split(): obj = getattr(obj, attr) return obj
Load the object to which this entry point refers.
### Input: Load the object to which this entry point refers. ### Response: def load(self): mod = import_module(self.module_name) obj = mod if self.object_name: for attr in self.object_name.split(): obj = getattr(obj, attr) return obj
def load_private_key(source, password=None): if isinstance(source, keys.PrivateKeyInfo): private_object = source else: if password is not None: if isinstance(password, str_cls): password = password.encode() if not isinstance(password, byte_cls): raise TypeError(pretty_message( , type_name(password) )) if isinstance(source, str_cls): with open(source, ) as f: source = f.read() elif not isinstance(source, byte_cls): raise TypeError(pretty_message( , type_name(source) )) private_object = parse_private(source, password) return _load_key(private_object)
Loads a private key into a PrivateKey object :param source: A byte string of file contents, a unicode string filename or an asn1crypto.keys.PrivateKeyInfo object :param password: A byte or unicode string to decrypt the private key file. Unicode strings will be encoded using UTF-8. Not used is the source is a PrivateKeyInfo object. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type oscrypto.errors.AsymmetricKeyError - when the private key is incompatible with the OS crypto library OSError - when an error is returned by the OS crypto library :return: A PrivateKey object
### Input: Loads a private key into a PrivateKey object :param source: A byte string of file contents, a unicode string filename or an asn1crypto.keys.PrivateKeyInfo object :param password: A byte or unicode string to decrypt the private key file. Unicode strings will be encoded using UTF-8. Not used is the source is a PrivateKeyInfo object. :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type oscrypto.errors.AsymmetricKeyError - when the private key is incompatible with the OS crypto library OSError - when an error is returned by the OS crypto library :return: A PrivateKey object ### Response: def load_private_key(source, password=None): if isinstance(source, keys.PrivateKeyInfo): private_object = source else: if password is not None: if isinstance(password, str_cls): password = password.encode() if not isinstance(password, byte_cls): raise TypeError(pretty_message( , type_name(password) )) if isinstance(source, str_cls): with open(source, ) as f: source = f.read() elif not isinstance(source, byte_cls): raise TypeError(pretty_message( , type_name(source) )) private_object = parse_private(source, password) return _load_key(private_object)
def read(self, n): while len(self.pool) < n: self.cur = self.files.next() self.pool = numpy.append(self.pool, self.fetch(self.cur), axis=0) rt = self.pool[:n] if n == len(self.pool): self.pool = self.fetch(None) else: self.pool = self.pool[n:] return rt
return at most n array items, move the cursor.
### Input: return at most n array items, move the cursor. ### Response: def read(self, n): while len(self.pool) < n: self.cur = self.files.next() self.pool = numpy.append(self.pool, self.fetch(self.cur), axis=0) rt = self.pool[:n] if n == len(self.pool): self.pool = self.fetch(None) else: self.pool = self.pool[n:] return rt
def openEmbedded(self, name): ndx = self.toc.find(name) if ndx == -1: raise KeyError, "Member not found in %s" % (name, self.path) (dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx) if flag: raise ValueError, "Cannot open compressed archive %s in place" return CArchive(self.path, self.pkgstart+dpos, dlen)
Open a CArchive of name NAME embedded within this CArchive.
### Input: Open a CArchive of name NAME embedded within this CArchive. ### Response: def openEmbedded(self, name): ndx = self.toc.find(name) if ndx == -1: raise KeyError, "Member not found in %s" % (name, self.path) (dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx) if flag: raise ValueError, "Cannot open compressed archive %s in place" return CArchive(self.path, self.pkgstart+dpos, dlen)
def compute_sensori_effect(self, m): if self.use_basis_functions: func = simulation.step(m, 1000*self.dt) else: traj = self.bf.trajectory(m).flatten() func = lambda t: traj[int(1000*self.dt * t)] states = simulation.simulate(self.n, self.x0, self.dt, func) last_state_cartesian = simulation.cartesian(self.n, states[-1]) end_effector_pos = array([last_state_cartesian[self.n], last_state_cartesian[2*self.n]]) end_effector_pos += self.noise * random.randn(len(end_effector_pos)) return end_effector_pos
This function generates the end effector position at the end of the movement. .. note:: The duration of the movement is 1000*self.dt .. note:: To use basis functions rather than step functions, set :use_basis_functions: to 1
### Input: This function generates the end effector position at the end of the movement. .. note:: The duration of the movement is 1000*self.dt .. note:: To use basis functions rather than step functions, set :use_basis_functions: to 1 ### Response: def compute_sensori_effect(self, m): if self.use_basis_functions: func = simulation.step(m, 1000*self.dt) else: traj = self.bf.trajectory(m).flatten() func = lambda t: traj[int(1000*self.dt * t)] states = simulation.simulate(self.n, self.x0, self.dt, func) last_state_cartesian = simulation.cartesian(self.n, states[-1]) end_effector_pos = array([last_state_cartesian[self.n], last_state_cartesian[2*self.n]]) end_effector_pos += self.noise * random.randn(len(end_effector_pos)) return end_effector_pos
def calculate_frequencies(data, ndim: int, binnings, weights=None, dtype=None) -> Tuple[np.ndarray, np.ndarray, float]: if data is not None: data = np.asarray(data) if data.ndim != 2: raise RuntimeError("histogram_nd.calculate_frequencies requires 2D input data.") if weights is None: if not dtype: dtype = np.int64 if data is not None: weights = np.ones(data.shape[0], dtype=dtype) else: weights = np.asarray(weights) if data is None: raise RuntimeError("Weights specified but data not.") else: if data.shape[0] != weights.shape[0]: raise RuntimeError("Different number of entries in data and weights.") if dtype: dtype = np.dtype(dtype) if dtype.kind in "iu" and weights.dtype.kind == "f": raise RuntimeError("Integer histogram requested but float weights entered.") else: dtype = weights.dtype edges_and_mask = [binning.numpy_bins_with_mask for binning in binnings] edges = [em[0] for em in edges_and_mask] masks = [em[1] for em in edges_and_mask] ixgrid = np.ix_(*masks) if data.shape[0]: frequencies, _ = np.histogramdd(data, edges, weights=weights) frequencies = frequencies.astype(dtype) frequencies = frequencies[ixgrid] missing = weights.sum() - frequencies.sum() err_freq, _ = np.histogramdd(data, edges, weights=weights ** 2) errors2 = err_freq[ixgrid].astype(dtype) else: frequencies = None missing = 0 errors2 = None return frequencies, errors2, missing
Get frequencies and bin errors from the data (n-dimensional variant). Parameters ---------- data : array_like 2D array with ndim columns and row for each entry. ndim : int Dimensionality od the data. binnings: Binnings to apply in all axes. weights : Optional[array_like] 1D array of weights to assign to values. (If present, must have same length as the number of rows.) dtype : Optional[type] Underlying type for the histogram. (If weights are specified, default is float. Otherwise int64.) Returns ------- frequencies : array_like errors2 : array_like missing : scalar[dtype]
### Input: Get frequencies and bin errors from the data (n-dimensional variant). Parameters ---------- data : array_like 2D array with ndim columns and row for each entry. ndim : int Dimensionality od the data. binnings: Binnings to apply in all axes. weights : Optional[array_like] 1D array of weights to assign to values. (If present, must have same length as the number of rows.) dtype : Optional[type] Underlying type for the histogram. (If weights are specified, default is float. Otherwise int64.) Returns ------- frequencies : array_like errors2 : array_like missing : scalar[dtype] ### Response: def calculate_frequencies(data, ndim: int, binnings, weights=None, dtype=None) -> Tuple[np.ndarray, np.ndarray, float]: if data is not None: data = np.asarray(data) if data.ndim != 2: raise RuntimeError("histogram_nd.calculate_frequencies requires 2D input data.") if weights is None: if not dtype: dtype = np.int64 if data is not None: weights = np.ones(data.shape[0], dtype=dtype) else: weights = np.asarray(weights) if data is None: raise RuntimeError("Weights specified but data not.") else: if data.shape[0] != weights.shape[0]: raise RuntimeError("Different number of entries in data and weights.") if dtype: dtype = np.dtype(dtype) if dtype.kind in "iu" and weights.dtype.kind == "f": raise RuntimeError("Integer histogram requested but float weights entered.") else: dtype = weights.dtype edges_and_mask = [binning.numpy_bins_with_mask for binning in binnings] edges = [em[0] for em in edges_and_mask] masks = [em[1] for em in edges_and_mask] ixgrid = np.ix_(*masks) if data.shape[0]: frequencies, _ = np.histogramdd(data, edges, weights=weights) frequencies = frequencies.astype(dtype) frequencies = frequencies[ixgrid] missing = weights.sum() - frequencies.sum() err_freq, _ = np.histogramdd(data, edges, weights=weights ** 2) errors2 = err_freq[ixgrid].astype(dtype) else: frequencies = None missing = 0 errors2 = None return frequencies, errors2, missing
def microsoft(self, key, x86=False): node64 = if self.pi.current_is_x86() or x86 else return os.path.join(, node64, , key)
Return key in Microsoft software registry. Parameters ---------- key: str Registry key path where look. x86: str Force x86 software registry. Return ------ str: value
### Input: Return key in Microsoft software registry. Parameters ---------- key: str Registry key path where look. x86: str Force x86 software registry. Return ------ str: value ### Response: def microsoft(self, key, x86=False): node64 = if self.pi.current_is_x86() or x86 else return os.path.join(, node64, , key)
def extract_subsection(im, shape): r shape = sp.array(shape) if shape[0] < 1: shape = sp.array(im.shape) * shape center = sp.array(im.shape) / 2 s_im = [] for dim in range(im.ndim): r = shape[dim] / 2 lower_im = sp.amax((center[dim] - r, 0)) upper_im = sp.amin((center[dim] + r, im.shape[dim])) s_im.append(slice(int(lower_im), int(upper_im))) return im[tuple(s_im)]
r""" Extracts the middle section of a image Parameters ---------- im : ND-array Image from which to extract the subsection shape : array_like Can either specify the size of the extracted section or the fractional size of the image to extact. Returns ------- image : ND-array An ND-array of size given by the ``shape`` argument, taken from the center of the image. Examples -------- >>> import scipy as sp >>> from porespy.tools import extract_subsection >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]]) >>> print(im) [[1 1 1 1] [1 2 2 2] [1 2 3 3] [1 2 3 4]] >>> im = extract_subsection(im=im, shape=[2, 2]) >>> print(im) [[2 2] [2 3]]
### Input: r""" Extracts the middle section of a image Parameters ---------- im : ND-array Image from which to extract the subsection shape : array_like Can either specify the size of the extracted section or the fractional size of the image to extact. Returns ------- image : ND-array An ND-array of size given by the ``shape`` argument, taken from the center of the image. Examples -------- >>> import scipy as sp >>> from porespy.tools import extract_subsection >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]]) >>> print(im) [[1 1 1 1] [1 2 2 2] [1 2 3 3] [1 2 3 4]] >>> im = extract_subsection(im=im, shape=[2, 2]) >>> print(im) [[2 2] [2 3]] ### Response: def extract_subsection(im, shape): r shape = sp.array(shape) if shape[0] < 1: shape = sp.array(im.shape) * shape center = sp.array(im.shape) / 2 s_im = [] for dim in range(im.ndim): r = shape[dim] / 2 lower_im = sp.amax((center[dim] - r, 0)) upper_im = sp.amin((center[dim] + r, im.shape[dim])) s_im.append(slice(int(lower_im), int(upper_im))) return im[tuple(s_im)]
def get_params(self, *keys): if len(keys) == 0: return vars(self) else: return [vars(self)[k] for k in keys]
Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary
### Input: Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary ### Response: def get_params(self, *keys): if len(keys) == 0: return vars(self) else: return [vars(self)[k] for k in keys]
def set_image(self, img): with warnings.catch_warnings(): warnings.simplefilter() if hasattr(img, ): (width, height) = (img.shape[1], img.shape[0]) self._bmp = wx.BitmapFromBuffer(width, height, np.uint8(img)) elif hasattr(img, ): self._bmp = wx.BitmapFromImage(img) else: print("Unsupported image type: %s" % type(img)) return self.SetMinSize((self._bmp.GetWidth(), self._bmp.GetHeight()))
set the image to be displayed
### Input: set the image to be displayed ### Response: def set_image(self, img): with warnings.catch_warnings(): warnings.simplefilter() if hasattr(img, ): (width, height) = (img.shape[1], img.shape[0]) self._bmp = wx.BitmapFromBuffer(width, height, np.uint8(img)) elif hasattr(img, ): self._bmp = wx.BitmapFromImage(img) else: print("Unsupported image type: %s" % type(img)) return self.SetMinSize((self._bmp.GetWidth(), self._bmp.GetHeight()))
def forward(self, inputs: torch.Tensor, word_inputs: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]: if self._word_embedding is not None and word_inputs is not None: try: mask_without_bos_eos = (word_inputs > 0).long() embedded_inputs = self._word_embedding(word_inputs) type_representation, mask = add_sentence_boundary_token_ids( embedded_inputs, mask_without_bos_eos, self._bos_embedding, self._eos_embedding ) except RuntimeError: token_embedding = self._token_embedder(inputs) mask = token_embedding[] type_representation = token_embedding[] else: token_embedding = self._token_embedder(inputs) mask = token_embedding[] type_representation = token_embedding[] lstm_outputs = self._elmo_lstm(type_representation, mask) output_tensors = [ torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1) ] for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0): output_tensors.append(layer_activations.squeeze(0)) return { : output_tensors, : mask, }
Parameters ---------- inputs: ``torch.Tensor``, required. Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch. word_inputs : ``torch.Tensor``, required. If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``, which represent word ids which have been pre-cached. Returns ------- Dict with keys: ``'activations'``: ``List[torch.Tensor]`` A list of activations at each layer of the network, each of shape ``(batch_size, timesteps + 2, embedding_dim)`` ``'mask'``: ``torch.Tensor`` Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask. Note that the output tensors all include additional special begin and end of sequence markers.
### Input: Parameters ---------- inputs: ``torch.Tensor``, required. Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch. word_inputs : ``torch.Tensor``, required. If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``, which represent word ids which have been pre-cached. Returns ------- Dict with keys: ``'activations'``: ``List[torch.Tensor]`` A list of activations at each layer of the network, each of shape ``(batch_size, timesteps + 2, embedding_dim)`` ``'mask'``: ``torch.Tensor`` Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask. Note that the output tensors all include additional special begin and end of sequence markers. ### Response: def forward(self, inputs: torch.Tensor, word_inputs: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]: if self._word_embedding is not None and word_inputs is not None: try: mask_without_bos_eos = (word_inputs > 0).long() embedded_inputs = self._word_embedding(word_inputs) type_representation, mask = add_sentence_boundary_token_ids( embedded_inputs, mask_without_bos_eos, self._bos_embedding, self._eos_embedding ) except RuntimeError: token_embedding = self._token_embedder(inputs) mask = token_embedding[] type_representation = token_embedding[] else: token_embedding = self._token_embedder(inputs) mask = token_embedding[] type_representation = token_embedding[] lstm_outputs = self._elmo_lstm(type_representation, mask) output_tensors = [ torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1) ] for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0): output_tensors.append(layer_activations.squeeze(0)) return { : output_tensors, : mask, }
def merge_logs(dataset, sort=True): ourlog = {} ourlog[] = [] for d in dataset: ourlog[] = ourlog[] + d[] if sort: ourlog[].sort(key= lambda x: x[]) return ourlog
Merge log dictionaries together into one log dictionary
### Input: Merge log dictionaries together into one log dictionary ### Response: def merge_logs(dataset, sort=True): ourlog = {} ourlog[] = [] for d in dataset: ourlog[] = ourlog[] + d[] if sort: ourlog[].sort(key= lambda x: x[]) return ourlog
def create_zone(args): action = True password = get_password(args) token = connect.get_token(args.username, password, args.server) domain = args.domain template = args.domain.replace(, ) master = None dnsaddr = args.dnsaddr if args.__dict__.get(): dtype = master = dnsaddr elif args.__dict__.get(): dtype = else: dtype = o = JSONConverter(domain) o.generate_template(domain, dnsaddr, desc=) processing.create_template(args.server, token, template, o.record) processing.create_zone(args.server, token, domain, template, dtype, master) processing.delete_template(args.server, token, template)
Create zone. Argument: args: arguments object
### Input: Create zone. Argument: args: arguments object ### Response: def create_zone(args): action = True password = get_password(args) token = connect.get_token(args.username, password, args.server) domain = args.domain template = args.domain.replace(, ) master = None dnsaddr = args.dnsaddr if args.__dict__.get(): dtype = master = dnsaddr elif args.__dict__.get(): dtype = else: dtype = o = JSONConverter(domain) o.generate_template(domain, dnsaddr, desc=) processing.create_template(args.server, token, template, o.record) processing.create_zone(args.server, token, domain, template, dtype, master) processing.delete_template(args.server, token, template)
def info(ctx): controller = ctx.obj[] if controller.is_fips: click.echo(.format( if controller.is_in_fips_mode else )) else: if controller.has_pin: try: click.echo( .format( controller.get_pin_retries())) except CtapError as e: if e.code == CtapError.ERR.PIN_BLOCKED: click.echo() else: click.echo()
Display status of FIDO2 application.
### Input: Display status of FIDO2 application. ### Response: def info(ctx): controller = ctx.obj[] if controller.is_fips: click.echo(.format( if controller.is_in_fips_mode else )) else: if controller.has_pin: try: click.echo( .format( controller.get_pin_retries())) except CtapError as e: if e.code == CtapError.ERR.PIN_BLOCKED: click.echo() else: click.echo()
def resize(self, size): result = self._client.post(.format(Volume.api_endpoint, model=self, data={ "size": size })) self._populate(result.json) return True
Resizes this Volume
### Input: Resizes this Volume ### Response: def resize(self, size): result = self._client.post(.format(Volume.api_endpoint, model=self, data={ "size": size })) self._populate(result.json) return True
def send_message(self, msg_dict): with self._writer_lock: try: self.output_stream.flush() self.output_stream.write(self.serialize_dict(msg_dict)) self.output_stream.flush() except IOError: raise StormWentAwayError() except: log.exception("Failed to send message: %r", msg_dict)
Serialize a message dictionary and write it to the output stream.
### Input: Serialize a message dictionary and write it to the output stream. ### Response: def send_message(self, msg_dict): with self._writer_lock: try: self.output_stream.flush() self.output_stream.write(self.serialize_dict(msg_dict)) self.output_stream.flush() except IOError: raise StormWentAwayError() except: log.exception("Failed to send message: %r", msg_dict)
def convert_ensembl_to_entrez(self, ensembl): if in ensembl: pass else: raise (IndexError) server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format( ensembl) r = requests.get(server, headers={"Content-Type": "text/xml"}) if not r.ok: r.raise_for_status() sys.exit() response = r.text info = xmltodict.parse(response) try: geneId = info[][][] except TypeError: raise (TypeError) return geneId
Convert Ensembl Id to Entrez Gene Id
### Input: Convert Ensembl Id to Entrez Gene Id ### Response: def convert_ensembl_to_entrez(self, ensembl): if in ensembl: pass else: raise (IndexError) server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format( ensembl) r = requests.get(server, headers={"Content-Type": "text/xml"}) if not r.ok: r.raise_for_status() sys.exit() response = r.text info = xmltodict.parse(response) try: geneId = info[][][] except TypeError: raise (TypeError) return geneId
def init_nova_consumer(self, mq): if not self.enable_component_notification(Openstack.Nova): log.debug("disable listening nova notification") return for i in range(self.config.nova_mq_consumer_count): mq.create_consumer(self.config.nova_mq_exchange, self.config.nova_mq_queue, ProcessFactory.process(Openstack.Nova)) log.debug("enable listening openstack nova notification.")
Init openstack nova mq 1. Check if enable listening nova notification 2. Create consumer :param mq: class ternya.mq.MQ
### Input: Init openstack nova mq 1. Check if enable listening nova notification 2. Create consumer :param mq: class ternya.mq.MQ ### Response: def init_nova_consumer(self, mq): if not self.enable_component_notification(Openstack.Nova): log.debug("disable listening nova notification") return for i in range(self.config.nova_mq_consumer_count): mq.create_consumer(self.config.nova_mq_exchange, self.config.nova_mq_queue, ProcessFactory.process(Openstack.Nova)) log.debug("enable listening openstack nova notification.")
def replace_file_content(filepath, old, new, max=1): with open(filepath, ) as f: content = f.read() content = content.replace(old, new, max) with open(filepath, ) as f: f.write(content)
Modify the content of `filepath`, replacing `old` for `new`. Parameters ---------- filepath: str Path to the file to be modified. It will be overwritten. old: str This is old substring to be replaced. new: str This is new substring, which would replace old substring. max: int If larger than 0, Only the first `max` occurrences are replaced.
### Input: Modify the content of `filepath`, replacing `old` for `new`. Parameters ---------- filepath: str Path to the file to be modified. It will be overwritten. old: str This is old substring to be replaced. new: str This is new substring, which would replace old substring. max: int If larger than 0, Only the first `max` occurrences are replaced. ### Response: def replace_file_content(filepath, old, new, max=1): with open(filepath, ) as f: content = f.read() content = content.replace(old, new, max) with open(filepath, ) as f: f.write(content)
def on_button_release(self, event): affected_models = {} for inmotion in self._movable_items: inmotion.move((event.x, event.y)) rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item, inmotion.item.handles()[NW]) if isinstance(inmotion.item, StateView): state_v = inmotion.item state_m = state_v.model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()[] != rel_pos: state_m.set_meta_data_editor(, rel_pos) affected_models[state_m] = ("position", True, state_v) elif isinstance(inmotion.item, NameView): state_v = inmotion.item state_m = self.view.canvas.get_parent(state_v).model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()[][] != rel_pos: state_m.set_meta_data_editor(, rel_pos) affected_models[state_m] = ("name_position", False, state_v) elif isinstance(inmotion.item, TransitionView): transition_v = inmotion.item transition_m = transition_v.model self.view.canvas.request_update(transition_v) current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v) old_waypoints = transition_m.get_meta_data_editor()[] if current_waypoints != old_waypoints: transition_m.set_meta_data_editor(, current_waypoints) affected_models[transition_m] = ("waypoints", False, transition_v) if len(affected_models) == 1: model = next(iter(affected_models)) change, affects_children, view = affected_models[model] self.view.graphical_editor.emit(, model, change, affects_children) elif len(affected_models) > 1: common_parents = None for change, affects_children, view in affected_models.values(): parents_of_view = set(self.view.canvas.get_ancestors(view)) if common_parents is None: common_parents = parents_of_view else: common_parents = common_parents.intersection(parents_of_view) assert len(common_parents) > 0, "The selected elements do not have common parent element" for state_v in common_parents: children_of_state_v = self.view.canvas.get_all_children(state_v) if any(common_parent in children_of_state_v for 
common_parent in common_parents): continue self.view.graphical_editor.emit(, state_v.model, "positions", True) break if not affected_models and self._old_selection is not None: self.view.unselect_all() self.view.select_item(self._old_selection) self.view.handle_new_selection(self._item) self._move_name_v = False self._old_selection = None return super(MoveItemTool, self).on_button_release(event)
Write back changes If one or more items have been moved, the new position are stored in the corresponding meta data and a signal notifying the change is emitted. :param event: The button event
### Input: Write back changes If one or more items have been moved, the new position are stored in the corresponding meta data and a signal notifying the change is emitted. :param event: The button event ### Response: def on_button_release(self, event): affected_models = {} for inmotion in self._movable_items: inmotion.move((event.x, event.y)) rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item, inmotion.item.handles()[NW]) if isinstance(inmotion.item, StateView): state_v = inmotion.item state_m = state_v.model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()[] != rel_pos: state_m.set_meta_data_editor(, rel_pos) affected_models[state_m] = ("position", True, state_v) elif isinstance(inmotion.item, NameView): state_v = inmotion.item state_m = self.view.canvas.get_parent(state_v).model self.view.canvas.request_update(state_v) if state_m.get_meta_data_editor()[][] != rel_pos: state_m.set_meta_data_editor(, rel_pos) affected_models[state_m] = ("name_position", False, state_v) elif isinstance(inmotion.item, TransitionView): transition_v = inmotion.item transition_m = transition_v.model self.view.canvas.request_update(transition_v) current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v) old_waypoints = transition_m.get_meta_data_editor()[] if current_waypoints != old_waypoints: transition_m.set_meta_data_editor(, current_waypoints) affected_models[transition_m] = ("waypoints", False, transition_v) if len(affected_models) == 1: model = next(iter(affected_models)) change, affects_children, view = affected_models[model] self.view.graphical_editor.emit(, model, change, affects_children) elif len(affected_models) > 1: common_parents = None for change, affects_children, view in affected_models.values(): parents_of_view = set(self.view.canvas.get_ancestors(view)) if common_parents is None: common_parents = parents_of_view else: common_parents = common_parents.intersection(parents_of_view) assert 
len(common_parents) > 0, "The selected elements do not have common parent element" for state_v in common_parents: children_of_state_v = self.view.canvas.get_all_children(state_v) if any(common_parent in children_of_state_v for common_parent in common_parents): continue self.view.graphical_editor.emit(, state_v.model, "positions", True) break if not affected_models and self._old_selection is not None: self.view.unselect_all() self.view.select_item(self._old_selection) self.view.handle_new_selection(self._item) self._move_name_v = False self._old_selection = None return super(MoveItemTool, self).on_button_release(event)
def static_singleton(*args, **kwargs): def __static_singleton_wrapper(cls): if cls not in __singleton_instances: __singleton_instances[cls] = cls(*args, **kwargs) return __singleton_instances[cls] return __static_singleton_wrapper
STATIC Singleton Design Pattern Decorator Class is initialized with arguments passed into the decorator. :Usage: >>> @static_singleton('yop') class Bob(Person): def __init__(arg1): self.info = arg1 def says(self): print self.info b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance b2 = Bob #here b1 is the same object as b2 Bob.says() # it will display 'yop'
### Input: STATIC Singleton Design Pattern Decorator Class is initialized with arguments passed into the decorator. :Usage: >>> @static_singleton('yop') class Bob(Person): def __init__(arg1): self.info = arg1 def says(self): print self.info b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance b2 = Bob #here b1 is the same object as b2 Bob.says() # it will display 'yop' ### Response: def static_singleton(*args, **kwargs): def __static_singleton_wrapper(cls): if cls not in __singleton_instances: __singleton_instances[cls] = cls(*args, **kwargs) return __singleton_instances[cls] return __static_singleton_wrapper
async def import_wallet(self, indy_config: dict, path: str, access: str = None) -> None: LOGGER.debug(, indy_config, path) try: await wallet.import_wallet( json.dumps(indy_config), json.dumps({: access or self.default_access}), json.dumps({: path, : access or self.default_access})) except IndyError as x_indy: if x_indy.error_code == ErrorCode.CommonInvalidStructure: LOGGER.debug( , indy_config.get(, )) raise BadAccess(.format(indy_config.get(, ))) LOGGER.debug( , x_indy.error_code, indy_config.get(, )) raise LOGGER.debug()
Import a VON anchor wallet. Raise BadAccess on bad access credential value. :param indy_config: indy wallet configuration to use, with: - 'id' - 'storage_type' (optional) - 'storage_config' (optional) :param path: path from which to import wallet file :param access: indy access credentials value (default value from wallet manager)
### Input: Import a VON anchor wallet. Raise BadAccess on bad access credential value. :param indy_config: indy wallet configuration to use, with: - 'id' - 'storage_type' (optional) - 'storage_config' (optional) :param path: path from which to import wallet file :param access: indy access credentials value (default value from wallet manager) ### Response: async def import_wallet(self, indy_config: dict, path: str, access: str = None) -> None: LOGGER.debug(, indy_config, path) try: await wallet.import_wallet( json.dumps(indy_config), json.dumps({: access or self.default_access}), json.dumps({: path, : access or self.default_access})) except IndyError as x_indy: if x_indy.error_code == ErrorCode.CommonInvalidStructure: LOGGER.debug( , indy_config.get(, )) raise BadAccess(.format(indy_config.get(, ))) LOGGER.debug( , x_indy.error_code, indy_config.get(, )) raise LOGGER.debug()
def count(self, *args, **kwargs): search = self.create_search(*args, **kwargs) try: return search.count() except NotFoundError: print_error("The index was not found, have you initialized the index?") except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch")
Returns the number of results after filtering with the given arguments.
### Input: Returns the number of results after filtering with the given arguments. ### Response: def count(self, *args, **kwargs): search = self.create_search(*args, **kwargs) try: return search.count() except NotFoundError: print_error("The index was not found, have you initialized the index?") except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch")
def colorbrewer2_url(self): url = return url.format(self.type.lower(), self.name, self.number)
URL that can be used to view the color map at colorbrewer2.org.
### Input: URL that can be used to view the color map at colorbrewer2.org. ### Response: def colorbrewer2_url(self): url = return url.format(self.type.lower(), self.name, self.number)
def win_find_exe(filename, installsubdir=None, env="ProgramFiles"): for fn in [filename, filename+".exe"]: try: if installsubdir is None: path = _where(fn) else: path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)]) except IOError: path = filename else: break return path
Find executable in current dir, system path or given ProgramFiles subdir
### Input: Find executable in current dir, system path or given ProgramFiles subdir ### Response: def win_find_exe(filename, installsubdir=None, env="ProgramFiles"): for fn in [filename, filename+".exe"]: try: if installsubdir is None: path = _where(fn) else: path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)]) except IOError: path = filename else: break return path
def _Bound_TP(T, P): region = None if 1073.15 < T <= 2273.15 and Pmin <= P <= 50: region = 5 elif Pmin <= P <= Ps_623: Tsat = _TSat_P(P) if 273.15 <= T <= Tsat: region = 1 elif Tsat < T <= 1073.15: region = 2 elif Ps_623 < P <= 100: T_b23 = _t_P(P) if 273.15 <= T <= 623.15: region = 1 elif 623.15 < T < T_b23: region = 3 elif T_b23 <= T <= 1073.15: region = 2 return region
Region definition for input T and P Parameters ---------- T : float Temperature, [K] P : float Pressure, [MPa] Returns ------- region : float IAPWS-97 region code References ---------- Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer, 2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.3
### Input: Region definition for input T and P Parameters ---------- T : float Temperature, [K] P : float Pressure, [MPa] Returns ------- region : float IAPWS-97 region code References ---------- Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer, 2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.3 ### Response: def _Bound_TP(T, P): region = None if 1073.15 < T <= 2273.15 and Pmin <= P <= 50: region = 5 elif Pmin <= P <= Ps_623: Tsat = _TSat_P(P) if 273.15 <= T <= Tsat: region = 1 elif Tsat < T <= 1073.15: region = 2 elif Ps_623 < P <= 100: T_b23 = _t_P(P) if 273.15 <= T <= 623.15: region = 1 elif 623.15 < T < T_b23: region = 3 elif T_b23 <= T <= 1073.15: region = 2 return region
def from_file(cls, filepath): for i in range(len(filepath)): if filepath[i:] in abi_extensions(): ext = filepath[i:] break else: raise ValueError("Cannot detect abinit extension in %s" % filepath) return cls(ext, filepath)
Build a :class:`Product` instance from a filepath.
### Input: Build a :class:`Product` instance from a filepath. ### Response: def from_file(cls, filepath): for i in range(len(filepath)): if filepath[i:] in abi_extensions(): ext = filepath[i:] break else: raise ValueError("Cannot detect abinit extension in %s" % filepath) return cls(ext, filepath)
def add_catalogue(self, catalogue): assert isinstance(catalogue, MessageCatalogue) if catalogue.locale != self.locale: raise ValueError( % (catalogue.locale, self.locale)) for domain, messages in list(catalogue.all().items()): self.add(messages, domain) for resource in catalogue.resources: self.add_resource(resource)
Merges translations from the given Catalogue into the current one. The two catalogues must have the same locale. @type id: The @param id: message id
### Input: Merges translations from the given Catalogue into the current one. The two catalogues must have the same locale. @type id: The @param id: message id ### Response: def add_catalogue(self, catalogue): assert isinstance(catalogue, MessageCatalogue) if catalogue.locale != self.locale: raise ValueError( % (catalogue.locale, self.locale)) for domain, messages in list(catalogue.all().items()): self.add(messages, domain) for resource in catalogue.resources: self.add_resource(resource)
def put_key(key_name, value, description, meta, modify, add, lock, key_type, stash, passphrase, backend): stash = _get_stash(backend, stash, passphrase) try: click.echo(.format(key_type)) stash.put( name=key_name, value=_build_dict_from_key_value(value), modify=modify, metadata=_build_dict_from_key_value(meta), description=description, lock=lock, key_type=key_type, add=add) click.echo() except GhostError as ex: sys.exit(ex)
Insert a key to the stash `KEY_NAME` is the name of the key to insert `VALUE` is a key=value argument which can be provided multiple times. it is the encrypted value of your key
### Input: Insert a key to the stash `KEY_NAME` is the name of the key to insert `VALUE` is a key=value argument which can be provided multiple times. it is the encrypted value of your key ### Response: def put_key(key_name, value, description, meta, modify, add, lock, key_type, stash, passphrase, backend): stash = _get_stash(backend, stash, passphrase) try: click.echo(.format(key_type)) stash.put( name=key_name, value=_build_dict_from_key_value(value), modify=modify, metadata=_build_dict_from_key_value(meta), description=description, lock=lock, key_type=key_type, add=add) click.echo() except GhostError as ex: sys.exit(ex)
def try_eval(self): item = self.symbol.item if isinstance(item, int): return item if isinstance(item, Label): if item.defined: if isinstance(item.value, Expr): return item.value.try_eval() else: return item.value else: if Expr.ignore: return None error(self.symbol.lineno, "Undefined label " % item.name) return None try: if isinstance(item, tuple): return tuple([x.try_eval() for x in item]) if isinstance(item, list): return [x.try_eval() for x in item] if item == and len(self.children) == 1: return -self.left.try_eval() try: return self.funct[item](self.left.try_eval(), self.right.try_eval()) except ZeroDivisionError: error(self.symbol.lineno, ) except KeyError: pass except TypeError: pass return None
Recursively evals the node. Returns None if it is still unresolved.
### Input: Recursively evals the node. Returns None if it is still unresolved. ### Response: def try_eval(self): item = self.symbol.item if isinstance(item, int): return item if isinstance(item, Label): if item.defined: if isinstance(item.value, Expr): return item.value.try_eval() else: return item.value else: if Expr.ignore: return None error(self.symbol.lineno, "Undefined label " % item.name) return None try: if isinstance(item, tuple): return tuple([x.try_eval() for x in item]) if isinstance(item, list): return [x.try_eval() for x in item] if item == and len(self.children) == 1: return -self.left.try_eval() try: return self.funct[item](self.left.try_eval(), self.right.try_eval()) except ZeroDivisionError: error(self.symbol.lineno, ) except KeyError: pass except TypeError: pass return None
def get_catalogs_by_ids(self, *args, **kwargs): catalogs = self._get_provider_session().get_catalogs_by_ids(*args, **kwargs) cat_list = [] for cat in catalogs: cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy)) return CatalogList(cat_list)
Pass through to provider CatalogLookupSession.get_catalogs_by_ids
### Input: Pass through to provider CatalogLookupSession.get_catalogs_by_ids ### Response: def get_catalogs_by_ids(self, *args, **kwargs): catalogs = self._get_provider_session().get_catalogs_by_ids(*args, **kwargs) cat_list = [] for cat in catalogs: cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy)) return CatalogList(cat_list)
def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5, center=True, scale=True, **unused_kwargs): mean = np.mean(x, axis, keepdims=True) m1 = np.mean(x**2, axis, keepdims=True) var = m1 - mean**2 z = (x - mean) / np.sqrt(var + epsilon) beta, gamma = params ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x))) beta = beta[ed] gamma = gamma[ed] if center and scale: return gamma * z + beta if center: return z + beta if scale: return gamma * z return z
Layer construction function for a batch normalization layer.
### Input: Layer construction function for a batch normalization layer. ### Response: def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5, center=True, scale=True, **unused_kwargs): mean = np.mean(x, axis, keepdims=True) m1 = np.mean(x**2, axis, keepdims=True) var = m1 - mean**2 z = (x - mean) / np.sqrt(var + epsilon) beta, gamma = params ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x))) beta = beta[ed] gamma = gamma[ed] if center and scale: return gamma * z + beta if center: return z + beta if scale: return gamma * z return z
def list_namespaced_role_binding(self, namespace, **kwargs): kwargs[] = True if kwargs.get(): return self.list_namespaced_role_binding_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_role_binding_with_http_info(namespace, **kwargs) return data
list or watch objects of kind RoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_role_binding(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1RoleBindingList If the method is called asynchronously, returns the request thread.
### Input: list or watch objects of kind RoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_role_binding(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1RoleBindingList If the method is called asynchronously, returns the request thread. ### Response: def list_namespaced_role_binding(self, namespace, **kwargs): kwargs[] = True if kwargs.get(): return self.list_namespaced_role_binding_with_http_info(namespace, **kwargs) else: (data) = self.list_namespaced_role_binding_with_http_info(namespace, **kwargs) return data
def parse_frequency(variant, info_key): raw_annotation = variant.INFO.get(info_key) raw_annotation = None if raw_annotation == else raw_annotation frequency = float(raw_annotation) if raw_annotation else None return frequency
Parse any frequency from the info dict Args: variant(cyvcf2.Variant) info_key(str) Returns: frequency(float): or None if frequency does not exist
### Input: Parse any frequency from the info dict Args: variant(cyvcf2.Variant) info_key(str) Returns: frequency(float): or None if frequency does not exist ### Response: def parse_frequency(variant, info_key): raw_annotation = variant.INFO.get(info_key) raw_annotation = None if raw_annotation == else raw_annotation frequency = float(raw_annotation) if raw_annotation else None return frequency
def to_alu_hlu_map(input_str): ret = {} if input_str is not None: pattern = re.compile(r) for line in input_str.split(): line = line.strip() if len(line) == 0: continue matched = re.search(pattern, line) if matched is None or len(matched.groups()) < 2: continue else: hlu = matched.group(1) alu = matched.group(2) ret[int(alu)] = int(hlu) return ret
Converter for alu hlu map Convert following input into a alu -> hlu map: Sample input: ``` HLU Number ALU Number ---------- ---------- 0 12 1 23 ``` ALU stands for array LUN number hlu stands for host LUN number :param input_str: raw input from naviseccli :return: alu -> hlu map
### Input: Converter for alu hlu map Convert following input into a alu -> hlu map: Sample input: ``` HLU Number ALU Number ---------- ---------- 0 12 1 23 ``` ALU stands for array LUN number hlu stands for host LUN number :param input_str: raw input from naviseccli :return: alu -> hlu map ### Response: def to_alu_hlu_map(input_str): ret = {} if input_str is not None: pattern = re.compile(r) for line in input_str.split(): line = line.strip() if len(line) == 0: continue matched = re.search(pattern, line) if matched is None or len(matched.groups()) < 2: continue else: hlu = matched.group(1) alu = matched.group(2) ret[int(alu)] = int(hlu) return ret
def _Bound_Ph(P, h): region = None if Pmin <= P <= Ps_623: h14 = _Region1(_TSat_P(P), P)["h"] h24 = _Region2(_TSat_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmin = _Region1(273.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h14: region = 1 elif h14 < h < h24: region = 4 elif h24 <= h <= h25: region = 2 elif h25 < h <= hmax: region = 5 elif Ps_623 < P < Pc: hmin = _Region1(273.15, P)["h"] h13 = _Region1(623.15, P)["h"] h32 = _Region2(_t_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h13: region = 1 elif h13 < h < h32: try: p34 = _PSat_h(h) except NotImplementedError: p34 = Pc if P < p34: region = 4 else: region = 3 elif h32 <= h <= h25: region = 2 elif h25 < h <= hmax: region = 5 elif Pc <= P <= 100: hmin = _Region1(273.15, P)["h"] h13 = _Region1(623.15, P)["h"] h32 = _Region2(_t_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h13: region = 1 elif h13 < h < h32: region = 3 elif h32 <= h <= h25: region = 2 elif P <= 50 and h25 <= h <= hmax: region = 5 return region
Region definition for input P y h Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- region : float IAPWS-97 region code References ---------- Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer, 2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5
### Input: Region definition for input P y h Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- region : float IAPWS-97 region code References ---------- Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer, 2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5 ### Response: def _Bound_Ph(P, h): region = None if Pmin <= P <= Ps_623: h14 = _Region1(_TSat_P(P), P)["h"] h24 = _Region2(_TSat_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmin = _Region1(273.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h14: region = 1 elif h14 < h < h24: region = 4 elif h24 <= h <= h25: region = 2 elif h25 < h <= hmax: region = 5 elif Ps_623 < P < Pc: hmin = _Region1(273.15, P)["h"] h13 = _Region1(623.15, P)["h"] h32 = _Region2(_t_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h13: region = 1 elif h13 < h < h32: try: p34 = _PSat_h(h) except NotImplementedError: p34 = Pc if P < p34: region = 4 else: region = 3 elif h32 <= h <= h25: region = 2 elif h25 < h <= hmax: region = 5 elif Pc <= P <= 100: hmin = _Region1(273.15, P)["h"] h13 = _Region1(623.15, P)["h"] h32 = _Region2(_t_P(P), P)["h"] h25 = _Region2(1073.15, P)["h"] hmax = _Region5(2273.15, P)["h"] if hmin <= h <= h13: region = 1 elif h13 < h < h32: region = 3 elif h32 <= h <= h25: region = 2 elif P <= 50 and h25 <= h <= hmax: region = 5 return region
def prepare_value(self, value): if value is None and self.required: choices =list(self.choices) if len(choices) == 1: value = choices[0][0] return super(TemplateChoiceField, self).prepare_value(value)
To avoid evaluating the lazysorted callable more than necessary to establish a potential initial value for the field, we do it here. If there's - only one template choice, and - the field is required, and - there's no prior initial set (either by being bound or by being set higher up the stack then forcibly select the only "good" value as the default.
### Input: To avoid evaluating the lazysorted callable more than necessary to establish a potential initial value for the field, we do it here. If there's - only one template choice, and - the field is required, and - there's no prior initial set (either by being bound or by being set higher up the stack then forcibly select the only "good" value as the default. ### Response: def prepare_value(self, value): if value is None and self.required: choices =list(self.choices) if len(choices) == 1: value = choices[0][0] return super(TemplateChoiceField, self).prepare_value(value)
def _rewrite_geometry(geom, new_lines): new_geom = [] x = 0 y = 0 for line in new_lines: moveto, endsat, lineto_cmds = line dx = moveto.x - x dy = moveto.y - y x = endsat.x y = endsat.y new_geom.append(9) new_geom.append(zigzag(dx)) new_geom.append(zigzag(dy)) new_geom.extend(lineto_cmds) del geom[:] geom.extend(new_geom)
Re-encode a list of Lines with absolute MoveTos as a continuous stream of MVT geometry commands, each relative to the last. Replace geom with that stream.
### Input: Re-encode a list of Lines with absolute MoveTos as a continuous stream of MVT geometry commands, each relative to the last. Replace geom with that stream. ### Response: def _rewrite_geometry(geom, new_lines): new_geom = [] x = 0 y = 0 for line in new_lines: moveto, endsat, lineto_cmds = line dx = moveto.x - x dy = moveto.y - y x = endsat.x y = endsat.y new_geom.append(9) new_geom.append(zigzag(dx)) new_geom.append(zigzag(dy)) new_geom.extend(lineto_cmds) del geom[:] geom.extend(new_geom)
def is_default_port(self): if self.port is None: return False default = DEFAULT_PORTS.get(self.scheme) if default is None: return False return self.port == default
A check for default port. Return True if port is default for specified scheme, e.g. 'http://python.org' or 'http://python.org:80', False otherwise.
### Input: A check for default port. Return True if port is default for specified scheme, e.g. 'http://python.org' or 'http://python.org:80', False otherwise. ### Response: def is_default_port(self): if self.port is None: return False default = DEFAULT_PORTS.get(self.scheme) if default is None: return False return self.port == default
def register_filter(self, attr_name, filterimage_cls): if attr_name.startswith(): raise UnallowedFilterName( % attr_name ) if not issubclass(filterimage_cls, FilteredImage): raise InvalidFilteredImageSubclass( ) if attr_name in self._filter_registry: raise AlreadyRegistered( % attr_name ) else: self._filter_registry[attr_name] = filterimage_cls
Register a new FilteredImage subclass (`filterimage_cls`). To be used via the attribute (filters.`attr_name`)
### Input: Register a new FilteredImage subclass (`filterimage_cls`). To be used via the attribute (filters.`attr_name`) ### Response: def register_filter(self, attr_name, filterimage_cls): if attr_name.startswith(): raise UnallowedFilterName( % attr_name ) if not issubclass(filterimage_cls, FilteredImage): raise InvalidFilteredImageSubclass( ) if attr_name in self._filter_registry: raise AlreadyRegistered( % attr_name ) else: self._filter_registry[attr_name] = filterimage_cls
def _character_matches(name1, name2): if name1[0] == "*": for i in range(len(name2) + 1): yield 1, i if name2[0] == "*": for i in range(len(name1) + 1): yield i, 1 if name1[0] == name2[0]: yield 1, 1
Yield the number of characters that match the beginning of each string.
### Input: Yield the number of characters that match the beginning of each string. ### Response: def _character_matches(name1, name2): if name1[0] == "*": for i in range(len(name2) + 1): yield 1, i if name2[0] == "*": for i in range(len(name1) + 1): yield i, 1 if name1[0] == name2[0]: yield 1, 1
def create_log(self): return EventLog( self.networkapi_url, self.user, self.password, self.user_ldap)
Get an instance of log services facade.
### Input: Get an instance of log services facade. ### Response: def create_log(self): return EventLog( self.networkapi_url, self.user, self.password, self.user_ldap)
def wrap_as_node(self, func): name = self.get_name(func) @wraps(func) def wrapped(*args, **kwargs): message = self.get_message_from_call(*args, **kwargs) self.logger.info(, name, message) result = func(message) if isinstance(result, GeneratorType): results = [ self.wrap_result(name, item) for item in result if item is not NoResult ] self.logger.debug( , func, len(results) ) [self.route(name, item) for item in results] return tuple(results) else: if result is NoResult: return result result = self.wrap_result(name, result) self.logger.debug( , func, result ) self.route(name, result) return result return wrapped
wrap a function as a node
### Input: wrap a function as a node ### Response: def wrap_as_node(self, func): name = self.get_name(func) @wraps(func) def wrapped(*args, **kwargs): message = self.get_message_from_call(*args, **kwargs) self.logger.info(, name, message) result = func(message) if isinstance(result, GeneratorType): results = [ self.wrap_result(name, item) for item in result if item is not NoResult ] self.logger.debug( , func, len(results) ) [self.route(name, item) for item in results] return tuple(results) else: if result is NoResult: return result result = self.wrap_result(name, result) self.logger.debug( , func, result ) self.route(name, result) return result return wrapped
def decrypt(self, ciphertext): plaintext = self._rx_tinh.dec(ciphertext) if plaintext is None: logger.error() raise s_exc.CryptoErr(mesg=) seqn = next(self._rx_sn) sn, mesg = s_msgpack.un(plaintext) if sn != seqn: logger.error(, sn, seqn) raise s_exc.CryptoErr(mesg=, expected=seqn, got=sn) return mesg
Decrypt a message, validating its sequence number is as we expect. Args: ciphertext (bytes): The message to decrypt and verify. Returns: mesg: A mesg. Raises: s_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected.
### Input: Decrypt a message, validating its sequence number is as we expect. Args: ciphertext (bytes): The message to decrypt and verify. Returns: mesg: A mesg. Raises: s_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected. ### Response: def decrypt(self, ciphertext): plaintext = self._rx_tinh.dec(ciphertext) if plaintext is None: logger.error() raise s_exc.CryptoErr(mesg=) seqn = next(self._rx_sn) sn, mesg = s_msgpack.un(plaintext) if sn != seqn: logger.error(, sn, seqn) raise s_exc.CryptoErr(mesg=, expected=seqn, got=sn) return mesg
def VectorLen(self, off): N.enforce_number(off, N.UOffsetTFlags) off += self.Pos off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return ret
VectorLen retrieves the length of the vector whose offset is stored at "off" in this object.
### Input: VectorLen retrieves the length of the vector whose offset is stored at "off" in this object. ### Response: def VectorLen(self, off): N.enforce_number(off, N.UOffsetTFlags) off += self.Pos off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return ret
def format(self, vertices): index = .join(str(vertices[vn].index) for vn in self.vnames) vcom = .join(self.vnames) return \ .format( index, self.interVertex, self.name, vcom)
Format instance to dump vertices is dict of name to Vertex
### Input: Format instance to dump vertices is dict of name to Vertex ### Response: def format(self, vertices): index = .join(str(vertices[vn].index) for vn in self.vnames) vcom = .join(self.vnames) return \ .format( index, self.interVertex, self.name, vcom)
def _set_permissions(zip_file_info, extracted_path): permission = zip_file_info.external_attr >> 16 if not permission: LOG.debug("File %s in zipfile does not have permission information", zip_file_info.filename) return os.chmod(extracted_path, permission)
Sets permissions on the extracted file by reading the ``external_attr`` property of given file info. Parameters ---------- zip_file_info : zipfile.ZipInfo Object containing information about a file within a zip archive extracted_path : str Path where the file has been extracted to
### Input: Sets permissions on the extracted file by reading the ``external_attr`` property of given file info. Parameters ---------- zip_file_info : zipfile.ZipInfo Object containing information about a file within a zip archive extracted_path : str Path where the file has been extracted to ### Response: def _set_permissions(zip_file_info, extracted_path): permission = zip_file_info.external_attr >> 16 if not permission: LOG.debug("File %s in zipfile does not have permission information", zip_file_info.filename) return os.chmod(extracted_path, permission)
def get_feature_names(self, features=None): if features: return self.feature_table.get_ordered_names(features) else: return self.feature_table.feature_names
Returns names of features. If features is None, returns all features. Otherwise assumes the user is trying to find the order of the features.
### Input: Returns names of features. If features is None, returns all features. Otherwise assumes the user is trying to find the order of the features. ### Response: def get_feature_names(self, features=None): if features: return self.feature_table.get_ordered_names(features) else: return self.feature_table.feature_names
def relpath_for(self, path): if self.parent_dir in (".", ""): return path if path == self.parent_dir: return "" dirname = os.path.dirname(path) or "." basename = os.path.basename(path) cached = self.relpath_cache.get(dirname, empty) if cached is empty: cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir) return os.path.join(cached, basename)
Find the relative path from here from the parent_dir
### Input: Find the relative path from here from the parent_dir ### Response: def relpath_for(self, path): if self.parent_dir in (".", ""): return path if path == self.parent_dir: return "" dirname = os.path.dirname(path) or "." basename = os.path.basename(path) cached = self.relpath_cache.get(dirname, empty) if cached is empty: cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir) return os.path.join(cached, basename)
def server_to_dict(server): soul = server.soul return { A.server.ID: server.href, A.server.PUBLIC_IPS: soul.get(, []), A.server.PRIVATE_IPS: soul.get(, []), }
Returns the :class:`dict` representation of a server object. The returned :class:`dict` is meant to be consumed by :class:`~bang.deployers.cloud.ServerDeployer` objects.
### Input: Returns the :class:`dict` representation of a server object. The returned :class:`dict` is meant to be consumed by :class:`~bang.deployers.cloud.ServerDeployer` objects. ### Response: def server_to_dict(server): soul = server.soul return { A.server.ID: server.href, A.server.PUBLIC_IPS: soul.get(, []), A.server.PRIVATE_IPS: soul.get(, []), }
def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None, sigma_fac=1, evaluations=1, aggregation=np.median, kappa=1): popsize = self.sp.popsize if number is not None: popsize = number selective_mirroring = self.opts[] > 0 nmirrors = self.sp.lam_mirr if popsize != self.sp.popsize: nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize) assert new_injections or self.opts[] < 2 if new_injections and self.opts[] != 1: nmirrors = 0 assert nmirrors <= popsize // 2 self.mirrors_idx = np.arange(nmirrors) self.mirrors_rejected_idx = [] is_feasible = self.opts[] fit = [] X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args) if xmean is None: xmean = self.mean X = [] for k in xrange(int(popsize)): x, f = X_first.pop(0), None rejected = -1 while rejected < 0 or not is_feasible(x, f): rejected += 1 if rejected: x = self.ask(1, xmean, sigma_fac)[0] elif k >= popsize - nmirrors: if k == popsize - nmirrors and selective_mirroring: self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1] x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]]) if rejected == 1 and k >= popsize - nmirrors: self.mirrors_rejected_idx.append(k) length_normalizer = 1 f = func(x, *args) if kappa == 1 else \ func(xmean + kappa * length_normalizer * (x - xmean), *args) if is_feasible(x, f) and evaluations > 1: f = aggregation([f] + [(func(x, *args) if kappa == 1 else func(xmean + kappa * length_normalizer * (x - xmean), *args)) for _i in xrange(int(evaluations - 1))]) if rejected + 1 % 1000 == 0: print( % (rejected, self.countiter)) fit.append(f) X.append(x) self.evaluations_per_f_value = int(evaluations) return X, fit
samples `number` solutions and evaluates them on `func`, where each solution `s` is resampled until ``self.is_feasible(s, func(s)) is True``. Arguments --------- `func` objective function, ``func(x)`` returns a scalar `args` additional parameters for `func` `gradf` gradient of objective function, ``g = gradf(x, *args)`` must satisfy ``len(g) == len(x)`` `number` number of solutions to be sampled, by default population size ``popsize`` (AKA lambda) `xmean` mean for sampling the solutions, by default ``self.mean``. `sigma_fac` multiplier for sampling width, standard deviation, for example to get a small perturbation of solution `xmean` `evaluations` number of evaluations for each sampled solution `aggregation` function that aggregates `evaluations` values to as single value. `kappa` multiplier used for the evaluation of the solutions, in that ``func(m + kappa*(x - m))`` is the f-value for x. Return ------ ``(X, fit)``, where X -- list of solutions fit -- list of respective function values Details ------- While ``not self.is_feasible(x, func(x))``new solutions are sampled. By default ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``. The argument to `func` can be freely modified within `func`. Depending on the ``CMA_mirrors`` option, some solutions are not sampled independently but as mirrors of other bad solutions. This is a simple derandomization that can save 10-30% of the evaluations in particular with small populations, for example on the cigar function. Example ------- >>> import cma >>> x0, sigma0 = 8*[10], 1 # 8-D >>> es = cma.CMAEvolutionStrategy(x0, sigma0) >>> while not es.stop(): ... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling ... es.tell(X, fit) # pass on fitness values ... 
es.disp(20) # print every 20-th iteration >>> print('terminated on ' + str(es.stop())) <output omitted> A single iteration step can be expressed in one line, such that an entire optimization after initialization becomes :: while not es.stop(): es.tell(*es.ask_and_eval(cma.fcts.elli))
### Input: samples `number` solutions and evaluates them on `func`, where each solution `s` is resampled until ``self.is_feasible(s, func(s)) is True``. Arguments --------- `func` objective function, ``func(x)`` returns a scalar `args` additional parameters for `func` `gradf` gradient of objective function, ``g = gradf(x, *args)`` must satisfy ``len(g) == len(x)`` `number` number of solutions to be sampled, by default population size ``popsize`` (AKA lambda) `xmean` mean for sampling the solutions, by default ``self.mean``. `sigma_fac` multiplier for sampling width, standard deviation, for example to get a small perturbation of solution `xmean` `evaluations` number of evaluations for each sampled solution `aggregation` function that aggregates `evaluations` values to as single value. `kappa` multiplier used for the evaluation of the solutions, in that ``func(m + kappa*(x - m))`` is the f-value for x. Return ------ ``(X, fit)``, where X -- list of solutions fit -- list of respective function values Details ------- While ``not self.is_feasible(x, func(x))``new solutions are sampled. By default ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``. The argument to `func` can be freely modified within `func`. Depending on the ``CMA_mirrors`` option, some solutions are not sampled independently but as mirrors of other bad solutions. This is a simple derandomization that can save 10-30% of the evaluations in particular with small populations, for example on the cigar function. Example ------- >>> import cma >>> x0, sigma0 = 8*[10], 1 # 8-D >>> es = cma.CMAEvolutionStrategy(x0, sigma0) >>> while not es.stop(): ... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling ... es.tell(X, fit) # pass on fitness values ... 
es.disp(20) # print every 20-th iteration >>> print('terminated on ' + str(es.stop())) <output omitted> A single iteration step can be expressed in one line, such that an entire optimization after initialization becomes :: while not es.stop(): es.tell(*es.ask_and_eval(cma.fcts.elli)) ### Response: def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None, sigma_fac=1, evaluations=1, aggregation=np.median, kappa=1): popsize = self.sp.popsize if number is not None: popsize = number selective_mirroring = self.opts[] > 0 nmirrors = self.sp.lam_mirr if popsize != self.sp.popsize: nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize) assert new_injections or self.opts[] < 2 if new_injections and self.opts[] != 1: nmirrors = 0 assert nmirrors <= popsize // 2 self.mirrors_idx = np.arange(nmirrors) self.mirrors_rejected_idx = [] is_feasible = self.opts[] fit = [] X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args) if xmean is None: xmean = self.mean X = [] for k in xrange(int(popsize)): x, f = X_first.pop(0), None rejected = -1 while rejected < 0 or not is_feasible(x, f): rejected += 1 if rejected: x = self.ask(1, xmean, sigma_fac)[0] elif k >= popsize - nmirrors: if k == popsize - nmirrors and selective_mirroring: self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1] x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]]) if rejected == 1 and k >= popsize - nmirrors: self.mirrors_rejected_idx.append(k) length_normalizer = 1 f = func(x, *args) if kappa == 1 else \ func(xmean + kappa * length_normalizer * (x - xmean), *args) if is_feasible(x, f) and evaluations > 1: f = aggregation([f] + [(func(x, *args) if kappa == 1 else func(xmean + kappa * length_normalizer * (x - xmean), *args)) for _i in xrange(int(evaluations - 1))]) if rejected + 1 % 1000 == 0: print( % (rejected, self.countiter)) fit.append(f) X.append(x) self.evaluations_per_f_value = int(evaluations) return X, fit
def _get_summary_struct(self): g = self.graph section_titles = [] graph_summary = [(k, _precomputed_field(v)) for k, v in six.iteritems(g.summary())] sections = [graph_summary] results = [(k, _precomputed_field(v)) for k, v in six.iteritems(self._result_fields())] methods = [(k, _precomputed_field(v)) for k, v in six.iteritems(self._method_fields())] settings = [(k, v) for k, v in six.iteritems(self._setting_fields())] metrics = [(k, v) for k, v in six.iteritems(self._metric_fields())] optional_sections = [(, results), (, settings), \ (, metrics), (, methods)] for (title, section) in optional_sections: if len(section) > 0: section_titles.append(title) sections.append(section) return (sections, section_titles)
Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object.
### Input: Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object. ### Response: def _get_summary_struct(self): g = self.graph section_titles = [] graph_summary = [(k, _precomputed_field(v)) for k, v in six.iteritems(g.summary())] sections = [graph_summary] results = [(k, _precomputed_field(v)) for k, v in six.iteritems(self._result_fields())] methods = [(k, _precomputed_field(v)) for k, v in six.iteritems(self._method_fields())] settings = [(k, v) for k, v in six.iteritems(self._setting_fields())] metrics = [(k, v) for k, v in six.iteritems(self._metric_fields())] optional_sections = [(, results), (, settings), \ (, metrics), (, methods)] for (title, section) in optional_sections: if len(section) > 0: section_titles.append(title) sections.append(section) return (sections, section_titles)
async def skip(source, n): source = transform.enumerate.raw(source) async with streamcontext(source) as streamer: async for i, item in streamer: if i >= n: yield item
Forward an asynchronous sequence, skipping the first ``n`` elements. If ``n`` is negative, no elements are skipped.
### Input: Forward an asynchronous sequence, skipping the first ``n`` elements. If ``n`` is negative, no elements are skipped. ### Response: async def skip(source, n): source = transform.enumerate.raw(source) async with streamcontext(source) as streamer: async for i, item in streamer: if i >= n: yield item
def google_analytics_js(parser, token): bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError(" takes no arguments" % bits[0]) return GoogleAnalyticsJsNode()
Google Analytics tracking template tag. Renders Javascript code to track page visits. You must supply your website property ID (as a string) in the ``GOOGLE_ANALYTICS_JS_PROPERTY_ID`` setting.
### Input: Google Analytics tracking template tag. Renders Javascript code to track page visits. You must supply your website property ID (as a string) in the ``GOOGLE_ANALYTICS_JS_PROPERTY_ID`` setting. ### Response: def google_analytics_js(parser, token): bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError(" takes no arguments" % bits[0]) return GoogleAnalyticsJsNode()
def load_from_output_metadata(output_metadata): impact_function = MultiExposureImpactFunction() provenance = output_metadata[] paths = get_provenance(provenance, provenance_multi_exposure_layers) if paths: for path in paths: impact_function.add_exposure(load_layer_from_registry(path)) path = get_provenance(provenance, provenance_hazard_layer) if path: impact_function.hazard = load_layer_from_registry(path) path = get_provenance(provenance, provenance_aggregation_layer) if path: impact_function.aggregation = load_layer_from_registry(path) extent = get_provenance(provenance, provenance_analysis_extent) if extent: impact_function._analysis_extent = QgsGeometry.fromWkt(extent) data_store_uri = get_provenance(provenance, provenance_data_store_uri) if data_store_uri: impact_function.datastore = Folder(data_store_uri) name = get_provenance(provenance, provenance_impact_function_name) impact_function._name = name start_datetime = get_provenance(provenance, provenance_start_datetime) impact_function._start_datetime = start_datetime end_datetime = get_provenance(provenance, provenance_end_datetime) impact_function._end_datetime = end_datetime duration = get_provenance(provenance, provenance_duration) impact_function._duration = duration debug_mode = get_provenance(provenance, provenance_debug_mode) impact_function.debug_mode = debug_mode path = get_provenance(provenance, provenance_layer_aggregation_summary) if path: impact_function._aggregation_summary = load_layer_from_registry( path) set_provenance( provenance, provenance_layer_aggregation_summary_id, impact_function._aggregation_summary.id()) path = get_provenance(provenance, provenance_layer_analysis_impacted) if path: impact_function._analysis_summary = load_layer_from_registry(path) set_provenance( provenance, provenance_layer_analysis_impacted_id, impact_function._analysis_summary.id()) dict_of_exposure_summary = get_provenance( provenance, provenance_multi_exposure_summary_layers) dict_of_exposure_summary_id = {} 
dict_of_analysis_summary_id = {} for exposure_key, exposure_summary in ( iter(list(dict_of_exposure_summary.items()))): layer = load_layer_from_registry(exposure_summary) keywords = KeywordIO.read_keywords(layer) serialized_impact_function = ( ImpactFunction.load_from_output_metadata(keywords)) impact_function._impact_functions.append( serialized_impact_function) impact_layer = serialized_impact_function.exposure_summary or ( serialized_impact_function.aggregate_hazard_impacted) dict_of_exposure_summary_id[exposure_key] = impact_layer.id() for analysis in impact_function._impact_functions: exposure_key = ( analysis.provenance[][]) analysis_summary = analysis.analysis_impacted dict_of_analysis_summary_id[exposure_key] = analysis_summary.id() set_provenance( provenance, provenance_multi_exposure_summary_layers_id, dict_of_exposure_summary_id) set_provenance( provenance, provenance_multi_exposure_analysis_summary_layers_id, dict_of_analysis_summary_id) impact_function._output_layer_expected = \ impact_function._compute_output_layer_expected() crs = get_provenance(provenance, provenance_crs) if crs: impact_function._crs = QgsCoordinateReferenceSystem(crs) impact_function._provenance = provenance impact_function._provenance_ready = True return impact_function
Set Impact Function based on an output of an analysis's metadata. If possible, we will try to use layers already in the legend and to not recreating new ones. We will keep the style for instance. :param output_metadata: Metadata from an output layer. :type output_metadata: OutputLayerMetadata :returns: Impact Function based on the metadata. :rtype: ImpactFunction
### Input: Set Impact Function based on an output of an analysis's metadata. If possible, we will try to use layers already in the legend and to not recreating new ones. We will keep the style for instance. :param output_metadata: Metadata from an output layer. :type output_metadata: OutputLayerMetadata :returns: Impact Function based on the metadata. :rtype: ImpactFunction ### Response: def load_from_output_metadata(output_metadata): impact_function = MultiExposureImpactFunction() provenance = output_metadata[] paths = get_provenance(provenance, provenance_multi_exposure_layers) if paths: for path in paths: impact_function.add_exposure(load_layer_from_registry(path)) path = get_provenance(provenance, provenance_hazard_layer) if path: impact_function.hazard = load_layer_from_registry(path) path = get_provenance(provenance, provenance_aggregation_layer) if path: impact_function.aggregation = load_layer_from_registry(path) extent = get_provenance(provenance, provenance_analysis_extent) if extent: impact_function._analysis_extent = QgsGeometry.fromWkt(extent) data_store_uri = get_provenance(provenance, provenance_data_store_uri) if data_store_uri: impact_function.datastore = Folder(data_store_uri) name = get_provenance(provenance, provenance_impact_function_name) impact_function._name = name start_datetime = get_provenance(provenance, provenance_start_datetime) impact_function._start_datetime = start_datetime end_datetime = get_provenance(provenance, provenance_end_datetime) impact_function._end_datetime = end_datetime duration = get_provenance(provenance, provenance_duration) impact_function._duration = duration debug_mode = get_provenance(provenance, provenance_debug_mode) impact_function.debug_mode = debug_mode path = get_provenance(provenance, provenance_layer_aggregation_summary) if path: impact_function._aggregation_summary = load_layer_from_registry( path) set_provenance( provenance, provenance_layer_aggregation_summary_id, 
impact_function._aggregation_summary.id()) path = get_provenance(provenance, provenance_layer_analysis_impacted) if path: impact_function._analysis_summary = load_layer_from_registry(path) set_provenance( provenance, provenance_layer_analysis_impacted_id, impact_function._analysis_summary.id()) dict_of_exposure_summary = get_provenance( provenance, provenance_multi_exposure_summary_layers) dict_of_exposure_summary_id = {} dict_of_analysis_summary_id = {} for exposure_key, exposure_summary in ( iter(list(dict_of_exposure_summary.items()))): layer = load_layer_from_registry(exposure_summary) keywords = KeywordIO.read_keywords(layer) serialized_impact_function = ( ImpactFunction.load_from_output_metadata(keywords)) impact_function._impact_functions.append( serialized_impact_function) impact_layer = serialized_impact_function.exposure_summary or ( serialized_impact_function.aggregate_hazard_impacted) dict_of_exposure_summary_id[exposure_key] = impact_layer.id() for analysis in impact_function._impact_functions: exposure_key = ( analysis.provenance[][]) analysis_summary = analysis.analysis_impacted dict_of_analysis_summary_id[exposure_key] = analysis_summary.id() set_provenance( provenance, provenance_multi_exposure_summary_layers_id, dict_of_exposure_summary_id) set_provenance( provenance, provenance_multi_exposure_analysis_summary_layers_id, dict_of_analysis_summary_id) impact_function._output_layer_expected = \ impact_function._compute_output_layer_expected() crs = get_provenance(provenance, provenance_crs) if crs: impact_function._crs = QgsCoordinateReferenceSystem(crs) impact_function._provenance = provenance impact_function._provenance_ready = True return impact_function
def plot_account(self, row, per_capita=False, sector=None, file_name=False, file_dpi=600, population=None, **kwargs): if type(per_capita) is not bool: logging.error() return None if type(row) is int: row = self.D_cba.ix[row].name name_row = (str(row). replace(, ). replace(, ). replace("[] for sector total account - per capita - (unit) (unit)kindkindbarcolormapcolormapSpectralFootprintD_cbaTerritorialD_pbaImportsD_impExportsD_expFootprintD_cba_capTerritorialD_pba_capImportsD_imp_capExportsD_exp_capFootprintD_cba_regTerritorialD_pba_regImportsD_imp_regExportsD_exp_regsectorPopulation regions are inconsistent with IO regionsPopulation must be given for sector results per capitatitletitleRegionsbest') try: plt.tight_layout() except: pass if file_name: plt.savefig(file_name, dpi=file_dpi) return ax
Plots D_pba, D_cba, D_imp and D_exp for the specified row (account) Plot either the total country accounts or for a specific sector, depending on the 'sector' parameter. Per default the accounts are plotted as bar charts. However, any valid keyword for the pandas.DataFrame.plot method can be passed. Notes ----- This looks prettier with the seaborn module (import seaborn before calling this method) Parameters ---------- row : string, tuple or int A valid index for the row in the extension which should be plotted (one(!) row - no list allowed) per_capita : boolean, optional Plot the per capita accounts instead of the absolute values default is False sector: string, optional Plot the results for a specific sector of the IO table. If None is given (default), the total regional accounts are plotted. population : pandas.DataFrame or np.array, optional Vector with population per region. This must be given if values should be plotted per_capita for a specific sector since these values are calculated on the fly. file_name : path string, optional If given, saves the plot to the given filename file_dpi : int, optional Dpi for saving the figure, default 600 **kwargs : key word arguments, optional This will be passed directly to the pd.DataFrame.plot method Returns ------- Axis as given by pandas.DataFrame.plot, None in case of errors
### Input: Plots D_pba, D_cba, D_imp and D_exp for the specified row (account) Plot either the total country accounts or for a specific sector, depending on the 'sector' parameter. Per default the accounts are plotted as bar charts. However, any valid keyword for the pandas.DataFrame.plot method can be passed. Notes ----- This looks prettier with the seaborn module (import seaborn before calling this method) Parameters ---------- row : string, tuple or int A valid index for the row in the extension which should be plotted (one(!) row - no list allowed) per_capita : boolean, optional Plot the per capita accounts instead of the absolute values default is False sector: string, optional Plot the results for a specific sector of the IO table. If None is given (default), the total regional accounts are plotted. population : pandas.DataFrame or np.array, optional Vector with population per region. This must be given if values should be plotted per_capita for a specific sector since these values are calculated on the fly. file_name : path string, optional If given, saves the plot to the given filename file_dpi : int, optional Dpi for saving the figure, default 600 **kwargs : key word arguments, optional This will be passed directly to the pd.DataFrame.plot method Returns ------- Axis as given by pandas.DataFrame.plot, None in case of errors ### Response: def plot_account(self, row, per_capita=False, sector=None, file_name=False, file_dpi=600, population=None, **kwargs): if type(per_capita) is not bool: logging.error() return None if type(row) is int: row = self.D_cba.ix[row].name name_row = (str(row). replace(, ). replace(, ). 
replace("[] for sector total account - per capita - (unit) (unit)kindkindbarcolormapcolormapSpectralFootprintD_cbaTerritorialD_pbaImportsD_impExportsD_expFootprintD_cba_capTerritorialD_pba_capImportsD_imp_capExportsD_exp_capFootprintD_cba_regTerritorialD_pba_regImportsD_imp_regExportsD_exp_regsectorPopulation regions are inconsistent with IO regionsPopulation must be given for sector results per capitatitletitleRegionsbest') try: plt.tight_layout() except: pass if file_name: plt.savefig(file_name, dpi=file_dpi) return ax
def sing(a, b, c=False, name=): print(.format(a, b, c, name))
sing a song hehe :param a: I'm a :param b: I'm b :param c: I'm c :param name: I'm name
### Input: sing a song hehe :param a: I'm a :param b: I'm b :param c: I'm c :param name: I'm name ### Response: def sing(a, b, c=False, name=): print(.format(a, b, c, name))
def parse_v4_unit_placement(placement_str): placement = placement_str container = machine = service = unit = if in placement: try: container, placement = placement_str.split() except ValueError: msg = .format( placement_str) raise ValueError(msg.encode()) if in placement: try: placement, unit = placement.split() except ValueError: msg = .format( placement_str) raise ValueError(msg.encode()) if placement.isdigit() or placement == : machine = placement else: service = placement if (container and container not in VALID_CONTAINERS): msg = .format( container, placement_str) raise ValueError(msg.encode()) unit = _parse_unit(unit, placement_str) return UnitPlacement(container, machine, service, unit)
Return a UnitPlacement for bundles version 4, given a placement string. See https://github.com/juju/charmstore/blob/v4/docs/bundles.md Raise a ValueError if the placement is not valid.
### Input: Return a UnitPlacement for bundles version 4, given a placement string. See https://github.com/juju/charmstore/blob/v4/docs/bundles.md Raise a ValueError if the placement is not valid. ### Response: def parse_v4_unit_placement(placement_str): placement = placement_str container = machine = service = unit = if in placement: try: container, placement = placement_str.split() except ValueError: msg = .format( placement_str) raise ValueError(msg.encode()) if in placement: try: placement, unit = placement.split() except ValueError: msg = .format( placement_str) raise ValueError(msg.encode()) if placement.isdigit() or placement == : machine = placement else: service = placement if (container and container not in VALID_CONTAINERS): msg = .format( container, placement_str) raise ValueError(msg.encode()) unit = _parse_unit(unit, placement_str) return UnitPlacement(container, machine, service, unit)
def get_files(commit_only=True, copy_dest=None): "Get copies of files for analysis." if commit_only: real_files = bash( "git diff --cached --name-status | " "grep -v -E | " "awk " ).value().strip() else: real_files = bash( "git ls-tree --name-only --full-tree -r HEAD" ).value().strip() if real_files: return create_fake_copies(real_files.split(), copy_dest) return []
Get copies of files for analysis.
### Input: Get copies of files for analysis. ### Response: def get_files(commit_only=True, copy_dest=None): "Get copies of files for analysis." if commit_only: real_files = bash( "git diff --cached --name-status | " "grep -v -E | " "awk " ).value().strip() else: real_files = bash( "git ls-tree --name-only --full-tree -r HEAD" ).value().strip() if real_files: return create_fake_copies(real_files.split(), copy_dest) return []
def apt_add_key(keyid, keyserver=, log=False): if log: log_green( % (keyid, keyserver) ) with settings(hide(, , )): sudo( % (keyserver, keyid)) return True
trust a new PGP key related to a apt-repository
### Input: trust a new PGP key related to a apt-repository ### Response: def apt_add_key(keyid, keyserver=, log=False): if log: log_green( % (keyid, keyserver) ) with settings(hide(, , )): sudo( % (keyserver, keyid)) return True
def biquad(self, b, a): if not isinstance(b, list): raise ValueError() if not isinstance(a, list): raise ValueError() if len(b) != 3: raise ValueError() if len(a) != 3: raise ValueError() if not all([is_number(b_val) for b_val in b]): raise ValueError() if not all([is_number(a_val) for a_val in a]): raise ValueError() effect_args = [ , .format(b[0]), .format(b[1]), .format(b[2]), .format(a[0]), .format(a[1]), .format(a[2]) ] self.effects.extend(effect_args) self.effects_log.append() return self
Apply a biquad IIR filter with the given coefficients. Parameters ---------- b : list of floats Numerator coefficients. Must be length 3 a : list of floats Denominator coefficients. Must be length 3 See Also -------- fir, treble, bass, equalizer
### Input: Apply a biquad IIR filter with the given coefficients. Parameters ---------- b : list of floats Numerator coefficients. Must be length 3 a : list of floats Denominator coefficients. Must be length 3 See Also -------- fir, treble, bass, equalizer ### Response: def biquad(self, b, a): if not isinstance(b, list): raise ValueError() if not isinstance(a, list): raise ValueError() if len(b) != 3: raise ValueError() if len(a) != 3: raise ValueError() if not all([is_number(b_val) for b_val in b]): raise ValueError() if not all([is_number(a_val) for a_val in a]): raise ValueError() effect_args = [ , .format(b[0]), .format(b[1]), .format(b[2]), .format(a[0]), .format(a[1]), .format(a[2]) ] self.effects.extend(effect_args) self.effects_log.append() return self
def dvsep(s1, s2): assert len(s1) is 6 and len(s2) is 6 s1 = stypes.toDoubleVector(s1) s2 = stypes.toDoubleVector(s2) return libspice.dvsep_c(s1, s2)
Calculate the time derivative of the separation angle between two input states, S1 and S2. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvsep_c.html :param s1: State vector of the first body. :type s1: 6-Element Array of floats :param s2: State vector of the second body. :type s2: 6-Element Array of floats :return: The time derivative of the angular separation between S1 and S2. :rtype: float
### Input: Calculate the time derivative of the separation angle between two input states, S1 and S2. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvsep_c.html :param s1: State vector of the first body. :type s1: 6-Element Array of floats :param s2: State vector of the second body. :type s2: 6-Element Array of floats :return: The time derivative of the angular separation between S1 and S2. :rtype: float ### Response: def dvsep(s1, s2): assert len(s1) is 6 and len(s2) is 6 s1 = stypes.toDoubleVector(s1) s2 = stypes.toDoubleVector(s2) return libspice.dvsep_c(s1, s2)
def flush(self, queue_name): for _ in iter_queue(self.queues[queue_name]): self.queues[queue_name].task_done()
Drop all the messages from a queue. Parameters: queue_name(str): The queue to flush.
### Input: Drop all the messages from a queue. Parameters: queue_name(str): The queue to flush. ### Response: def flush(self, queue_name): for _ in iter_queue(self.queues[queue_name]): self.queues[queue_name].task_done()
def mod_watch(name, **kwargs): sfun = kwargs.pop(, None) mapfun = {: purged, : latest, : removed, : installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {: name, : {}, : .format(sfun), : False}
Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
### Input: Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. ### Response: def mod_watch(name, **kwargs): sfun = kwargs.pop(, None) mapfun = {: purged, : latest, : removed, : installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {: name, : {}, : .format(sfun), : False}
def bucket_to_dataframe(name, buckets, append_name=None): expanded_buckets = [] for item in buckets: if type(item) is dict: single_dict = item else: single_dict = item.to_dict() single_dict[name] = single_dict.pop() if append_name: persistance_dict = single_dict.copy() for key in persistance_dict.keys(): single_dict[append_name + + key] = single_dict.pop(key) expanded_buckets.append(single_dict) return pd.DataFrame(expanded_buckets)
A function that turns elasticsearch aggregation buckets into dataframes :param name: The name of the bucket (will be a column in the dataframe) :type name: str :param bucket: a bucket from elasticsearch results :type bucket: list[dict] :returns: pandas.DataFrame
### Input: A function that turns elasticsearch aggregation buckets into dataframes :param name: The name of the bucket (will be a column in the dataframe) :type name: str :param bucket: a bucket from elasticsearch results :type bucket: list[dict] :returns: pandas.DataFrame ### Response: def bucket_to_dataframe(name, buckets, append_name=None): expanded_buckets = [] for item in buckets: if type(item) is dict: single_dict = item else: single_dict = item.to_dict() single_dict[name] = single_dict.pop() if append_name: persistance_dict = single_dict.copy() for key in persistance_dict.keys(): single_dict[append_name + + key] = single_dict.pop(key) expanded_buckets.append(single_dict) return pd.DataFrame(expanded_buckets)
def image_url(self, pixel_size=None): if "profile" not in self._raw: return profile = self._raw["profile"] if (pixel_size): img_key = "image_%s" % pixel_size if img_key in profile: return profile[img_key] return profile[self._DEFAULT_IMAGE_KEY]
Get the URL for the user icon in the desired pixel size, if it exists. If no size is supplied, give the URL for the full-size image.
### Input: Get the URL for the user icon in the desired pixel size, if it exists. If no size is supplied, give the URL for the full-size image. ### Response: def image_url(self, pixel_size=None): if "profile" not in self._raw: return profile = self._raw["profile"] if (pixel_size): img_key = "image_%s" % pixel_size if img_key in profile: return profile[img_key] return profile[self._DEFAULT_IMAGE_KEY]
def chebyshev_polynomial_coefficients(a, b, degree):
    """Chebyshev polynomial coefficients for the interval [a, b].

    Parameters
    ----------
    a, b : float
        Left and right endpoints of the interval; must satisfy 0 < a < b.
    degree : int
        Degree of the desired Chebyshev polynomial.

    Returns
    -------
    Coefficients (descending order) of the Chebyshev polynomial C(t) with
    minimum magnitude on [a, b] such that C(0) = 1.0.

    Raises
    ------
    ValueError
        If the interval does not satisfy 0 < a < b.

    Examples
    --------
    >>> chebyshev_polynomial_coefficients(1.0, 2.0, 3)
    array([-0.32323232,  1.45454545, -2.12121212,  1.        ])
    """
    # NOTE(review): the error-message literal was lost in the source; the
    # wording below is a restoration -- confirm against upstream pyamg.
    if a >= b or a <= 0:
        raise ValueError('invalid interval [%s, %s]: require 0 < a < b' % (a, b))

    # Chebyshev roots on the standard interval [-1, 1].
    std_roots = np.cos(np.pi * (np.arange(degree) + 0.5) / degree)
    # Affinely map the roots onto [a, b].
    scaled_roots = 0.5 * (b - a) * (1 + std_roots) + a

    # Polynomial with those roots, normalized so that C(0) = 1.
    scaled_poly = np.poly(scaled_roots)
    scaled_poly /= np.polyval(scaled_poly, 0)
    return scaled_poly
Chebyshev polynomial coefficients for the interval [a,b]. Parameters ---------- a,b : float The left and right endpoints of the interval. degree : int Degree of desired Chebyshev polynomial Returns ------- Coefficients of the Chebyshev polynomial C(t) with minimum magnitude on the interval [a,b] such that C(0) = 1.0. The coefficients are returned in descending order. Notes ----- a,b typically represent the interval of the spectrum for some matrix that you wish to damp with a Chebyshev smoother. Examples -------- >>> from pyamg.relaxation.chebyshev import chebyshev_polynomial_coefficients >>> print chebyshev_polynomial_coefficients(1.0,2.0, 3) [-0.32323232 1.45454545 -2.12121212 1. ]
### Input: Chebyshev polynomial coefficients for the interval [a,b]. Parameters ---------- a,b : float The left and right endpoints of the interval. degree : int Degree of desired Chebyshev polynomial Returns ------- Coefficients of the Chebyshev polynomial C(t) with minimum magnitude on the interval [a,b] such that C(0) = 1.0. The coefficients are returned in descending order. Notes ----- a,b typically represent the interval of the spectrum for some matrix that you wish to damp with a Chebyshev smoother. Examples -------- >>> from pyamg.relaxation.chebyshev import chebyshev_polynomial_coefficients >>> print chebyshev_polynomial_coefficients(1.0,2.0, 3) [-0.32323232 1.45454545 -2.12121212 1. ] ### Response: def chebyshev_polynomial_coefficients(a, b, degree): if a >= b or a <= 0: raise ValueError( % (a, b)) std_roots = np.cos(np.pi * (np.arange(degree) + 0.5) / degree) scaled_roots = 0.5 * (b-a) * (1 + std_roots) + a scaled_poly = np.poly(scaled_roots) scaled_poly /= np.polyval(scaled_poly, 0) return scaled_poly
def initialize_from_string(content: str) -> 'CapitanSymbol':
    """
    Create and initialize a new symbol from a string.

    :param content: symbol content as read from the text file, in the form
        <label>:<sequence>:<image>
    :return: the initialized symbol, or None for empty input
    :rtype: CapitanSymbol
    """
    # Use equality, not identity: `content is ""` relied on CPython string
    # interning and is not a guaranteed comparison.
    if content is None or content == "":
        return None

    parts = content.split(":")

    min_x = 100000
    max_x = 0
    min_y = 100000
    max_y = 0

    symbol_name = parts[0]
    sequence = parts[1]
    # NOTE(review): the separator literal was lost in the source; the 30x30
    # pixel values are assumed to be comma-separated -- confirm against the
    # Capitan dataset format.
    image_numbers = parts[2].split(',')
    image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30))

    stroke = []
    for point_string in sequence.split(";"):
        if point_string == "":
            continue
        point_x, point_y = point_string.split(",")
        x = float(point_x)
        y = float(point_y)
        stroke.append(SimplePoint2D(x, y))
        # Track the bounding box of the stroke as we go.
        max_x = max(max_x, x)
        min_x = min(min_x, x)
        max_y = max(max_y, y)
        min_y = min(min_y, y)

    dimensions = Rectangle(Point2D(min_x, min_y),
                           int(max_x - min_x + 1),
                           int(max_y - min_y + 1))
    return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
Create and initializes a new symbol from a string :param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image> :return: The initialized symbol :rtype: CapitanSymbol
### Input: Create and initializes a new symbol from a string :param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image> :return: The initialized symbol :rtype: CapitanSymbol ### Response: def initialize_from_string(content: str) -> : if content is None or content is "": return None parts = content.split(":") min_x = 100000 max_x = 0 min_y = 100000 max_y = 0 symbol_name = parts[0] sequence = parts[1] image_numbers = parts[2].split() image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30)) stroke = [] for point_string in sequence.split(";"): if point_string is "": continue point_x, point_y = point_string.split(",") x = float(point_x) y = float(point_y) stroke.append(SimplePoint2D(x, y)) max_x = max(max_x, x) min_x = min(min_x, x) max_y = max(max_y, y) min_y = min(min_y, y) dimensions = Rectangle(Point2D(min_x, min_y), int(max_x - min_x + 1), int(max_y - min_y + 1)) return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
def showDescription(self):
    """
    Shows the description for the current plugin in the interface.
    """
    plugin = self.currentPlugin()
    if not plugin:
        # No plugin selected: clear the description pane.
        # NOTE(review): the literal was lost in the source; an empty string
        # is the natural "clear" value for setText -- confirm.
        self.uiDescriptionTXT.setText('')
    else:
        self.uiDescriptionTXT.setText(plugin.description())
Shows the description for the current plugin in the interface.
### Input: Shows the description for the current plugin in the interface. ### Response: def showDescription( self ): plugin = self.currentPlugin() if ( not plugin ): self.uiDescriptionTXT.setText() else: self.uiDescriptionTXT.setText(plugin.description())
def get_first_line(filepath, dialect):
    """Return the list of fields from the first line of *filepath*.

    The file is parsed with the given csv ``dialect``. Returns ``[]`` for
    an empty file (the original raised ``NameError`` there, because the
    loop variable was never bound).

    NOTE: the file is opened in binary mode for the Python 2 csv module,
    consistent with the rest of this (Python 2 era) codebase.
    """
    with open(filepath, "rb") as csvfile:
        for first_line in csv.reader(csvfile, dialect=dialect):
            # Returning from inside the loop yields exactly the first row.
            return first_line
    return []
Returns List of first line items of file filepath
### Input: Returns List of first line items of file filepath ### Response: def get_first_line(filepath, dialect): with open(filepath, "rb") as csvfile: csvreader = csv.reader(csvfile, dialect=dialect) for first_line in csvreader: break return first_line
def validlines(self):
    """Return all lines within which Prosodic understood all words."""
    understood = []
    for line in self.lines():
        if line.isBroken() or line.ignoreMe:
            continue
        understood.append(line)
    return understood
Return all lines within which Prosodic understood all words.
### Input: Return all lines within which Prosodic understood all words. ### Response: def validlines(self): return [ln for ln in self.lines() if (not ln.isBroken() and not ln.ignoreMe)]
def create_delete_model(record):
    """Create a {{cookiecutter.technology_name}} model from a record."""
    # Extract the common Historical base fields (actor, timestamp, ...)
    # from the CloudWatch event record.
    data = cloudwatch.get_historical_base_info(record)

    tech_id = None
    # NOTE(review): the subscript literal was lost in the source
    # (`record[]`); it presumably names the event field holding the
    # resource identifier -- confirm against the cookiecutter template.
    arn = get_arn(tech_id, record[])

    # NOTE(review): the debug format-string literal was lost here.
    log.debug(.format(arn=arn))

    # Mark the item as deleted by blanking out its configuration payload.
    # NOTE(review): the dict-key literal was lost; likely the
    # technology-specific configuration field.
    data.update({
        : {}
    })

    # Fetch the current record for this ARN (at most one) so its remaining
    # attributes can be carried over into the delete marker.
    items = list(Current{{cookiecutter.technology_slug | titlecase}}Model.query(arn, limit=1))

    if items:
        # NOTE(review): the attribute-name literal was lost; this copies the
        # PynamoDB attribute dict of the existing item.
        model_dict = items[0].__dict__[].copy()
        model_dict.update(data)
        model = Current{{cookiecutter.technology_slug | titlecase }}Model(**model_dict)
        model.save()
        return model
    # Implicitly returns None when no current item exists for the ARN.
Create a {{cookiecutter.technology_name}} model from a record.
### Input: Create a {{cookiecutter.technology_name}} model from a record. ### Response: def create_delete_model(record): data = cloudwatch.get_historical_base_info(record) tech_id = None arn = get_arn(tech_id, record[]) log.debug(.format(arn=arn)) data.update({ : {} }) items = list(Current{{cookiecutter.technology_slug | titlecase}}Model.query(arn, limit=1)) if items: model_dict = items[0].__dict__[].copy() model_dict.update(data) model = Current{{cookiecutter.technology_slug | titlecase }}Model(**model_dict) model.save() return model
def clean_params(params, drop_nones=True, recursive=True):
    """Clean up a dict of API parameters to be sent to the Coinbase API.

    By default removes every key whose value is None so it is not sent to
    the endpoint at all; with ``recursive`` the same cleaning is applied
    to nested dict values.

    :param params: dict of request parameters
    :param drop_nones: drop keys whose value is None
    :param recursive: also clean nested dict values
    :returns: a new cleaned dict (the input is not modified)
    """
    cleaned = {}
    # dict.items() works on both Python 2 and 3; the third-party
    # six.iteritems() indirection the original used is unnecessary here.
    for key, value in params.items():
        if drop_nones and value is None:
            continue
        if recursive and isinstance(value, dict):
            value = clean_params(value, drop_nones, recursive)
        cleaned[key] = value
    return cleaned
Clean up a dict of API parameters to be sent to the Coinbase API. Some endpoints require boolean options to be represented as integers. By default, will remove all keys whose value is None, so that they will not be sent to the API endpoint at all.
### Input: Clean up a dict of API parameters to be sent to the Coinbase API. Some endpoints require boolean options to be represented as integers. By default, will remove all keys whose value is None, so that they will not be sent to the API endpoint at all. ### Response: def clean_params(params, drop_nones=True, recursive=True): cleaned = {} for key, value in six.iteritems(params): if drop_nones and value is None: continue if recursive and isinstance(value, dict): value = clean_params(value, drop_nones, recursive) cleaned[key] = value return cleaned
def feather_links(self, factor=0.01, include_self=False):
    """Feather the links of connected nodes.

    Go through every node in the network and make it inherit the links of
    the nodes it is connected to. Inherited link weights are scaled by the
    neighbor's share of this node's total outgoing weight, by the link's
    share of the neighbor's total outgoing weight, and by ``factor``, then
    rounded to 2 decimal places.

    Args:
        factor (float): multiplier applied to every inherited link weight.
        include_self (bool): whether nodes may inherit links pointing back
            to themselves.

    Returns:
        None
    """
    def feather_node(node):
        # Total outgoing weight of this node; normalizes each neighbor's
        # weight to a fraction of the whole.
        node_weight_sum = sum(l.weight for l in node.link_list)
        # Iterate over a shallow copy ([:]) because add_link() below grows
        # node.link_list while we loop.
        for original_link in node.link_list[:]:
            neighbor_node = original_link.target
            neighbor_weight = original_link.weight
            # Neighbor's share of this node's total outgoing weight.
            feather_weight = neighbor_weight / node_weight_sum
            neighbor_node_weight_sum = sum(
                l.weight for l in neighbor_node.link_list)
            for neighbor_link in neighbor_node.link_list:
                # Skip links pointing back at the inheriting node unless
                # the caller explicitly allows self-links.
                if (not include_self) and (neighbor_link.target == node):
                    continue
                # This link's share of the neighbor's outgoing weight.
                relative_link_weight = (neighbor_link.weight /
                                        neighbor_node_weight_sum)
                feathered_link_weight = round(
                    (relative_link_weight * feather_weight * factor), 2)
                # NOTE(review): when neighbor_node is node itself (possible
                # with include_self=True on a self-linked node) add_link may
                # mutate the list being iterated; also assumes add_link
                # merges weights for duplicate targets -- confirm against
                # Node.add_link.
                node.add_link(neighbor_link.target, feathered_link_weight)
    for n in self.node_list:
        feather_node(n)
Feather the links of connected nodes. Go through every node in the network and make it inherit the links of the other nodes it is connected to. Because the link weight sum for any given node can be very different within a graph, the weights of inherited links are made proportional to the sum weight of the parent nodes. Args: factor (float): multiplier of neighbor links include_self (bool): whether nodes can inherit links pointing to themselves Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_2, 1) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 >>> graph.feather_links(include_self=True) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 One 0.01
### Input: Feather the links of connected nodes. Go through every node in the network and make it inherit the links of the other nodes it is connected to. Because the link weight sum for any given node can be very different within a graph, the weights of inherited links are made proportional to the sum weight of the parent nodes. Args: factor (float): multiplier of neighbor links include_self (bool): whether nodes can inherit links pointing to themselves Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_2, 1) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 >>> graph.feather_links(include_self=True) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 One 0.01 ### Response: def feather_links(self, factor=0.01, include_self=False): def feather_node(node): node_weight_sum = sum(l.weight for l in node.link_list) for original_link in node.link_list[:]: neighbor_node = original_link.target neighbor_weight = original_link.weight feather_weight = neighbor_weight / node_weight_sum neighbor_node_weight_sum = sum(l.weight for l in neighbor_node.link_list) for neighbor_link in neighbor_node.link_list: if (not include_self) and (neighbor_link.target == node): continue relative_link_weight = (neighbor_link.weight / neighbor_node_weight_sum) feathered_link_weight = round((relative_link_weight * feather_weight * factor), 2) node.add_link(neighbor_link.target, feathered_link_weight) for n in self.node_list: feather_node(n)
def _location_purge_all(delete=False, verbosity=0):
    """Purge all data locations."""
    # Nothing to do when the table is empty; just log and bail out.
    if not DataLocation.objects.exists():
        logger.info("No data locations")
        return
    # Purge every location that is either not yet purged or orphaned
    # (no associated data).
    candidates = DataLocation.objects.filter(Q(purged=False) | Q(data=None))
    for candidate in candidates:
        location_purge(candidate.id, delete, verbosity)
Purge all data locations.
### Input: Purge all data locations. ### Response: def _location_purge_all(delete=False, verbosity=0): if DataLocation.objects.exists(): for location in DataLocation.objects.filter(Q(purged=False) | Q(data=None)): location_purge(location.id, delete, verbosity) else: logger.info("No data locations")
def of(cls, jcriterion, bigdl_type="float"):
    """
    Create a python Criterion from a java criterion object.

    :param jcriterion: a java criterion object created by Py4j
    :param bigdl_type: numeric type tag used on the JVM side
    :return: a criterion.
    """
    wrapped = Criterion(bigdl_type, jcriterion)
    wrapped.bigdl_type = bigdl_type
    wrapped.value = jcriterion
    return wrapped
Create a python Criterion by a java criterion object :param jcriterion: A java criterion object which created by Py4j :return: a criterion.
### Input: Create a python Criterion by a java criterion object :param jcriterion: A java criterion object which created by Py4j :return: a criterion. ### Response: def of(cls, jcriterion, bigdl_type="float"): criterion = Criterion(bigdl_type, jcriterion) criterion.value = jcriterion criterion.bigdl_type = bigdl_type return criterion
def get_logger(name, file_name=None, stream=None, template=None,
               propagate=False, level=None):
    """Get a configured logger by name.

    :param name: logger name (passed to ``logging.getLogger``)
    :param file_name: if given, also log to this file with a timestamp prefix
    :param stream: stream for the stream handler (defaults to stdout)
    :param template: logging format template for the formatter
    :param propagate: whether records propagate to ancestor loggers
    :param level: explicit logging level; defaults to INFO, or CRITICAL
        while tests are running
    :returns: the configured ``logging.Logger``
    """
    logger = logging.getLogger(name)

    # Silence output while the test suite runs, unless the caller asked
    # for an explicit level.
    # NOTE(review): the detection literals were lost in the source; 'test'
    # in argv / argv[0] ending in 'test' covers pytest, nosetests and
    # `setup.py test` -- confirm against the original project.
    running_tests = ('test' in sys.argv
                     or sys.argv[0].endswith('test'))
    if running_tests and not level:
        level = logging.CRITICAL
    if not level:
        level = logging.INFO
    logger.setLevel(level)
    logger.propagate = propagate

    formatter = logging.Formatter(template)
    if not stream:
        stream = sys.stdout

    # Replace (not append to) existing handlers so repeated get_logger()
    # calls for the same name don't duplicate output.
    logger.handlers = []
    handler = logging.StreamHandler(stream=stream)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if file_name:
        handler = logging.FileHandler(file_name)
        # File records get a timestamp prefix in addition to the template.
        # NOTE(review): the prefix literal was lost in the source; restored
        # as '%(asctime)s '. Requires a non-None template -- confirm.
        handler.setFormatter(logging.Formatter('%(asctime)s ' + template))
        logger.addHandler(handler)
    return logger
Get a logger by name.
### Input: Get a logger by name. ### Response: def get_logger(name, file_name=None, stream=None, template=None, propagate=False, level=None): logger = logging.getLogger(name) running_tests = ( in sys.argv or sys.argv[0].endswith()) if running_tests and not level: level = logging.CRITICAL if not level: level = logging.INFO logger.setLevel(level) logger.propagate = propagate formatter = logging.Formatter(template) if not stream: stream = sys.stdout logger.handlers = [] handler = logging.StreamHandler(stream=stream) handler.setFormatter(formatter) logger.addHandler(handler) if file_name: handler = logging.FileHandler(file_name) handler.setFormatter(logging.Formatter(+template)) logger.addHandler(handler) return logger
def solve(self, y, h, t_end):
    """Integrate an unforced system from t=0.0 up to (not including) t_end.

    y - initial state
    h - step size
    t_end - stop time

    Returns the pair (times, states): the time at the start of each step
    and the state produced by that step.
    """
    times = []
    states = []
    state = y
    t = 0.0
    while t < t_end:
        times.append(t)
        # Advance one step; the forcing input is None (unforced system).
        state = self.step(state, None, t, h)
        states.append(state)
        t += h
    return times, states
Given a function, initial conditions, step size and end value, this will calculate an unforced system. The default start time is t=0.0, but this can be changed. y - initial state h - step size n - stop time
### Input: Given a function, initial conditions, step size and end value, this will calculate an unforced system. The default start time is t=0.0, but this can be changed. y - initial state h - step size n - stop time ### Response: def solve(self, y, h, t_end): ts = [] ys = [] yi = y ti = 0.0 while ti < t_end: ts.append(ti) yi = self.step(yi, None, ti, h) ys.append(yi) ti += h return ts, ys