def read_energy_bounds(hdu):
    nebins = len(hdu.data)
    ebin_edges = np.ndarray((nebins + 1))
    try:
        ebin_edges[0:-1] = np.log10(hdu.data.field("E_MIN")) - 3.
        ebin_edges[-1] = np.log10(hdu.data.field("E_MAX")[-1]) - 3.
    except KeyError:
        ebin_edges[0:-1] = np.log10(hdu.data.field("energy_MIN"))
        ebin_edges[-1] = np.log10(hdu.data.field("energy_MAX")[-1])
    return ebin_edges
Reads and returns the energy bin edges from a FITS HDU
def impact_check_range(func):
    @wraps(func)
    def impact_wrapper(*args, **kwargs):
        if isinstance(args[1], numpy.ndarray):
            out = numpy.zeros(len(args[1]))
            goodIndx = (args[1] < args[0]._deltaAngleTrackImpact) * (args[1] > 0.)
            out[goodIndx] = func(args[0], args[1][goodIndx])
            return out
        elif args[1] >= args[0]._deltaAngleTrackImpact or args[1] <= 0.:
            return 0.
        else:
            return func(*args, **kwargs)
    return impact_wrapper
Decorator to check the range of interpolated kicks
def get_image_size(self, image):
    if image['size'] is None:
        args = settings.THUMBNAIL_VIPSHEADER.split(' ')
        args.append(image['source'])
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()
        m = size_re.match(str(p.stdout.read()))
        image['size'] = int(m.group('x')), int(m.group('y'))
    return image['size']
Returns the image width and height as a tuple
def get_as_parameters_with_default(self, key, default_value):
    result = self.get_as_nullable_parameters(key)
    return result if result is not None else default_value
Converts a map element into a Parameters object or returns a default value if conversion is not possible.

:param key: a key of element to get.
:param default_value: the default value
:return: Parameters value of the element or default value if conversion is not supported.
def remove_listener(self, listener):
    internal_listener = self._internal_listeners.pop(listener)
    return self._client.remove_listener(internal_listener)
Remove the given listener from the wrapped client. :param listener: A listener previously passed to :meth:`add_listener`.
def get_narrow_url(self, instance):
    text = instance[0]
    request = self.context["request"]
    query_params = request.GET.copy()

    page_query_param = self.get_paginate_by_param()
    if page_query_param and page_query_param in query_params:
        del query_params[page_query_param]

    selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
    selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
    query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))

    path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
    url = request.build_absolute_uri(path)
    return serializers.Hyperlink(url, "narrow-url")
Return a link suitable for narrowing on the current item.
def also_restrict_to(self, restriction):
    if type(restriction) != list:
        restriction = [restriction]
    self._also_restriction = restriction
Works like restrict_to but offers an additional restriction. Playbooks use this to implement serial behavior.
def _convert_json(obj):
    if isinstance(obj, dict):
        return {_convert_json(key): _convert_json(val)
                for (key, val) in six.iteritems(obj)}
    elif isinstance(obj, list) and len(obj) == 2:
        first = obj[0]
        second = obj[1]
        if first == 'set' and isinstance(second, list):
            return [_convert_json(elem) for elem in second]
        elif first == 'map' and isinstance(second, list):
            for elem in second:
                if not isinstance(elem, list) or len(elem) != 2:
                    return obj
            return {elem[0]: _convert_json(elem[1]) for elem in second}
        else:
            return obj
    elif isinstance(obj, list):
        return [_convert_json(elem) for elem in obj]
    else:
        return obj
Converts from the JSON output provided by ovs-vsctl into a usable Python object tree. In particular, sets and maps are converted from lists to actual sets or maps.

Args:
    obj: Object that shall be recursively converted.

Returns:
    Converted version of object.
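A minimal usage sketch with hypothetical input mimicking `ovs-vsctl --format=json` output (assuming _convert_json above is importable); 'map' values come back as dicts, 'set' values as plain lists:

# Hypothetical ovs-vsctl-style values, not taken from a real switch.
raw_map = ["map", [["mac", "00:11:22:33:44:55"], ["mtu", "1500"]]]
print(_convert_json(raw_map))             # {'mac': '00:11:22:33:44:55', 'mtu': '1500'}
print(_convert_json(["set", [1, 2, 3]]))  # [1, 2, 3]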
def ex_varassign(name, expr):
    if not isinstance(expr, ast.expr):
        expr = ex_literal(expr)
    return ast.Assign([ex_lvalue(name)], expr)
Assign an expression into a single variable. The expression may either be an `ast.expr` object or a value to be used as a literal.
def _interpret_ltude(value, name, psuffix, nsuffix):
    if not isinstance(value, str):
        return Angle(degrees=_unsexagesimalize(value))
    value = value.strip().upper()
    if value.endswith(psuffix):
        sign = +1.0
    elif value.endswith(nsuffix):
        sign = -1.0
    else:
        raise ValueError('your {0} string {1!r} does not end with either {2!r}'
                         ' or {3!r}'.format(name, value, psuffix, nsuffix))
    try:
        value = float(value[:-1])
    except ValueError:
        raise ValueError('your {0} string {1!r} cannot be parsed as a floating'
                         ' point number'.format(name, value))
    return Angle(degrees=sign * value)
Interpret a string, float, or tuple as a latitude or longitude angle.

`value` - The string to interpret.
`name` - 'latitude' or 'longitude', for use in exception messages.
`psuffix` - The suffix that indicates a positive angle ('N' or 'E').
`nsuffix` - The suffix that indicates a negative angle ('S' or 'W').
def aroon_up(data, period):
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    a_up = [((period - list(reversed(data[idx + 1 - period:idx + 1])).index(np.max(data[idx + 1 - period:idx + 1]))) / float(period)) * 100
            for idx in range(period - 1, len(data))]
    a_up = fill_for_noncomputable_vals(data, a_up)
    return a_up
Aroon Up.

Formula:
AROONUP = (((PERIOD) - (PERIODS since PERIOD high)) / (PERIOD)) * 100
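A self-contained sketch of the formula above, with the library's error checking and padding helpers (catch_errors, fill_for_noncomputable_vals) omitted; the data values are made up:

import numpy as np

def aroon_up_sketch(data, period):
    # For each window, count bars since the period high, then rescale to 0-100.
    out = []
    for idx in range(period - 1, len(data)):
        window = data[idx + 1 - period:idx + 1]
        bars_since_high = list(reversed(window)).index(np.max(window))
        out.append((period - bars_since_high) / float(period) * 100)
    return out

print(aroon_up_sketch([1, 2, 3, 2, 1, 4], 3))  # [100.0, 66.66..., 33.33..., 100.0]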
def _persist_metadata(self):
    serializable_data = self.get_serializable()
    try:
        self._try_persist_metadata(serializable_data)
    except TypeError:
        cleaned_data = Script._remove_non_serializable_store_entries(serializable_data["store"])
        self._try_persist_metadata(cleaned_data)
Write all script meta-data, including the persistent script Store. The Store instance might contain arbitrary user data, like function objects, OpenCL contexts, or whatever other non-serializable objects, both as keys or values. Try to serialize the data, and if it fails, fall back to checking the store and removing all non-serializable data.
def get_transport_target(cls, instance, timeout, retries):
    if "ip_address" not in instance:
        raise Exception("An IP address needs to be specified")
    ip_address = instance["ip_address"]
    port = int(instance.get("port", 161))
    return hlapi.UdpTransportTarget((ip_address, port), timeout=timeout, retries=retries)
Generate a Transport target object based on the instance's configuration
def parse_datetime(record: str) -> Optional[datetime]:
    format_strings = {8: '%Y%m%d', 12: '%Y%m%d%H%M', 14: '%Y%m%d%H%M%S'}
    if record == '':
        return None
    return datetime.strptime(record.strip(), format_strings[len(record.strip())])
Parse a datetime string into a python datetime object
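A hedged usage sketch (hypothetical inputs; lengths 8, 12 and 14 select the format string, and the empty string maps to None):

print(parse_datetime('20240131'))      # datetime(2024, 1, 31, 0, 0)
print(parse_datetime('202401311230'))  # datetime(2024, 1, 31, 12, 30)
print(parse_datetime(''))              # None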
def _start_lst_proc(self, listener_type, listener_opts):
    log.debug('Starting the listener process for %s', listener_type)
    listener = NapalmLogsListenerProc(self.opts,
                                      self.address,
                                      self.port,
                                      listener_type,
                                      listener_opts=listener_opts)
    proc = Process(target=listener.start)
    proc.start()
    proc.description = 'Listener process'
    log.debug('Started listener process as %s with PID %s', proc._name, proc.pid)
    return proc
Start the listener process.
def edit_matching_entry(program, arguments):
    entry = program.select_entry(*arguments)
    entry.context.execute("pass", "edit", entry.name)
Edit the matching entry.
def _generate_manager(manager_config):
    if 'class' not in manager_config:
        raise ValueError(
            'Manager not fully specified. Give '
            '"class:manager_name", e.g. "class:MongoDBManager".')
    mgr_class_name = manager_config['class']
    if mgr_class_name.lower()[:5] == 'mongo':
        from datafs.managers.manager_mongo import (
            MongoDBManager as mgr_class)
    elif mgr_class_name.lower()[:6] == 'dynamo':
        from datafs.managers.manager_dynamo import (
            DynamoDBManager as mgr_class)
    else:
        raise KeyError(
            'Manager class "{}" not recognized. Choose from {}'.format(
                mgr_class_name, 'MongoDBManager or DynamoDBManager'))
    manager = mgr_class(
        *manager_config.get('args', []),
        **manager_config.get('kwargs', {}))
    return manager
Generate a manager from a manager_config dictionary

Parameters
----------
manager_config : dict
    Configuration with keys class, args, and kwargs used to generate a new datafs.manager object

Returns
-------
manager : object
    datafs.managers.MongoDBManager or datafs.managers.DynamoDBManager object initialized with *args, **kwargs

Examples
--------
Generate a dynamo manager:

.. code-block:: python

    >>> mgr = APIConstructor._generate_manager({
    ...     'class': 'DynamoDBManager',
    ...     'kwargs': {
    ...         'table_name': 'data-from-yaml',
    ...         'session_args': {
    ...             'aws_access_key_id': "access-key-id-of-your-choice",
    ...             'aws_secret_access_key': "secret-key-of-your-choice"},
    ...         'resource_args': {
    ...             'endpoint_url': 'http://localhost:8000/',
    ...             'region_name': 'us-east-1'}
    ...     }
    ... })
    >>>
    >>> from datafs.managers.manager_dynamo import DynamoDBManager
    >>> assert isinstance(mgr, DynamoDBManager)
    >>>
    >>> 'data-from-yaml' in mgr.table_names
    False
    >>> mgr.create_archive_table('data-from-yaml')
    >>> 'data-from-yaml' in mgr.table_names
    True
    >>> mgr.delete_table('data-from-yaml')
def get_paths(self, theme, icon_size):
    _size_str = "x".join(map(str, icon_size))
    theme_path = get_program_path() + "share" + os.sep + "icons" + os.sep
    icon_path = theme_path + theme + os.sep + _size_str + os.sep
    action_path = icon_path + "actions" + os.sep
    toggle_path = icon_path + "toggles" + os.sep
    return theme_path, icon_path, action_path, toggle_path
Returns tuple of theme, icon, action and toggle paths
def _handle_stderr_event(self, fd, events):
    assert fd == self.fd_stderr
    if events & self.ioloop.READ:
        if not self.headers_sent:
            payload = self.process.stderr.read()
            data = 'HTTP/1.1 500 Internal Server Error\r\nDate: %s\r\nContent-Length: %d\r\n\r\n' % (get_date_header(), len(payload))
            self.headers_sent = True
            data += payload
        else:
            logger.error("This should not happen (stderr)")
            data = self.process.stderr.read()
        logger.debug('Sending stderr to client: %r', data)
        self.request.write(data)
    if events & self.ioloop.ERROR:
        logger.debug('Error on stderr')
        if not self.process.stderr.closed:
            self.process.stderr.close()
        self.ioloop.remove_handler(self.fd_stderr)
        return self._graceful_finish()
Event handler for stderr
def set_published_date(self):
    try:
        self.published_date = self.soup.find('pubdate').string
    except AttributeError:
        self.published_date = None
Parses the published date and sets the value.
def bluemix(cls, vcap_services, instance_name=None, service_name=None, **kwargs):
    service_name = service_name or 'cloudantNoSQLDB'
    try:
        service = CloudFoundryService(vcap_services,
                                      instance_name=instance_name,
                                      service_name=service_name)
    except CloudantException:
        raise CloudantClientException(103)
    if hasattr(service, 'iam_api_key'):
        return Cloudant.iam(service.username,
                            service.iam_api_key,
                            url=service.url,
                            **kwargs)
    return Cloudant(service.username,
                    service.password,
                    url=service.url,
                    **kwargs)
Create a Cloudant session using a VCAP_SERVICES environment variable.

:param vcap_services: VCAP_SERVICES environment variable
:type vcap_services: dict or str
:param str instance_name: Optional Bluemix instance name. Only required if multiple Cloudant instances are available.
:param str service_name: Optional Bluemix service name.

Example usage:

.. code-block:: python

    import os
    from cloudant.client import Cloudant

    client = Cloudant.bluemix(os.getenv('VCAP_SERVICES'),
                              'Cloudant NoSQL DB')

    print client.all_dbs()
def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE):
    if self._query_traces:
        return [self._get_query_trace(i, max_wait_per, query_cl)
                for i in range(len(self._query_traces))]
    return []
Fetches and returns the query traces for all query pages, if tracing was enabled. See note in :meth:`~.get_query_trace` regarding possible exceptions.
def intercept(actions: dict = {}):
    for action in actions.values():
        if type(action) is not returns and type(action) is not raises:
            raise InterceptorError('Actions must be declared as `returns` or `raises`')

    def decorated(f):
        def wrapped(*args, **kargs):
            try:
                return f(*args, **kargs)
            except Exception as e:
                if e.__class__ in actions:
                    return actions[e.__class__](e)
                else:
                    raise
        return wrapped
    return decorated
Decorates a function and handles any exceptions that may rise.

Args:
    actions: A dictionary ``<exception type>: <action>``. Available actions are :class:`raises` and :class:`returns`.

Returns:
    Any value declared using a :class:`returns` action.

Raises:
    AnyException: if AnyException is declared together with a :class:`raises` action.
    InterceptorError: if the decorator is called with something different from a :class:`returns` or :class:`raises` action.

Interceptors can be declared inline to return a value or raise an exception when the declared exception is risen:

>>> @intercept({
...     TypeError: returns('intercepted!')
... })
... def fails(foo):
...     if foo:
...         raise TypeError('inner exception')
...     return 'ok'
>>> fails(False)
'ok'
>>> fails(True)
'intercepted!'

>>> @intercept({
...     TypeError: raises(Exception('intercepted!'))
... })
... def fail():
...     raise TypeError('inner exception')
>>> fail()
Traceback (most recent call last):
    ...
Exception: intercepted!

But they can also be declared and then used later on:

>>> intercept0r = intercept({
...     TypeError: returns('intercepted!')
... })
>>> @intercept0r
... def fail():
...     raise TypeError('raising error')
>>> fail()
'intercepted!'

You can declare also an action that captures the risen exception by passing a callable to the action. This is useful to create a custom error message:

>>> @intercept({
...     TypeError: returns(lambda e: 'intercepted {}'.format(e))
... })
... def fail():
...     raise TypeError('inner exception')
>>> fail()
'intercepted inner exception'

Or to convert captured exceptions into custom errors:

>>> class CustomError(Exception):
...     pass
>>> @intercept({
...     TypeError: raises(lambda e: CustomError(e))
... })
... def fail():
...     raise TypeError('inner exception')
>>> fail()
Traceback (most recent call last):
    ...
intercept.CustomError: inner exception
def clear_copyright(self):
    if (self.get_copyright_metadata().is_read_only() or
            self.get_copyright_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['copyright'] = dict(self._copyright_default)
Removes the copyright.

raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
def _evaluate_dimension_fields(self) -> bool:
    for _, item in self._dimension_fields.items():
        item.run_evaluate()
        if item.eval_error:
            return False
    return True
Evaluates the dimension fields. Returns False if any of the fields could not be evaluated.
def findLowest(self, symorders):
    _range = range(len(symorders))
    # Pair each symorder with its index so ties keep a stable order
    # (Python 3 equivalent of the old ``map(None, symorders, _range)`` + sort).
    stableSymorders = sorted(zip(symorders, _range))
    lowest = None
    for index in _range:
        if stableSymorders[index][0] == lowest:
            return stableSymorders[index - 1][1]
        lowest = stableSymorders[index][0]
    return -1
Find the position of the first lowest tie in a symorder or -1 if there are no ties
def vae(x, z_size, name=None):
    with tf.variable_scope(name, default_name="vae"):
        mu = tf.layers.dense(x, z_size, name="mu")
        log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
        shape = common_layers.shape_list(x)
        epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
        z = mu + tf.exp(log_sigma / 2) * epsilon
        kl = 0.5 * tf.reduce_mean(
            tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
        free_bits = z_size // 4
        kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma
Simple variational autoencoder without discretization.

Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

Returns:
    Latent, loss, mu and log_sigma.
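For reference, the `kl` term above is the closed-form KL divergence between N(mu, sigma^2) and a standard normal prior, with `log_sigma` read as log(sigma^2); a minimal numpy check with made-up values:

import numpy as np

mu, log_sigma = 0.3, -0.5  # hypothetical values for one latent unit
sigma2 = np.exp(log_sigma)
kl_closed_form = 0.5 * (sigma2 + mu**2 - 1.0 - log_sigma)
kl_as_coded = 0.5 * (np.expm1(log_sigma) + mu**2 - log_sigma)
print(np.isclose(kl_closed_form, kl_as_coded))  # True, since expm1(x) == e**x - 1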
def inv(self):
    self.v = 1 / self.v
    tmp = self.v**2
    if self.deriv > 1:
        self.dd[:] = tmp * (2 * self.v * np.outer(self.d, self.d) - self.dd)
    if self.deriv > 0:
        self.d[:] = -tmp * self.d[:]
In-place invert
def get_subgraph_by_edge_filter(graph, edge_predicates: Optional[EdgePredicates] = None):
    rv = graph.fresh_copy()
    expand_by_edge_filter(graph, rv, edge_predicates=edge_predicates)
    return rv
Induce a sub-graph on all edges that pass the given filters.

:param pybel.BELGraph graph: A BEL graph
:param edge_predicates: An edge predicate or list of edge predicates
:return: A BEL sub-graph induced over the edges passing the given filters
:rtype: pybel.BELGraph
def readAltWCS(fobj, ext, wcskey=' ', verbose=False):
    if isinstance(fobj, str):
        fobj = fits.open(fobj, memmap=False)

    hdr = altwcs._getheader(fobj, ext)
    try:
        original_logging_level = log.level
        log.setLevel(logutil.logging.WARNING)
        nwcs = pywcs.WCS(hdr, fobj=fobj, key=wcskey)
    except KeyError:
        if verbose:
            print('readAltWCS: Could not read WCS with key %s' % wcskey)
            print('            Skipping %s[%s]' % (fobj.filename(), str(ext)))
        return None
    finally:
        log.setLevel(original_logging_level)
    hwcs = nwcs.to_header()
    if nwcs.wcs.has_cd():
        hwcs = altwcs.pc2cd(hwcs, key=wcskey)
    return hwcs
Reads in alternate primary WCS from specified extension.

Parameters
----------
fobj : str, `astropy.io.fits.HDUList`
    fits filename or fits file object containing alternate/primary WCS(s) to be converted
wcskey : str [" ", A-Z]
    alternate/primary WCS key that will be replaced by the new key
ext : int
    fits extension number

Returns
-------
hdr : fits.Header
    header object with ONLY the keywords for specified alternate WCS
def _any_pandas_objects(terms):
    return any(isinstance(term.value, pd.core.generic.PandasObject)
               for term in terms)
Check a sequence of terms for instances of PandasObject.
def _get_batch_representative(items, key):
    if isinstance(items, dict):
        return items, items
    else:
        vals = set([])
        out = []
        for data in items:
            if key in data:
                vals.add(data[key])
                out.append(data)
        if len(vals) != 1:
            raise ValueError("Incorrect values for %s: %s" % (key, list(vals)))
        return out[0], items
Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file.
def _is_valid_function(module_name, function):
    try:
        functions = __salt__['sys.list_functions'](module_name)
    except salt.exceptions.SaltException:
        functions = ["unable to look up functions"]
    return "{0}.{1}".format(module_name, function) in functions
Determine if a function is valid for a module
def _get_driver(self):
    ComputeEngine = get_driver(Provider.GCE)
    return ComputeEngine(
        self.service_account_email,
        self.service_account_file,
        project=self.service_account_project
    )
Get authenticated GCE driver.
def family_coff(self):
    if not self._ptr:
        raise BfdException("BFD not initialized")
    return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FAMILY_COFF)
Return the family_coff attribute of the BFD file being processed.
def check_padding_around_mutation(given_padding, epitope_lengths):
    min_required_padding = max(epitope_lengths) - 1
    if not given_padding:
        return min_required_padding
    else:
        require_integer(given_padding, "Padding around mutation")
        if given_padding < min_required_padding:
            raise ValueError(
                "Padding around mutation %d cannot be less than %d "
                "for epitope lengths %s" % (
                    given_padding,
                    min_required_padding,
                    epitope_lengths))
        return given_padding
If the user doesn't provide any padding around the mutation we need to at least include enough of the surrounding non-mutated residues to construct candidate epitopes of the specified lengths.
def extract(path_to_hex, output_path=None):
    with open(path_to_hex, 'r') as hex_file:
        python_script = extract_script(hex_file.read())
        if output_path:
            with open(output_path, 'w') as output_file:
                output_file.write(python_script)
        else:
            print(python_script)
Given a path_to_hex file, this function will attempt to extract the embedded script from it and save it to output_path, or print it to stdout.
def remove_cycle_mrkr(self):
    window_start = self.parent.value('window_start')
    try:
        self.annot.remove_cycle_mrkr(window_start)
    except KeyError:
        msg = ('The start of the window does not correspond to any cycle '
               'marker in sleep scoring file')
        self.parent.statusBar().showMessage(msg)
        lg.debug(msg)
    else:
        lg.debug('User removed cycle marker at ' + str(window_start))
        self.parent.overview.update(reset=False)
        self.parent.overview.display_annotations()
Remove cycle marker.
def set_type(spec, obj_type):
    if spec is None:
        raise ValueError('Spec cannot be None')
    if TemplateFields.generation not in spec:
        spec[TemplateFields.generation] = {}
    spec[TemplateFields.generation][TemplateFields.commkey] = (
        Gen.CLIENT if (obj_type & (int(1) << TemplateFields.FLAG_COMM_GEN)) > 0
        else Gen.LEGACY_RANDOM)
    spec[TemplateFields.generation][TemplateFields.appkey] = (
        Gen.CLIENT if (obj_type & (int(1) << TemplateFields.FLAG_APP_GEN)) > 0
        else Gen.LEGACY_RANDOM)
    spec[TemplateFields.type] = "%x" % obj_type
    return spec
Updates the type integer in the create UO specification. The type has to already have its generation flags set correctly; the generation field is set accordingly.

:param spec:
:param obj_type:
:return:
def load_from_file(filename):
    if os.path.isdir(filename):
        logger.error("Err: File '%s' is a directory", filename)
        return None
    if not os.path.isfile(filename):
        logger.error("Err: File '%s' does not exist", filename)
        return None
    try:
        with open(filename, 'r') as sourcefile:
            songs = [line.strip() for line in sourcefile]
    except IOError as error:
        logger.exception(error)
        return None
    songs = set(Song.from_filename(song) for song in songs)
    return songs.difference({None})
Load a list of filenames from an external text file.
def convert(self, value, view):
    if isinstance(value, int):
        return value
    elif isinstance(value, float):
        return int(value)
    else:
        self.fail(u'must be a number', view, True)
Check that the value is an integer. Floats are truncated.
def build_full_toctree(builder, docname, prune, collapse):
    env = builder.env
    doctree = env.get_doctree(env.config.master_doc)
    toctrees = []
    for toctreenode in doctree.traverse(addnodes.toctree):
        toctree = env.resolve_toctree(docname, builder, toctreenode,
                                      collapse=collapse,
                                      prune=prune)
        toctrees.append(toctree)
    if not toctrees:
        return None
    result = toctrees[0]
    for toctree in toctrees[1:]:
        if toctree:
            result.extend(toctree.children)
    env.resolve_references(result, docname, builder)
    return result
Return a single toctree starting from docname containing all sub-document doctrees.
def rename(self, name):
    if name:
        rename1, rename2 = callbacks.add(
            b'rename', self.change_name, False)
        self.dispatch_command(
            b'/bin/echo "' + rename1 + b'""' + rename2 + b'"' + name + b'\n')
    else:
        self.change_name(self.hostname.encode())
Send the remote shell its new name, to be shell-expanded
def expand_dataset(X, y_proba, factor=10, random_state=None, extra_arrays=None):
    rng = check_random_state(random_state)
    extra_arrays = extra_arrays or []
    n_classes = y_proba.shape[1]
    classes = np.arange(n_classes, dtype=int)
    for el in zip(X, y_proba, *extra_arrays):
        x, probs = el[0:2]
        rest = el[2:]
        for label in rng.choice(classes, size=factor, p=probs):
            yield (x, label) + rest
Convert a dataset with float multiclass probabilities to a dataset with indicator probabilities by duplicating X rows and sampling true labels.
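A minimal usage sketch (made-up data; assumes the generator above plus numpy). Each row of X is repeated `factor` times with labels sampled from its probability row:

import numpy as np

X = np.array([[0.0], [1.0]])
y_proba = np.array([[0.8, 0.1, 0.1],
                    [0.0, 0.5, 0.5]])
for x, label in expand_dataset(X, y_proba, factor=4, random_state=0):
    print(x, label)  # 8 (x, label) pairs in total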
def schema(self):
    if not self._schema:
        try:
            self._load_info()
            self._schema = _schema.Schema(self._info['schema']['fields'])
        except KeyError:
            raise Exception('Unexpected table response: missing schema')
    return self._schema
Retrieves the schema of the table.

Returns:
    A Schema object containing a list of schema fields and associated metadata.

Raises:
    Exception if the request could not be executed or the response was malformed.
def patch_sys_version():
    if '|' in sys.version:
        sys_version = sys.version.split('|')
        sys.version = ' '.join([sys_version[0].strip(), sys_version[-1].strip()])
Remove Continuum copyright statement to avoid parsing errors in IDLE
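A hedged before/after sketch with a made-up Anaconda-style version string (overwriting sys.version here only to demonstrate; the middle '|Anaconda, Inc.|' segment is dropped):

import sys

sys.version = ('3.7.3 |Anaconda, Inc.| '
               '(default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)]')
patch_sys_version()
print(sys.version)
# '3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)]'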
def is_installed(self, name: str) -> bool:
    assert name is not None
    try:
        self.__docker.images.get(name)
        return True
    except docker.errors.ImageNotFound:
        return False
Indicates a given Docker image is installed on this server.

Parameters:
    name: the name of the Docker image.

Returns:
    `True` if installed; `False` if not.
def _get_by_index(self, index):
    volume_or_disk = self.parser.get_by_index(index)
    volume, disk = ((volume_or_disk, None)
                    if not isinstance(volume_or_disk, Disk)
                    else (None, volume_or_disk))
    return volume, disk
Returns a (volume, disk) tuple for the specified index
def start(self):
    self._timer = Timer(self.time, self.handler)
    self._timer.daemon = True
    self._timer.start()
    return
Starts the watchdog timer.
def redirect_to(request, url, permanent=True, query_string=False, **kwargs):
    args = request.META.get('QUERY_STRING', '')
    if url is not None:
        if kwargs:
            url = url % kwargs
        if args and query_string:
            url = "%s?%s" % (url, args)
        klass = (permanent and HttpResponsePermanentRedirect
                 or HttpResponseRedirect)
        return klass(url)
    else:
        logger.warning(
            'Gone: %s', request.path,
            extra={
                'status_code': 410,
                'request': request
            })
        return HttpResponseGone()
Redirect to a given URL.

The given url may contain dict-style string formatting, which will be interpolated against the params in the URL. For example, to redirect from ``/foo/<id>/`` to ``/bar/<id>/``, you could use the following URLconf::

    urlpatterns = patterns('',
        (r'^foo/(?P<id>\d+)/$', 'django.views.generic.simple.redirect_to', {'url': '/bar/%(id)s/'}),
    )

If the given url is ``None``, a HttpResponseGone (410) will be issued.

If the ``permanent`` argument is False, then the response will have a 302 HTTP status code. Otherwise, the status code will be 301.

If the ``query_string`` argument is True, then the GET query string from the request is appended to the URL.
def update_content_encoding(self, data: Any) -> None:
    if not data:
        return

    enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
    if enc:
        if self.compress:
            raise ValueError(
                'compress can not be set '
                'if Content-Encoding header is set')
    elif self.compress:
        if not isinstance(self.compress, str):
            self.compress = 'deflate'
        self.headers[hdrs.CONTENT_ENCODING] = self.compress
        self.chunked = True
Set request content encoding.
def find_op_code_sequence(pattern: list, instruction_list: list) -> Generator:
    for i in range(0, len(instruction_list) - len(pattern) + 1):
        if is_sequence_match(pattern, instruction_list, i):
            yield i
Returns all indices in instruction_list that point to instruction sequences following a pattern.

:param pattern: The pattern to look for, e.g. [["PUSH1", "PUSH2"], ["EQ"]] where ["PUSH1", "EQ"] satisfies the pattern
:param instruction_list: List of instructions to look in
:return: Indices to the instruction sequences
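The `is_sequence_match` helper is not shown in this row; below is a hypothetical stand-in consistent with the docstring (each pattern entry lists acceptable opcodes for that position), plus a made-up instruction list:

def is_sequence_match(pattern: list, instruction_list: list, index: int) -> bool:
    # Hypothetical: every step must match one of its allowed opcodes.
    for step, alternatives in enumerate(pattern):
        if instruction_list[index + step]["opcode"] not in alternatives:
            return False
    return True

ops = [{"opcode": "PUSH1"}, {"opcode": "EQ"}, {"opcode": "PUSH1"},
       {"opcode": "PUSH2"}, {"opcode": "EQ"}]
print(list(find_op_code_sequence([["PUSH1", "PUSH2"], ["EQ"]], ops)))  # [0, 3]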
def detailed_tokens(tokenizer, text):
    node = tokenizer.parseToNode(text)
    node = node.next
    words = []
    while node.posid != 0:
        surface = node.surface
        base = surface
        parts = node.feature.split(",")
        pos = ",".join(parts[0:4])
        if len(parts) > 7:
            base = parts[7]
        words.append(ShortUnitWord(surface, base, pos))
        node = node.next
    return words
Format Mecab output into a nice data structure, based on Janome.
def SamplingRoundAddedEventHandler(instance, event):
    if instance.portal_type != "SamplingRound":
        print("How does this happen: type is %s should be SamplingRound" % instance.portal_type)
        return
    renameAfterCreation(instance)
    num_art = len(instance.ar_templates)
    destination_url = (instance.aq_parent.absolute_url() +
                       "/portal_factory/" +
                       "AnalysisRequest/Request new analyses/ar_add?samplinground=" +
                       instance.UID() + "&ar_count=" + str(num_art))
    request = getattr(instance, 'REQUEST', None)
    request.response.redirect(destination_url)
Event fired when a SamplingRound object is added. Since Sampling Round is a dexterity object we have to change the ID by "hand". Then we have to redirect the user to the AR add form.
def forward(self, inputs, label, begin_state, sampled_values):
    encoded = self.embedding(inputs)
    length = inputs.shape[0]
    batch_size = inputs.shape[1]
    encoded, out_states = self.encoder.unroll(length, encoded, begin_state,
                                              layout='TNC', merge_outputs=True)
    out, new_target = self.decoder(encoded, sampled_values, label)
    out = out.reshape((length, batch_size, -1))
    new_target = new_target.reshape((length, batch_size))
    return out, out_states, new_target
Defines the forward computation.

Parameters
----------
inputs : NDArray
    input tensor with shape `(sequence_length, batch_size)` when `layout` is "TNC".
begin_state : list
    initial recurrent state tensor with length equals to num_layers*2. For each layer the two initial states have shape `(batch_size, num_hidden)` and `(batch_size, num_projection)`
sampled_values : list
    a list of three tensors for `sampled_classes` with shape `(num_samples,)`, `expected_count_sampled` with shape `(num_samples,)`, and `expected_count_true` with shape `(sequence_length, batch_size)`.

Returns
-------
out : NDArray
    output tensor with shape `(sequence_length, batch_size, 1+num_samples)` when `layout` is "TNC".
out_states : list
    output recurrent state tensor with length equals to num_layers*2. For each layer the two initial states have shape `(batch_size, num_hidden)` and `(batch_size, num_projection)`
new_target : NDArray
    output tensor with shape `(sequence_length, batch_size)` when `layout` is "TNC".
def network_undefine(name, **kwargs):
    conn = __get_conn(**kwargs)
    try:
        net = conn.networkLookupByName(name)
        return not bool(net.undefine())
    finally:
        conn.close()
Remove a defined virtual network. This does not stop the virtual network.

:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults

.. versionadded:: 2019.2.0

CLI Example:

.. code-block:: bash

    salt '*' virt.network_undefine default
def get_path(self):
    md5_hash = hashlib.md5(self.task_id.encode()).hexdigest()
    logger.debug('Hash %s corresponds to task %s', md5_hash, self.task_id)
    return os.path.join(self.temp_dir, str(self.unique.value), md5_hash)
Returns a temporary file path based on a MD5 hash generated with the task's name and its arguments
def queue_command(self, command):
    if self._running:
        QtCore.QCoreApplication.postEvent(
            self, ActionEvent(command), QtCore.Qt.LowEventPriority)
    else:
        self._incoming.append(command)
Put a command on the queue to be called in the component's thread. :param callable command: the method to be invoked, e.g. :py:meth:`~Component.new_frame_event`.
def _read_with_mask(raster, masked):
    if masked is None:
        mask_flags = raster.mask_flag_enums
        per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags
                                for flags in mask_flags])
        masked = per_dataset_mask
    return masked
Returns whether we should read from rasterio with masking enabled.
def parse_date(date: str, hour_threshold: int = 200):
    date = date.strip('Z')
    if len(date) == 4:
        date += '00'
    if not (len(date) == 6 and date.isdigit()):
        return
    now = datetime.utcnow()
    guess = now.replace(day=int(date[0:2]),
                        hour=int(date[2:4]) % 24,
                        minute=int(date[4:6]) % 60,
                        second=0, microsecond=0)
    hourdiff = (guess - now) / timedelta(minutes=1) / 60
    if hourdiff > hour_threshold:
        guess += relativedelta(months=-1)
    elif hourdiff < -hour_threshold:
        guess += relativedelta(months=+1)
    return guess
Parses a report timestamp in ddhhZ or ddhhmmZ format This function assumes the given timestamp is within the hour threshold from current date
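A hedged usage sketch (hypothetical timestamps; 'ddhh' gets minutes padded to '00', and the month rolls backward or forward when the guess drifts past the hour threshold):

print(parse_date('2315Z'))    # day 23, 15:00 UTC in the month nearest to now
print(parse_date('231504Z'))  # same day and hour, with minutes=04
print(parse_date('bogus'))    # None: not a 6-digit timestamp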
def _unicode_sub_super(string, mapping, max_len=None):
    string = str(string)
    if string.startswith('(') and string.endswith(')'):
        len_string = len(string) - 2
    else:
        len_string = len(string)
    if max_len is not None:
        if len_string > max_len:
            raise KeyError("max_len exceeded")
    unicode_letters = []
    for letter in string:
        unicode_letters.append(mapping[letter])
    return ''.join(unicode_letters)
Try to render a subscript or superscript string in unicode, fall back on ascii if this is not possible
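A minimal usage sketch with a hypothetical (and deliberately incomplete) superscript mapping; any character missing from the mapping raises KeyError, which the caller can catch to fall back to ASCII:

superscript_map = {'0': '⁰', '1': '¹', '2': '²', '3': '³'}
print(_unicode_sub_super('123', superscript_map))  # '¹²³'
_unicode_sub_super('4', superscript_map)  # raises KeyError -> ASCII fallback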
def function_to_serializable_representation(fn):
    if type(fn) not in (FunctionType, BuiltinFunctionType):
        raise ValueError(
            "Can't serialize %s : %s, must be globally defined function" % (
                fn, type(fn),))

    if hasattr(fn, "__closure__") and fn.__closure__ is not None:
        raise ValueError("No serializable representation for closure %s" % (fn,))

    return {"__module__": get_module_name(fn), "__name__": fn.__name__}
Converts a Python function into a serializable representation. Does not currently work for methods or functions with closure data.
def weighting(self, landscape=None):
    if landscape is not None:
        if len(landscape) > 0:
            maxy = np.max(landscape[:, 1])
        else:
            maxy = 1

    def linear(interval):
        d = interval[1]
        return (1 / maxy) * d if landscape is not None else d

    def pw_linear(interval):
        t = interval[1]
        b = maxy / self.ny
        if t <= 0:
            return 0
        if 0 < t < b:
            return t / b
        if b <= t:
            return 1

    return linear
Define a weighting function, for stability results to hold, the function must be 0 at y=0.
def from_name(cls, name):
    result = cls.list({'items_per_page': 500})
    webaccs = {}
    for webacc in result:
        webaccs[webacc['name']] = webacc['id']
    return webaccs.get(name)
Retrieve the webacc id associated with a webacc name.
def set_config(self, config):
    if self.config is None:
        self.config = {}
    self.config.update(config_to_api_list(config))
Set the service configuration. @param config: A dictionary of config key/value
def tnet_to_nx(df, t=None):
    if t is not None:
        df = get_network_when(df, t=t)
    if 'weight' in df.columns:
        nxobj = nx.from_pandas_edgelist(
            df, source='i', target='j', edge_attr='weight')
    else:
        nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
    return nxobj
Creates undirected networkx object
def get_substances(identifier, namespace='sid', as_dataframe=False, **kwargs):
    results = get_json(identifier, namespace, 'substance', **kwargs)
    substances = [Substance(r) for r in results['PC_Substances']] if results else []
    if as_dataframe:
        return substances_to_frame(substances)
    return substances
Retrieve the specified substance records from PubChem.

:param identifier: The substance identifier to use as a search query.
:param namespace: (optional) The identifier type, one of sid, name or sourceid/<source name>.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Substance` properties into a pandas :class:`~pandas.DataFrame` and return that.
def _ConvertAttributeContainerToDict(cls, attribute_container):
    if not isinstance(
            attribute_container, containers_interface.AttributeContainer):
        raise TypeError('{0:s} is not an attribute container type.'.format(
            type(attribute_container)))

    container_type = getattr(attribute_container, 'CONTAINER_TYPE', None)
    if not container_type:
        raise ValueError('Unsupported attribute container type: {0:s}.'.format(
            type(attribute_container)))

    json_dict = {
        '__type__': 'AttributeContainer',
        '__container_type__': container_type,
    }

    for attribute_name, attribute_value in attribute_container.GetAttributes():
        json_dict[attribute_name] = cls._ConvertAttributeValueToDict(
            attribute_value)

    return json_dict
Converts an attribute container object into a JSON dictionary.

The resulting dictionary of the JSON serialized objects consists of:
{
    '__type__': 'AttributeContainer'
    '__container_type__': ...
    ...
}

Here '__type__' indicates the object base type. In this case 'AttributeContainer'. '__container_type__' indicates the container type and the rest of the elements of the dictionary make up the attributes of the container.

Args:
    attribute_container (AttributeContainer): attribute container.

Returns:
    dict[str, object]: JSON serialized objects.

Raises:
    TypeError: if not an instance of AttributeContainer.
    ValueError: if the attribute container type is not supported.
def burst_range(psd, snr=8, energy=1e-2, fmin=100, fmax=500):
    freqs = psd.frequencies.value
    if not fmin:
        fmin = psd.f0
    if not fmax:
        fmax = psd.span[1]
    condition = (freqs >= fmin) & (freqs < fmax)
    integrand = burst_range_spectrum(
        psd[condition], snr=snr, energy=energy) ** 3
    result = integrate.trapz(integrand.value, freqs[condition])
    r = units.Quantity(result / (fmax - fmin), unit=integrand.unit) ** (1/3.)
    return r.to('Mpc')
Calculate the integrated GRB-like GW burst range from a strain PSD

Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
    the instrumental power-spectral-density data
snr : `float`, optional
    the signal-to-noise ratio for which to calculate range, default: ``8``
energy : `float`, optional
    the relative energy output of the GW burst, defaults to ``1e-2`` for a GRB-like burst
fmin : `float`, optional
    the lower frequency cutoff of the burst range integral, default: ``100 Hz``
fmax : `float`, optional
    the upper frequency cutoff of the burst range integral, default: ``500 Hz``

Returns
-------
range : `~astropy.units.Quantity`
    the GRB-like-burst sensitive range [Mpc (default)]

Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD

>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)

Now we can calculate the :func:`burst_range`:

>>> from gwpy.astro import burst_range
>>> r = burst_range(hoff, fmin=30)
>>> print(r)
42.5055584195 Mpc
def int_dp_g(arr, dp):
    return integrate(arr, to_pascal(dp, is_dp=True),
                     vert_coord_name(dp)) / GRAV_EARTH
Mass weighted integral.
def add_nodes(self, coors, node_low_or_high=None):
    last = self.lastnode
    if type(coors) is nm.ndarray:
        if len(coors.shape) == 1:
            coors = coors.reshape((1, coors.size))
        nadd = coors.shape[0]
        idx = slice(last, last + nadd)
    else:
        nadd = 1
        idx = self.lastnode
    right_dimension = coors.shape[1]
    self.nodes[idx, :right_dimension] = coors
    self.node_flag[idx] = True
    self.lastnode += nadd
    self.nnodes += nadd
Add new nodes at the end of the list.
def dropout_mask(x:Tensor, sz:Collection[int], p:float):
    "Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
    return x.new(*sz).bernoulli_(1-p).div_(1-p)
Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element.
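A quick usage sketch (assumes PyTorch; shapes are made up). About p of the entries are zeroed and survivors are scaled by 1/(1-p), so the mask has expectation 1:

import torch

x = torch.ones(3, 4)
mask = dropout_mask(x, (3, 4), p=0.25)
print(mask)  # entries are either 0.0 or 1/(1-0.25) = 1.333...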
def declare_config_variable(self, name, config_id, type_name, default=None, convert=None):
    config = ConfigDescriptor(config_id, type_name, default, name=name, python_type=convert)
    self._config_variables[config_id] = config
Declare a config variable that this emulated tile accepts.

The default value (if passed) may be specified as either a `bytes` object or a python int or list of ints. If an int or list of ints is passed, it is converted to binary. Otherwise, the raw binary data is used.

Passing a unicode string is only allowed if as_string is True and it will be encoded as utf-8 and null terminated for use as a default value.

Args:
    name (str): A user friendly name for this config variable so that it can be printed nicely.
    config_id (int): A 16-bit integer id number to identify the config variable.
    type_name (str): An encoded type name that will be parsed by parse_size_name()
    default (object): The default value if there is one. This should be a python object that will be converted to binary according to the rules for the config variable type specified in type_name.
    convert (str): whether this variable should be converted to a python string or bool rather than an int or a list of ints. You can pass either 'bool', 'string' or None
def attr_to_path(node):
    def get_intrinsic_path(modules, attr):
        if isinstance(attr, ast.Name):
            return modules[demangle(attr.id)], (demangle(attr.id),)
        elif isinstance(attr, ast.Attribute):
            module, path = get_intrinsic_path(modules, attr.value)
            return module[attr.attr], path + (attr.attr,)
    obj, path = get_intrinsic_path(MODULES, node)
    if not obj.isliteral():
        path = path[:-1] + ('functor', path[-1])
    return obj, ('pythonic',) + path
Compute path and final object for an attribute node
def get_authorizations_by_ids(self, authorization_ids):
    collection = JSONClientValidated('authorization',
                                     collection='Authorization',
                                     runtime=self._runtime)
    object_id_list = []
    for i in authorization_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'authorization').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.AuthorizationList(sorted_result, runtime=self._runtime, proxy=self._proxy)
Gets an ``AuthorizationList`` corresponding to the given ``IdList``.

In plenary mode, the returned list contains all of the authorizations specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``Authorizations`` may be omitted from the list and may present the elements in any order including returning a unique set.

arg:    authorization_ids (osid.id.IdList): the list of ``Ids`` to retrieve
return: (osid.authorization.AuthorizationList) - the returned ``Authorization list``
raise:  NotFound - an ``Id was`` not found
raise:  NullArgument - ``authorization_ids`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
def has_scope(context=None):
    if not booted(context):
        return False
    _sd_version = version(context)
    if _sd_version is None:
        return False
    return _sd_version >= 205
Scopes were introduced in systemd 205, this function returns a boolean which is true when the minion is systemd-booted and running systemd>=205.
def read(self, document, iface, *args, **kwargs):
    try:
        document = IReadableDocument(document)
        mime_type = document.mime_type
        reader = self.lookup_reader(mime_type, iface)
        if not reader:
            msg = ("No adapter found to read object %s from %s document"
                   % (iface.__class__.__name__, mime_type))
            raise NoReaderFoundError(msg)
        return reader.read(document, *args, **kwargs)
    except:
        return defer.fail(Failure())
Returns a Deferred that fires with the read object.
def AgregarReceptor(self, cuit, iibb, nro_socio, nro_fet, **kwargs):
    "Agrego un receptor a la liq."
    rcpt = dict(cuit=cuit, iibb=iibb, nroSocio=nro_socio, nroFET=nro_fet)
    self.solicitud['receptor'] = rcpt
    return True
Adds a receiver to the liquidation (liq.).
def strip_HETATMs(self, only_strip_these_chains=[]):
    if only_strip_these_chains:
        self.lines = [l for l in self.lines
                      if not(l.startswith('HETATM')) or l[21] not in only_strip_these_chains]
    else:
        self.lines = [l for l in self.lines if not(l.startswith('HETATM'))]
    self._update_structure_lines()
Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.
def contact_methods(self, **kwargs):
    endpoint = '{0}/{1}/contact_methods'.format(
        self.endpoint,
        self['id'],
    )
    result = self.request('GET', endpoint=endpoint, query_params=kwargs)
    return result['contact_methods']
Get all contact methods for this user.
def transform(self, X=None, y=None):
    zoom_x, zoom_y = self.zoom
    self.params = (zoom_x, zoom_y)
    zoom_matrix = np.array([[zoom_x, 0, 0],
                            [0, zoom_y, 0]])
    self.tx.set_parameters(zoom_matrix)
    if self.lazy or X is None:
        return self.tx
    else:
        return self.tx.apply_to_image(X, reference=self.reference)
Transform an image using an Affine transform with the given zoom parameters. Return the transform if X=None.

Arguments
---------
X : ANTsImage
    Image to transform
y : ANTsImage (optional)
    Another image to transform

Returns
-------
ANTsImage if y is None, else a tuple of ANTsImage types

Examples
--------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> tx = ants.contrib.Zoom2D(zoom=(0.8,0.8,0.8))
>>> img2 = tx.transform(img)
def issequence(arg):
    string_behaviour = (isinstance(arg, six.string_types)
                        or isinstance(arg, six.text_type))
    list_behaviour = hasattr(arg, '__getitem__') or hasattr(arg, '__iter__')
    return not string_behaviour and list_behaviour
Return True if `arg` acts as a list and does not look like a string.
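Hypothetical calls illustrating the distinction:

print(issequence([1, 2, 3]))  # True
print(issequence((1, 2)))     # True
print(issequence('abc'))      # False: strings are excluded on purpose
print(issequence(42))         # False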
def put_name(self, type_, id_, name):
    cachefile = self.filename(type_, id_)
    dirname = os.path.dirname(cachefile)
    try:
        os.makedirs(dirname)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    with open(cachefile, 'w') as f:
        f.write(name)
Write a cached name to disk.

:param type_: str, "user" or "tag"
:param id_: int, eg. 123456
:returns: None
def get_ip6_address(interface_name, expand=False):
    address = _get_address(interface_name, IP6_PATTERN)
    if address and expand:
        return ':'.join(_expand_groups(address))
    return address
Extracts the IPv6 address for a particular interface from `ifconfig`.

:param interface_name: Name of the network interface (e.g. ``eth0``).
:type interface_name: unicode
:param expand: If set to ``True``, an abbreviated address is expanded to the full address.
:type expand: bool
:return: IPv6 address; ``None`` if the interface is present but no address could be extracted.
:rtype: unicode
def close(self):
    if self._socket is not None and self._conn is not None:
        message_input = UnityMessage()
        message_input.header.status = 400
        self._communicator_send(message_input.SerializeToString())
    if self._socket is not None:
        self._socket.close()
        self._socket = None
    if self._conn is not None:  # original re-checked _socket here, which is always None by now
        self._conn.close()
        self._conn = None
Sends a shutdown signal to the unity environment, and closes the socket connection.
def get_remote_file(self, remote_path, local_path):
    sftp_client = self.transport.open_sftp_client()
    LOG.debug('Get the remote file. '
              'Source=%(src)s. Target=%(target)s.'
              % {'src': remote_path, 'target': local_path})
    try:
        sftp_client.get(remote_path, local_path)
    except Exception as ex:
        LOG.error('Failed to secure copy. Reason: %s.' % six.text_type(ex))
        raise SFtpExecutionError(err=ex)
Fetch a remote file.

:param remote_path: remote path
:param local_path: local path
def GetAll(alias=None, location=None, session=None):
    if not alias:
        alias = clc.v2.Account.GetAlias(session=session)

    policies = []
    policy_resp = clc.v2.API.Call('GET', 'antiAffinityPolicies/%s' % alias, {}, session=session)
    for k in policy_resp:
        r_val = policy_resp[k]
        for r in r_val:
            if r.get('location'):
                if location and r['location'].lower() != location.lower():
                    continue
                servers = [obj['id'] for obj in r['links'] if obj['rel'] == "server"]
                policies.append(AntiAffinity(id=r['id'], name=r['name'], location=r['location'],
                                             servers=servers, session=session))
    return policies
Gets a list of anti-affinity policies within a given account.

https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies

>>> clc.v2.AntiAffinity.GetAll()
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>]
def generateCertificate(self, alias, commonName, organizationalUnit,
                        city, state, country,
                        keyalg="RSA", keysize=1024,
                        sigalg="SHA256withRSA", validity=90):
    params = {
        "f": "json",
        "alias": alias,
        "commonName": commonName,
        "organizationalUnit": organizationalUnit,
        "city": city,
        "state": state,
        "country": country,
        "keyalg": keyalg,
        "keysize": keysize,
        "sigalg": sigalg,
        "validity": validity
    }
    url = self._url + "/SSLCertificate/generateCertificate"
    return self._post(url=url,
                      param_dict=params,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
Use this operation to create a self-signed certificate or as a starting point for getting a production-ready CA-signed certificate. The portal will generate a certificate for you and store it in its keystore.
def _fetch_dimensions(self, dataset):
    yield Dimension(u"school")
    yield Dimension(u"year", datatype="year")
    yield Dimension(u"semester", datatype="academic_term", dialect="swedish")
    yield Dimension(u"municipality", datatype="year", domain="sweden/municipalities")
Yield the dataset dimensions: school, year, semester and municipality.
def _handle_config(self, data):
    self.room.config.update(data)
    self.conn.enqueue_data("config", data)
Handle initial config push and config changes
def pkg_supports(feature, pkg_version, pkg_feat_dict):
    from pkg_resources import parse_requirements

    feature = str(feature)
    pkg_version = str(pkg_version)
    supp_versions = pkg_feat_dict.get(feature, None)
    if supp_versions is None:
        return False

    if is_string(supp_versions):
        supp_versions = [supp_versions]

    ver_specs = ['pkg' + supp_ver for supp_ver in supp_versions]
    ver_reqs = [list(parse_requirements(ver_spec))[0]
                for ver_spec in ver_specs]
    for req in ver_reqs:
        if req.specifier.contains(pkg_version, prereleases=True):
            return True
    return False
Return bool indicating whether a package supports ``feature``.

Parameters
----------
feature : str
    Name of a potential feature of a package.
pkg_version : str
    Version of the package that should be checked for presence of the feature.
pkg_feat_dict : dict
    Specification of features of a package. Each item has the following form::

        feature_name: version_specification

    Here, ``feature_name`` is a string that is matched against ``feature``, and ``version_specification`` is a string or a sequence of strings that specifies version sets. These specifications are the same as for ``setuptools`` requirements, just without the package name. A ``None`` entry signals "no support in any version", i.e., always ``False``. If a sequence of requirements are given, they are OR-ed together. See ``Examples`` for details.

Returns
-------
supports : bool
    ``True`` if ``pkg_version`` of the package in question supports ``feature``, ``False`` otherwise.

Examples
--------
>>> feat_dict = {
...     'feat1': '==0.5.1',
...     'feat2': '>0.6, <=0.9',  # both required simultaneously
...     'feat3': ['>0.6', '<=0.9'],  # only one required, i.e. always True
...     'feat4': ['==0.5.1', '>0.6, <=0.9'],
...     'feat5': None
... }
>>> pkg_supports('feat1', '0.5.1', feat_dict)
True
>>> pkg_supports('feat1', '0.4', feat_dict)
False
>>> pkg_supports('feat2', '0.5.1', feat_dict)
False
>>> pkg_supports('feat2', '0.6.1', feat_dict)
True
>>> pkg_supports('feat2', '0.9', feat_dict)
True
>>> pkg_supports('feat2', '1.0', feat_dict)
False
>>> pkg_supports('feat3', '0.4', feat_dict)
True
>>> pkg_supports('feat3', '1.0', feat_dict)
True
>>> pkg_supports('feat4', '0.5.1', feat_dict)
True
>>> pkg_supports('feat4', '0.6', feat_dict)
False
>>> pkg_supports('feat4', '0.6.1', feat_dict)
True
>>> pkg_supports('feat4', '1.0', feat_dict)
False
>>> pkg_supports('feat5', '0.6.1', feat_dict)
False
>>> pkg_supports('feat5', '1.0', feat_dict)
False
def configured_class(cls):
    base = cls.configurable_base()
    if base.__dict__.get('_Configurable__impl_class') is None:
        base.__impl_class = cls.configurable_default()
    return base.__impl_class
Returns the currently configured class.
def _start_srv_proc(self, started_os_proc):
    log.debug('Starting the server process')
    server = NapalmLogsServerProc(self.opts,
                                  self.config_dict,
                                  started_os_proc,
                                  buffer=self._buffer)
    proc = Process(target=server.start)
    proc.start()
    proc.description = 'Server process'
    log.debug('Started server process as %s with PID %s', proc._name, proc.pid)
    return proc
Start the server process.
def progress_patch(self, _=False):
    from .progress import ShellProgressView
    self.cli_ctx.progress_controller.init_progress(ShellProgressView())
    return self.cli_ctx.progress_controller
Forces use of the shell progress view.
def _threaded_copy_data(instream, outstream):
    copy_thread = threading.Thread(target=_copy_data,
                                   args=(instream, outstream))
    copy_thread.setDaemon(True)
    log.debug('%r, %r, %r', copy_thread, instream, outstream)
    copy_thread.start()
    return copy_thread
Copy data from one stream to another in a separate thread.

Wraps ``_copy_data()`` in a :class:`threading.Thread`.

:type instream: :class:`io.BytesIO` or :class:`io.StringIO`
:param instream: A byte stream to read from.
:param file outstream: The file descriptor of a tmpfile to write to.
def _get_params(self):
    return np.hstack((self.varianceU, self.varianceY,
                      self.lengthscaleU, self.lengthscaleY))
return the value of the parameters.
def handle_onchain_secretreveal(
        initiator_state: InitiatorTransferState,
        state_change: ContractReceiveSecretReveal,
        channel_state: NettingChannelState,
        pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    iteration: TransitionResult[InitiatorTransferState]
    secret = state_change.secret
    secrethash = initiator_state.transfer_description.secrethash
    is_valid_secret = is_valid_secret_reveal(
        state_change=state_change,
        transfer_secrethash=secrethash,
        secret=secret,
    )
    is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
    is_lock_expired = state_change.block_number > initiator_state.transfer.lock.expiration
    is_lock_unlocked = is_valid_secret and not is_lock_expired

    if is_lock_unlocked:
        channel.register_onchain_secret(
            channel_state=channel_state,
            secret=secret,
            secrethash=secrethash,
            secret_reveal_block_number=state_change.block_number,
        )

    if is_lock_unlocked and is_channel_open:
        events = events_for_unlock_lock(
            initiator_state,
            channel_state,
            state_change.secret,
            state_change.secrethash,
            pseudo_random_generator,
        )
        iteration = TransitionResult(None, events)
    else:
        events = list()
        iteration = TransitionResult(initiator_state, events)

    return iteration
When a secret is revealed on-chain all nodes learn the secret. This checks that the on-chain secret corresponds to the one used by the initiator; if it is valid, a new balance proof is sent to the next hop with the current lock removed from the merkle tree and the transferred amount updated.
def stop(self):
    self.logger.info('Stopping client fuzzer')
    self._target_control_thread.stop()
    self.target.signal_mutated()
    super(ClientFuzzer, self).stop()
Stop the fuzzing session
def get_feed_renderer(engines, name):
    if name not in engines:
        raise FeedparserError("Given feed name '{}' does not exist in 'settings.FEED_RENDER_ENGINES'".format(name))
    renderer = safe_import_module(engines[name])
    return renderer
From the engine name, load the engine path and return the renderer class. Raises 'FeedparserError' on any loading error.
def remove_files(self):
    file_list = ["molecule.svg", "lig.pdb", "HIS.pdb", "PHE.pdb", "TRP.pdb",
                 "TYR.pdb", "lig.mol", "test.xtc"]
    for residue in self.topol_data.dict_of_plotted_res.keys():
        file_list.append(residue[1] + residue[2] + ".svg")
    for f in file_list:
        if os.path.isfile(f):
            os.remove(f)
Removes intermediate files.