code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def image(self, path_img):
    """Open the image at *path_img*, normalize it to RGB, and print it."""
    raw_image = Image.open(path_img)
    rgb_image = raw_image.convert("RGB")
    pix_line, img_size = self._convert_image(rgb_image)
    self._print_image(pix_line, img_size)
Open image file
def result(retn):
    """Return the value from an ``(ok, valu)`` retn tuple, or raise the encoded exception."""
    ok, valu = retn
    if ok:
        return valu
    # On failure, valu is an (exception-name, info-dict) pair.
    name, info = valu
    ctor = getattr(s_exc, name, None)
    if ctor is not None:
        raise ctor(**info)
    # NOTE(review): the subscript string literal was stripped during dataset
    # extraction (``info[] = name`` is not valid Python); restore the original
    # key (presumably the error-name field) from upstream before use.
    info[] = name
    raise s_exc.SynErr(**info)
Return a value or raise an exception from a retn tuple.
def getRowByIndex(self, index):
    """Wrap and return the implementation row at zero-based *index*."""
    assert isinstance(index, int)
    impl_row = self._impl.getRowByIndex(index)
    return Row(impl_row)
Get row by numeric index. Args: index: Zero-based index of the row to get. Returns: The corresponding row.
def main(argString=None):
    """Parse options, log them, then scan the TPED/TFAM pair for failed markers.

    :param argString: the command-line options (list), or None for sys.argv.
    """
    args = parseArgs(argString)
    checkArgs(args)
    logger.info("Options used:")
    # NOTE(review): ``iteritems`` makes this Python-2-only code.
    for key, value in vars(args).iteritems():
        logger.info("  --{} {}".format(key.replace("_", "-"), value))
    logger.info("Processing the TPED and TFAM file")
    processTPEDandTFAM(args.tfile + ".tped", args.tfile + ".tfam", args.out)
The main function of the module. :param argString: the options. :type argString: list These are the steps: 1. Prints the options. 2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and all failed markers (:py:func:`processTPEDandTFAM`).
def workspace_create(ctx, clobber_mets, directory):
    """Create a workspace with an empty METS in *directory* and print its path."""
    abs_dir = os.path.abspath(directory)
    new_ws = ctx.resolver.workspace_from_nothing(
        directory=abs_dir,
        mets_basename=ctx.mets_basename,
        clobber_mets=clobber_mets,
    )
    new_ws.save_mets()
    print(new_ws.directory)
Create a workspace with an empty METS file in DIRECTORY. Use '.' for $PWD.
def ctc_beam_search_decoder(probs_seq, alphabet, beam_size, cutoff_prob=1.0, cutoff_top_n=40, scorer=None):
    """Run the native CTC beam-search decoder and decode token ids to text.

    Returns (probability, sentence) tuples in descending probability order.
    """
    raw_results = swigwrapper.ctc_beam_search_decoder(
        probs_seq, alphabet.config_file(), beam_size, cutoff_prob,
        cutoff_top_n, scorer)
    return [(hit.probability, alphabet.decode(hit.tokens)) for hit in raw_results]
Wrapper for the CTC Beam Search Decoder. :param probs_seq: 2-D list of probability distributions over each time step, with each element being a list of normalized probabilities over alphabet and blank. :type probs_seq: 2-D list :param alphabet: alphabet list. :alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list
def shuffle(self, times=1):
    """Shuffle ``self.cards`` in place.

    :param int times: How many times to re-shuffle the stack.
    """
    # BUG FIX: ``xrange`` is Python-2-only; ``range`` is the supported spelling.
    for _ in range(times):
        random.shuffle(self.cards)
Shuffles the Stack. .. note:: Shuffling large numbers of cards (100,000+) may take a while. :arg int times: The number of times to shuffle.
# Salt vSphere execution-module function: get a named service's policy for an
# ESXi host (or, via a vCenter connection, for each host in host_names).
# NOTE(review): this row was badly corrupted by dataset extraction -- the
# docstring and all string literals were stripped and fragments of the
# docstring were fused into the code (e.g. ``ret.update({host_name: {: ...``).
# Restore from upstream salt.modules.vsphere before use.
def get_service_policy(host, username, password, service_name, protocol=None, port=None, host_names=None): s policy for a given host or list of hosts. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. service_name The name of the service for which to retrieve the policy. Supported service names are: - DCUI - TSM - SSH - lbtd - lsassd - lwiod - netlogond - ntpd - sfcbd-watchdog - snmpd - vprobed - vpxa - xorg protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. host_names List of ESXi host names. When the host, username, and password credentials are provided for a vCenter Server, the host_names argument is required to tell vCenter the hosts for which to get service policy information. If host_names is not provided, the service policy information will be retrieved for the ``host`` location instead. This is useful for when service instance connection information is used for a single ESXi host. CLI Example: .. code-block:: bash salt vsphere.get_service_policy my.esxi.host root bad-password salt vsphere.get_service_policy my.vcenter.location root bad-password \ host_names= DCUITSMSSHsshlbtdlsassdlwiodnetlogondntpdsfcbd-watchdogsnmpdvprobedvpxaxorgt have a valid service, return. The service will be invalid for all hosts. if service_name not in valid_services: ret.update({host_name: {: .format(service_name)}}) return ret host_ref = _get_host_ref(service_instance, host, host_name=host_name) services = host_ref.configManager.serviceSystem.serviceInfo.service return ret
Get the service name's policy for a given host or list of hosts. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. service_name The name of the service for which to retrieve the policy. Supported service names are: - DCUI - TSM - SSH - lbtd - lsassd - lwiod - netlogond - ntpd - sfcbd-watchdog - snmpd - vprobed - vpxa - xorg protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. host_names List of ESXi host names. When the host, username, and password credentials are provided for a vCenter Server, the host_names argument is required to tell vCenter the hosts for which to get service policy information. If host_names is not provided, the service policy information will be retrieved for the ``host`` location instead. This is useful for when service instance connection information is used for a single ESXi host. CLI Example: .. code-block:: bash # Used for single ESXi host connection information salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh' # Used for connecting to a vCenter Server salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \ host_names='[esxi-1.host.com, esxi-2.host.com]'
def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:
    """Collapse variant nodes of the given function into their parents, in place."""
    for source, target, edge_data in graph.edges(data=True):
        if edge_data[RELATION] != HAS_VARIANT:
            continue
        if source.function != func:
            continue
        collapse_pair(graph, from_node=target, to_node=source)
Collapse all of the given functions' variants' edges to their parents, in-place.
# Associate an Option VIP with an Environment VIP via the network API.
# NOTE(review): all string literals (error messages, URL fragments, and the
# HTTP method passed to self.submit) were stripped during dataset extraction
# (``url = + \ str(...) + ...``); restore them from upstream before use.
def associate(self, id_option_vip, id_environment_vip): if not is_valid_int_param(id_option_vip): raise InvalidParameterError( u) if not is_valid_int_param(id_environment_vip): raise InvalidParameterError( u) url = + \ str(id_option_vip) + + str(id_environment_vip) + code, xml = self.submit(None, , url) return self.response(code, xml)
Create a relationship of OptionVip with EnvironmentVip. :param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero. :param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero. :return: Following dictionary :: {'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} } :raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid. :raise OptionVipNotFoundError: Option VIP not registered. :raise EnvironmentVipNotFoundError: Environment VIP not registered. :raise OptionVipError: Option vip is already associated with the environment vip. :raise UserNotAuthorizedError: User does not have authorization to make this association. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def keyPressEvent(self, event):
    """Cancel (reject) this snapshot when Escape is pressed.

    The event is always forwarded to the base-class handler afterwards.

    :param event: <QKeyPressEvent>
    """
    if event.key() == Qt.Key_Escape:
        self.reject()
    super(XSnapshotWidget, self).keyPressEvent(event)
Listens for the escape key to cancel out from this snapshot. :param event | <QKeyPressEvent>
def add_triple(
    self,
    subj: Union[URIRef, str],
    pred: Union[URIRef, str],
    obj: Union[URIRef, Literal, str]
) -> None:
    """Add a (subj, pred, obj) triple to the rdflib graph.

    Blank objects (None, "", " ") are silently skipped.
    """
    if obj in [None, "", " "]:
        return
    triple = (
        self.process_subj_or_pred(subj),
        self.process_subj_or_pred(pred),
        self.process_obj(obj),
    )
    self.g.add(triple)
Adds a triple to the rdflib Graph. The triple can be of any subject, predicate, and object of the entity without a need for order. Args: subj: Entity subject pred: Entity predicate obj: Entity object Example: In [1]: add_triple( ...: 'http://uri.interlex.org/base/ilx_0101431', ...: RDF.type, ...: 'http://www.w3.org/2002/07/owl#Class')
def send_immediately(self, message, fail_silently=False):
    """Send *message* now, bypassing the transaction manager.

    Connection errors propagate unless *fail_silently* is true, in which
    case they are swallowed.
    """
    try:
        mailer_args = self._message_args(message)
        return self.smtp_mailer.send(*mailer_args)
    except smtplib.socket.error:
        if fail_silently:
            return None
        raise
Send a message immediately, outside the transaction manager. If there is a connection error to the mail server this will have to be handled manually. However if you pass ``fail_silently`` the error will be swallowed. :versionadded: 0.3 :param message: a 'Message' instance. :param fail_silently: silently handle connection errors.
# Enable/disable build plugins depending on the supported registry API
# versions ("v1"/"v2" in the upstream source).
# NOTE(review): the version literals and plugin-name literals were stripped
# during dataset extraction (``if not in versions``, ``push_conf[][]``);
# restore them from the upstream osbs-client source before use.
def adjust_for_registry_api_versions(self): versions = self.spec.registry_api_versions.value if not in versions: raise OsbsValidationException() try: push_conf = self.dj.dock_json_get_plugin_conf(, ) tag_and_push_registries = push_conf[][] except (KeyError, IndexError): tag_and_push_registries = {} if not in versions: for phase, name in [(, )]: logger.info("removing v1-only plugin: %s", name) self.dj.remove_plugin(phase, name) self.remove_tag_and_push_registries(tag_and_push_registries, )
Enable/disable plugins depending on supported registry API versions
def load(self, **kwargs):
    """Stamp out a fresh core object, refresh it from the device, and return it.

    Performs the supported-version check first (only fixed in 12.1.0+).
    """
    self._is_version_supported_method()
    fresh = self._stamp_out_core()
    fresh._refresh(**kwargs)
    return fresh
Method to list the UCS on the system. Since this is only fixed in 12.1.0 and up, we implement a version check here.
def setHorCrossPlotAutoRangeOn(self, axisNumber):
    """Enable auto-range on the horizontal cross-hair plot.

    :param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (both X and Y axes).
    """
    setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.horCrossPlotRangeCti, axisNumber)
Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber. :param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
def percentile(data, n):
    """Return the n-th percentile of *data*, which must already be sorted."""
    size = len(data)
    position = (n / 100.0) * size - 0.5
    if not 0 <= position <= size:
        raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
    return data[int(position)]
Return the n-th percentile of the given data Assume that the data are already sorted
def files(self): ios_names = [info.name for info in self._ios_to_add.keys()] return set(self.files_to_add + ios_names)
Files that will be added to the tar file later; should be a tuple, list, or generator that returns strings.
def propose(self):
    """Random-walk proposal for a positive-definite matrix value.

    Draws normal deviations (scaled by the adaptive factor), symmetrizes
    them, and adds them to the current stochastic value.
    """
    dims = self.stochastic.value.shape
    dev = rnormal(
        0, self.adaptive_scale_factor * self.proposal_sd, size=dims)
    symmetrize(dev)
    self.stochastic.value = dev + self.stochastic.value
Proposals for positive definite matrix using random walk deviations on the Cholesky factor of the current value.
def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
    """Fetch a proto tarball from IPFS and extract it into *protodir*.

    Only flat archives of plain files are accepted: directories, special
    members, and collisions with existing files all raise. This guards
    against the tarfile.extractall path-traversal hazard.
    """
    spec_tar = get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash)
    with tarfile.open(fileobj=io.BytesIO(spec_tar)) as f:
        for m in f.getmembers():
            if os.path.dirname(m.name) != "":
                raise Exception("tarball has directories. We do not support it.")
            if not m.isfile():
                # BUG FIX: message read "which is not a files".
                raise Exception("tarball contains %s which is not a file" % m.name)
            fullname = os.path.join(protodir, m.name)
            if os.path.exists(fullname):
                raise Exception("%s already exists." % fullname)
        # All members validated above, so extractall cannot escape protodir.
        f.extractall(protodir)
Tar files might be dangerous (see https://bugs.python.org/issue21109, and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning) we extract only simple files
def get_input_kwargs(self, key=None, default=None):
    """Deprecated alias for :meth:`get_catalog_info` (emits DeprecationWarning)."""
    warnings.warn("`get_input_kwargs` is deprecated; use `get_catalog_info` instead.",
                  DeprecationWarning)
    return self.get_catalog_info(key, default)
Deprecated. Use `get_catalog_info` instead. Get information from the catalog config file. If *key* is `None`, return the full dict.
def get_tweet(self, id):
    """Fetch tweet *id*; return None when Twitter reports it as missing."""
    try:
        status = self._client.get_status(id=id)
        return Tweet(status._json)
    except TweepError as e:
        if e.api_code != TWITTER_TWEET_NOT_FOUND_ERROR:
            raise
        return None
Get an existing tweet. :param id: ID of the tweet in question :return: Tweet object. None if not found
def read_config(config_path_or_dict=None):
    """Build a validated Config from a dict, a path string, or the default file.

    :param config_path_or_dict: dict used directly; or a path (absolute or
        relative to the CWD); None falls back to DEFAULT_CONFIG_PATH.
    :raises ConfigNotFound: when no usable config is found.
    :return: the validated :class:`revision.config.Config` object.
    """
    config = None
    if isinstance(config_path_or_dict, dict):
        config = Config(config_path_or_dict)
    else:
        # BUG FIX: the original fell through to the file-loading branch even
        # when a dict was supplied, so an existing default config file could
        # silently overwrite the caller's dict.
        if isinstance(config_path_or_dict, string_types):
            if os.path.isabs(config_path_or_dict):
                config_path = config_path_or_dict
            else:
                config_path = os.path.join(
                    os.getcwd(), os.path.normpath(config_path_or_dict)
                )
        else:
            config_path = os.path.join(os.getcwd(), DEFAULT_CONFIG_PATH)
        if os.path.exists(config_path):
            # NOTE(review): the open() mode literal was stripped during
            # extraction; "r" is the only mode consistent with json.load.
            with open(config_path, "r") as f:
                data = json.load(f)
            config = Config(data)
    if config is None:
        raise ConfigNotFound()
    config.validate()
    return config
Read config from given path string or dict object. :param config_path_or_dict: :type config_path_or_dict: str or dict :return: Returns config object or None if not found. :rtype: :class:`revision.config.Config`
def delay(self, wait, *args):
    """Invoke ``self.obj`` with *args* after *wait* milliseconds.

    Returns the wrapped callable immediately.
    """
    def fire():
        self.obj(*args)
    timer = Timer(float(wait) / float(1000), fire)
    timer.start()
    return self._wrap(self.obj)
Delays a function for the given number of milliseconds, and then calls it with the arguments supplied.
def update(self, td):
    """Advance the ball state: remember last kinematics, then update particles.

    :param td: time delta for this frame.
    """
    sprite = self.sprite
    sprite.last_position = sprite.position
    sprite.last_velocity = sprite.velocity
    # BUG FIX: ``!= None`` compares by equality; identity is the correct idiom.
    if self.particle_group is not None:
        self.update_particle_group(td)
Update state of ball
# Juju RPC facade call: destroy the named machines (force : bool,
# machine_names : sequence of str).
# NOTE(review): the message type/request literals and the _params keys were
# stripped during dataset extraction (``msg = dict(type=, request=, ...)``,
# ``_params[] = force``); restore them from the upstream facade definition.
async def DestroyMachines(self, force, machine_names): _params = dict() msg = dict(type=, request=, version=1, params=_params) _params[] = force _params[] = machine_names reply = await self.rpc(msg) return reply
force : bool machine_names : typing.Sequence[str] Returns -> None
def where_entry_date(query, datespec):
    """Filter *query* to entries whose local_date falls inside *datespec*.

    datespec -- textual date in YYYY[[-]MM[[-]DD]] form; omitted precision
    widens the matched interval accordingly.

    NOTE(review): Pony ORM's ``orm.select`` decompiles this generator
    expression, so its exact shape must not be refactored.
    """
    date, interval, _ = utils.parse_date(datespec)
    start_date, end_date = date.span(interval)
    return orm.select(
        e for e in query
        if e.local_date >= start_date.naive
        and e.local_date <= end_date.naive
    )
Where clause for entries which match a textual date spec datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
def run(self, data, rewrap=False, prefetch=0):
    """Wire the filter pipeline over *data* and lazily yield transformed items.

    :param data: iterable of documents.
    :param rewrap: wrap *data* in a list first (for types such as dict whose
        iteration semantics would be undesired).
    :param prefetch: number of items to prefetch via the threaded helper.

    NOTE(review): this layout was reconstructed from a collapsed line; the
    ``else`` of a ``for`` runs whenever the loop ends without ``break`` --
    confirm the exact nesting against upstream before refactoring.
    """
    if rewrap:
        data = [data]
    for _filter in self._filters:
        _filter.feed(data)
        data = _filter
    else:
        iterable = self._prefetch_callable(data, prefetch) if prefetch else data
        for out_data in iterable:
            yield out_data
Wires the pipeline and returns a lazy object of the transformed data. :param data: must be an iterable, where a full document must be returned for each loop :param rewrap: (optional) is a bool that indicates the need to rewrap data in cases where iterating over it produces undesired results, for instance ``dict`` instances. :param prefetch: (optional) is an int defining the number of items to be prefetched once the pipeline starts yielding data. The default prefetching mechanism is based on threads, so be careful with CPU-bound processing pipelines.
def _get_answer(self, part):
    """Return the saved correct answer for *part* ("a" or "b"), scraping if needed.

    Answers are only revealed by the site after a correct submission;
    raises PuzzleUnsolvedError when the puzzle is still unsolved.
    """
    answer_fname = getattr(self, "answer_{}_fname".format(part))
    if os.path.isfile(answer_fname):
        with open(answer_fname) as f:
            return f.read().strip()
    # Scrape the puzzle page: correct answers appear in paragraphs that
    # start with "Your puzzle answer was" and hold the value in a <code>.
    response = requests.get(self.url, cookies=self._cookies, headers=self._headers)
    response.raise_for_status()
    soup = bs4.BeautifulSoup(response.text, "html.parser")
    if not self._title:
        self._save_title(soup=soup)
    hit = "Your puzzle answer was"
    paras = [p for p in soup.find_all("p") if p.text.startswith(hit)]
    if paras:
        parta_correct_answer = paras[0].code.text
        self._save_correct_answer(value=parta_correct_answer, part="a")
        if len(paras) > 1:
            _p1, p2 = paras
            partb_correct_answer = p2.code.text
            self._save_correct_answer(value=partb_correct_answer, part="b")
    # Saving above may have produced the file we originally wanted.
    if os.path.isfile(answer_fname):
        with open(answer_fname) as f:
            return f.read().strip()
    msg = "Answer {}-{}{} is not available".format(self.year, self.day, part)
    raise PuzzleUnsolvedError(msg)
Note: Answers are only revealed after a correct submission. If you have not already solved the puzzle, PuzzleUnsolvedError will be raised.
def find(self, *strings, **kwargs):
    """Search the editor's lines for each given string.

    Returns a list of (line number, line) pairs when a single string is
    searched, otherwise a dict keyed by string; with ``keys_only=True``
    only the line numbers are collected. ``start``/``stop`` bound the
    searched line range.
    """
    start = kwargs.pop("start", 0)
    stop = kwargs.pop("stop", None)
    keys_only = kwargs.pop("keys_only", False)
    if stop is None:
        stop = len(self)
    hits = {needle: [] for needle in strings}
    for offset, line in enumerate(self[start:stop]):
        for needle in strings:
            if needle not in line:
                continue
            hits[needle].append(offset if keys_only else (offset, line))
    if len(strings) == 1:
        return hits[strings[0]]
    return hits
Search the entire editor for lines that match the string. .. code-block:: Python string = '''word one word two three''' ed = Editor(string) ed.find('word') # [(0, "word one"), (1, "word two")] ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]} Args: strings (str): Any number of strings to search for keys_only (bool): Only return keys start (int): Optional line to start searching on stop (int): Optional line to stop searching on Returns: results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
def download_from_url(source, destination, progress=False, uncompress=False):
    """Download *source* URL to the *destination* path, like wget.

    :param progress: show a tqdm progress bar while downloading.
    :param uncompress: unzip the result in place when it is a zip archive.
    :return: the destination FilePath.
    """
    from tqdm import tqdm
    import requests
    from autopaths.file_path import FilePath
    destination = FilePath(destination)
    destination.directory.create_if_not_exists()
    response = requests.get(source, stream=True)
    # NOTE(review): the header-name literal was stripped during extraction;
    # "content-length" is the standard header carrying the total size.
    total_size = int(response.headers.get('content-length'))
    block_size = int(total_size / 1024)
    with open(destination, "wb") as handle:
        if progress:
            for data in tqdm(response.iter_content(chunk_size=block_size), total=1024):
                handle.write(data)
        else:
            for data in response.iter_content(chunk_size=block_size):
                handle.write(data)
    if uncompress:
        # BUG FIX: the zip signature must be read in binary mode; a text-mode
        # read could fail to decode or mangle the "\x03\x04" bytes.
        with open(destination, 'rb') as f:
            header = f.read(4)
        if header == b"PK\x03\x04":
            unzip(destination, inplace=True)
    return destination
Download a file from an URL and place it somewhere. Like wget. Uses requests and tqdm to display progress if you want. By default it will uncompress files. #TODO: handle case where destination is a directory
def fill(self, passage=None, xpath=None):
    """Fill this citation's xpath/refsDecl template with passage values.

    :param passage: CtsReference, list of reference parts (None entries
        allowed), or None to leave the placeholders unfilled.
    :param xpath: if True, operate on self.xpath instead of the whole refsDecl.
    :returns: xpath string locating the passage.

    NOTE(review): nesting reconstructed from a collapsed line -- confirm
    against upstream MyCapytain before refactoring.
    """
    if xpath is True:
        xpath = self.xpath
        replacement = r"\1"
        # A plain string passage keeps the value group in the replacement.
        if isinstance(passage, str):
            replacement = r"\1\2"
        return REFERENCE_REPLACER.sub(replacement, xpath)
    else:
        if isinstance(passage, CtsReference):
            passage = passage.start.list
        elif passage is None:
            return REFERENCE_REPLACER.sub(
                r"\1",
                self.refsDecl
            )
        # Consume passage parts one per placeholder, left to right.
        passage = iter(passage)
        return REFERENCE_REPLACER.sub(
            lambda m: _ref_replacer(m, passage),
            self.refsDecl
        )
Fill the xpath with given informations :param passage: CapitainsCtsPassage reference :type passage: CtsReference or list or None. Can be list of None and not None :param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl :type xpath: Boolean :rtype: basestring :returns: Xpath to find the passage .. code-block:: python citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]") print(citation.fill(["1", None])) # /TEI/text/body/div/div[@n='1']//l[@n] print(citation.fill(None)) # /TEI/text/body/div/div[@n]//l[@n] print(citation.fill(CtsReference("1.1")) # /TEI/text/body/div/div[@n='1']//l[@n='1'] print(citation.fill("1", xpath=True) # //l[@n='1']
def find_idx_by_threshold(self, threshold, train=False, valid=False, xval=False):
    """Locate *threshold* in each requested metric's threshold list.

    Returns a single value when one metric is selected, otherwise a dict
    keyed by metric name ("train"/"valid"/"xval").
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    found = {
        key: (None if metric is None else metric.find_idx_by_threshold(threshold))
        for key, metric in viewitems(metrics)
    }
    if len(found) == 1:
        return next(iter(found.values()))
    return found
Retrieve the index in this metric's threshold list at which the given threshold is located. If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval". :param float threshold: Threshold value to search for in the threshold list. :param bool train: If True, return the find idx by threshold value for the training data. :param bool valid: If True, return the find idx by threshold value for the validation data. :param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits. :returns: The find idx by threshold values for the specified key(s).
def cost_min2(self, alpha):
    """Evaluate the residual-form objective at *alpha* (= stacked [ax, ay]).

    The Hessian of this formulation is a low-rank update of the identity.
    """
    n = self.V.dim()
    ax, ay = alpha[:n], alpha[n:]
    q2, r2 = self.get_q2_r2(ax, ay)
    Lax = self.L * ax
    Lay = self.L * ay
    terms = [
        0.5 * numpy.dot(Lax, Lax),
        0.5 * numpy.dot(Lay, Lay),
        0.5 * numpy.dot(q2 - 1, q2 - 1),
        0.5 * numpy.dot(r2, r2),
    ]
    # Periodic progress report.
    if self.num_f_eval % 10000 == 0:
        print("{:7d} {:e} {:e} {:e} {:e}".format(self.num_f_eval, *terms))
    self.num_f_eval += 1
    return numpy.sum(terms)
Residual formulation, Hessian is a low-rank update of the identity.
def set_embeddings(self, embeddings):
    """Attach a fixed (embedding size, #terms) matrix; returns self."""
    if self.embeddings_ is not None:
        raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
    expected_terms = self.corpus_.get_num_terms()
    assert embeddings.shape[1] == expected_terms
    # Stored transposed: one row per term.
    self.embeddings_ = embeddings.T
    self.vocab_ = self.corpus_.get_terms()
    return self
Specifies fixed set of embeddings :param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms) :return: EmbeddingsResolver
def on_quit(self, connection, event):
    """Handle a channel quit: drop the nickname and broadcast the update."""
    who = self.get_nickname(event)
    color = self.nicknames.pop(who)
    self.namespace.emit("message", who, "leaves", color)
    self.emit_nicknames()
Someone left the channel - send the nicknames list to the WebSocket.
# AMQP open-ok handler: the broker signals the connection is ready and sends
# the known_hosts shortstr.
# NOTE(review): the debug format-string literal was stripped during dataset
# extraction (``AMQP_LOGGER.debug( % self.known_hosts)``); restore it from
# the upstream amqp library before use.
def _open_ok(self, args): self.known_hosts = args.read_shortstr() AMQP_LOGGER.debug( % self.known_hosts) return None
signal that the connection is ready This method signals to the client that the connection is ready for use. PARAMETERS: known_hosts: shortstr
def on_configparser_dumps(self, configparser, config, dictionary, **kwargs):
    """Serialize *dictionary* to INI text via :class:`INIParser`.

    :param module configparser: the ``configparser`` module.
    :param class config: the instance's config class; its name is the
        default root section.
    :param dict dictionary: the data to serialize.
    :param str root: top-level section of the INI file (keyword).
    :param str delimiter: nesting separator, defaults to ":" (keyword).
    :param bool empty_sections: keep empty sections (keyword).
    :return: the INI serialization of *dictionary*.
    :rtype: str
    """
    root_section = kwargs.pop("root")
    if not isinstance(root_section, str):
        root_section = config.__name__
    delimiter = kwargs.pop("delimiter", ":")
    if delimiter in root_section:
        warnings.warn(
            f"root section {root_section!r} contains delimiter character "
            f"{delimiter!r}, loading from the resulting content will likely fail"
        )
    try:
        return INIParser.from_dict(
            dictionary,
            root_section=root_section,
            # BUG FIX: "delimiter" was already popped above, so a second
            # kwargs.pop always returned the default ":" and silently ignored
            # a caller-supplied delimiter; reuse the value popped earlier.
            delimiter=delimiter,
            empty_sections=kwargs.pop("empty_sections", False),
        ).to_ini()
    except ValueError as err:
        raise ValueError("INI cannot handle this config, try using toml instead") from err
The :mod:`configparser` dumps method. :param module configparser: The ``configparser`` module :param class config: The instance's config class :param dict dictionary: The dictionary instance to serialize :param str root: The top-level section of the ini file, defaults to ``config.__name__``, optional :param str delimiter: The delimiter character used for representing nested dictionaries, defaults to ":", optional :return: The ini serialization of the given ``dictionary`` :rtype: str
def get_author_and_version(package):
    """Return the (author, version) pair declared in *package*'s ``__init__.py``.

    :param package: path of the package directory to inspect.
    """
    # NOTE(review): the original row was corrupted -- the filename literal and
    # the whole ``version`` extraction were stripped (``version`` was returned
    # but never assigned). Reconstructed from the conventional setup.py idiom.
    init_py = open(os.path.join(package, '__init__.py')).read()
    author = re.search("__author__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    return author, version
Return package author and version as listed in `__init__.py`.
# Heron tracker call: trigger jmap on a topology instance and return the JSON
# response (tornado coroutine-style, raises gen.Return with the result).
# NOTE(review): the params key literal was stripped during dataset extraction
# (``params[] = role``); also note role is appended twice (once via params,
# once via url_concat) -- confirm against upstream before use.
def run_instance_jmap(cluster, environ, topology, instance, role=None): params = dict( cluster=cluster, environ=environ, topology=topology, instance=instance) if role is not None: params[] = role request_url = tornado.httputil.url_concat( create_url(JMAP_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
:param cluster: :param environ: :param topology: :param instance: :param role: :return:
def pgettext(self, context, string, domain=None, **variables):
    """Context-aware gettext: translate *string* within *context*, then
    interpolate *variables* with %-formatting."""
    translations = self.get_translations(domain)
    translated = translations.upgettext(context, string)
    return translated % variables
Like :meth:`gettext` but with a context.
# Change the process working directory, like shell ``cd``.
# NOTE(review): the debug format-string literal was stripped during dataset
# extraction (``Log.debug(.format(directory))``); restore it from upstream.
def cd(cls, directory): Log.debug(.format(directory)) os.chdir(directory)
Change directory. It behaves like "cd directory".
# HDFStore writer for a sparse frame: writes each contained sparse series
# into its own sub-group, then stores the fill value, kind, and column index.
# NOTE(review): the group-key and index-key string literals were stripped
# during dataset extraction (``key = .format(name=name)``,
# ``self.write_index(, obj.columns)``); restore them from upstream pandas.
def write(self, obj, **kwargs): super().write(obj, **kwargs) for name, ss in obj.items(): key = .format(name=name) if key not in self.group._v_children: node = self._handle.create_group(self.group, key) else: node = getattr(self.group, key) s = SparseSeriesFixed(self.parent, node) s.write(ss) self.attrs.default_fill_value = obj.default_fill_value self.attrs.default_kind = obj.default_kind self.write_index(, obj.columns)
write it as a collection of individual sparse series
def _ggplot(df, out_file):
    """Plot faceted variant counts with the ggplot matplotlib wrapper.

    NOTE(review): upstream marks this as "not yet functional", and the
    ``ggplot`` package is unmaintained.
    """
    import ggplot as gg
    # Map raw codes to human-readable labels before plotting.
    df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
    df["category"] = [cat_labels[x] for x in df["category"]]
    df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
    p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor"))
         + gg.geom_bar()
         + gg.facet_wrap("variant.type", "category")
         + gg.theme_seaborn())
    gg.ggsave(p, out_file)
Plot faceted items with ggplot wrapper on top of matplotlib. XXX Not yet functional
# angr JavaVM: simulate JVM class loading by running the class initializer
# <clinit> (if loaded) in a fresh sim state, then folding the resulting static
# table and heap back into the current state. Skipped if already initialized
# or the class is not loaded in CLE.
# NOTE(review): the method-name literal was stripped during dataset extraction
# (``resolve_method(self.state, , class_.name, ...)`` -- presumably
# "<clinit>"); restore it from upstream before use.
def init_class(self, class_, step_func=None): if self.is_class_initialized(class_): l.debug("Class %r already initialized.", class_) return l.debug("Initialize class %r.", class_) self.initialized_classes.add(class_) if not class_.is_loaded: l.warning("Class %r is not loaded in CLE. Skip initializiation.", class_) return clinit_method = resolve_method(self.state, , class_.name, include_superclasses=False, init_class=False) if clinit_method.is_loaded: javavm_simos = self.state.project.simos clinit_state = javavm_simos.state_call(addr=SootAddressDescriptor(clinit_method, 0, 0), base_state=self.state, ret_addr=SootAddressTerminator()) simgr = self.state.project.factory.simgr(clinit_state) l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method) simgr.run(step_func=step_func) l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method) self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy() self.state.memory.heap = simgr.deadended[-1].memory.heap.copy() else: l.debug("Class initializer <clinit> is not loaded in CLE. Skip initializiation.")
This method simulates the loading of a class by the JVM, during which parts of the class (e.g. static fields) are initialized. For this, we run the class initializer method <clinit> (if available) and update the state accordingly. Note: Initialization is skipped, if the class has already been initialized (or if it's not loaded in CLE).
def seed_zoom(seeds, zoom):
    """Downscale a sparse 3D label volume by *zoom*, preferring nonzero labels.

    A plain resize can lose thin label lines; this accumulates each label's
    voxels into the low-resolution grid and zeroes out voxels where several
    labels collide.

    :param seeds: 3D integer label array (0 is background).
    :param zoom: downscale factor (int or float).
    :return: int8 array of shape ceil(seeds.shape / zoom).
    """
    labels = np.unique(seeds)
    labels = np.delete(labels, 0)  # drop the background label 0
    # BUG FIX: ``np.int`` was removed in NumPy 1.24 -- use builtin ``int``.
    loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(int)
    loseeds = np.zeros(loshape, dtype=np.int8)
    for label in labels:
        a, b, c = np.where(seeds == label)
        # BUG FIX: with a float ``zoom``, floor-division yields float arrays
        # and float fancy-indexing raises on modern NumPy; cast to int
        # (np.round of an already-floored value was a no-op anyway).
        loa = (a // zoom).astype(int)
        lob = (b // zoom).astype(int)
        loc = (c // zoom).astype(int)
        loseeds[loa, lob, loc] += label
        # Voxels exceeding the current label accumulated more than one
        # label: mark the collision (note int8 limits label range).
        loseeds[loseeds > label] = 100
    loseeds[loseeds > 99] = 0
    return loseeds
Smart zoom for sparse matrix. If there is resize to bigger resolution thin line of label could be lost. This function prefers labels larger then zero. If there is only one small voxel in larger volume with zeros it is selected.
def _match_nodes(self, validators, obj):
    """Return every node in *obj* accepted by all *validators*."""
    return [
        node
        for node in object_iter(obj)
        if all(check(node) for check in validators)
    ]
Apply each validator in validators to each node in obj. Return each node in obj which matches all validators.
def match_string(self, stype):
    """True when *stype* is fully covered by the plain string types, or
    overlaps the wildcard string types."""
    fully_covered = not (stype - self.string_types)
    hits_wildcard = bool(stype & self.wild_string_types)
    return fully_covered or hits_wildcard
Match string type.
# Twilio Video API: retrieve a single page of CompositionHook records with the
# given filters (enabled, creation-date range, friendly name) and paging args.
# NOTE(review): the parameter-name literals in the values.of({...}) dict and
# the HTTP-method literal passed to self._version.page were stripped during
# dataset extraction; restore them from the upstream twilio-python source.
def page(self, enabled=values.unset, date_created_after=values.unset, date_created_before=values.unset, friendly_name=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): params = values.of({ : enabled, : serialize.iso8601_datetime(date_created_after), : serialize.iso8601_datetime(date_created_before), : friendly_name, : page_token, : page_number, : page_size, }) response = self._version.page( , self._uri, params=params, ) return CompositionHookPage(self._version, response, self._solution)
Retrieve a single page of CompositionHookInstance records from the API. Request is executed immediately :param bool enabled: Only show Composition Hooks enabled or disabled. :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone. :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone. :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of CompositionHookInstance :rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
# Publish a draft item (and sub-items) inside a transaction: record the
# publishing user, drop superseded scheduled versions, clone the draft, and
# schedule it for ``when`` (defaulting to now). Must only run on drafts.
# NOTE(review): this row was corrupted by dataset extraction -- the empty
# default for ``user_published`` was stripped (``user_published = if user:``)
# and the with/if/else layout around ``xact()`` was mangled; restore from
# upstream before use.
def publish(self, user=None, when=None): assert self.state == self.DRAFT user_published = if user: user_published = user.username now = timezone.now() with xact(): published = self.is_published else: published = self.object.is_published if not when and not published and self.last_scheduled: klass = self.get_version_class() for obj in klass.normal.filter(object_id=self.object_id, last_scheduled=self.last_scheduled, state=self.SCHEDULED): when = self.date_published obj.delete() when = when or now if self.state == self.DRAFT: self.last_scheduled = now self.date_published = when self.save(last_save=now) self._clone() self.user_published = user_published self.state = self.SCHEDULED self.save() self.schedule(when=when)
Publishes a item and any sub items. A new transaction will be started if we aren't already in a transaction. Should only be run on draft items
def eintr_retry_zmq(f, *args, **kwargs):
    """Call *f*, retrying when it raises an EINTR-interrupted :exc:`zmq.ZMQError`.

    Specialization of :func:`eintr_retry` for zmq.
    """
    return eintr_retry(zmq.ZMQError, f, *args, **kwargs)
The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`.
# Resolve a dotted-name string such as 'torch.nn.Softmax(dim=-1)' to the
# object it names, optionally instantiating it with the parenthesized args.
# Non-strings are returned unchanged.
# NOTE(review): several literals were stripped during dataset extraction --
# the '.' membership test (``if not in dotted_name``), the regex group
# selectors (``match.group()``), and the rsplit separator (``rsplit(, 1)``);
# restore them from the upstream skorch/palladium source before use.
def _resolve_dotted_name(dotted_name): if not isinstance(dotted_name, str): return dotted_name if not in dotted_name: return dotted_name args = None params = None match = P_PARAMS.match(dotted_name) if match: dotted_name = match.group() params = match.group() module, name = dotted_name.rsplit(, 1) attr = import_module(module) attr = getattr(attr, name) if params: args, kwargs = _parse_args_kwargs(params[1:-1]) attr = attr(*args, **kwargs) return attr
Returns objects from strings Deals e.g. with 'torch.nn.Softmax(dim=-1)'. Modified from palladium: https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py with added support for instantiated objects.
# pymux command: show a message in the current client's status area.
# NOTE(review): the variables-dict key literal was stripped during dataset
# extraction (``variables[]`` -- presumably the message argument name);
# restore it from the upstream pymux command definition.
def display_message(pymux, variables): " Display a message. " message = variables[] client_state = pymux.get_client_state() client_state.message = message
Display a message.
# Delete threat intel (groups or indicators) from the ThreatConnect platform
# via a batch delete: resolve the entity name/summary, queue it on the batch,
# submit, and log any errors.
#   owner (str): the ThreatConnect owner; data (dict): entity description;
#   clear_type (str): label used in log messages.
# NOTE(review): this row was corrupted by dataset extraction -- the batch
# action literal, every data.get() key, and all print/log format strings were
# stripped; restore them from the upstream tcex source before use.
def clear_tc(self, owner, data, clear_type): batch = self.tcex.batch(owner, action=) tc_type = data.get() path = data.get() if tc_type in self.tcex.group_types: name = self.tcex.playbook.read(data.get()) name = self.path_data(name, path) if name is not None: print( .format( c.Style.BRIGHT, c.Fore.MAGENTA, name ) ) self.log.info( .format( clear_type, tc_type, name ) ) batch.group(tc_type, name) elif tc_type in self.tcex.indicator_types: if data.get() is not None: summary = self.tcex.playbook.read(data.get()) else: resource = self.tcex.resource(tc_type) summary = resource.summary(data) summary = self.path_data(summary, path) if summary is not None: print( .format( c.Style.BRIGHT, c.Fore.MAGENTA, summary ) ) self.log.info( .format( clear_type, tc_type, summary ) ) batch.indicator(tc_type, summary) batch_results = batch.submit() self.log.debug(.format(clear_type, batch_results)) for error in batch_results.get() or []: self.log.error(.format(clear_type, error))
Delete threat intel from ThreatConnect platform. Args: owner (str): The ThreatConnect owner. data (dict): The data for the threat intel to clear. clear_type (str): The type of clear action.
def sample_stats_to_xarray(self):
    """Extract sample_stats (pointwise log-likelihood) from a tfp trace.

    Returns None when either the model function or the observed data is
    missing, since the log-likelihood cannot be evaluated in that case.
    """
    if self.model_fn is None or self.observed is None:
        return None
    log_likelihood = []
    # Number of posterior draws; posterior is indexed [variable][draw].
    sample_size = self.posterior[0].shape[0]
    for i in range(sample_size):
        # Bind each model variable to its i-th posterior draw, then
        # re-evaluate the model under Edward2 interception so log_prob is
        # computed at that draw.
        variables = {}
        for var_i, var_name in enumerate(self.var_names):
            variables[var_name] = self.posterior[var_i][i]
        with self.ed.interception(self._value_setter(variables)):
            log_likelihood.append((self.model_fn().distribution.log_prob(self.observed)))
    data = {}
    if self.dims is not None:
        coord_name = self.dims.get("obs")
    else:
        coord_name = None
    dims = {"log_likelihood": coord_name}
    # Evaluate all symbolic log-prob tensors in one session run; the extra
    # leading axis makes the result (chain=1, draw, *obs_shape).
    with self.tf.Session() as sess:
        data["log_likelihood"] = np.expand_dims(
            sess.run(log_likelihood, feed_dict=self.feed_dict), axis=0
        )
    return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)
Extract sample_stats from tfp trace.
def available_modes(self):
    """Return the list of available mode names, caching it on first use.

    Returns None when no modes can be discovered.
    """
    # Serve from the cache when it has already been populated.
    if self._available_modes:
        return self._available_modes
    mode_map = self.available_modes_with_ids
    if not mode_map:
        return None
    # Cache just the names; the ids are not needed by callers of this API.
    self._available_modes = list(mode_map)
    return self._available_modes
Return list of available mode names.
def get_byte_array(integer):
    """Return the minimal-length big-endian bytes encoding of a non-negative int.

    The length is ceil(bit_length / 8), so 0 encodes to b'' and there is no
    leading zero padding.

    :param integer: non-negative int to encode.
    :return: bytes of variable length.
    :raises OverflowError: if ``integer`` is negative (signed=False).
    """
    # (bit_length + 7) // 8 rounds up to the number of whole bytes needed.
    return int.to_bytes(
        integer,
        (integer.bit_length() + 8 - 1) // 8,
        byteorder='big',
        signed=False,
    )
Return the variable length bytes corresponding to the given int
def write_var(self, var_spec, var_attrs=None, var_data=None):
    # NOTE(review): the original docstring has been garbled by string
    # stripping — the run of fused tokens below is its remnant. The intended
    # documentation (spec keys Variable/Data_Type/Num_Elements/Rec_Vary,
    # optional Var_Type/Sparse/Compress/Block_Factor/Pad, attribute typing
    # rules and sparse var_data layout) should be restored from VCS.
    VariableData_TypeNum_ElementsRec_VaryDims_SizesDim_VaryVar_TypeSparseno_sparseCompressBlock_FactorPads type, a corresponding CDF
    type is assumed: CDF_INT4 for int, CDF_DOUBLE for float, CDF_EPOCH16
    for complex and and CDF_INT8 for long. For example, the following
    defined attributes will have the same types in the CDF::
        var_attrs= { : , : 12.45, : [3,4,5], ..... }
    With data type (in the list form)::
        var_attrs= { : , : [12.45, ], : [[3,4,5], ], ..... }
    var_data : The data for the variable. If the variable is
    a regular variable without sparse records, it must
    be in a single structure of bytes, or numpy.ndarray for numeric
    variable, or str or list of strs for string variable. If the
    variable has sparse records, var_data should be presented in a
    list/tuple with two elements, the first being a list/tuple that
    contains the physical record number(s), the second being the variable
    data in bytes, numpy.ndarray, or a list of strings. Variable data
    can have just physical records
    # --- validation of the variable specification -----------------------
    if not isinstance(var_spec, dict):
        raise TypeError()
    try:
        # NOTE(review): stripped subscript keys — presumably 'Data_Type',
        # 'Num_Elements', 'Variable', 'Rec_Vary'; confirm against VCS.
        dataType = int(var_spec[])
        numElems = int(var_spec[])
        name = var_spec[]
        recVary = var_spec[]
    except Exception:
        raise ValueError()
    # zVariable is the default variable kind.
    var_type = var_spec.setdefault(, )
    if (var_type.lower() == ):
        zVar = True
    else:
        var_spec[] = 
        zVar = False
    # String types carry their length in numElems; all numeric types must
    # have exactly one element.
    if (dataType == CDF.CDF_CHAR or dataType == CDF.CDF_UCHAR):
        if numElems < 1:
            raise ValueError()
    else:
        if numElems != 1:
            raise ValueError()
    if zVar:
        # Every dimension of a zVariable varies.
        try:
            dimSizes = var_spec[]
            numDims = len(dimSizes)
            dimVary = []
            for _ in range(0, numDims):
                dimVary.append(True)
        except Exception:
            raise ValueError()
    else:
        # rVariables share the file-level dimension sizes and must supply
        # per-dimension variances.
        dimSizes = self.rdim_sizes
        numDims = self.num_rdim
        try:
            dimVary = var_spec[]
            if (len(dimVary) != numDims):
                raise ValueError()
        except Exception:
            raise ValueError()
    sparse = CDF._sparse_token(var_spec.get(, ))
    # Compression: gzip level 0-9; anything else falls back to 6/0.
    compression = var_spec.get(, 6)
    if (isinstance(compression, int)):
        if not 0 <= compression <= 9:
            compression = 0
    else:
        compression = 6 if compression else 0
    blockingfactor = int(var_spec.get(, 1))
    pad = var_spec.get(, None)
    if (isinstance(pad, list) or isinstance(pad, tuple)):
        pad = pad[0]
    if (name in self.zvars or name in self.rvars):
        raise ValueError(.format(name))
    # --- write VDR, attributes and data at end of file ------------------
    with self.path.open() as f:
        f.seek(0, 2)
        varNum, offset = self._write_vdr(f, dataType, numElems, numDims,
                                         dimSizes, name, dimVary, recVary,
                                         sparse, blockingfactor, compression,
                                         pad, zVar)
        if zVar:
            # First zVariable: link GDR's zVDR-head pointer to this VDR.
            if len(self.zvars) == 1:
                self._update_offset_value(f, self.gdr_head+12, 8, offset)
        if var_attrs is not None:
            self._write_var_attrs(f, varNum, var_attrs, zVar)
        if not (var_data is None):
            if (sparse == 0):
                varMaxRec = self._write_var_data_nonsparse(f, zVar, varNum,
                                                           dataType, numElems,
                                                           recVary, compression,
                                                           blockingfactor,
                                                           var_data)
            else:
                # Sparse data must be a (record-numbers, data) pair.
                notsupport = False
                if not isinstance(var_data, (list, tuple)):
                    notsupport = True
                if notsupport or len(var_data) != 2:
                    print()
                    print()
                    print()
                    return
                var_data = self._make_sparse_blocks(var_spec, var_data[0],
                                                    var_data[1])
                for block in var_data:
                    varMaxRec = self._write_var_data_sparse(f, zVar, varNum,
                                                            dataType, numElems,
                                                            recVary, block)
            if not zVar:
                # Update the GDR's rVariable max record if we extended it.
                # NOTE(review): maxRec is not defined in this block — likely
                # dropped in the same corruption; confirm against VCS.
                if (maxRec < varMaxRec):
                    self._update_offset_value(f, self.gdr_head+52, 4, varMaxRec)
Writes a variable, along with variable attributes and data. Parameters ---------- var_spec : dict The specifications of the variable. The required/optional keys for creating a variable: Required keys: - ['Variable']: The name of the variable - ['Data_Type']: the CDF data type - ['Num_Elements']: The number of elements. Always 1 the for numeric type. The char length for string type. - ['Rec_Vary']: Record variance For zVariables: - ['Dims_Sizes']: The dimensional sizes for zVariables only. Use [] for 0-dimension. Each and every dimension is varying for zVariables. For rVariables: - ['Dim_Vary']: The dimensional variances for rVariables only. Optional keys: - ['Var_Type']: Whether the variable is a zVariable or rVariable. Valid values: "zVariable" and "rVariable". The default is "zVariable". - ['Sparse']: Whether the variable has sparse records. Valid values are "no_sparse", "pad_sparse", and "prev_sparse". The default is 'no_sparse'. - ['Compress']: Set the gzip compression level (0 to 9), 0 for no compression. The default is to compress with level 6 (done only if the compressed data is less than the uncompressed data). - ['Block_Factor']: The blocking factor, the number of records in a chunk when the variable is compressed. - ['Pad']: The padded value (in bytes, numpy.ndarray or string) var_attrs : dict {attribute:value} pairs. The attribute is the name of a variable attribute. The value can have its data type specified for the numeric data. If not, based on Python's type, a corresponding CDF type is assumed: CDF_INT4 for int, CDF_DOUBLE for float, CDF_EPOCH16 for complex and and CDF_INT8 for long. For example, the following defined attributes will have the same types in the CDF:: var_attrs= { 'attr1': 'value1', 'attr2': 12.45, 'attr3': [3,4,5], ..... } With data type (in the list form):: var_attrs= { 'attr1': 'value1', 'attr2': [12.45, 'CDF_DOUBLE'], 'attr3': [[3,4,5], 'CDF_INT4'], ..... } var_data : The data for the variable. 
If the variable is a regular variable without sparse records, it must be in a single structure of bytes, or numpy.ndarray for numeric variable, or str or list of strs for string variable. If the variable has sparse records, var_data should be presented in a list/tuple with two elements, the first being a list/tuple that contains the physical record number(s), the second being the variable data in bytes, numpy.ndarray, or a list of strings. Variable data can have just physical records' data (with the same number of records as the first element) or have data from both physical records and virtual records (which with filled data). The var_data has the form:: [[rec_#1,rec_#2,rec_#3,...], [data_#1,data_#2,data_#3,...]] See the sample for its setup.
def DiamAns(cmd, **fields):
    """Craft a Diameter answer command packet for *cmd*.

    :param cmd: the Diameter command to answer.
    :param fields: field overrides forwarded to the command lookup.
    :return: a DiamG packet named after the answer command.
    """
    # False selects the answer (not request) variant of the command.
    params, answer_name = getCmdParams(cmd, False, **fields)
    packet = DiamG(**params)
    packet.name = answer_name
    return packet
Craft Diameter answer commands
def attr_delete(args):
    """Delete key=value attributes from entities or from the workspace.

    If an entity type and entity names are given, the attributes are removed
    from those entities (via a batched TSV upload with __DELETE__ markers);
    otherwise they are removed from the workspace itself.
    """
    # NOTE(review): stripped string literals (dict keys, join separators,
    # format templates) must be restored before this function can run.
    if args.entity_type and args.entities:
        entities = _entity_paginator(args.project, args.workspace,
                                     args.entity_type,
                                     page_size=1000, filter_terms=None,
                                     sort_direction="asc")
        # Keep only the entities the user actually named.
        if args.entities:
            entities = [e for e in entities if e[] in args.entities]
        attrs = sorted(args.attributes)
        etype = args.entity_type
        entity_data = []
        for entity_dict in entities:
            name = entity_dict[]
            line = name
            # Samples additionally carry their participant id column.
            if etype == "sample":
                line += "\t" + entity_dict[][][]
            # __DELETE__ in a cell tells Firecloud to remove that attribute.
            for attr in attrs:
                line += "\t__DELETE__"
            entity_data.append(line)
        entity_header = ["entity:" + etype + "_id"]
        if etype == "sample":
            entity_header.append("participant_id")
        entity_header = .join(entity_header + list(attrs))
        message = "WARNING: this will delete these attributes:\n\n" + \
                  .join(args.attributes) + "\n\n"
        if args.entities:
            message += .format(args.entity_type) + \
                       .join(args.entities)
        else:
            message += .format(args.entity_type)
        message += "\n\nin workspace {0}/{1}\n".format(args.project, args.workspace)
        # Destructive operation: require confirmation unless --yes.
        if not args.yes and not _confirm_prompt(message):
            return 0
        if args.verbose:
            print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
        # Upload in chunks of 500 rows to stay within API limits.
        chunk_len = 500
        total = int(len(entity_data) / chunk_len) + 1
        batch = 0
        for i in range(0, len(entity_data), chunk_len):
            batch += 1
            if args.verbose:
                print("Updating samples {0}-{1}, batch {2}/{3}".format(
                    i+1, min(i+chunk_len, len(entity_data)), batch, total
                ))
            this_data = entity_header + + .join(entity_data[i:i+chunk_len])
            r = fapi.upload_entities(args.project, args.workspace, this_data)
            fapi._check_response_code(r, 200)
    else:
        # Workspace-level attribute removal.
        message = "WARNING: this will delete the following attributes in " + \
                  "{0}/{1}\n\t".format(args.project, args.workspace) + \
                  "\n\t".join(args.attributes)
        if not (args.yes or _confirm_prompt(message)):
            return 0
        updates = [fapi._attr_rem(a) for a in args.attributes]
        r = fapi.update_workspace_attributes(args.project, args.workspace,
                                             updates)
        fapi._check_response_code(r, 200)
    return 0
Delete key=value attributes: if entity name & type are specified then attributes will be deleted from that entity, otherwise the attribute will be removed from the workspace
def open_files(self, path):
    """Load file(s) matching a (possibly wildcarded) path spec.

    Supports patterns like image*.fits and image*.fits[ext].

    Returns:
        bool: True if at least one path was loaded, False otherwise.
    """
    paths = []
    # _patt splits the input into individual path specs; fall back to the
    # raw string when nothing matches.
    input_list = _patt.findall(path)
    if not input_list:
        input_list = [path]
    for path in input_list:
        # NOTE(review): stripped literals — the endswith() suffix and the
        # debug/format templates below need restoring.
        if path.endswith():
            path = path[:-1]
        # Directories cannot be loaded as images; skip them.
        if os.path.isdir(path):
            continue
        self.logger.debug(.format(path))
        info = iohelper.get_fileinfo(path)
        # Re-append the HDU suffix (e.g. "[0]") to every glob expansion.
        ext = iohelper.get_hdu_suffix(info.numhdu)
        files = glob.glob(info.filepath)
        paths.extend([.format(f, ext) for f in files])
    if len(paths) > 0:
        self.load_paths(paths)
        return True
    return False
Load file(s) -- image*.fits, image*.fits[ext]. Returns success code (True or False).
def create_graph_html(js_template, css_template, html_template=None):
    """Create an HTML code block from graph Javascript and CSS templates.

    A random graph id is generated and substituted into all three templates
    so multiple graphs can coexist on one page.
    """
    # NOTE(review): stripped literals — read_lib()'s library/file arguments
    # and the graph-id format template need restoring.
    if html_template is None:
        html_template = read_lib(, )
    graph_id = .format(_get_random_id())
    js = populate_template(js_template, graph_id=graph_id)
    css = populate_template(css_template, graph_id=graph_id)
    # Embed the rendered JS and CSS into the outer HTML template.
    return populate_template(
        html_template,
        graph_id=graph_id,
        css=css,
        js=js
    )
Create HTML code block given the graph Javascript and CSS.
def configuration_check(config):
    """Perform a sanity check on daemon configuration, then on each service.

    Arguments:
        config (obj): A configparser object holding the configuration.

    Raises:
        ValueError: if any setting is invalid.
    """
    # NOTE(review): this block is corrupted — option keys are stripped and
    # several error-message strings have been fused together; `option`,
    # `exc` and `msg` are referenced but never defined here. Restore from
    # VCS before use.
    log_level = config.get(, )
    # getattr on the logging module validates the level name.
    num_level = getattr(logging, log_level.upper(), None)
    pidfile = config.get(, )
    # The pidfile's parent directory must exist for the daemon to start.
    if not os.path.isdir(os.path.dirname(pidfile)):
        raise ValueError("{d} doesnInvalid log level: {}log_filestderr_filedaemondaemondaemon{opt}' option in daemon section: {err}"
                         .format(opt=option, err=exc))
        raise ValueError(msg)
    service_configuration_check(config)
Perform a sanity check on configuration. First it performs a sanity check against settings for daemon and then against settings for each service check. Arguments: config (obj): A configparser object which holds our configuration. Returns: None if all checks are successfully passed otherwise raises a ValueError exception.
def load(self, filename, format_file=):
    """Load accelerometer data from *filename* in the given file format.

    :param str filename: path to load data from.
    :param str format_file: file format (stripped literal — presumably the
        CloudUPDRS default; confirm against VCS).
    :return: the validated time-series dataframe, or None when invalid.
    """
    try:
        ts = load_data(filename, format_file)
        validator = CloudUPDRSDataFrameValidator()
        if validator.is_valid(ts):
            return ts
        else:
            # NOTE(review): error message stripped.
            logging.error()
            return None
    except IOError as e:
        ierr = "({}): {}".format(e.errno, e.strerror)
        logging.error("load data, file not found, I/O error %s", ierr)
    except ValueError as verr:
        # NOTE(review): verr.message is Python 2 only; under Python 3 this
        # line itself raises AttributeError.
        logging.error("load data ValueError ->%s", verr.message)
    except:
        # NOTE(review): bare except swallows everything (incl. SystemExit);
        # implicitly returns None on any failure.
        logging.error("Unexpected error on load data method: %s", sys.exc_info()[0])
This is a general load data method where the format of data to load can be passed as a parameter, :param str filename: The path to load data from :param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data. :return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \ data_frame.index is the datetime-like index
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
    """prefixDecl: KW_PREFIX PNAME_NS IRIREF — record a prefix/IRI binding."""
    shexj_iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
    ns_prefix = ctx.PNAME_NS().getText()
    # Skip IRIs already covered by the built-in JSON-LD prefixes, and never
    # overwrite an existing binding for the same prefix (setdefault).
    if shexj_iri not in self.context.ld_prefixes:
        self.context.prefixes.setdefault(ns_prefix, shexj_iri.val)
prefixDecl: KW_PREFIX PNAME_NS IRIREF
def add(self, num):
    """Shift the current selection by *num* steps, clamped to the allowed range."""
    upper = len(self.allowed) - 1
    # Clamp index into [0, upper] exactly as max(0, min(upper, ...)) does.
    self.index = max(0, min(upper, self.index + num))
    # Propagate the newly selected allowed value.
    self.set(self.allowed[self.index])
Adds num to the current value
def _query(function,
           consul_url,
           token=None,
           method=,
           api_version=,
           data=None,
           query_params=None):
    """Construct and execute a request against the Consul HTTP API.

    :param function: the Consul API function (path segment) to call.
    :param consul_url: base Consul URL.
    :param token: ACL token; fetched via _get_token() when not supplied.
    :param method: HTTP method (stripped literal — presumably 'GET').
    :param api_version: API version (stripped literal — presumably 'v1').
    :param data: body for non-GET requests (JSON-encoded).
    :param query_params: query string parameters.
    :return: dict with the decoded response and a result flag.
    """
    # NOTE(review): stripped literals — parameter defaults and the ret/dict
    # keys ('data', 'res', etc.) must be restored.
    if not query_params:
        query_params = {}
    ret = {: , : True}
    if not token:
        token = _get_token()
    headers = {"X-Consul-Token": token, "Content-Type": "application/json"}
    base_url = urllib.parse.urljoin(consul_url, .format(api_version))
    url = urllib.parse.urljoin(base_url, function, False)
    if method == :
        # GET requests carry no body.
        data = None
    else:
        if data is None:
            data = {}
        data = salt.utils.json.dumps(data)
    result = salt.utils.http.query(
        url,
        method=method,
        params=query_params,
        data=data,
        decode=True,
        status=True,
        header_dict=headers,
        opts=__opts__,
    )
    # Map HTTP status codes onto the (data, res) result structure.
    if result.get(, None) == http_client.OK:
        ret[] = result.get(, result)
        ret[] = True
    elif result.get(, None) == http_client.NO_CONTENT:
        ret[] = False
    elif result.get(, None) == http_client.NOT_FOUND:
        ret[] = 
        ret[] = False
    else:
        if result:
            ret[] = result
            ret[] = True
        else:
            ret[] = False
    return ret
Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False.
def plot_hpd(
    x,
    y,
    credible_interval=0.94,
    color="C1",
    circular=False,
    smooth=True,
    smooth_kwargs=None,
    fill_kwargs=None,
    plot_kwargs=None,
    ax=None,
):
    """Plot HPD (highest posterior density) intervals for regression data.

    The lower/upper HPD bounds of *y* are computed per point, optionally
    smoothed over a regular grid, and drawn as two lines plus a filled band.

    Parameters mirror the docstring convention of the surrounding module:
    x/y are the data, credible_interval the HPD mass, color the line/fill
    color, circular whether x is an angle, smooth/smooth_kwargs control the
    Savitzky-Golay smoothing, fill_kwargs/plot_kwargs are forwarded to
    fill_between/plot, and ax is the target matplotlib axes.

    Returns the matplotlib axes.
    """
    if plot_kwargs is None:
        plot_kwargs = {}
    plot_kwargs.setdefault("color", color)
    # alpha 0: the boundary lines are invisible by default; only the fill shows.
    plot_kwargs.setdefault("alpha", 0)

    if fill_kwargs is None:
        fill_kwargs = {}
    fill_kwargs.setdefault("color", color)
    fill_kwargs.setdefault("alpha", 0.5)

    if ax is None:
        ax = gca()

    # hpd_ has shape (n, 2): per-point lower and upper interval bounds.
    hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)

    if smooth:
        if smooth_kwargs is None:
            smooth_kwargs = {}
        smooth_kwargs.setdefault("window_length", 55)
        smooth_kwargs.setdefault("polyorder", 2)
        # Interpolate onto a regular grid first so the Savitzky-Golay filter
        # sees evenly spaced samples.
        x_data = np.linspace(x.min(), x.max(), 200)
        hpd_interp = griddata(x, hpd_, x_data)
        y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
    else:
        # Unsmoothed: just sort by x so the plotted lines are monotone in x.
        idx = np.argsort(x)
        x_data = x[idx]
        y_data = hpd_[idx]

    ax.plot(x_data, y_data, **plot_kwargs)
    ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)

    return ax
Plot hpd intervals for regression data. Parameters ---------- x : array-like Values to plot y : array-like values ​​from which to compute the hpd credible_interval : float, optional Credible interval to plot. Defaults to 0.94. color : str Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color circular : bool, optional Whether to compute the hpd taking into account `x` is a circular variable (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables). smooth : boolean If True the result will be smoothed by first computing a linear interpolation of the data over a regular grid and then applying the Savitzky-Golay filter to the interpolated data. Defaults to True. smooth_kwargs : dict, optional Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for details fill_kwargs : dict Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill). plot_kwargs : dict Keywords passed to HPD limits ax : matplotlib axes Returns ------- ax : matplotlib axes
def add_interrupt_callback(gpio_id, callback, edge='both',
                           pull_up_down=PUD_OFF, threaded_callback=False,
                           debounce_timeout_ms=None):
    """Register *callback* to fire when the value on *gpio_id* changes.

    :param gpio_id: GPIO pin number to watch.
    :param callback: callable invoked on the interrupt.
    :param edge: which edge triggers ('rising', 'falling' or 'both';
        default 'both', per the module documentation).
    :param pull_up_down: RPIO.PUD_UP, RPIO.PUD_DOWN or RPIO.PUD_OFF.
    :param threaded_callback: run the callback inside a Thread when True.
    :param debounce_timeout_ms: suppress further interrupts for this many
        milliseconds after one fires.
    """
    # Thin module-level wrapper: delegate everything to the shared
    # _rpio singleton.
    _rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down,
                                 threaded_callback, debounce_timeout_ms)
Add a callback to be executed when the value on 'gpio_id' changes to the edge specified via the 'edge' parameter (default='both'). `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and `RPIO.PUD_OFF`. If `threaded_callback` is True, the callback will be started inside a Thread. If debounce_timeout_ms is set, new interrupts will not be forwarded until after the specified amount of milliseconds.
def p_created_1(self, p):
    """created : CREATED DATE — record the document's creation date."""
    try:
        if six.PY2:
            # NOTE(review): encoding literal stripped — presumably 'utf-8'.
            value = p[2].decode(encoding=)
        else:
            value = p[2]
        self.builder.set_created_date(self.document, value)
    except CardinalityError:
        # The field may appear only once; report a duplicate.
        # NOTE(review): field-name literal stripped — presumably 'Created'.
        self.more_than_one_error(, p.lineno(1))
created : CREATED DATE
def string_value(node):
    """Compute the XPath string-value of a DOM node.

    Documents and elements concatenate the data of all descendant text
    nodes; attributes yield their value; PIs, comments and text nodes yield
    their data. Other node types return None implicitly.
    """
    if (node.nodeType == node.DOCUMENT_NODE or
            node.nodeType == node.ELEMENT_NODE):
        # Join is linear; the original += loop was quadratic in text size.
        return u''.join(n.data for n in axes['descendant'](node)
                        if n.nodeType == n.TEXT_NODE)
    elif node.nodeType == node.ATTRIBUTE_NODE:
        return node.value
    elif (node.nodeType == node.PROCESSING_INSTRUCTION_NODE or
          node.nodeType == node.COMMENT_NODE or
          node.nodeType == node.TEXT_NODE):
        return node.data
Compute the string-value of a node.
def list_replica_set_for_all_namespaces(self, **kwargs):
    """list_replica_set_for_all_namespaces  # noqa: E501

    List or watch objects of kind ReplicaSet. Synchronous by default; pass
    async_req=True to get a request thread back instead of the data.

    :param async_req bool: perform the request asynchronously.
    :return: V1beta1ReplicaSetList, or the request thread when async.
    """
    # Always return only the response body, not (data, status, headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
        return data
list_replica_set_for_all_namespaces # noqa: E501 list or watch objects of kind ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_replica_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ReplicaSetList If the method is called asynchronously, returns the request thread.
def _merge_bee(self, bee):
    """Shift one random dimension of *bee* toward another random employer's value.

    Args:
        bee (EmployerBee): supplied bee to merge.

    Returns:
        tuple: (score of new position, values of new position, fitness
        function return value of new position).
    """
    random_dimension = randint(0, len(self._value_ranges) - 1)
    second_bee = randint(0, self._num_employers - 1)
    # Never merge a bee with itself; redraw until a different employer is hit.
    while (bee.id == self._employers[second_bee].id):
        second_bee = randint(0, self._num_employers - 1)
    # Work on a copy so the original bee's position is left untouched.
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
        new_bee.values[random_dimension],
        self._employers[second_bee].values[random_dimension],
        self._value_ranges[random_dimension]
    )
    fitness_score = new_bee.get_score(self._fitness_fxn(
        new_bee.values, **self._args
    ))
    return (fitness_score, new_bee.values, new_bee.error)
Shifts a random value for a supplied bee in accordance with another random bee's value Args: bee (EmployerBee): supplied bee to merge Returns: tuple: (score of new position, values of new position, fitness function return value of new position)
def map_sid2sub(self, sid, sub):
    """Store the two-way mapping between a Session ID and a subject ID.

    :param sid: Session ID
    :param sub: subject ID
    """
    # Both directions are kept so lookups work from either key.
    self.set('sid2sub', sid, sub)
    self.set('sub2sid', sub, sid)
Store the connection between a Session ID and a subject ID. :param sid: Session ID :param sub: subject ID
def draw(self):
    """Draw this cell's content to the cairo context, clipped to the cell rect."""
    # Save/clip so nothing we draw leaks outside the cell rectangle.
    self.context.save()
    self.context.rectangle(*self.rect)
    self.context.clip()
    content = self.get_cell_content()
    pos_x, pos_y = self.rect[:2]
    # Translate by (x+2, y+2) to leave a 2px inner margin.
    self.context.translate(pos_x + 2, pos_y + 2)
    cell_attributes = self.code_array.cell_attributes
    # Cell too small for its own borders: nothing useful to draw.
    if self.rect[2] < cell_attributes[self.key]["borderwidth_right"] or \
       self.rect[3] < cell_attributes[self.key]["borderwidth_bottom"]:
        self.context.restore()
        return
    # Dispatch on content type: button label, bitmap, matplotlib figure,
    # SVG string, then plain text as the fallback.
    if self.code_array.cell_attributes[self.key]["button_cell"]:
        label = self.code_array.cell_attributes[self.key]["button_cell"]
        self.draw_button(1, 1, self.rect[2]-5, self.rect[3]-5, label)
    elif isinstance(content, wx._gdi.Bitmap):
        self.draw_bitmap(content)
    elif pyplot is not None and isinstance(content, pyplot.Figure):
        self.draw_matplotlib_figure(content)
    elif isinstance(content, basestring) and is_svg(content):
        self.draw_svg(content)
    elif content is not None:
        self.draw_text(content)
    # Undo the margin translation and release the clip.
    self.context.translate(-pos_x - 2, -pos_y - 2)
    self.context.restore()
Draws cell content to context
def write(self):
    """Write current status to ``status.txt`` in the working directory."""
    # Context manager guarantees the handle is closed even if show() raises
    # (the original open/close pair leaked the handle on error).
    with open('status.txt', mode='w') as f:
        self.show(f)
write status to status.txt
def _format_templates(name, command, templates):
    """Yield rst lines for a list-table of path templates.

    Parameters:
        name (str): The name of the config section.
        command (object): The sdss_access path instance.
        templates (dict): A dictionary of the path templates.

    Yields:
        str: one line of the rst list-table directive at a time.
    """
    # NOTE(review): all rst literal strings (the directive header, column
    # markers and row templates fed to .format/_indent) have been stripped
    # and must be restored from VCS.
    yield .format(name)
    yield _indent()
    yield _indent()
    yield 
    yield _indent()
    yield _indent()
    yield _indent()
    # One table row per template: key, template string, and its lookup keys.
    for key, var in templates.items():
        kwargs = command.lookup_keys(key)
        yield _indent(.format(key))
        yield _indent(.format(var))
        yield _indent(.format(.join(kwargs)))
        yield 
Creates a list-table directive for a set of defined environment variables Parameters: name (str): The name of the config section command (object): The sdss_access path instance templates (dict): A dictionary of the path templates Yields: A string rst-formated list-table directive
def get_exchange_rate(self, base, target, raise_errors=True):
    """Return the *base* → *target* exchange rate from Cryptonator.

    :param base: base currency ticker (case-insensitive).
    :param target: target currency ticker (case-insensitive).
    :param raise_errors: when False, return None on any API error instead
        of raising CryptonatorException.
    :return: float exchange rate, or None on error with raise_errors=False.
    """
    assert base and target
    # The API expects lowercase tickers.
    base, target = base.lower(), target.lower()
    r = self.session.get(API_SIMPLE_TICKER.format(base, target))
    if r.status_code != requests.codes.ok:
        if not raise_errors:
            return None
        raise CryptonatorException(
            ("An error occurred while getting requested exchange rate "
             "({} from Cryptonator).").format(r.status_code)
        )
    j = r.json()
    # Cryptonator signals failure either via success=false or a non-empty
    # error string.
    if not j['success'] or j['error']:
        if not raise_errors:
            return None
        raise CryptonatorException(
            ("An error occurred while getting requested exchange rate ({}, {})"
             "({}).").format(base, target, j['error'])
        )
    return float(j['ticker']['price'])
Return the ::base:: to ::target:: exchange rate.
def killJobs(self, jobsToKill):
    """Kill the given batch-system jobs, then record each one as failed.

    Each killed job is passed to processFinishedJob with exit status 1.
    """
    # Nothing to do for an empty set of jobs.
    if not jobsToKill:
        return
    self.batchSystem.killBatchJobs(jobsToKill)
    for batch_system_id in jobsToKill:
        self.processFinishedJob(batch_system_id, 1)
Kills the given set of jobs and then sends them for processing
def _iter_text_wave(
        self, text, numbers, step=1,
        fore=None, back=None, style=None,
        rgb_mode=False):
    """Yield colorized chunks of *text*, cycling through a wave of *numbers*.

    Exactly one of fore/back may be fixed; the other follows the wave.
    step controls how many characters share each color.
    """
    if fore and back:
        # NOTE(review): error message literal stripped.
        raise ValueError()
    pos = 0
    end = len(text)
    # _iter_wave is a generator we drive via send(True) to stop it.
    numbergen = self._iter_wave(numbers)

    def make_color(n):
        # A 3-tuple is treated as (r, g, b); a bare number is either an
        # index color or replicated to grey rgb when rgb_mode is set.
        try:
            r, g, b = n
        except TypeError:
            if rgb_mode:
                return n, n, n
            return n
        return r, g, b

    for value in numbergen:
        lastchar = pos + step
        # If fore is fixed, the wave drives back, and vice versa.
        yield self.color(
            text[pos:lastchar],
            fore=make_color(value) if fore is None else fore,
            back=make_color(value) if fore is not None else back,
            style=style
        )
        if lastchar >= end:
            # Tell the wave generator we've consumed the whole text.
            numbergen.send(True)
        pos = lastchar
Yield colorized characters from `text`, using a wave of `numbers`. Arguments: text : String to be colorized. numbers : A list/tuple of numbers (256 colors). step : Number of characters to colorize per color. fore : Fore color to use (name or number). (Back will be gradient) back : Background color to use (name or number). (Fore will be gradient) style : Style name to use. rgb_mode : Use number for rgb value. This should never be used when the numbers are rgb values themselves.
def page(self, end_date=values.unset, event_type=values.unset,
         minutes=values.unset, reservation_sid=values.unset,
         start_date=values.unset, task_queue_sid=values.unset,
         task_sid=values.unset, worker_sid=values.unset,
         workflow_sid=values.unset, task_channel=values.unset,
         sid=values.unset, page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """Retrieve a single page of EventInstance records from the API.

    The request is executed immediately; all arguments are optional filters
    and paging controls (values.unset means "not supplied").

    :returns: twilio.rest.taskrouter.v1.workspace.event.EventPage
    """
    # NOTE(review): the API parameter-name keys of this dict have been
    # stripped (e.g. 'EndDate', 'EventType', ...); restore before use.
    params = values.of({
        : serialize.iso8601_datetime(end_date),
        : event_type,
        : minutes,
        : reservation_sid,
        : serialize.iso8601_datetime(start_date),
        : task_queue_sid,
        : task_sid,
        : worker_sid,
        : workflow_sid,
        : task_channel,
        : sid,
        : page_token,
        : page_number,
        : page_size,
    })
    # The HTTP method literal (presumably 'GET') is also stripped.
    response = self._version.page(
        ,
        self._uri,
        params=params,
    )
    return EventPage(self._version, response, self._solution)
Retrieve a single page of EventInstance records from the API. Request is executed immediately :param datetime end_date: Filter events by an end date. :param unicode event_type: Filter events by those of a certain event type :param unicode minutes: Filter events by up to 'x' minutes in the past. :param unicode reservation_sid: Filter events by those pertaining to a particular reservation :param datetime start_date: Filter events by a start date. :param unicode task_queue_sid: Filter events by those pertaining to a particular queue :param unicode task_sid: Filter events by those pertaining to a particular task :param unicode worker_sid: Filter events by those pertaining to a particular worker :param unicode workflow_sid: Filter events by those pertaining to a particular workflow :param unicode task_channel: Filter events by those pertaining to a particular task channel :param unicode sid: Filter events by those pertaining to a particular event :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None):
    """Parse one raw reference line into structured citation elements.

    @input ref_line: a string holding a single reference bullet.
    @input kbs: knowledge bases used for tagging/normalisation.
    @output (splitted_citations, line_marker, counts, bad_titles_count).

    NOTE(review): bad_titles_count is a mutable default argument — state is
    shared across calls unless the caller passes its own dict (this appears
    intentional here, as the updated dict is also returned).
    """
    # 1. Strip the bullet marker, then tag DOIs/URLs so they survive the
    #    generic tagging pass untouched.
    line_marker, ref_line = remove_reference_line_marker(ref_line)
    ref_line, identified_dois = identify_and_tag_DOI(ref_line)
    ref_line, identified_urls = identify_and_tag_URLs(ref_line)
    tagged_line, bad_titles_count = tag_reference_line(ref_line,
                                                       kbs,
                                                       bad_titles_count)
    LOGGER.debug("tags %r", tagged_line)
    # 2. Turn the tagged line into citation elements.
    citation_elements, line_marker, counts = \
        parse_tagged_reference_line(line_marker,
                                    tagged_line,
                                    identified_dois,
                                    identified_urls)
    # 3. Normalisation pipeline — each step mutates citation_elements in
    #    place; the order is significant.
    split_volume_from_journal(citation_elements)
    format_volume(citation_elements)
    handle_special_journals(citation_elements, kbs)
    format_report_number(citation_elements)
    format_author_ed(citation_elements)
    look_for_books(citation_elements, kbs)
    format_hep(citation_elements)
    remove_b_for_nucl_phys(citation_elements)
    mangle_volume(citation_elements)
    arxiv_urls_to_report_numbers(citation_elements)
    look_for_hdl(citation_elements)
    look_for_hdl_urls(citation_elements)
    # 4. Optional record linking (done again after splitting, below).
    if linker_callback:
        associate_recids(citation_elements, linker_callback)
    # 5. One physical line may hold several citations; split and refine.
    splitted_citations = split_citations(citation_elements)
    look_for_implied_ibids(splitted_citations)
    add_year_elements(splitted_citations)
    look_for_undetected_books(splitted_citations, kbs)
    if linker_callback:
        for citations in splitted_citations:
            associate_recids(citations, linker_callback)
    remove_duplicated_authors(splitted_citations)
    remove_duplicated_dois(splitted_citations)
    remove_duplicated_collaborations(splitted_citations)
    add_recid_elements(splitted_citations)
    print_citations(splitted_citations, line_marker)
    return splitted_citations, line_marker, counts, bad_titles_count
Parse one reference line @input a string representing a single reference bullet @output parsed references (a list of elements objects)
def security_rule_get(security_rule, security_group, resource_group, **kwargs):
    """Get a security rule within a specified Azure network security group.

    :param security_rule: name of the security rule to query.
    :param security_group: network security group containing the rule.
    :param resource_group: resource group of the security group.
    :return: the rule as a dict, or {'error': message} on failure.
    """
    # NOTE(review): the __utils__ dunder keys are stripped — presumably
    # 'azurearm.get_client' and 'azurearm.log_cloud_error'; restore.
    netconn = __utils__[](, **kwargs)
    try:
        secrule = netconn.security_rules.get(
            network_security_group_name=security_group,
            resource_group_name=resource_group,
            security_rule_name=security_rule
        )
        result = secrule.as_dict()
    except CloudError as exc:
        # Log to the cloud log and surface the message to the caller.
        __utils__[](, str(exc), **kwargs)
        result = {: str(exc)}
    return result
.. versionadded:: 2019.2.0 Get a security rule within a specified network security group. :param name: The name of the security rule to query. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup
def katex_rendering_delimiters(app):
    """Build the KaTeX auto-render delimiters string.

    Returns early (empty) when delimiters are already given in
    katex_options; otherwise combines the configured inline and display
    delimiters. See https://khan.github.io/KaTeX/docs/autorender.html.
    """
    # NOTE(review): stripped literals — the membership key (presumably
    # 'delimiters'), the backslash-escaping replace() arguments and the
    # raw format template all need restoring.
    if in app.config.katex_options:
        return
    # Escape backslashes so the delimiters survive embedding in JS source.
    katex_inline = [d.replace(, ) for d in app.config.katex_inline]
    katex_display = [d.replace(, ) for d in app.config.katex_display]
    katex_delimiters = {: katex_inline, : katex_display}
    delimiters = r.format(**katex_delimiters)
    return delimiters
Delimiters for rendering KaTeX math. If no delimiters are specified in katex_options, add the katex_inline and katex_display delimiters. See also https://khan.github.io/KaTeX/docs/autorender.html
def ReadPreprocessingInformation(self, knowledge_base):
    """Read stored system-configuration artifacts into the knowledge base.

    Args:
        knowledge_base (KnowledgeBase): receives the preprocessing
            information (system configuration, e.g. user accounts).
    """
    configurations = self._GetAttributeContainers(
        self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)
    # The enumeration index doubles as the session identifier.
    for session_index, configuration in enumerate(configurations):
        knowledge_base.ReadSystemConfigurationArtifact(
            configuration, session_identifier=session_index)
Reads preprocessing information. The preprocessing information contains the system configuration which contains information about various system specific configuration data, for example the user accounts. Args: knowledge_base (KnowledgeBase): is used to store the preprocessing information.
def sample(self, rstate=None, return_q=False):
    """Sample a point uniformly distributed within the *union* of ellipsoids.

    Returns
    -------
    x : `~numpy.ndarray` with shape (ndim,)
        A coordinate within the set of ellipsoids.

    idx : int
        The index of the ellipsoid `x` was sampled from.

    q : int, optional
        The number of ellipsoids `x` falls within.
    """
    if rstate is None:
        rstate = np.random

    # Single-ellipsoid case: the point cannot fall in any overlap region.
    if self.nells == 1:
        point = self.ells[0].sample(rstate=rstate)
        if return_q:
            return point, 0, 1
        return point, 0

    def _draw():
        # Pick an ellipsoid with probability proportional to its volume,
        # sample uniformly inside it, and count how many ellipsoids
        # (including the chosen one) contain the point.
        j = rstate.choice(self.nells, p=self.vols / self.vol_tot)
        pt = self.ells[j].sample(rstate=rstate)
        return pt, j, self.overlap(pt, j=j) + 1

    point, idx, q = _draw()
    if return_q:
        return point, idx, q

    # Rejection-sample with acceptance probability 1/q to undo the
    # over-counting of points that lie in several ellipsoids.
    while rstate.rand() > (1. / q):
        point, idx, q = _draw()
    return point, idx
Sample a point uniformly distributed within the *union* of ellipsoids. Returns ------- x : `~numpy.ndarray` with shape (ndim,) A coordinate within the set of ellipsoids. idx : int The index of the ellipsoid `x` was sampled from. q : int, optional The number of ellipsoids `x` falls within.
def mount(self, app=None):
    """Mounts all registered routes onto a bottle.py application instance.

    Args:
        app (instance): A `bottle.Bottle()` application instance.

    Returns:
        The Router instance (for chaining purposes).
    """
    # Register every known endpoint against the target application.
    for route in self._routes:
        route.register_app(app)
    # Return self so calls can be chained.
    return self
Mounts all registered routes to a bottle.py application instance. Args: app (instance): A `bottle.Bottle()` application instance. Returns: The Router instance (for chaining purposes).
def save(self):
    """Save the current instance to the DB.

    Validates the model first; validation/conversion errors are logged
    and re-raised. On a failed write the instance id is reset to None.
    """
    # NOTE(review): this snippet is structurally truncated — an inner
    # try/except (presumably wrapping the actual insert/update call that
    # produced ``rv``) has been stripped, leaving two ``except`` clauses
    # after an ``else``. Restore the inner try block from upstream.
    with rconnect() as conn:
        try:
            self.validate()
        except ValidationError as e:
            log.warn(e.messages)
            raise
        except ModelValidationError as e:
            log.warn(e.messages)
            raise
        except ModelConversionError as e:
            log.warn(e.messages)
            raise
        except ValueError as e:
            log.warn(e)
            raise
        except FrinkError as e:
            log.warn(e.messages)
            raise
        except Exception as e:
            log.warn(e)
            raise
        else:
            log.debug(rv)
        except Exception as e:
            # Write failed: clear the id so the instance is not treated
            # as persisted, then propagate.
            log.warn(e)
            self.id = None
            raise
        else:
            return self
Save the current instance to the DB
def get_arrive_stop(self, **kwargs):
    """Obtain bus arrival info for a target stop.

    Args:
        stop_number (int): Stop number to query.
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[Arrival]), or the raw
        value on error.
    """
    # NOTE(review): the kwargs keys, request endpoint strings and
    # result-field names have been stripped from this snippet — restore
    # them from upstream before use.
    params = {
        : kwargs.get(),
        : util.language_code(kwargs.get())
    }
    result = self.make_request(, , **params)
    # Bail out early when the API reports a failed request.
    if not util.check_result(result, ):
        return False,
    # Parse each raw arrival entry into an emtype.Arrival object.
    values = util.response_list(result, )
    return True, [emtype.Arrival(**a) for a in values]
Obtain bus arrival info in target stop. Args: stop_number (int): Stop number to query. lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Arrival]), or message string in case of error.
def _at_for(self, calculator, rule, scope, block):
    """Implements @for.

    Parses ``$var from X [through|to] Y`` out of the block argument and
    evaluates the block body once per integer in the range, binding the
    loop variable each iteration.
    """
    # NOTE(review): the partition() separator literals (presumably
    # ' from ', ' through ', ' to ') have been stripped from this
    # snippet — restore them from upstream before use.
    var, _, name = block.argument.partition()
    frm, _, through = name.partition()
    if through:
        # 'through' form: the end bound is included in the range.
        inclusive = True
    else:
        inclusive = False
        frm, _, through = frm.partition()
    frm = calculator.calculate(frm)
    through = calculator.calculate(through)
    try:
        frm = int(float(frm))
        through = int(float(through))
    except ValueError:
        # Non-numeric bounds: silently skip the loop.
        return
    if frm > through:
        # Descending range: normalize the bounds and iterate reversed.
        frm, through = through, frm
        rev = reversed
    else:
        rev = lambda x: x
    var = var.strip()
    var = calculator.do_glob_math(var)
    var = normalize_var(var)
    inner_rule = rule.copy()
    inner_rule.unparsed_contents = block.unparsed_contents
    if not self.should_scope_loop_in_rule(inner_rule):
        # DEVIATION: Allow not creating a new namespace
        inner_rule.namespace = rule.namespace
    if inclusive:
        through += 1
    for i in rev(range(frm, through)):
        # Bind the loop variable, then evaluate the block body.
        inner_rule.namespace.set_variable(var, Number(i))
        self.manage_children(inner_rule, scope)
Implements @for
def glover_time_derivative(tr, oversampling=50, time_length=32., onset=0.):
    """Implementation of the Glover time derivative hrf (dhrf) model.

    The derivative is approximated by a finite difference of two Glover
    HRFs whose onsets differ by 0.1 s.

    Parameters
    ----------
    tr: float
        scan repeat time, in seconds
    oversampling: int, optional
        temporal oversampling factor
    time_length: float
        hrf kernel length, in seconds
    onset: float
        onset of the response

    Returns
    -------
    dhrf: array of shape(length / tr), dtype=float
        dhrf sampling on the provided grid
    """
    delta = .1
    hrf_at_onset = glover_hrf(tr, oversampling, time_length, onset)
    hrf_shifted = glover_hrf(tr, oversampling, time_length, onset + delta)
    # Finite-difference approximation of the temporal derivative.
    scale = 1. / delta
    return scale * (hrf_at_onset - hrf_shifted)
Implementation of the Glover time derivative hrf (dhrf) model Parameters ---------- tr: float scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr), dtype=float dhrf sampling on the provided grid
def space(self, newlines=1):
    """Creates a vertical space of newlines.

    Args:
        newlines (int): number of empty lines

    Returns:
        self for chaining
    """
    gap = Space()
    for _ in range(newlines):
        gap.add_line()
    # Insert the spacer at the current position and advance the cursor.
    self._container.structure.insert(self._idx, gap)
    self._idx += 1
    return self
Creates a vertical space of newlines Args: newlines (int): number of empty lines Returns: self for chaining
def get_instances(self, object_specs, version=None):
    """Get the cached native representation for one or more objects.

    Keyword arguments:
    object_specs - A sequence of triples (model name, pk, obj):
    - model name - the name of the model
    - pk - the primary key of the instance
    - obj - the instance, or None to load it
    version - The cache version to use, or None for default

    Return is a dictionary:
    key - (model name, pk)
    value - (native representation, cache key, object or None)
    """
    # NOTE(review): the model_function role strings (presumably 'loader' /
    # 'serializer') and the key-substring filter literal have been
    # stripped from this snippet — restore from upstream before use.
    ret = dict()
    spec_keys = set()
    cache_keys = []
    version = version or self.default_version
    # First pass: compute a cache key for every requested spec.
    for model_name, obj_pk, obj in object_specs:
        assert model_name
        assert obj_pk
        obj_key = self.key_for(version, model_name, obj_pk)
        spec_keys.add((model_name, obj_pk, obj, obj_key))
        cache_keys.append(obj_key)
    # Bulk-fetch whatever is already cached.
    if cache_keys and self.cache:
        cache_vals = self.cache.get_many(cache_keys)
    else:
        cache_vals = {}
    cache_to_set = {}
    # Second pass: fill cache misses by loading + serializing the object.
    for model_name, obj_pk, obj, obj_key in spec_keys:
        obj_val = cache_vals.get(obj_key)
        obj_native = json.loads(obj_val) if obj_val else None
        if not obj_native:
            if not obj:
                # Load the instance on demand when the caller passed None.
                loader = self.model_function(model_name, version, )
                obj = loader(obj_pk)
            serializer = self.model_function(
                model_name, version, )
            obj_native = serializer(obj) or {}
            if obj_native:
                # Queue the freshly serialized value for a bulk cache write.
                cache_to_set[obj_key] = json.dumps(obj_native)
        # Convert specially-encoded JSON fields back into native values.
        keys = [key for key in obj_native.keys() if in key]
        for key in keys:
            json_value = obj_native.pop(key)
            name, value = self.field_from_json(key, json_value)
            assert name not in obj_native
            obj_native[name] = value
        if obj_native:
            ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)
    # Write all new serializations back to the cache in one call.
    if cache_to_set and self.cache:
        self.cache.set_many(cache_to_set)
    return ret
Get the cached native representation for one or more objects. Keyword arguments: object_specs - A sequence of triples (model name, pk, obj): - model name - the name of the model - pk - the primary key of the instance - obj - the instance, or None to load it version - The cache version to use, or None for default To get the 'new object' representation, set pk and obj to None Return is a dictionary: key - (model name, pk) value - (native representation, pk, object or None)
def make_functions(self):
    """Revisit the entire control flow graph, create Function instances
    accordingly, and correctly put blocks into each function.

    Rebuilds all functions from the pre-constructed CFG: a block may only
    belong to one function, small nested functions are merged, tail calls
    are detected, and PLT stubs are aligned by 16.

    :return: None
    """
    # NOTE(review): this snippet is truncated — the edge-data key and
    # jumpkind literals are stripped, and ``secondary_function_nodes`` /
    # ``missing_cfg_nodes`` are referenced without being defined here.
    # Restore the missing stage-2/3 setup from upstream before use.
    tmp_functions = self.kb.functions.copy()

    for function in tmp_functions.values():
        function.mark_nonreturning_calls_endpoints()

    # Rebuild the function map from scratch.
    self.kb.functions.clear()

    blockaddr_to_function = { }
    traversed_cfg_nodes = set()

    function_nodes = set()

    # Collect nodes that are targets of call-like edges: these are
    # candidate function entry points.
    for _, dst, data in self.graph.edges(data=True):
        jumpkind = data.get(, "")
        if jumpkind == or jumpkind.startswith():
            function_nodes.add(dst)

    # The binary entry point is always a function start.
    entry_node = self.model.get_any_node(self._binary.entry)
    if entry_node is not None:
        function_nodes.add(entry_node)

    # Stage 2: traverse from each primary function node (50% -> 90% of
    # reported progress).
    min_stage_2_progress = 50.0
    max_stage_2_progress = 90.0
    nodes_count = len(function_nodes)
    for i, fn in enumerate(sorted(function_nodes, key=lambda n: n.addr)):

        if self._low_priority:
            self._release_gil(i, 20)

        if self._show_progressbar or self._progress_callback:
            progress = min_stage_2_progress + (max_stage_2_progress - min_stage_2_progress) * (i * 1.0 / nodes_count)
            self._update_progress(progress)

        self._graph_bfs_custom(self.graph, [ fn ], self._graph_traversal_handler, blockaddr_to_function,
                               tmp_functions, traversed_cfg_nodes
                               )

    secondary_function_nodes |= missing_cfg_nodes

    # Stage 3: traverse from each secondary function node (90% -> 99.9%).
    min_stage_3_progress = 90.0
    max_stage_3_progress = 99.9

    nodes_count = len(secondary_function_nodes)
    for i, fn in enumerate(sorted(secondary_function_nodes, key=lambda n: n.addr)):

        if self._show_progressbar or self._progress_callback:
            progress = min_stage_3_progress + (max_stage_3_progress - min_stage_3_progress) * (i * 1.0 / nodes_count)
            self._update_progress(progress)

        self._graph_bfs_custom(self.graph, [fn], self._graph_traversal_handler, blockaddr_to_function,
                               tmp_functions
                               )

    to_remove = set()

    # Drop functions that fall inside a 16-byte-aligned PLT stub
    # (non-ARM architectures only).
    if not is_arm_arch(self.project.arch):
        for fn in self.kb.functions.values():
            addr = fn.addr - (fn.addr % 16)
            if addr != fn.addr and addr in self.kb.functions and self.kb.functions[addr].is_plt:
                to_remove.add(fn.addr)

    # Drop functions with no startpoint.
    for func in self.kb.functions.values():
        if func.startpoint is None:
            to_remove.add(func.addr)

    for addr in to_remove:
        del self.kb.functions[addr]

    # Annotate every CFG node with the address of its owning function.
    for node in self._nodes.values():
        if node.addr in blockaddr_to_function:
            node.function_address = blockaddr_to_function[node.addr].addr
Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks
into each function.

Although Function objects are created during the CFG recovery, they are neither sound nor accurate. With a
pre-constructed CFG, this method rebuilds all functions bearing the following rules:

    - A block may only belong to one function.
    - Small functions lying inside the startpoint and the endpoint of another function will be merged with
      the other function
    - Tail call optimizations are detected.
    - PLT stubs are aligned by 16.

:return: None
def load_and_migrate() -> Dict[str, Path]:
    """Ensure the settings directory tree is properly configured.

    On a robot, first move settings files to their proper place; on
    non-robots this mostly just loads. Also makes sure the base config
    directory exists (the files in it may not) before loading and
    normalizing the settings index.
    """
    if IS_ROBOT:
        _migrate_robot()
    config_base = infer_config_base_dir()
    # Create the settings tree if it does not exist yet.
    config_base.mkdir(parents=True, exist_ok=True)
    raw_index = _load_with_overrides(config_base)
    return _ensure_paths_and_types(raw_index)
Ensure the settings directory tree is properly configured. This function does most of its work on the actual robot. It will move all settings files from wherever they happen to be to the proper place. On non-robots, this mostly just loads. In addition, it writes a default config and makes sure all directories required exist (though the files in them may not).
def ra(self, *args, **kwargs):
    """
    NAME:

        ra

    PURPOSE:

        return the right ascension

    INPUT:

        t - (optional) time at which to get ra (can be Quantity)

        obs=[X,Y,Z] - (optional) position of observer; Y is ignored and
        always assumed to be zero

        ro= physical scale for distance conversion (can be Quantity)

    OUTPUT:

        ra(t) in deg

    HISTORY:

        2011-02-23 - Written - Bovy (NYU)
    """
    # Delegate to the underlying orbit object; unwrap single-element results.
    result = self._orb.ra(*args, **kwargs)
    return result[0] if len(result) == 1 else result
NAME: ra PURPOSE: return the right ascension INPUT: t - (optional) time at which to get ra (can be Quantity) obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity) (default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer (default=Object-wide default; can be Quantity) Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) OUTPUT: ra(t) in deg HISTORY: 2011-02-23 - Written - Bovy (NYU)
def update_function(self, param_vals):
    """Updates the wrapped optimisation object with new parameter values
    and returns the resulting error."""
    optimizer = self.opt_obj
    optimizer.update_function(param_vals)
    return optimizer.get_error()
Updates the opt_obj, returns new error.
def _get_one_pending_job(self):
    """Retrieve a pending job.

    :return: A CFGJob instance or None
    """
    # NOTE(review): this snippet is truncated — the already-traced branch
    # (the l.debug/_graph_add_edge/return None section) has lost its
    # guarding ``if`` (``pending_exit_addr`` is undefined here), and the
    # jumpkind string literal has been stripped. Restore from upstream.
    pending_job_key, pending_job = self._pending_jobs.popitem()
    pending_job_state = pending_job.state
    pending_job_call_stack = pending_job.call_stack
    pending_job_src_block_id = pending_job.src_block_id
    pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx

    self._deregister_analysis_job(pending_job.caller_func_addr, pending_job)

    # If the target was traced before, only record the fake-return edge
    # and do not create a new job.
    l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr)
    self._graph_add_edge(pending_job_src_block_id, pending_job_key,
                         jumpkind="Ijk_FakeRet",
                         stmt_idx=pending_job_src_exit_stmt_idx,
                         ins_addr=pending_job.src_exit_ins_addr)
    return None

    # Otherwise, mark the state as a fake return and build a CFGJob for it.
    pending_job_state.history.jumpkind =
    job = CFGJob(pending_job_state.addr,
                 pending_job_state,
                 self._context_sensitivity_level,
                 src_block_id=pending_job_src_block_id,
                 src_exit_stmt_idx=pending_job_src_exit_stmt_idx,
                 src_ins_addr=pending_job.src_exit_ins_addr,
                 call_stack=pending_job_call_stack,
                 )
    l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key))

    return job
Retrieve a pending job. :return: A CFGJob instance or None
def _increment_recursion_level(self):
    """Increment current_depth based on either defaults or the enclosing
    Async, capping context at max_depth."""
    # NOTE(review): the option-dict keys (presumably '_recursion',
    # 'current'/'max' style keys) have been stripped from this snippet —
    # restore them from upstream before use.
    self._initialize_recursion_depth()

    recursion_options = self._options.get(, {})
    # Bump the depth by one relative to the enclosing context (default 0).
    current_depth = recursion_options.get(, 0) + 1
    max_depth = recursion_options.get(, MAX_DEPTH)

    # Persist the updated depth/limit back onto the options.
    self.update_options(_recursion={: current_depth,
                                    : max_depth})
Increment current_depth based on either defaults or the enclosing Async.
def raise_204(instance):
    """Abort the current request with a 204 (No Content) response code.
    Clears out the body of the response.

    :param instance: Resource instance (used to access the response)
    :raises: ResponseException carrying the modified response
    """
    response = instance.response
    response.status = 204
    # A 204 must not carry a payload; clear both body representations.
    response.body = None
    response.body_raw = None
    raise ResponseException(response)
Abort the current request with a 204 (No Content) response code. Clears out the body of the response. :param instance: Resource instance (used to access the response) :type instance: :class:`webob.resource.Resource` :raises: :class:`webob.exceptions.ResponseException` of status 204