def decoration(self):
    rtiIconFactory = RtiIconFactory.singleton()
    if self._exception:
        return rtiIconFactory.getIcon(
            rtiIconFactory.ERROR, isOpen=False,
            color=rtiIconFactory.COLOR_ERROR)
    else:
        return rtiIconFactory.getIcon(
            self.iconGlyph, isOpen=not self.canFetchChildren(),
            color=self.iconColor)
The displayed icon. Shows the open icon when the node has been visited (i.e. its children have been fetched). This allows users, for instance, to collapse a directory node but still see that it was visited, which may be useful when there is a huge list of directories.
def find_actual_caller(self):
    # Modeled on logging.Logger.findCaller; frame introspection may be
    # unavailable on some interpreters.
    try:
        f = sys._getframe(1)
    except Exception:
        f = None
    rv = ("(unknown module)", "(unknown file)", 0, "(unknown function)")
    # NOTE: the loop header and the code deriving `mod`, `filename` and `co`
    # were lost in extraction; reconstructed here from the surviving
    # `continue`/`break` statements and the logging-module idiom.
    while f is not None:
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        mod = inspect.getmodule(f)
        modname = mod.__name__ if mod is not None else "__main__"
        if modname == __name__:
            # Skip frames that belong to this module itself.
            f = f.f_back
            continue
        rv = (modname, filename, f.f_lineno, co.co_name)
        break
    return rv
Returns the fully-qualified module name, full pathname, line number, and function in which `StreamTeeLogger.write()` was called. For example, if this instance is used to replace `sys.stdout`, this will return the location of any print statement.
def add_page(self, slug):
    post_data = self.get_post_data()
    # NOTE: the dict key and the redirect URL pattern were lost in
    # extraction; 'user_name' and '/wiki/{0}' are assumed.
    post_data['user_name'] = self.userinfo.user_name
    if MWiki.get_by_uid(slug):
        self.set_status(400)
        return False
    else:
        MWiki.create_page(slug, post_data)
        tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
        self.redirect('/wiki/{0}'.format(slug))
Add new page.
def _update_segmentation_mask_if_overlap(toupdate, other, id, otherid):
    yourmask = other == otherid
    mymask = toupdate == id
    overlap_exists = np.any(yourmask & mymask)
    if not overlap_exists:
        return
    yourfidxs, yoursidxs = np.where(yourmask)
    toupdate[yourfidxs, yoursidxs] = id
Merges the segments specified by `id` (found in `toupdate`) and `otherid` (found in `other`) if they overlap at all. Updates `toupdate` accordingly.
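A minimal sketch of the expected call pattern, assuming 2-D integer label masks (the sample arrays and IDs below are illustrative):

import numpy as np

toupdate = np.array([[1, 1, 0],
                     [0, 0, 0]])
other = np.array([[0, 2, 2],
                  [0, 2, 0]])

# Segments 1 and 2 overlap at (0, 1), so every pixel labelled 2 in
# `other` is relabelled 1 in `toupdate`.
_update_segmentation_mask_if_overlap(toupdate, other, id=1, otherid=2)
print(toupdate)  # [[1 1 1], [0 1 0]]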
def _group_kwargs_to_options(cls, obj, kwargs):
    "Format option group kwargs into canonical options format"
    # NOTE: the format strings and join separators below were lost in
    # extraction; '%s.%s.%s', '%s.%s' and ', ' are reconstructed.
    groups = Options._option_groups
    if set(kwargs.keys()) - set(groups):
        raise Exception("Keyword options %s must be one of %s"
                        % (groups, ', '.join(repr(g) for g in groups)))
    elif not all(isinstance(v, dict) for v in kwargs.values()):
        raise Exception("The %s options must be specified using dictionary groups"
                        % ', '.join(repr(k) for k in kwargs.keys()))
    # Detect whether the keys are target specifications (capitalized names)
    targets = [grp and all(k[0].isupper() for k in grp) for grp in kwargs.values()]
    if any(targets) and not all(targets):
        raise Exception("Cannot mix target specification keys such as "
                        "'Image' with non-target keywords.")
    elif not any(targets):
        # No targets specified: use the current object as the target.
        sanitized_group = util.group_sanitizer(obj.group)
        if obj.label:
            identifier = ('%s.%s.%s' % (obj.__class__.__name__,
                                        sanitized_group,
                                        util.label_sanitizer(obj.label)))
        elif sanitized_group != obj.__class__.__name__:
            identifier = '%s.%s' % (obj.__class__.__name__, sanitized_group)
        else:
            identifier = obj.__class__.__name__
        options = {identifier: {grp: kws for (grp, kws) in kwargs.items()}}
    else:
        dfltdict = defaultdict(dict)
        for grp, entries in kwargs.items():
            for identifier, kws in entries.items():
                dfltdict[identifier][grp] = kws
        options = dict(dfltdict)
    return options
Format option group kwargs into canonical options format
def _connect_mitogen_su(spec):
    # NOTE: the dict keys and values below were lost in extraction;
    # reconstructed following the Mitogen ContextService convention.
    return {
        'method': 'su',
        'kwargs': {
            'username': spec.remote_user(),
            'password': spec.password(),
            'python_path': spec.python_path(),
            'su_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
Return ContextService arguments for su as a first class connection.
def connect(self, taskspec):
    self.outputs.append(taskspec)
    taskspec._connect_notify(self)
Connect the *following* task to this one. In other words, the given task
is added as an output task.

:type  taskspec: TaskSpec
:param taskspec: The new output task.
def makeMigrator(context, portal_type, remove_old_value=True):
    meta_type = portal_type

    class BlobMigrator(BaseInlineMigrator):
        src_portal_type = portal_type
        src_meta_type = meta_type
        dst_portal_type = portal_type
        dst_meta_type = meta_type
        fields = []

        def getFields(self, obj):
            if not self.fields:
                # collect the blob fields to migrate from the first object
                for field in ISchema(obj).fields():
                    if IBlobField.providedBy(field):
                        self.fields.append(field.getName())
            return self.fields

        @property
        def fields_map(self):
            fields = self.getFields(None)
            return dict([(name, None) for name in fields])

        def migrate_data(self):
            fields = self.getFields(self.obj)
            for name in fields:
                oldfield = self.obj.schema[name]
                is_imagefield = False
                if hasattr(oldfield, 'removeScales'):
                    # clean up old image scales
                    is_imagefield = True
                    oldfield.removeScales(self.obj)
                value = oldfield.get(self.obj)
                if not value:
                    continue
                if isinstance(aq_base(value), BlobWrapper):
                    # already migrated
                    continue
                field = self.obj.getField(name)
                field.getMutator(self.obj)(value)
                if remove_old_value:
                    # NOTE: the values passed to `oldfield.set` were lost in
                    # extraction; 'DELETE_IMAGE' and 'DELETE_FILE' are assumed.
                    if is_imagefield:
                        oldfield.set(self.obj, 'DELETE_IMAGE')
                    else:
                        oldfield.set(self.obj, 'DELETE_FILE')

        def last_migrate_reindex(self):
            self.obj.reindexObject()

    return BlobMigrator
Generate a migrator for the given AT-based (Archetypes) portal type
def top2_full(votes):
    res = np.zeros(16)
    for vote in votes:
        if vote[0][0] == 0:
            res[0] += 1
            if vote[1][0] == 1:
                res[4] += 1
            elif vote[1][0] == 2:
                res[5] += 1
            elif vote[1][0] == 3:
                res[6] += 1
        elif vote[0][0] == 1:
            res[1] += 1
            if vote[1][0] == 0:
                res[7] += 1
            elif vote[1][0] == 2:
                res[8] += 1
            elif vote[1][0] == 3:
                res[9] += 1
        elif vote[0][0] == 2:
            res[2] += 1
            if vote[1][0] == 0:
                res[10] += 1
            elif vote[1][0] == 1:
                res[11] += 1
            elif vote[1][0] == 3:
                res[12] += 1
        elif vote[0][0] == 3:
            res[3] += 1
            if vote[1][0] == 0:
                res[13] += 1
            elif vote[1][0] == 1:
                res[14] += 1
            elif vote[1][0] == 2:
                res[15] += 1
    res /= len(votes)
    return res
Description:
    Top 2 alternatives 16 moment conditions values calculation
Parameters:
    votes: ordinal preference data (numpy ndarray of integers)
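A minimal sketch of the expected input shape, inferred from the indexing `vote[0][0]`/`vote[1][0]` in the function body (the sample values are illustrative):

import numpy as np

# Each vote is a ranking whose entries are themselves indexable
# (e.g. (alternative, score) pairs), so vote[0][0] is the first choice
# and vote[1][0] the second choice among alternatives 0..3.
votes = np.array([
    [[0, 0], [1, 0], [2, 0], [3, 0]],  # first choice 0, second choice 1
    [[2, 0], [3, 0], [0, 0], [1, 0]],  # first choice 2, second choice 3
])
print(top2_full(votes))  # 16 moment-condition frequencies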
def deserialize_frame(stream, header, verifier=None):
    _LOGGER.debug("Starting frame deserialization")
    frame_data = {}
    final_frame = False
    (sequence_number,) = unpack_values(">I", stream, verifier)
    if sequence_number == SequenceIdentifier.SEQUENCE_NUMBER_END.value:
        _LOGGER.debug("Deserializing final frame")
        (sequence_number,) = unpack_values(">I", stream, verifier)
        final_frame = True
    else:
        _LOGGER.debug("Deserializing frame sequence number %d", int(sequence_number))
    frame_data["final_frame"] = final_frame
    frame_data["sequence_number"] = sequence_number
    (frame_iv,) = unpack_values(
        ">{iv_len}s".format(iv_len=header.algorithm.iv_len), stream, verifier)
    frame_data["iv"] = frame_iv
    if final_frame is True:
        (content_length,) = unpack_values(">I", stream, verifier)
        if content_length >= header.frame_length:
            raise SerializationError(
                "Invalid final frame length: {final} >= {normal}".format(
                    final=content_length, normal=header.frame_length
                )
            )
    else:
        content_length = header.frame_length
    (frame_content, frame_tag) = unpack_values(
        ">{content_len}s{auth_len}s".format(
            content_len=content_length, auth_len=header.algorithm.auth_len),
        stream,
        verifier,
    )
    frame_data["ciphertext"] = frame_content
    frame_data["tag"] = frame_tag
    return MessageFrameBody(**frame_data), final_frame
Deserializes a frame from a body.

:param stream: Source data stream
:type stream: io.BytesIO
:param header: Deserialized header
:type header: aws_encryption_sdk.structures.MessageHeader
:param verifier: Signature verifier object (optional)
:type verifier: aws_encryption_sdk.internal.crypto.Verifier
:returns: Deserialized frame and a boolean stating if this is the final frame
:rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
def install(
        engine,
        n_creatures=5,
        n_sickles=3,
        malaria_chance=.05,
        mate_chance=.05,
        mapsize=(1, 1),
        startpos=(0, 0)
):
    initmap = nx.grid_2d_graph(*mapsize)
    phys = engine.new_character("physical", data=initmap)
    species = engine.new_character(
        "species",
        mate_chance=mate_chance,
        malaria_chance=malaria_chance,
        n_creatures=n_creatures,
    )
    for n in range(0, n_creatures):
        name = "critter" + str(n)
        phys.add_thing(
            name=name,
            location=startpos,
            sickle_a=(n < n_sickles),
            sickle_b=False,
            male=engine.coinflip(),
            last_mate_turn=-1
        )
        assert name in phys.thing
        assert name not in phys.place
        assert name in phys.node  # assertion message lost in extraction

    # NOTE: a large span here (the mating and malaria rules) was destroyed
    # in extraction and cannot be recovered; only the tail of the final
    # `wander` rule survived, reconstructed below (decorator and `dests`
    # computation are assumptions).
    @phys.thing.rule
    def wander(critter):
        dests = list(critter.character.place.keys())
        dests.remove(critter['location'])
        dest = critter.engine.choice(dests)
        critter.travel_to(dest)

    @wander.trigger
    def not_travelling(critter):
        return critter.next_location is None

    @wander.prereq
    def big_map(critter):
        return len(critter.character.place) > 1
Natural Selection on Sickle Cell Anemia

If anyone carries a pair of sickle betaglobin genes, they die of sickle
cell anemia. Individuals with 1x betaglobin, 1x sickle betaglobin are
immune to malaria.
def RelaxNGValidateCtxt(self, reader, options):
    if reader is None:
        reader__o = None
    else:
        reader__o = reader._o
    ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
    return ret
Use RelaxNG schema context to validate the document as it is processed. Activation is only possible before the first Read(). If @ctxt is None, then RelaxNG schema validation is deactivated.
def _iter_unfolded_lines(self):
    line = self._input_file.readline()
    while line:
        self.line_counter += 1
        self.byte_counter += len(line)
        line = self._strip_line_sep(line)
        nextline = self._input_file.readline()
        # Unfold: a line starting with a space continues the previous one.
        # NOTE: the byte literals were lost in extraction; b' ' (folding
        # marker) and b'#' (comment marker) are assumed.
        while nextline and nextline[:1] == b' ':
            line += self._strip_line_sep(nextline)[1:]
            nextline = self._input_file.readline()
        if not line.startswith(b'#'):
            yield line
        line = nextline
Iterate over unfolded input lines. Skip comments.
def get_select_items(items):
    option_items = list()
    for item in items:
        if isinstance(item, dict) and defs.VALUE in item and defs.LABEL in item:
            option_items.append(item[defs.VALUE])
        else:
            raise exceptions.ParametersFieldError(
                item,
                "a dictionary with {} and {}".format(defs.LABEL, defs.VALUE))
    return option_items
Return list of possible select items.
def _set_name_server(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # NOTE: most string literals in this generated pyangbind setter were
        # lost in extraction; 'list' containers and the yang key are assumed,
        # and unrecoverable extension/namespace values are left as `...`.
        t = YANGDynClass(
            v,
            base=YANGListType(
                "name_server_ip", name_server.name_server,
                yang_name="name-server", rest_name="name-server",
                parent=self, is_container='list', user_ordered=False,
                path_helper=self._path_helper,
                yang_keys='name_server_ip',
                extensions=...),
            is_container='list', yang_name="name-server",
            rest_name="name-server", parent=self,
            path_helper=self._path_helper, extmethods=self._extmethods,
            register_paths=True, extensions=...,
            namespace=..., defining_module=...,
            yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'name_server must be of a type compatible with list',
            'defined-type': "list",
            'generated-type': ...,
        })
    self.__name_server = t
    if hasattr(self, '_set'):
        self._set()
Setter method for name_server, mapped from YANG variable /ip/dns/name_server (list) If this variable is read-only (config: false) in the source YANG file, then _set_name_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name_server() directly.
def visitLexerBlock(self, ctx: jsgParser.LexerBlockContext):
    self._rulePattern += self.visitChildren(ctx)
    # NOTE: the appended literal was lost in extraction; a closing
    # parenthesis is assumed from the grammar rule (OPREN ... CPREN).
    self._rulePattern += ')'
lexerBlock: OPREN lexeraltList CPREN
def from_cif_string(cif_string, transformations=None, primitive=True,
                    occupancy_tolerance=1.):
    parser = CifParser.from_string(cif_string, occupancy_tolerance)
    raw_string = re.sub(r"'", "\"", cif_string)
    cif_dict = parser.as_dict()
    cif_keys = list(cif_dict.keys())
    s = parser.get_structures(primitive)[0]
    partial_cif = cif_dict[cif_keys[0]]
    if "_database_code_ICSD" in partial_cif:
        source = partial_cif["_database_code_ICSD"] + "-ICSD"
    else:
        source = "uploaded cif"
    source_info = {"source": source,
                   "datetime": str(datetime.datetime.now()),
                   "original_file": raw_string,
                   "cif_data": cif_dict[cif_keys[0]]}
    return TransformedStructure(s, transformations, history=[source_info])
Generates TransformedStructure from a cif string.

Args:
    cif_string (str): Input cif string. Should contain only one
        structure. For cifs containing multiple structures, please use
        CifTransmuter.
    transformations ([Transformations]): Sequence of transformations
        to be applied to the input structure.
    primitive (bool): Option to set if the primitive cell should be
        extracted. Defaults to True. However, there are certain
        instances where you might want to use a non-primitive cell,
        e.g., if you are trying to generate all possible orderings of
        partial removals or order a disordered structure.
    occupancy_tolerance (float): If total occupancy of a site is
        between 1 and occupancy_tolerance, the occupancies will be
        scaled down to 1.

Returns:
    TransformedStructure
def crypto_kx_seed_keypair(seed):
    public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
    secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
    # NOTE: the error message literal was lost in extraction; reconstructed.
    ensure(isinstance(seed, bytes) and len(seed) == crypto_kx_SEED_BYTES,
           'Seed must be a {} byte long bytes sequence'.format(
               crypto_kx_SEED_BYTES),
           raising=exc.TypeError)
    res = lib.crypto_kx_seed_keypair(public_key, secret_key, seed)
    ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)
    return (ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
            ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:])
Generate a keypair with a given seed. This is functionally the same as
crypto_box_seed_keypair, however it uses the blake2b hash primitive
instead of sha512. It is included mainly for api consistency when using
crypto_kx.

:param seed: random seed
:type seed: bytes
:return: (public_key, secret_key)
:rtype: (bytes, bytes)
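For context, a short usage sketch via PyNaCl's bindings layer, which exposes this function (assuming PyNaCl is installed):

import os
from nacl.bindings import crypto_kx_seed_keypair, crypto_kx_SEED_BYTES

seed = os.urandom(crypto_kx_SEED_BYTES)  # 32 random bytes
public_key, secret_key = crypto_kx_seed_keypair(seed)
# The same seed always yields the same keypair.
assert (public_key, secret_key) == crypto_kx_seed_keypair(seed)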
def input_loop():
    while mpstate.status.exit != True:
        try:
            if mpstate.status.exit != True:
                line = input(mpstate.rl.prompt)
        except EOFError:
            mpstate.status.exit = True
            sys.exit(1)
        mpstate.input_queue.put(line)
wait for user input
def merge_deployment_data(dict1: DeployedContracts,
                          dict2: DeployedContracts) -> DeployedContracts:
    if not dict1:
        return dict2
    if not dict2:
        return dict1
    common_contracts: Dict[str, DeployedContract] = deepcopy(dict1['contracts'])
    assert not common_contracts.keys() & dict2['contracts'].keys()
    common_contracts.update(dict2['contracts'])
    # NOTE: the two non-'contracts' keys were lost in extraction;
    # 'chain_id' and 'contracts_version' are assumed.
    assert dict2['chain_id'] == dict1['chain_id']
    assert dict2['contracts_version'] == dict1['contracts_version']
    return {
        'contracts': common_contracts,
        'chain_id': dict1['chain_id'],
        'contracts_version': dict1['contracts_version'],
    }
Take contents of two deployment JSON files and merge them The dictionary under 'contracts' key will be merged. The 'contracts' contents from different JSON files must not overlap. The contents under other keys must be identical.
def transpose(self, name=None):
    if name is None:
        name = self.module_name + "_transpose"
    if self._data_format == DATA_FORMAT_NWC:
        stride = self._stride[1:-1]
    else:  # self._data_format == DATA_FORMAT_NCW
        stride = self._stride[2:]
    return Conv1D(output_channels=lambda: self.input_channels,
                  kernel_shape=self.kernel_shape,
                  stride=stride,
                  padding=self.padding,
                  use_bias=self._use_bias,
                  initializers=self.initializers,
                  partitioners=self.partitioners,
                  regularizers=self.regularizers,
                  data_format=self._data_format,
                  custom_getter=self._custom_getter,
                  name=name)
Returns matching `Conv1D` module.

Args:
    name: Optional string assigning name of transpose module. The
        default name is constructed by appending "_transpose" to
        `self.name`.

Returns:
    `Conv1D` module.
def _mul8(ins):
    op1, op2 = tuple(ins.quad[2:])
    if _int_ops(op1, op2) is not None:
        op1, op2 = _int_ops(op1, op2)
        output = _8bit_oper(op1)
        # NOTE: the Z80 assembly string literals were lost in extraction;
        # the instructions below are assumed from the optimization comments.
        if op2 == 1:  # A * 1 == A
            output.append('push af')
            return output
        if op2 == 0:  # A * 0 == 0
            output.append('xor a')
            output.append('push af')
            return output
        if op2 == 2:  # A * 2 == A << 1
            output.append('add a, a')
            output.append('push af')
            return output
        if op2 == 4:  # A * 4 == A << 2
            output.append('add a, a')
            output.append('add a, a')
            output.append('push af')
            return output
        output.append('ld h, %i' % int8(op2))
    else:
        if op2[0] == '_':  # swap operands if the 2nd one is in memory
            op1, op2 = op2, op1
        output = _8bit_oper(op1, op2)
    output.append('call __MUL8_FAST')  # routine name assumed
    output.append('push af')
    REQUIRES.add('mul8.asm')  # file name assumed
    return output
Multiplies the last 2 values from the stack.

Optimizations:
  * If any of the ops is ZERO, then do A = 0 ==> XOR A, since A * 0 = 0 * A = 0
  * If any of the ops is ONE, do NOTHING, since A * 1 = 1 * A = A
def create_iopub_stream(self, kernel_id):
    self._check_kernel_id(kernel_id)
    return super(MappingKernelManager, self).create_iopub_stream(kernel_id)
Create a new iopub stream.
def dotplot(adata, var_names, groupby=None, use_raw=None, log=False,
            num_categories=7, expression_cutoff=0., mean_only_expressed=False,
            color_map='Reds', dot_max=None, dot_min=None, figsize=None,
            dendrogram=False, gene_symbols=None, var_group_positions=None,
            standard_scale=None, smallest_dot=0., var_group_labels=None,
            var_group_rotation=None, layer=None, show=None, save=None, **kwds):
    # NOTE: the string literals throughout this function were lost in
    # extraction; the values below ('Reds', 'var'/'group', dict keys,
    # axis/spine names, etc.) are assumed from the docstring and usage.
    if use_raw is None and adata.raw is not None:
        use_raw = True
    if isinstance(var_names, str):
        var_names = [var_names]
    categories, obs_tidy = _prepare_dataframe(
        adata, var_names, groupby, use_raw, log, num_categories,
        layer=layer, gene_symbols=gene_symbols)

    # fraction of cells expressing each gene per category
    obs_bool = obs_tidy > expression_cutoff
    fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
    if mean_only_expressed:
        mean_obs = obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0)
    else:
        mean_obs = obs_tidy.groupby(level=0).mean()

    if standard_scale == 'group':
        mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
        mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
    elif standard_scale == 'var':
        mean_obs -= mean_obs.min(0)
        mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
    elif standard_scale is None:
        pass
    else:
        logg.warn('Unknown type for standard_scale, ignored')

    dendro_width = 0.8 if dendrogram else 0
    colorbar_width = 0.2
    colorbar_width_spacer = 0.5
    size_legend_width = 0.25
    if figsize is None:
        height = len(categories) * 0.3 + 1
        height = max([1.5, height])
        heatmap_width = len(var_names) * 0.35
        width = heatmap_width + colorbar_width + size_legend_width \
            + dendro_width + colorbar_width_spacer
    else:
        width, height = figsize
        heatmap_width = width - (colorbar_width + size_legend_width
                                 + dendro_width + colorbar_width_spacer)

    if var_group_positions is not None and len(var_group_positions) > 0:
        height_ratios = [0.5, 10]
    else:
        height_ratios = [0, 10.5]

    fig = pl.figure(figsize=(width, height))
    axs = gridspec.GridSpec(nrows=2, ncols=5, wspace=0.02, hspace=0.04,
                            width_ratios=[heatmap_width, dendro_width,
                                          colorbar_width,
                                          colorbar_width_spacer,
                                          size_legend_width],
                            height_ratios=height_ratios)
    if len(categories) < 4:
        axs2 = gridspec.GridSpecFromSubplotSpec(
            2, 1, subplot_spec=axs[1, 0],
            height_ratios=[len(categories) * 0.3, 1])
        dot_ax = fig.add_subplot(axs2[0])
    else:
        dot_ax = fig.add_subplot(axs[1, 0])
    color_legend = fig.add_subplot(axs[1, 2])

    if groupby is None or len(categories) <= 1:
        dendrogram = False
    if dendrogram:
        dendro_data = _reorder_categories_after_dendrogram(
            adata, groupby, dendrogram, var_names=var_names,
            var_group_labels=var_group_labels,
            var_group_positions=var_group_positions)
        var_group_labels = dendro_data['var_group_labels']
        var_group_positions = dendro_data['var_group_positions']
        if dendro_data['var_names_idx_ordered'] is not None:
            mean_obs = mean_obs.iloc[:, dendro_data['var_names_idx_ordered']]
            fraction_obs = fraction_obs.iloc[:, dendro_data['var_names_idx_ordered']]
        mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]
        fraction_obs = fraction_obs.iloc[dendro_data['categories_idx_ordered'], :]
        y_ticks = range(mean_obs.shape[0])
        dendro_ax = fig.add_subplot(axs[1, 1], sharey=dot_ax)
        _plot_dendrogram(dendro_ax, adata, groupby,
                         dendrogram_key=dendrogram, ticks=y_ticks)

    size_legend_height = min(1.3, height)
    wspace = 10.5 / width
    axs3 = gridspec.GridSpecFromSubplotSpec(
        2, 1, subplot_spec=axs[1, 4], wspace=wspace,
        height_ratios=[size_legend_height / height,
                       (height - size_legend_height) / height])

    y, x = np.indices(mean_obs.shape)
    y = y.flatten()
    x = x.flatten()
    frac = fraction_obs.values.flatten()
    mean_flat = mean_obs.values.flatten()
    cmap = pl.get_cmap(color_map)
    if dot_max is None:
        dot_max = np.ceil(max(frac) * 10) / 10
    else:
        if dot_max < 0 or dot_max > 1:
            raise ValueError("`dot_max` value has to be between 0 and 1")
    if dot_min is None:
        dot_min = 0
    else:
        if dot_min < 0 or dot_min > 1:
            raise ValueError("`dot_min` value has to be between 0 and 1")
    if dot_min != 0 or dot_max != 1:
        frac = np.clip(frac, dot_min, dot_max)
        old_range = dot_max - dot_min
        frac = ((frac - dot_min) / old_range)
    size = (frac * 10) ** 2
    size += smallest_dot

    import matplotlib.colors
    normalize = matplotlib.colors.Normalize(vmin=kwds.get('vmin'),
                                            vmax=kwds.get('vmax'))
    colors = cmap(normalize(mean_flat))
    dot_ax.scatter(x, y, color=colors, s=size, cmap=cmap, norm=None,
                   edgecolor='none', **kwds)
    y_ticks = range(mean_obs.shape[0])
    dot_ax.set_yticks(y_ticks)
    dot_ax.set_yticklabels([mean_obs.index[idx] for idx in y_ticks])
    x_ticks = range(mean_obs.shape[1])
    dot_ax.set_xticks(x_ticks)
    dot_ax.set_xticklabels([mean_obs.columns[idx] for idx in x_ticks],
                           rotation=90)
    dot_ax.tick_params(axis='both', labelsize='small')
    dot_ax.grid(False)
    dot_ax.set_xlim(-0.5, len(var_names) + 0.5)
    dot_ax.set_ylabel(groupby)
    ymin, ymax = dot_ax.get_ylim()
    dot_ax.set_ylim(ymax + 0.5, ymin - 0.5)
    dot_ax.set_xlim(-1, len(var_names))

    if var_group_positions is not None and len(var_group_positions) > 0:
        gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax)
        _plot_gene_groups_brackets(gene_groups_ax,
                                   group_positions=var_group_positions,
                                   group_labels=var_group_labels,
                                   rotation=var_group_rotation)

    import matplotlib.colorbar
    matplotlib.colorbar.ColorbarBase(color_legend, cmap=cmap, norm=normalize)

    # plot the size legend
    diff = dot_max - dot_min
    if 0.3 < diff <= 0.6:
        step = 0.1
    elif diff <= 0.3:
        step = 0.05
    else:
        step = 0.2
    fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1]
    if dot_min != 0 or dot_max != 1:
        fracs_values = ((fracs_legends - dot_min) / old_range)
    else:
        fracs_values = fracs_legends
    size = (fracs_values * 10) ** 2
    size += smallest_dot
    color = [cmap(normalize(value))
             for value in np.repeat(max(mean_flat) * 0.7, len(size))]
    size_legend = fig.add_subplot(axs3[0])
    size_legend.scatter(np.repeat(0, len(size)), range(len(size)),
                        s=size, color=color)
    size_legend.set_yticks(range(len(size)))
    labels = ["{:.0%}".format(x) for x in fracs_legends]
    if dot_max < 1:
        labels[-1] = ">" + labels[-1]
    size_legend.set_yticklabels(labels)
    size_legend.set_yticklabels(["{:.0%}".format(x) for x in fracs_legends])
    size_legend.tick_params(axis='y', left=False, labelleft=False,
                            labelright=True)
    size_legend.tick_params(axis='x', bottom=False, labelbottom=False)
    size_legend.spines['right'].set_visible(False)
    size_legend.spines['top'].set_visible(False)
    size_legend.spines['left'].set_visible(False)
    size_legend.spines['bottom'].set_visible(False)
    size_legend.grid(False)
    ymin, ymax = size_legend.get_ylim()
    size_legend.set_ylim(ymin, ymax + 0.5)

    utils.savefig_or_show('dotplot', show=show, save=save)
    return axs
Makes a *dot plot* of the expression values of `var_names`. For each
var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized
by color) and fraction of cells expressing the var_name in the category
(visualized by the size of the dot). If groupby is not given, the
dotplot assumes that all data belongs to a single category.

**Note**: A gene is considered expressed if the expression value in the
adata (or adata.raw) is above the specified threshold which is zero by
default.

An example of dotplot usage is to visualize, for multiple marker genes,
the mean value and the percentage of cells expressing the gene across
multiple clusters.

Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
    Expression cutoff that is used for binarizing the gene expression
    and determining the fraction of cells expressing given genes. A
    gene is expressed only if the expression value is greater than
    this threshold.
mean_only_expressed : `bool` (default: `False`)
    If True, gene expression is averaged only over the cells expressing
    the given genes.
color_map : `str`, optional (default: `Reds`)
    String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
    If none, the maximum dot size is set to the maximum fraction value
    found (e.g. 0.6). If given, the value should be a number between 0
    and 1. All fractions larger than dot_max are clipped to this value.
dot_min : `float` optional (default: `None`)
    If none, the minimum dot size is set to 0. If given, the value
    should be a number between 0 and 1. All fractions smaller than
    dot_min are clipped to this value.
standard_scale : {{'var', 'group'}}, optional (default: None)
    Whether or not to standardize that dimension between 0 and 1,
    meaning for each variable or group, subtract the minimum and divide
    each by its maximum.
smallest_dot : `float` optional (default: 0.)
    If none, the smallest dot has size 0. All expression levels with
    `dot_min` are plotted with `smallest_dot` dot size.
{show_save_ax}
**kwds : keyword arguments
    Are passed to `matplotlib.pyplot.scatter`.

Returns
-------
List of :class:`~matplotlib.axes.Axes`

Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
...               groupby='bulk_labels', dendrogram=True)
def iter_starred(self, login=None, sort=None, direction=None, number=-1,
                 etag=None):
    if login:
        return self.user(login).iter_starred(sort, direction)
    # NOTE: the param keys and URL segments were lost in extraction;
    # 'sort'/'direction' and ('user', 'starred') follow the GitHub API.
    params = {'sort': sort, 'direction': direction}
    self._remove_none(params)
    url = self._build_url('user', 'starred')
    return self._iter(int(number), url, Repository, params, etag)
Iterate over repositories starred by ``login`` or the authenticated user.

.. versionchanged:: 0.5
   Added sort and direction parameters (optional) as per the change in
   GitHub's API.

:param str login: (optional), name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
    created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default: 'desc'
:param int number: (optional), number of repositories to return.
    Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
    endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
def set_consistent(self, consistent_config):
    self.topology._add_job_control_plane()
    self.oport.operator.consistent(consistent_config)
    return self._make_placeable()
Indicates that the stream is the start of a consistent region.

Args:
    consistent_config(consistent.ConsistentRegionConfig): the
        configuration of the consistent region.

Returns:
    Stream: Returns this stream.

.. versionadded:: 1.11
def delete_variable(self, name):
    del self.variables[name]
    self.signal_variable_changed.emit(self, name, "delete")
Deletes a variable from a DataFrame.
def audit_1_1(self):
    for row in self.credential_report:
        if row["user"] == "<root_account>":
            for field in ("password_last_used",
                          "access_key_1_last_used_date",
                          "access_key_2_last_used_date"):
                if row[field] != "N/A" and self.parse_date(row[field]) > \
                        datetime.now(tzutc()) - timedelta(days=1):
                    raise Exception(
                        "Root account last used less than a day ago ({})".format(field))
1.1 Avoid the use of the "root" account (Scored)
def add_lines(self, txt, indent=0):
    for line in txt:
        self.add_line(line, indent)
Adds a list of lines. The list can be indented with the optional argument 'indent'.
def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
    # Message name, with nesting prefix (e.g. 'Outer.Inner').
    prefixed_name = name_prefix + message_descriptor.name
    print(make_subsection(prefixed_name))
    location = locations[path]
    if location.HasField('leading_comments'):
        print(textwrap.dedent(location.leading_comments))

    row_tuples = []
    for field_index, field in enumerate(message_descriptor.field):
        field_location = locations[path + (2, field_index)]
        if field.type not in [11, 14]:  # 11=TYPE_MESSAGE, 14=TYPE_ENUM
            type_str = TYPE_TO_STR[field.type]
        else:
            type_str = make_link(field.type_name.lstrip('.'))
        row_tuples.append((
            make_code(field.name),
            field.number,
            type_str,
            LABEL_TO_STR[field.label],
            textwrap.fill(get_comment_from_location(field_location), INFINITY),
        ))
    # NOTE: the table header labels were lost in extraction; these are assumed.
    print_table(('Name', 'Number', 'Type', 'Label', 'Description'), row_tuples)

    # Generate docs for nested messages and enums.
    nested_types = enumerate(message_descriptor.nested_type)
    for index, nested_message_desc in nested_types:
        generate_message_doc(nested_message_desc, locations, path + (3, index),
                             name_prefix=prefixed_name + '.')
    for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
        generate_enum_doc(nested_enum_desc, locations, path + (4, index),
                          name_prefix=prefixed_name + '.')
Generate docs for message and nested messages and enums.

Args:
    message_descriptor: descriptor_pb2.DescriptorProto instance for
        message to generate docs for.
    locations: Dictionary of location paths tuples to
        descriptor_pb2.SourceCodeInfo.Location instances.
    path: Path tuple to the message definition.
    name_prefix: Optional prefix for this message's name.
def is_access_granted(self, agreement_id, did, consumer_address):
    agreement_consumer = \
        self._keeper.escrow_access_secretstore_template.get_agreement_consumer(
            agreement_id)
    if agreement_consumer != consumer_address:
        # NOTE: the original f-string message was lost in extraction;
        # reconstructed as a plain warning.
        logger.warning(f'Invalid consumer address {consumer_address} for '
                       f'agreement {agreement_id}: agreement consumer is '
                       f'{agreement_consumer}.')
        return False
    document_id = did_to_id(did)
    return self._keeper.access_secret_store_condition.check_permissions(
        document_id, consumer_address
    )
Check permission for the agreement. Verify on-chain that the
`consumer_address` has permission to access the given asset `did`
according to the `agreement_id`.

:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param consumer_address: ethereum account address of consumer, hex str
:return: bool True if user has permission
def as_sql(self):
    labels, data = self._get_table()
    table = SqlTable(labels, data, "{:.3f}", "\n")
    return str(table)
Gets report as an SQL table.

:return: SQL-formatted report
def discover_base_dir(start_dir):
    # NOTE: the body of this function was largely destroyed in extraction
    # (a docstring fragment overwrote the loop); reconstructed from the
    # docstring: walk up the parent chain looking for the s2 marker.
    found_base_dir = start_dir
    d2c = start_dir
    while d2c not in ('', '/'):
        if is_base_dir(d2c):
            found_base_dir = d2c
            break
        d2c = os.path.dirname(d2c)
    return found_base_dir
Return start_dir or the parent dir that has the s2 marker. Starting from the specified directory, and going up the parent chain, check each directory to see if it's a base_dir (contains the "marker" directory *s2*) and return it. Otherwise, return the start_dir.
def login():
    from .RemoteConnection.RemoteManager import RemoteManager
    global __remote_manager, __session_manager
    logger = logging.getLogger()
    remote_address = get_remote_address()
    res = __session_manager.get_session(remote_address)
    # NOTE: the branch handling a missing stored session (guest login per the
    # docstring) was lost in extraction; the surviving branch only makes
    # sense when a session *was* found, so the condition is inverted here.
    if res is not None:
        logger.info("Logging using stored authentication token")
        rm = RemoteManager(address=remote_address, auth_token=res[1])
        session_type = rm.auto_login(how=res[2])
        __remote_manager = rm
        access_time = int(time.time())
        auth_token = rm.auth_token
        __session_manager.add_session(remote_address, auth_token,
                                      access_time, session_type)
Enables the user to login to the remote GMQL service. If both username and password are None, the user will be connected as guest.
def _build_jss_object_list(self, response, obj_class):
    response_objects = [item for item in response
                        if item is not None and item.tag != "size"]
    objects = [
        JSSListData(obj_class, {i.tag: i.text for i in response_object}, self)
        for response_object in response_objects]
    return JSSObjectList(self, obj_class, objects)
Build a JSSListData object from response.
def detach_events(self, *events):
    reg = self.registry
    delete = defaultdict(list)
    all_events = reg.events
    for e in events:
        # NOTE: the attribute name was lost in extraction; 'pattern'
        # (a compiled regex's source string) is assumed.
        regexp = getattr(e.regexp, 'pattern', e.regexp)
        iotype = e.iotype
        if e in all_events[iotype].get(regexp, []):
            all_events[iotype][regexp].remove(e)
            if not all_events[iotype][regexp]:
                del all_events[iotype][regexp]
                delete[iotype].append(regexp)
    for iotype, regexps in delete.items():
        reg.events_re[iotype] = [r for r in reg.events_re[iotype]
                                 if r[0] not in regexps]
Detach one or more events from the bot instance
def _collect_potential_merges(dag, barriers):
    if len(barriers) < 2:
        return None
    node_to_barrier_qubits = {}
    current_barrier = barriers[0]
    end_of_barrier = current_barrier
    current_barrier_nodes = [current_barrier]
    current_qubits = set(current_barrier.qargs)
    current_ancestors = dag.ancestors(current_barrier)
    current_descendants = dag.descendants(current_barrier)
    barrier_to_add = Barrier(len(current_qubits))
    for next_barrier in barriers[1:]:
        next_ancestors = {nd for nd in dag.ancestors(next_barrier)
                          if nd not in current_barrier_nodes}
        next_descendants = {nd for nd in dag.descendants(next_barrier)
                            if nd not in current_barrier_nodes}
        next_qubits = set(next_barrier.qargs)
        if (
                not current_qubits.isdisjoint(next_qubits)
                and current_ancestors.isdisjoint(next_descendants)
                and current_descendants.isdisjoint(next_ancestors)
        ):
            # The barrier can be merged into the current group.
            current_ancestors = current_ancestors | next_ancestors
            current_descendants = current_descendants | next_descendants
            current_qubits = current_qubits | next_qubits
            barrier_to_add = Barrier(len(current_qubits))
        else:
            # Close off the current group and start a new one.
            if barrier_to_add:
                node_to_barrier_qubits[end_of_barrier] = current_qubits
            current_qubits = set(next_barrier.qargs)
            current_ancestors = dag.ancestors(next_barrier)
            current_descendants = dag.descendants(next_barrier)
            barrier_to_add = Barrier(len(current_qubits))
            current_barrier_nodes = []
        end_of_barrier = next_barrier
        current_barrier_nodes.append(end_of_barrier)
    if barrier_to_add:
        node_to_barrier_qubits[end_of_barrier] = current_qubits
    return node_to_barrier_qubits
Returns a dict of DAGNode : Barrier objects, where the barrier needs to be inserted where the corresponding DAGNode appears in the main DAG
def get_scoped_variable_m(self, data_port_id):
    for scoped_variable_m in self.scoped_variables:
        if scoped_variable_m.scoped_variable.data_port_id == data_port_id:
            return scoped_variable_m
    return None
Returns the scoped variable model for the given data port id

:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id
def ndhess(f, delta=DELTA):
    def hess_f(*args, **kwargs):
        x = args[0]
        hess_val = numpy.zeros(x.shape + x.shape)
        # nditer flags restored from usage: the iterators modify x in place
        # and read it.multi_index / jt.multi_index.
        it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
        for xi in it:
            i = it.multi_index
            jt = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
            for xj in jt:
                j = jt.multi_index
                # Central difference in both coordinates.
                xi += delta/2
                xj += delta/2
                fpp = f(x)
                xj -= delta
                fpm = f(x)
                xi -= delta
                fmm = f(x)
                xj += delta
                fmp = f(x)
                xi += delta/2
                xj -= delta/2
                hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2
        return hess_val
    return hess_f
Returns numerical hessian function of given input function

Input: f, scalar function of a numpy array object
       delta (optional), finite difference step
Output: hessian function object
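A quick usage sketch, assuming `DELTA` is a small float (e.g. 1e-3) and `numpy` is imported as in the module:

import numpy

def quadratic(x):
    # f(x) = x^T A x with A = [[2, 1], [1, 3]]
    A = numpy.array([[2.0, 1.0], [1.0, 3.0]])
    return x @ A @ x

hess = ndhess(quadratic, delta=1e-3)
x0 = numpy.array([0.5, -1.0])
# The exact Hessian is 2A; the numerical result should match closely.
print(hess(x0))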
def parse_notifier_name(name):
    if isinstance(name, str):
        return [name]
    elif name is None:
        # NOTE: the list literal was lost in extraction; restored as
        # ['anytrait'] per the docstring's doctest.
        return ['anytrait']
    elif isinstance(name, (list, tuple)):
        for n in name:
            assert isinstance(n, str), "names must be strings"
        return name
Convert the name argument to a list of names.

Examples
--------
>>> parse_notifier_name('a')
['a']
>>> parse_notifier_name(['a', 'b'])
['a', 'b']
>>> parse_notifier_name(None)
['anytrait']
def calculate_heading_longpath(locator1, locator2):
    heading = calculate_heading(locator1, locator2)
    lp = (heading + 180) % 360
    return lp
calculates the heading from the first to the second locator (long path)

Args:
    locator1 (string): Locator, either 4 or 6 characters
    locator2 (string): Locator, either 4 or 6 characters

Returns:
    float: Long path heading in deg

Raises:
    ValueError: When called with wrong or invalid input arg
    AttributeError: When args are not a string

Example:
    The following calculates the long path heading from locator1 to locator2

    >>> from pyhamtools.locator import calculate_heading_longpath
    >>> calculate_heading_longpath("JN48QM", "QF67bf")
    254.3136
def value_attr(attr_name):
    def value_attr(value, context, **_params):
        value = getattr(value, attr_name)
        return _attr(value)
    return value_attr
Creates a getter that will retrieve value's attribute with specified name. @param attr_name: the name of an attribute belonging to the value. @type attr_name: str
def uniqualize(l, **kwargs):
    # NOTE: the mode literals were lost in extraction; 'new' (return a new
    # list) and 'original' (modify in place) are assumed from the docstring.
    if 'mode' in kwargs:
        mode = kwargs['mode']
    else:
        mode = 'new'
    pt = copy.deepcopy(l)
    seqs = []
    freq = {}
    for i in range(0, pt.__len__()):
        v = pt[i]
        if v in freq:
            freq[v] = freq[v] + 1
        else:
            freq[v] = 0
            seqs.append(i)  # keep only the first occurrence of each value
    npt = select_seqs(pt, seqs)
    pt = npt
    if mode == 'new':
        return npt
    else:
        l.clear()
        l.extend(npt)
        return l
from elist.elist import *
l = [1, 2, 2]
new = uniqualize(l)
new
id(l)
id(new)
####
l = [1, 2, 2]
rslt = uniqualize(l, mode="original")
rslt
id(l)
id(rslt)
def list_build_records(page_size=200, page_index=0, sort="", q=""):
    data = list_build_records_raw(page_size, page_index, sort, q)
    if data:
        return utils.format_json_list(data)
List all BuildRecords
def setup(app):
    lexer = MarkdownLexer()
    for alias in lexer.aliases:
        app.add_lexer(alias, lexer)
    return dict(version=__version__)
Initializer for Sphinx extension API. See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
def generate_k(order, secexp, hash_func, data):
    # RFC 6979 deterministic nonce generation. NOTE: the byte literals were
    # lost in extraction; \x00/\x01 are restored per RFC 6979 section 3.2.
    qlen = bit_length(order)
    holen = hash_func().digest_size
    rolen = (qlen + 7) // 8  # integer division (was `/`, a Python 3 bug)
    bx = number_to_string(secexp, order) + bits2octets(data, order)

    # Step B: V = 0x01 0x01 ... 0x01
    v = b('\x01') * holen
    # Step C: K = 0x00 0x00 ... 0x00
    k = b('\x00') * holen
    # Step D
    k = hmac.new(k, v + b('\x00') + bx, hash_func).digest()
    # Step E
    v = hmac.new(k, v, hash_func).digest()
    # Step F
    k = hmac.new(k, v + b('\x01') + bx, hash_func).digest()
    # Step G
    v = hmac.new(k, v, hash_func).digest()
    # Step H
    while True:
        t = b('')
        while len(t) < rolen:
            v = hmac.new(k, v, hash_func).digest()
            t += v
        secret = bits2int(t, qlen)
        if secret >= 1 and secret < order:
            return secret
        k = hmac.new(k, v + b('\x00'), hash_func).digest()
        v = hmac.new(k, v, hash_func).digest()
order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data
def as_dict(self, join='.'):
    # NOTE: the default join string and the empty-path value were lost in
    # extraction; '.' and an empty path are assumed.
    if self.path:
        path = [str(node) for node in self.path]
    else:
        path = []
    return {
        join.join(path): self.message
    }
Returns the error as a path to message dictionary. Paths are joined with the ``join`` string.
def add_row(self, data: list = None):
    # NOTE: several string literals were lost in extraction; '' for empty
    # cells, 'ew' for the grid sticky option and '<Tab>' for the key
    # binding are assumed (the docstring mentions the <Tab> binding).
    if self.headers and data:
        if len(self.headers) != len(data):
            raise ValueError
    offset = 0 if not self.headers else 1
    row = list()
    if data:
        for i, element in enumerate(data):
            contents = '' if element is None else str(element)
            entry = ttk.Entry(self)
            entry.insert(0, contents)
            entry.grid(row=len(self._rows) + offset, column=i, sticky='ew')
            row.append(entry)
    else:
        for i in range(self.num_of_columns):
            entry = ttk.Entry(self)
            entry.grid(row=len(self._rows) + offset, column=i, sticky='ew')
            row.append(entry)
    self._rows.append(row)

    # clear all existing bindings
    for row in self._rows:
        for widget in row:
            widget.unbind('<Tab>')

    def add(e):
        self.add_row()

    last_entry = self._rows[-1][-1]
    last_entry.bind('<Tab>', add)
    e = self._rows[-1][0]
    e.focus_set()
    self._redraw()
Add a row of data to the current widget, add a <Tab> binding to the
last element of the last row, and set the focus at the beginning of the
next row.

:param data: a row of data
:return: None
def visit_For(self, node):
    if not isinstance(node.target, ast.Name):
        raise PythranSyntaxError(
            "Using something other than an identifier as loop target",
            node.target)
    target = self.visit(node.target)

    # Handle the body of the for loop.
    loop_body = Block([self.visit(stmt) for stmt in node.body])

    # Declare local variables at loop level scope.
    loop_body = self.process_locals(node, loop_body, node.target.id)
    iterable = self.visit(node.iter)

    if self.can_use_c_for(node):
        header, loop = self.gen_c_for(node, target, loop_body)
    else:
        if self.can_use_autofor(node):
            header = []
            self.ldecls.remove(node.target.id)
            autofor = AutoFor(target, iterable, loop_body)
            loop = [self.process_omp_attachements(node, autofor)]
        else:
            local_iter = "__iter{0}".format(id(node))
            local_iter_decl = self.types.builder.Assignable(
                self.types[node.iter])
            self.handle_omp_for(node, local_iter)
            asgnt = self.make_assign(local_iter_decl, local_iter, iterable)
            header = [Statement(asgnt)]
            loop = self.gen_for(node, target, local_iter, local_iter_decl,
                                loop_body)

    # Possible container size reservation for comprehensions.
    for comp in metadata.get(node, metadata.Comprehension):
        header.append(Statement("pythonic::utils::reserve({0},{1})".format(
            comp.target, iterable)))

    return Block(header + loop)
Create For representation for Cxx generation.

Examples
--------
>> for i in xrange(10):
>>     ... work ...

Becomes

>> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX
>>     = __builtin__.xrange(10);
>> ... possible container size reservation ...
>> for (auto&& i: __iterX)
>>     ... the work ...

This function also handles assignment of local variables.

We can notice that three kinds of loop are possible:
- Normal for loop on iterator
- Autofor loop
- Normal for loop using integer variable iteration

The kind of loop used depends on OpenMP, yield use and variable scope.
def result(self):
    self.__result.sort(cmp=self.__cmp, key=self.__key, reverse=self.__reverse)
    return self.__result
Formats the result.
def communicate(self, input=None, timeout=-1):
    if self._process is None:
        raise RuntimeError('process not started')  # message assumed
    if timeout == -1:
        timeout = self._timeout
    output = [[], []]

    def writer(stream, data):
        offset = 0
        while offset < len(data):
            buf = data[offset:offset+4096]
            stream.write(buf)
            offset += len(buf)
        stream.close()

    def reader(stream, data):
        while True:
            if self._encoding:
                buf = stream.read(4096)
            else:
                buf = stream.read1()
            if not buf:
                break
            data.append(buf)

    if self.stdin:
        fibers.spawn(writer, self.stdin, input or b'')
    if self.stdout:
        fibers.spawn(reader, self.stdout, output[0])
    if self.stderr:
        fibers.spawn(reader, self.stderr, output[1])
    self.wait(timeout)
    empty = '' if self._encoding else b''
    stdout_data = empty.join(output[0])
    stderr_data = empty.join(output[1])
    return (stdout_data, stderr_data)
Communicate with the child and return its output. If *input* is provided, it is sent to the child. Concurrent with sending the input, the child's standard output and standard error are read, until the child exits. The return value is a tuple ``(stdout_data, stderr_data)`` containing the data read from standard output and standard error.
def evaluate_cartesian(self, s, t, _verify=True):
    if _verify:
        self._verify_cartesian(s, t)
    return _surface_helpers.evaluate_barycentric(
        self._nodes, self._degree, 1.0 - s - t, s, t
    )
r"""Compute a point on the surface. Evaluates :math:`B\left(1 - s - t, s, t\right)` by calling :meth:`evaluate_barycentric`: This method acts as a (partial) inverse to :meth:`locate`. .. testsetup:: surface-cartesian import numpy as np import bezier .. doctest:: surface-cartesian :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.0, 0.5, 0.25], ... [0.0, 0.5, 0.625, 0.5, 0.5, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_cartesian(0.125, 0.375) >>> point array([[0.16015625], [0.44726562]]) >>> surface.evaluate_barycentric(0.5, 0.125, 0.375) array([[0.16015625], [0.44726562]]) Args: s (float): Parameter along the reference triangle. t (float): Parameter along the reference triangle. _verify (Optional[bool]): Indicates if the coordinates should be verified inside of the reference triangle. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array).
def fetch(self):
    api = self.doapi_manager
    return api._ssh_key(api.request(self.url)["ssh_key"])
Fetch & return a new `SSHKey` object representing the SSH key's current state :rtype: SSHKey :raises DOAPIError: if the API endpoint replies with an error (e.g., if the SSH key no longer exists)
def jaccard(seq1, seq2):
    set1, set2 = set(seq1), set(seq2)
    return 1 - len(set1 & set2) / float(len(set1 | set2))
Compute the Jaccard distance between the two sequences `seq1` and `seq2`. They should contain hashable items. The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
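A quick usage sketch (the sample values are illustrative):

# Identical sequences -> 0.0; disjoint sequences -> 1.0.
print(jaccard("abc", "abc"))          # 0.0
print(jaccard("abc", "xyz"))          # 1.0
print(jaccard([1, 2, 3], [2, 3, 4]))  # 1 - 2/4 = 0.5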
def has_tsm(self):
    # NOTE: the oemid keys were lost in extraction; 'manufacturer_id'
    # (19046 = Lenovo) and 'device_id' are assumed.
    if (self.oemid['manufacturer_id'] == 19046 and
            self.oemid['device_id'] == 32):
        try:
            self.ipmicmd.xraw_command(netfn=0x3a, command=0xf)
        except pygexc.IpmiException as ie:
            if ie.ipmicode == 193:
                return False
            raise
        return True
    return False
True if this particular server has a TSM-based service processor
def summary(processors, metrics, context):
    # NOTE: nearly all string literals in this function were lost in
    # extraction; the dictionary keys ('language', 'files', the metric key
    # list) and the printed strings below are assumed.
    def display_header(processors, before='', after=''):
        print(before, end='')
        for processor in processors:
            processor.display_header()
        print(after)

    def display_separator(processors, before='', after=''):
        print(before, end='')
        for processor in processors:
            processor.display_separator()
        print(after)

    def display_metrics(processors, before='', after='', metrics=[]):
        print(before, end='')
        for processor in processors:
            processor.display_metrics(metrics)
        print(after)

    summary = {}
    for m in metrics:
        lang = metrics[m]['language']
        has_key = lang in summary
        if not has_key:
            summary[lang] = {'files': 0, 'language': lang}
        summary[lang]['files'] += 1
        for i in metrics[m]:
            if i not in ['mccabe', 'ratio_comment_to_code', 'sloc']:  # assumed
                continue
            if not has_key:
                summary[lang][i] = 0
            summary[lang][i] += metrics[m][i]

    total = {'language': 'Total'}
    for m in summary:
        for i in summary[m]:
            if i == 'language':
                continue
            if i not in total:
                total[i] = 0
            total[i] += summary[m][i]

    print('Metrics Summary:')  # heading assumed
    display_header(processors, 'Files', '')
    display_separator(processors, '-' * 5, '')
    for k in sorted(summary.keys(), key=str.lower):
        display_metrics(processors, '%5d' % summary[k]['files'], '', summary[k])
    display_separator(processors, '-' * 5, '')
    display_metrics(processors, '%5d' % total['files'], '', total)
Print the summary
def energies(self, samples_like, dtype=np.float):
    samples, labels = as_samples(samples_like)
    if all(v == idx for idx, v in enumerate(labels)):
        ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)
    else:
        ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(
            variable_order=labels, dtype=dtype)
    energies = samples.dot(ldata) \
        + (samples[:, irow] * samples[:, icol]).dot(qdata) + offset
    return np.asarray(energies, dtype=dtype)
Determine the energies of the given samples.

Args:
    samples_like (samples_like): A collection of raw samples.
        `samples_like` is an extension of NumPy's array_like structure.
        See :func:`.as_samples`.
    dtype (:class:`numpy.dtype`): The data type of the returned energies.

Returns:
    :obj:`numpy.ndarray`: The energies.
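For context, a short usage sketch assuming this is the `dimod` binary quadratic model method (names follow dimod's public API):

import dimod

bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0},
                                 {('a', 'b'): 0.5},
                                 0.0, dimod.SPIN)
samples = [{'a': -1, 'b': +1}, {'a': +1, 'b': -1}]
print(bqm.energies(samples))  # one energy per sample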
def get_pubmed_for_beleditor(pmid: str) -> Mapping[str, Any]:
    pubmed = get_pubmed(pmid)
    pubtator = get_pubtator(pmid)
    pubmed["annotations"] = copy.deepcopy(pubtator["annotations"])
    pubmed = enhance_pubmed_annotations(pubmed)
    return pubmed
Get fully annotated pubmed doc with Pubtator and full entity/annotation_types Args: pmid: Pubmed PMID Returns: Mapping[str, Any]: pubmed dictionary
def newLayer(self, effect=''):
    # NOTE: the default for `effect` was lost in extraction; an empty
    # string (no blend effect) is assumed.
    self.layers.append(Layer(effect=effect))
    self.activeLayer = len(self.layers) - 1
Creates a new :py:class:`Layer` and set that as the active. :param effect: A string with the blend mode for that layer that will be used when during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`. :rtype: Nothing.
def open(self) -> bool: if self.connection: return False connection_params = {: DNS_NAME.get_fqdn()} if self.timeout is not None: connection_params[] = self.timeout try: self.connection = smtplib.SMTP(self.host, self.port, **connection_params) context = ssl.SSLContext(self._protocol()) if self.ssl_certfile: context.load_cert_chain(certfile=self.ssl_certfile, keyfile=self.ssl_keyfile) self.connection.ehlo() self.connection.starttls(context=context) self.connection.ehlo() if self.username and self.password: self.connection.login(self.username, self.password) log.debug("Successful SMTP connection/login") else: log.debug("Successful SMTP connection (without login)") return True except smtplib.SMTPException: log.debug("SMTP connection and/or login failed") if not self.fail_silently: raise
Ensures we have a connection to the email server. Returns whether or not a new connection was required (True or False).
def Module(EPIC, campaign=None):
    channel = Channel(EPIC, campaign=campaign)
    nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29,
            11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57,
            18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81}
    for c in [channel, channel - 1, channel - 2, channel - 3]:
        if c in nums.values():
            for mod, chan in nums.items():
                if chan == c:
                    return mod
    return None
Returns the module number for a given EPIC target.
def determine_extended_chord5(chord, shorthand=False, no_inversions=False,
                              no_polychords=False):
    if len(chord) != 5:
        return False

    def inversion_exhauster(chord, shorthand, tries, result, polychords):
        def add_result(short):
            result.append((short, tries, chord[0]))

        triads = determine_triad(chord[:3], True, True)
        sevenths = determine_seventh(chord[:4], True, True, True)
        if tries == 1 and not no_polychords:
            polychords += determine_polychords(chord, shorthand)
        intval4 = intervals.determine(chord[0], chord[4])
        # NOTE: every chord/interval name literal in the branches below was
        # lost in extraction; the comparison targets and add_result()
        # arguments cannot be recovered and are left as `...` placeholders.
        for seventh in sevenths:
            seventh = seventh[len(chord[0]):]
            if seventh == ...:
                if intval4 == ...:
                    add_result(...)
            elif seventh == ...:
                if intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
            elif seventh == ...:
                if intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
            elif seventh == ...:
                if intval4 == ...:
                    add_result(...)
                elif intval4 == ...:
                    add_result(...)
        if tries != 5 and not no_inversions:
            return inversion_exhauster([chord[-1]] + chord[:-1], shorthand,
                                       tries + 1, result, polychords)
        else:
            res = []
            for r in result:
                if shorthand:
                    res.append(r[2] + r[0])
                else:
                    res.append(r[2] + chord_shorthand_meaning[r[0]]
                               + int_desc(r[1]))
            return res + polychords

    return inversion_exhauster(chord, shorthand, 1, [], [])
Determine the names of an extended chord.
def _get_args_contents(self):
    # NOTE: the format string and separators were lost in extraction;
    # space-joined 'key=value' pairs are assumed, following
    # ActionBase._execute_module().
    return ' '.join(
        '%s=%s' % (key, shlex_quote(str(self.args[key])))
        for key in self.args
    ) + ' '
Mimic the argument formatting behaviour of ActionBase._execute_module().
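A standalone sketch of the formatting this produces, using the standard library's `shlex.quote` in place of the module's `shlex_quote` import (the sample args are illustrative):

from shlex import quote as shlex_quote

args = {'path': '/tmp/my file', 'state': 'present'}
line = ' '.join('%s=%s' % (key, shlex_quote(str(args[key]))) for key in args)
print(line)  # path='/tmp/my file' state=present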
def _missing_(cls, value):
    if not (isinstance(value, int) and 0x0000 <= value <= 0xFFFF):
        raise ValueError('%r is not a valid %s' % (value, cls.__name__))  # message assumed
    # NOTE: the generated member-name format strings were lost in extraction;
    # 'Unassigned [0x%s]' is assumed for each range. As written, the first
    # range (0x0001-0x0BB8) shadows 0x0020-0x003F, and 0x0BB9-0xFFFF shadows
    # the last two ranges, leaving them unreachable.
    if 0x0001 <= value <= 0x0BB8:
        extend_enum(cls, 'Unassigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
        return cls(value)
    if 0x0020 <= value <= 0x003F:
        extend_enum(cls, 'Unassigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
        return cls(value)
    if 0x0BB9 <= value <= 0xFFFF:
        extend_enum(cls, 'Unassigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
        return cls(value)
    if 0x4000 <= value <= 0x4FFF:
        extend_enum(cls, 'Unassigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
        return cls(value)
    if 0x8000 <= value <= 0xFFFF:
        extend_enum(cls, 'Unassigned [0x%s]' % hex(value)[2:].upper().zfill(4), value)
        return cls(value)
    super()._missing_(value)
Lookup function used when value is not found.
def marks(value):
    for i, entry in enumerate(value):
        # NOTE: the label format string was lost in extraction; assumed.
        _assert_is_type('marks[{0}]'.format(i), entry, Mark)
list or KeyedList of ``Mark`` : Mark definitions Marks are the visual objects (such as lines, bars, etc.) that represent the data in the visualization space. See the :class:`Mark` class for details.
def _Rforce(self, R, z, phi=0., t=0.):
    Rz = R**2. + z**2.
    sqrtRz = numpy.sqrt(Rz)
    return R*(1./Rz/(self.a + sqrtRz)
              - numpy.log(1. + sqrtRz/self.a)/sqrtRz/Rz)
NAME:
   _Rforce
PURPOSE:
   evaluate the radial force for this potential
INPUT:
   R - Galactocentric cylindrical radius
   z - vertical height
   phi - azimuth
   t - time
OUTPUT:
   the radial force
HISTORY:
   2010-07-09 - Written - Bovy (NYU)
def guest_reset(self, userid):
    LOG.info("Begin to reset vm %s", userid)
    self._smtclient.guest_reset(userid)
    LOG.info("Complete reset vm %s", userid)
Reset z/VM instance.
def calcEndOfPrdvPP(self):
    EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree \
        * self.PermGroFac**(-self.CRRA - 1.0) \
        * np.sum(self.PermShkVals_temp**(-self.CRRA - 1.0)
                 * self.vPPfuncNext(self.mNrmNext)
                 * self.ShkPrbs_temp, axis=0)
    return EndOfPrdvPP
Calculates end-of-period marginal marginal value using a pre-defined
array of next period market resources in self.mNrmNext.

Parameters
----------
none

Returns
-------
EndOfPrdvPP : np.array
    End-of-period marginal marginal value of assets at each value in
    the grid of assets.
def getElements(self, zero_based=True, pared=False):
    points = self._points[:]
    elements = self._elements[:]
    offset = 0
    if not zero_based:
        offset = 1
    np = None
    if pared:
        np = NodePare()
        np.addPoints(points)
        np.parePoints()
    if pared or not zero_based:
        modified_elements = []
        for element in elements:
            modified_element = [index + offset if np is None
                                else np.getParedIndex(index) + offset
                                for index in element]
            modified_elements.append(modified_element)
        elements = modified_elements
    return elements
Get the elements of the mesh as a list of point index list.

:param zero_based: use zero based index of points if true otherwise use
    1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists
def render(self, template, **kwargs):
    try:
        temp = self.environment.get_template(template)
        return temp.render(**kwargs)
    except AttributeError:
        err_msg = "Invalid value for "
        self.log.error(err_msg)
        raise exception.BadValue(err_msg)
Renders the template

:param template: The template to render. The template is actually a
    file, which is usually generated by
    :class:`rtcclient.template.Templater.getTemplate` and can also be
    modified by user accordingly.
:param kwargs: The `kwargs` dict is used to fill the template.
    These two parameters are mandatory:

        * description
        * title

    Some of the below parameters (which may not be included in some
    customized workitem types) are mandatory if `keep` (parameter in
    :class:`rtcclient.template.Templater.getTemplate`) is set to
    `False`; optional otherwise.

        * teamArea (Team Area)
        * ownedBy (Owned By)
        * plannedFor (Planned For)
        * severity (Severity)
        * priority (Priority)
        * filedAgainst (Filed Against)

    Actually all these needed keywords/attributes/fields can be
    retrieved by :class:`rtcclient.template.Templater.listFields`
:return: the :class:`string` object
:rtype: string
def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None:
    existing_plugin = cls.available.get(name)
    if existing_plugin is None:
        cls.available[name] = plugin
    elif existing_plugin != plugin:
        raise ConnectionPluginAlreadyRegistered(
            f"Connection plugin {plugin.__name__} can't be registered as "
            f"{name!r} because plugin {existing_plugin.__name__} "
            f"was already registered under this name"
        )
Registers a connection plugin with a specified name

Args:
    name: name of the connection plugin to register
    plugin: defined connection plugin class

Raises:
    :obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if
        another plugin with the specified name was already registered
def connections(self, wait):
    while wait:
        try:
            params = pika.ConnectionParameters(host=self.rmq_host,
                                               port=self.rmq_port)
            connection = pika.BlockingConnection(params)
            self.channel = connection.channel()
            # NOTE: the exchange name/type and the printed messages were
            # lost in extraction; a topic exchange and generic status
            # messages are assumed.
            self.channel.exchange_declare(exchange='topic_recs',
                                          exchange_type='topic')
            result = self.channel.queue_declare()
            self.queue_name = result.method.queue
            self.es_conn = Elasticsearch([{'host': self.es_host,
                                           'port': self.es_port}])
            wait = False
            print('connected to rabbitmq and elasticsearch')
        except Exception as e:
            print(str(e))
            print('waiting to connect to rabbitmq and elasticsearch: ' + str(e))
            time.sleep(2)
            wait = True
wait for connections to both rabbitmq and elasticsearch to be made before binding a routing key to a channel and sending messages to elasticsearch
def sph_coords_to_pose(theta, psi):
    # rotate about the z and y axes individually
    rot_z = RigidTransform.z_axis_rotation(theta)
    rot_y = RigidTransform.y_axis_rotation(psi)
    R = rot_y.dot(rot_z)
    return RigidTransform(rotation=R)
Convert spherical coordinates to a pose.

Parameters
----------
theta : float
    azimuth angle
psi : float
    elevation angle

Returns
-------
:obj:`RigidTransform`
    rigid transformation corresponding to rotation with no translation
def corners(bounds):
    bounds = np.asanyarray(bounds, dtype=np.float64)
    if util.is_shape(bounds, (2, 2)):
        bounds = np.column_stack((bounds, [0, 0]))
    elif not util.is_shape(bounds, (2, 3)):
        raise ValueError('bounds must be (2, 2) or (2, 3)!')  # message assumed
    minx, miny, minz, maxx, maxy, maxz = np.arange(6)
    corner_index = np.array([minx, miny, minz,
                             maxx, miny, minz,
                             maxx, maxy, minz,
                             minx, maxy, minz,
                             minx, miny, maxz,
                             maxx, miny, maxz,
                             maxx, maxy, maxz,
                             minx, maxy, maxz]).reshape((-1, 3))
    corners = bounds.reshape(-1)[corner_index]
    return corners
Given a pair of axis aligned bounds, return all 8 corners of the
bounding box.

Parameters
----------
bounds : (2, 3) or (2, 2) float
    Axis aligned bounds

Returns
----------
corners : (8, 3) float
    Corner vertices of the cube
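A quick usage sketch for the unit cube (the bounds array is illustrative):

import numpy as np

bounds = np.array([[0.0, 0.0, 0.0],
                   [1.0, 1.0, 1.0]])
print(corners(bounds).shape)  # (8, 3)
print(corners(bounds)[:2])    # [[0. 0. 0.] [1. 0. 0.]]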
def _inject_into_mod(mod, name, value, force_lock=False):
    # NOTE: a docstring fragment overwrote part of this function during
    # extraction; the body is reconstructed from the surviving branches.
    old_value = getattr(mod, name, None)
    if force_lock:
        with _inject_into_mod.lock:
            if isinstance(old_value, ThreadLocalProxy):
                ThreadLocalProxy.set_reference(old_value, value)
            else:
                setattr(mod, name, ThreadLocalProxy(value, True))
    else:
        if isinstance(old_value, ThreadLocalProxy):
            ThreadLocalProxy.set_reference(old_value, value)
        else:
            # Only acquire the lock when a new proxy must be created.
            _inject_into_mod(mod, name, value, True)
Inject a variable into a module. This is used to inject "globals" like
``__salt__``, ``__pillar__``, or ``grains``.

Instead of injecting the value directly, a ``ThreadLocalProxy`` is
created. If such a proxy is already present under the specified name, it
is updated with the new value. This update only affects the current
thread, so that the same name can refer to different values depending on
the thread of execution. This is important for data that is not truly
global. For example, pillar data might be dynamically overridden through
function parameters and thus the actual values available in pillar might
depend on the thread that is calling a module.

mod:
    module object into which the value is going to be injected.
name:
    name of the variable that is injected into the module.
value:
    value that is injected into the variable. The value is not injected
    directly, but instead set as the new reference of the proxy that has
    been created for the variable.
force_lock:
    whether the lock should be acquired before checking whether a proxy
    object for the specified name has already been injected into the
    module. If ``False`` (the default), this function checks for the
    module's variable without acquiring the lock and only acquires the
    lock if a new proxy has to be created and injected.
def _attribute_is_magic(node, attrs, parents):
    if node.attrname not in attrs:
        return False
    if not node.last_child():
        return False
    try:
        for cls in node.last_child().inferred():
            if isinstance(cls, Super):
                cls = cls._self_class
            if node_is_subclass(cls, *parents) or cls.qname() in parents:
                return True
    except InferenceError:
        pass
    return False
Checks that the node is an attribute used inside one of the allowed parent classes.
def qteIsQtmacsWidget(widgetObj):
    if widgetObj is None:
        return False
    if hasattr(widgetObj, '_qteAdmin'):
        return True
    visited = [widgetObj]
    wid = widgetObj.parent()
    while wid not in visited:
        if hasattr(wid, '_qteAdmin'):
            return True
        elif wid is None:
            return False
        else:
            visited.append(wid)
            wid = wid.parent()
    return False
Determine if a widget is part of the Qtmacs widget hierarchy.

A widget belongs to the Qtmacs hierarchy if it, or one of its parents, has a "_qteAdmin" attribute (added via ``qteAddWidget``). Since every applet has this attribute, the function is guaranteed to return **True** if the widget is embedded somewhere inside an applet.

|Args|

* ``widgetObj`` (**QWidget**): the widget to test.

|Returns|

* **bool**: **True** if the widget, or one of its ancestors in the Qt hierarchy, has a '_qteAdmin' attribute.

|Raises|

* **None**
def retrieve(self, namespace, stream, start_time, end_time, start_id, configuration, order=ResultOrder.ASCENDING, limit=sys.maxint): if not start_id: start_id = uuid_from_kronos_time(start_time, _type=UUIDType.LOWEST) else: start_id = TimeUUID(start_id) if uuid_to_kronos_time(start_id) > end_time: return [] return self._retrieve(namespace, stream, start_id, end_time, order, limit, configuration)
Retrieves all the events for `stream` from `start_time` (inclusive) till `end_time` (inclusive). Alternatively to `start_time`, `start_id` can be provided, and then all events from `start_id` (exclusive) till `end_time` (inclusive) are returned. `start_id` should be used in cases when the client got disconnected from the server before all the events in the requested time window had been returned. `order` can be one of ResultOrder.ASCENDING or ResultOrder.DESCENDING. Returns an iterator over all JSON serialized (strings) events.
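A hedged sketch of the reconnect pattern the docstring describes; `backend`, the stream name, the timestamps `t0`/`t1`, and the event's '@id' field are placeholders, not confirmed API:

import json

last_id = None
for raw in backend.retrieve('ns', 'stream', start_time=t0, end_time=t1,
                            start_id=last_id, configuration={}):
    event = json.loads(raw)
    last_id = event['@id']          # remember the last event seen

# after a disconnect, pass start_id=last_id so only events after it
# (exclusive) up to end_time (inclusive) are returned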
def conditional_jit(function=None, **kwargs): def wrapper(function): try: numba = importlib.import_module("numba") return numba.jit(**kwargs)(function) except ImportError: return function if function: return wrapper(function) else: return wrapper
Use numba's jit decorator if numba is installed.

Notes
-----
If called without arguments then return wrapped function:

    @conditional_jit
    def my_func():
        return

else called with arguments:

    @conditional_jit(nopython=True)
    def my_func():
        return
def check_and_mutate_row( self, table_name, row_key, app_profile_id=None, predicate_filter=None, true_mutations=None, false_mutations=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): if "check_and_mutate_row" not in self._inner_api_calls: self._inner_api_calls[ "check_and_mutate_row" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.check_and_mutate_row, default_retry=self._method_configs["CheckAndMutateRow"].retry, default_timeout=self._method_configs["CheckAndMutateRow"].timeout, client_info=self._client_info, ) request = bigtable_pb2.CheckAndMutateRowRequest( table_name=table_name, row_key=row_key, app_profile_id=app_profile_id, predicate_filter=predicate_filter, true_mutations=true_mutations, false_mutations=false_mutations, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["check_and_mutate_row"]( request, retry=retry, timeout=timeout, metadata=metadata )
Mutates a row atomically based on the output of a predicate Reader filter. Example: >>> from google.cloud import bigtable_v2 >>> >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects/<project>/instances/<instance>/tables/<table>``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``true_mutations`` is empty, and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def tree_build(self): from skbio.tree import TreeNode nodes = {} for tax_id in self.taxonomy.index: node = TreeNode(name=tax_id, length=1) node.tax_name = self.taxonomy["name"][tax_id] node.rank = self.taxonomy["rank"][tax_id] node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id] nodes[tax_id] = node for tax_id in self.taxonomy.index: try: parent = nodes[nodes[tax_id].parent_tax_id] except KeyError: if tax_id != "1": warnings.warn( "tax_id={} has parent_tax_id={} which is not in tree" "".format(tax_id, nodes[tax_id].parent_tax_id) ) continue parent.append(nodes[tax_id]) return nodes["1"]
Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or `SampleCollection`. Returns ------- `skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current analysis and their parents leading back to the root node.
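A hedged traversal sketch using the skbio API; `classifications` stands in for an instance that provides tree_build():

root = classifications.tree_build()
for node in root.postorder():           # skbio TreeNode traversal
    if node.rank == 'species':
        print(node.name, node.tax_name)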
def ephem(self, *args, **kwargs): return self.__class__(self.ephemeris(*args, **kwargs))
Create an Ephem object which is a subset of this one.

Takes the same keyword arguments as :py:meth:`ephemeris`.

Return:
    Ephem
def Print(self, output_writer):
    if self._names:
        # format string and join separator were stripped from the source;
        # reconstructed as an assumption
        output_writer.Write('\tnames: {0:s}\n'.format(', '.join(self._names)))
Prints a human readable version of the filter. Args: output_writer (CLIOutputWriter): output writer.
def run_on_main_thread(self, func, args=None, kwargs=None): if not args: args = () if not kwargs: kwargs = {} self.microservice.get_io_loop().add_callback(func, *args, **kwargs)
Runs the ``func`` callable on the main thread, by using the provided microservice instance's IOLoop. :param func: callable to run on the main thread :param args: tuple or list with the positional arguments. :param kwargs: dict with the keyword arguments. :return:
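A brief hedged usage sketch; `helper` stands in for an instance of the owning class:

def update_status(code, reason='ok'):
    print(code, reason)

# safe to call from a worker thread: the callback runs on the IOLoop thread
helper.run_on_main_thread(update_status, args=(200,), kwargs={'reason': 'done'})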
def slice_slice(old_slice, applied_slice, size): step = (old_slice.step or 1) * (applied_slice.step or 1) items = _expand_slice(old_slice, size)[applied_slice] if len(items) > 0: start = items[0] stop = items[-1] + int(np.sign(step)) if stop < 0: stop = None else: start = 0 stop = 0 return slice(start, stop, step)
Given a slice and the size of the dimension to which it will be applied, index it with another slice to return a new slice equivalent to applying the slices sequentially
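A worked example of the composition, assuming slice_slice is importable from the surrounding module:

items = list(range(10))[1:9:2]                 # [1, 3, 5, 7]
picked = items[1::2]                           # [3, 7]

combined = slice_slice(slice(1, 9, 2), slice(1, None, 2), 10)
assert combined == slice(3, 8, 4)
assert list(range(10))[combined] == picked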
def get_task_cost(self, task_name):
    summary = self.get_task_summary(task_name)
    if summary is None:
        return None
    # key names were stripped from the source; 'Cost', 'CPU', 'Memory' and
    # 'Input' follow the layout implied by the docstring
    if 'Cost' in summary:
        task_cost = summary['Cost']
        cpu_cost = task_cost.get('CPU')
        memory = task_cost.get('Memory')
        input_size = task_cost.get('Input')
        return Instance.TaskCost(cpu_cost, memory, input_size)
Get task cost :param task_name: name of the task :return: task cost :rtype: Instance.TaskCost :Example: >>> cost = instance.get_task_cost(instance.get_task_names()[0]) >>> cost.cpu_cost 200 >>> cost.memory_cost 4096 >>> cost.input_size 0
def rnd_datetime_array(size, start=datetime(1970, 1, 1), end=None): if end is None: end = datetime.now() start = parser.parse_datetime(start) end = parser.parse_datetime(end) _assert_correct_start_end(start, end) return _randn(size, _rnd_datetime, start, end)
Generate an array or matrix of random datetimes.

:returns: 1d or 2d array of datetime.datetime
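Two hedged calls; whether `size` accepts a tuple for the 2-d case is an assumption based on the docstring:

from datetime import datetime

# ten random datetimes between 1970-01-01 and now
arr = rnd_datetime_array(10)

# a 3x4 matrix within an explicit window
mat = rnd_datetime_array((3, 4), start=datetime(2000, 1, 1),
                         end=datetime(2010, 1, 1))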
def groupby(self, io_select):
    offsets = []
    new_dim = []
    acc = 1
    # enumerate() is not directly reversible; materialize it first
    for i, d in reversed(list(enumerate(self.dims))):
        if not io_select[i]:
            new_dim.insert(0, d)
        offsets.insert(0, acc * io_select[i])
        acc *= d
    if not new_dim:
        return ((c, self[c]) for c in self._all_combos())
    else:
        output = [[None, Matrix(dims=new_dim)] for i in range(acc)]
        _groupby(self.cube, 0, offsets, 0, output, tuple(), [])
        return output
SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY

io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING
return - SEQUENCE OF (COORD, SUB-MATRIX) PAIRS
def named_tuple( element_name, tuple_type, child_processors, required=True, alias=None, hooks=None ): converter = _named_tuple_converter(tuple_type) processor = _Aggregate(element_name, converter, child_processors, required, alias) return _processor_wrap_if_hooks(processor, hooks)
Create a processor for namedtuple values. :param tuple_type: The namedtuple type. See also :func:`declxml.dictionary`
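A hedged declxml usage sketch; the element and field names are illustrative only:

import collections
import declxml as xml

Point = collections.namedtuple('Point', ['x', 'y'])

point_processor = xml.named_tuple('point', Point, [
    xml.integer('x'),
    xml.integer('y'),
])

xml.parse_from_string(point_processor, '<point><x>1</x><y>2</y></point>')
# -> Point(x=1, y=2)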
def memcache(self, f): name = _fullname(f) cache = self.load_memcache(name) @wraps(f) def memcached(*args): h = args out = cache.get(h, None) if out is None: out = f(*args) cache[h] = out return out return memcached
Cache a function in memory using an internal dictionary.
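A hedged usage sketch; `store` stands in for an instance of the class that defines memcache():

@store.memcache
def slow_square(x):
    return x * x

slow_square(4)   # computed and stored under the args tuple
slow_square(4)   # served from the in-memory dict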
def cleanupContainers(self): for i in range(self.count() - 1, self.currentIndex(), -1): widget = self.widget(i) widget.close() widget.setParent(None) widget.deleteLater()
Cleans up all containers to the right of the current one.
def get_last_args(tp):
    if NEW_TYPING:
        # message literal was stripped from the source; reconstructed
        raise ValueError('This function is only supported in Python 3.6,'
                         ' use get_args instead')
    if is_classvar(tp):
        return (tp.__type__,) if tp.__type__ is not None else ()
    if (
        is_generic_type(tp) or is_union_type(tp) or
        is_callable_type(tp) or is_tuple_type(tp)
    ):
        return tp.__args__ if tp.__args__ is not None else ()
    return ()
Get last arguments of (multiply) subscripted type. Parameters for Callable are flattened. Examples:: get_last_args(int) == () get_last_args(Union) == () get_last_args(ClassVar[int]) == (int,) get_last_args(Union[T, int]) == (T, int) get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T) get_last_args(Callable[[T], int]) == (T, int) get_last_args(Callable[[], int]) == (int,)
def string(cls, name, description=None, unit='', default=None, initial_status=None):
    return cls(cls.STRING, name, description, unit, None, default, initial_status)
Instantiate a new string sensor object.

Parameters
----------
name : str
    The name of the sensor.
description : str
    A short description of the sensor.
unit : str
    The units of the sensor value. May be the empty string if there are no applicable units.
default : str
    An initial value for the sensor. Defaults to the empty string.
initial_status : int enum or None
    An initial status for the sensor. If None, defaults to Sensor.UNKNOWN. `initial_status` must be one of the keys in Sensor.STATUSES
def list_packages(request): session = DBSession() names = [p.name for p in Package.all(session, order_by=Package.name)] return names
Retrieve a list of the package names registered with the package index. Returns a list of name strings.
def autoLayoutSelected(self,
                       padX=None,
                       padY=None,
                       direction=Qt.Horizontal,
                       layout='Layered',   # default plugin name is an assumption; literal was stripped
                       animate=0,
                       centerOn=None,
                       center=None):
    nodes = self.selectedNodes()
    return self.autoLayoutNodes(nodes, padX, padY, direction, layout,
                                animate, centerOn, center)
Automatically lays out all the selected nodes in the scene using the autoLayoutNodes method.

:param      padX      | <int> || None | default is 2 * cell width
            padY      | <int> || None | default is 2 * cell height
            direction | <Qt.Direction>
            layout    | <str> | name of the layout plugin to use
            animate   | <int> | number of seconds to animate over

:return     {<XNode>: <QRectF>, ..} | new rects per node
def to_netcdf(self, filename, compress=True): mode = "w" if self._groups: for group in self._groups: data = getattr(self, group) kwargs = {} if compress: kwargs["encoding"] = {var_name: {"zlib": True} for var_name in data.variables} data.to_netcdf(filename, mode=mode, group=group, **kwargs) data.close() mode = "a" else: empty_netcdf_file = nc.Dataset(filename, mode="w", format="NETCDF4") empty_netcdf_file.close() return filename
Write InferenceData to file using netcdf4. Parameters ---------- filename : str Location to write to compress : bool Whether to compress result. Note this saves disk space, but may make saving and loading somewhat slower (default: True). Returns ------- str Location of netcdf file
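A hedged round-trip sketch (ArviZ-style API; ``from_netcdf`` is the usual counterpart and ``idata`` stands in for an InferenceData instance):

idata.to_netcdf('trace.nc')

import arviz as az
restored = az.from_netcdf('trace.nc')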
def get_equipment(self, **kwargs):
    # endpoint literal was stripped from the source; the path is an assumption
    uri = 'api/v3/equipment/'
    uri = self.prepare_url(uri, kwargs)
    return super(ApiEquipment, self).get(uri)
Return a list of environments related to the environment vip.
def display_latex(*objs, **kwargs):
    raw = kwargs.pop('raw', False)
    if raw:
        for obj in objs:
            publish_latex(obj)
    else:
        display(*objs, include=['text/plain', 'text/latex'])
Display the LaTeX representation of an object. Parameters ---------- objs : tuple of objects The Python objects to display, or if raw=True raw latex data to display. raw : bool Are the data objects raw data or Python objects that need to be formatted before display? [default: False]
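A short usage sketch with IPython's display machinery:

from IPython.display import Latex

# formatted object
display_latex(Latex(r'$e^{i\pi} + 1 = 0$'))

# or pass raw LaTeX source directly
display_latex(r'$e^{i\pi} + 1 = 0$', raw=True)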
def p_members(self, p): if len(p) == 1: p[0] = list() else: p[1].append(p[2]) p[0] = p[1]
members : | members member VALUE_SEPARATOR | members member
def silence(cls, *modules, **kwargs): level = kwargs.pop("level", logging.WARNING) for mod in modules: name = mod.__name__ if hasattr(mod, "__name__") else mod logging.getLogger(name).setLevel(level)
Args:
    *modules: Modules, or names of modules, to silence (by setting their log level to WARNING or above).
    **kwargs: Passed as kwargs due to Python 2.7 support; would be level=logging.WARNING otherwise.
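A hedged usage sketch; ``LogHelper`` stands in for the class that defines silence():

import logging
import requests

# quiet a chatty third-party logger down to ERROR
LogHelper.silence(requests, level=logging.ERROR)

# plain names work as well
LogHelper.silence('botocore', 'boto3')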