code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def __hash_and_stat_file(self, path, saltenv=): try: path = self._check_proto(path) except MinionError as err: if not os.path.isfile(path): log.warning( , path, err ) return {}, None else: ret = {} hash_type = self.opts.get(, ) ret[] = salt.utils.hashutils.get_hash(path, form=hash_type) ret[] = hash_type return ret load = {: path, : saltenv, : } return self.channel.send(load)
Common code for hashing and stating files
def _lt_from_ge(self, other): op_result = self.__ge__(other) if op_result is NotImplemented: return NotImplemented return not op_result
Return a < b. Computed by @total_ordering from (not a >= b).
def _decompose_(self, qubits): qubits = list(qubits) while len(qubits) > 0: q_head = qubits.pop(0) yield cirq.H(q_head) for i, qubit in enumerate(qubits): yield (cirq.CZ**(-1/2.0**(i+1)))(qubit, q_head)
A quantum circuit (QFT_inv) with the following structure. ---H--@-------@--------@---------------------------------------------- | | | ------@^-0.5--+--------+---------H--@-------@------------------------- | | | | --------------@^-0.25--+------------@^-0.5--+---------H--@------------ | | | -----------------------@^-0.125-------------@^-0.25------@^-0.5---H--- The number of qubits can be arbitrary.
def _parse_substitutions(self, element): subs = element.findall() for sub in subs: self.agentml.set_substitution(attribute(sub, ), sub.text)
Parse word substitutions :param element: The XML Element object :type element: etree._Element
def should_build_with_cython(previous_cython_version, is_release): have_cython = False try: from Cython import __version__ as cython_version have_cython = True except ImportError: pass if have_cython and (not is_release or previous_cython_version == ): return cython_version else: return False
Returns the previously used Cython version (or 'unknown' if not previously built) if Cython should be used to build extension modules from pyx files.
def readsGenerator(self, request): if not request.reference_id: raise exceptions.UnmappedReadsNotSupported() if len(request.read_group_ids) < 1: raise exceptions.BadRequestException( "At least one readGroupId must be specified") elif len(request.read_group_ids) == 1: return self._readsGeneratorSingle(request) else: return self._readsGeneratorMultiple(request)
Returns a generator over the (read, nextPageToken) pairs defined by the specified request
def tr(self, args, color=None): width = self._term_size()[1] if not args: if color is not None: print(self._echo(" else: print(self._echo(" else: for each_symbol in args: chars = len(each_symbol) number_chars = width // chars if color is not None: print(self._echo(each_symbol * number_chars, color)) else: print(each_symbol * number_chars)
Method to print ASCII patterns to terminal
def query_string_attribute(self, target, display_mask, attr): reply = NVCtrlQueryStringAttributeReplyRequest(display=self.display, opcode=self.display.get_extension_major(extname), target_id=target.id(), target_type=target.type(), display_mask=display_mask, attr=attr) if not reply._data.get(): return None return str(reply._data.get()).strip()
Return the value of a string attribute
def create_collection(self, name="collection", position=None, **kwargs): if name in self.item_names: wt_exceptions.ObjectExistsWarning.warn(name) return self[name] collection = Collection( filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs ) if position is not None: self.attrs["item_names"] = np.insert( self.attrs["item_names"][:-1], position, collection.natural_name.encode() ) setattr(self, name, collection) return collection
Create a new child colleciton. Parameters ---------- name : string Unique identifier. position : integer (optional) Location to insert. Default is None (append). kwargs Additional arguments to child collection instantiation. Returns ------- WrightTools Collection New child.
def connect(self, packet=None): if isinstance(packet, RTMPPacket): packet = packet.packet else: packet = ffi.NULL res = librtmp.RTMP_Connect(self.rtmp, packet) if res < 1: raise RTMPError("Failed to connect") return RTMPCall(self, 1.0)
Connect to the server. :param packet: RTMPPacket, this packet will be sent instead of the regular "connect" packet. Raises :exc:`RTMPError` if the connect attempt fails.
def _handle_tag_text(self, text): next = self._read(1) if not self._can_recurse() or text not in self.MARKERS: self._emit_text(text) elif text == next == "{": self._parse_template_or_argument() elif text == next == "[": self._parse_wikilink() elif text == "<": self._parse_tag() else: self._emit_text(text)
Handle regular *text* inside of an HTML open tag.
def _gen_3spec(op, path, xattr=False): flags = 0 if xattr: flags |= _P.SDSPEC_F_XATTR return Spec(op, path, flags)
Returns a Spec tuple suitable for passing to the underlying C extension. This variant is called for operations that lack an input value. :param str path: The path to fetch :param bool xattr: Whether this is an extended attribute :return: a spec suitable for passing to the underlying C extension
def _get(pseudodict, key, single=True): matches = [item[1] for item in pseudodict if item[0] == key] if single: return matches[0] else: return matches
Helper method for getting values from "multi-dict"s
def tobytes(s, encoding=): if PY3K: if isinstance(s, bytes): return s else: return s.encode(encoding) else: if isinstance(s, unicode): return s.encode(encoding) else: return s
Convert string s to the 'bytes' type, in all Pythons, even back before Python 2.6. What 'str' means varies by PY3K or not. In Pythons before 3.0, this is technically the same as the str type in terms of the character data in memory.
def setup_deploy_key(keypath=, key_ext=, env_name=): key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None)) if not key: raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from properly. You may need to re-run to fix this error." .format(env_name=env_name)) if (not os.path.isfile(keypath + key_ext) and os.path.isfile( + key_ext)): keypath = key_filename = os.path.basename(keypath) key = key.encode() decrypt_file(keypath + key_ext, key) key_path = os.path.expanduser("~/.ssh/" + key_filename) os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True) os.rename(keypath, key_path) with open(os.path.expanduser("~/.ssh/config"), ) as f: f.write("Host github.com" " LogLevel ERROR\n" % key_path) agent_info = subprocess.check_output([, ]) agent_info = agent_info.decode() agent_info = agent_info.split() AUTH_SOCK = agent_info[0].split()[1][:-1] AGENT_PID = agent_info[3].split()[1][:-1] os.putenv(, AUTH_SOCK) os.putenv(, AGENT_PID) run([, os.path.expanduser( + key_filename)])
Decrypts the deploy key and configures it with ssh The key is assumed to be encrypted as keypath + key_ext, and the encryption key is assumed to be set in the environment variable ``env_name``. If ``env_name`` is not set, it falls back to ``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility. If keypath + key_ext does not exist, it falls back to ``github_deploy_key.enc`` for backwards compatibility.
def download_release(download_file, release=None): if release is None: release = get_latest_release() url = % release misc.http_download(url, download_file)
Downloads the "go-basic.obo" file for the specified release.
def plot_ppc( data, kind="density", alpha=None, mean=True, figsize=None, textsize=None, data_pairs=None, var_names=None, coords=None, flatten=None, flatten_pp=None, num_pp_samples=None, random_seed=None, jitter=None, animated=False, animation_kwargs=None, legend=True, ): for group in ("posterior_predictive", "observed_data"): if not hasattr(data, group): raise TypeError( .format(group=group) ) if kind.lower() not in ("density", "cumulative", "scatter"): raise TypeError("`kind` argument must be either `density`, `cumulative`, or `scatter`") if data_pairs is None: data_pairs = {} if animation_kwargs is None: animation_kwargs = {} if platform.system() == "Linux": animation_kwargs.setdefault("blit", True) else: animation_kwargs.setdefault("blit", False) if animated and animation_kwargs["blit"] and platform.system() != "Linux": _log.warning( "If you experience problems rendering the animation try setting" "`animation_kwargs({:False}) or changing the plotting backend (e.g. to TkAgg)" ) if alpha is None: if animated: alpha = 1 else: if kind.lower() == "scatter": alpha = 0.7 else: alpha = 0.2 if jitter is None: jitter = 0.0 assert jitter >= 0.0 observed = data.observed_data posterior_predictive = data.posterior_predictive if var_names is None: var_names = observed.data_vars var_names = _var_names(var_names, observed) pp_var_names = [data_pairs.get(var, var) for var in var_names] if flatten_pp is None and flatten is None: flatten_pp = list(posterior_predictive.dims.keys()) elif flatten_pp is None: flatten_pp = flatten if flatten is None: flatten = list(observed.dims.keys()) if coords is None: coords = {} if random_seed is not None: np.random.seed(random_seed) total_pp_samples = posterior_predictive.sizes["chain"] * posterior_predictive.sizes["draw"] if num_pp_samples is None: if kind == "scatter" and not animated: num_pp_samples = min(5, total_pp_samples) else: num_pp_samples = total_pp_samples if ( not isinstance(num_pp_samples, Integral) or num_pp_samples < 1 or 
num_pp_samples > total_pp_samples ): raise TypeError( "`num_pp_samples` must be an integer between 1 and " + "{limit}.".format(limit=total_pp_samples) ) pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False) for key in coords.keys(): coords[key] = np.where(np.in1d(observed[key], coords[key]))[0] obs_plotters = list( xarray_var_iter( observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True ) ) pp_plotters = list( xarray_var_iter( posterior_predictive.isel(coords), var_names=pp_var_names, skip_dims=set(flatten_pp), combined=True, ) ) length_plotters = len(obs_plotters) rows, cols = default_grid(length_plotters) (figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size( figsize, textsize, rows, cols ) fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize) for i, ax in enumerate(axes): var_name, selection, obs_vals = obs_plotters[i] pp_var_name, _, pp_vals = pp_plotters[i] dtype = posterior_predictive[pp_var_name].dtype.kind obs_vals = obs_vals.flatten() pp_vals = pp_vals.reshape(total_pp_samples, -1) pp_sampled_vals = pp_vals[pp_sample_ix] if kind == "density": plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth} if dtype == "i": plot_kwargs["drawstyle"] = "steps-pre" ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name)) if dtype == "f": plot_kde( obs_vals, label="Observed {}".format(var_name), plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3}, fill_kwargs={"alpha": 0}, ax=ax, legend=legend, ) else: nbins = round(len(obs_vals) ** 0.5) hist, bin_edges = np.histogram(obs_vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) ax.plot( bin_edges, hist, label="Observed {}".format(var_name), color="k", linewidth=linewidth, zorder=3, drawstyle=plot_kwargs["drawstyle"], ) if animated: animate, init = _set_animation( pp_sampled_vals, ax, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs ) else: 
pp_densities = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() if dtype == "f": pp_density, lower, upper = _fast_kde(vals) pp_x = np.linspace(lower, upper, len(pp_density)) pp_densities.extend([pp_x, pp_density]) else: nbins = round(len(vals) ** 0.5) hist, bin_edges = np.histogram(vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) pp_densities.extend([bin_edges, hist]) ax.plot(*pp_densities, **plot_kwargs) if mean: if dtype == "f": plot_kde( pp_vals.flatten(), plot_kwargs={ "color": "C0", "linestyle": "--", "linewidth": linewidth, "zorder": 2, }, label="Posterior predictive mean {}".format(pp_var_name), ax=ax, legend=legend, ) else: vals = pp_vals.flatten() nbins = round(len(vals) ** 0.5) hist, bin_edges = np.histogram(vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) ax.plot( bin_edges, hist, color="C0", linewidth=linewidth, label="Posterior predictive mean {}".format(pp_var_name), zorder=2, linestyle="--", drawstyle=plot_kwargs["drawstyle"], ) ax.tick_params(labelsize=xt_labelsize) ax.set_yticks([]) elif kind == "cumulative": drawstyle = "default" if dtype == "f" else "steps-pre" ax.plot( *_empirical_cdf(obs_vals), color="k", linewidth=linewidth, label="Observed {}".format(var_name), drawstyle=drawstyle, zorder=3 ) if animated: animate, init = _set_animation( pp_sampled_vals, ax, kind=kind, alpha=alpha, drawstyle=drawstyle, linewidth=linewidth, ) else: pp_densities = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() pp_x, pp_density = _empirical_cdf(vals) pp_densities.extend([pp_x, pp_density]) ax.plot( *pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth ) ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name)) if mean: ax.plot( *_empirical_cdf(pp_vals.flatten()), color="C0", linestyle="--", linewidth=linewidth, drawstyle=drawstyle, label="Posterior predictive mean {}".format(pp_var_name) ) ax.set_yticks([0, 0.5, 1]) elif kind == 
"scatter": if mean: if dtype == "f": plot_kde( pp_vals.flatten(), plot_kwargs={ "color": "C0", "linestyle": "--", "linewidth": linewidth, "zorder": 3, }, label="Posterior predictive mean {}".format(pp_var_name), ax=ax, legend=legend, ) else: vals = pp_vals.flatten() nbins = round(len(vals) ** 0.5) hist, bin_edges = np.histogram(vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) ax.plot( bin_edges, hist, color="C0", linewidth=linewidth, label="Posterior predictive mean {}".format(pp_var_name), zorder=3, linestyle="--", drawstyle="steps-pre", ) _, limit = ax.get_ylim() limit *= 1.05 y_rows = np.linspace(0, limit, num_pp_samples + 1) jitter_scale = y_rows[1] - y_rows[0] scale_low = 0 scale_high = jitter_scale * jitter obs_yvals = np.zeros_like(obs_vals, dtype=np.float64) if jitter: obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals)) ax.plot( obs_vals, obs_yvals, "o", color="C0", markersize=markersize, alpha=alpha, label="Observed {}".format(var_name), zorder=4, ) if animated: animate, init = _set_animation( pp_sampled_vals, ax, kind=kind, height=y_rows.mean() * 0.5, markersize=markersize, ) else: for vals, y in zip(pp_sampled_vals, y_rows[1:]): vals = np.ravel(vals) yvals = np.full_like(vals, y, dtype=np.float64) if jitter: yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals)) ax.plot( vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha ) ax.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name)) ax.set_yticks([]) if var_name != pp_var_name: xlabel = "{} / {}".format(var_name, pp_var_name) else: xlabel = var_name ax.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize) if legend: if i == 0: ax.legend(fontsize=xt_labelsize * 0.75) else: ax.legend([]) if animated: ani = animation.FuncAnimation( fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs ) return axes, ani else: return axes
Plot for posterior predictive checks. Parameters ---------- data : az.InferenceData object InferenceData object containing the observed and posterior predictive data. kind : str Type of plot to display (density, cumulative, or scatter). Defaults to density. alpha : float Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density and cumulative, for scatter defaults to 0.7 mean : bool Whether or not to plot the mean posterior predictive distribution. Defaults to True figsize : tuple Figure size. If None it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on figsize. data_pairs : dict Dictionary containing relations between observed data and posterior predictive data. Dictionary structure: Key = data var_name Value = posterior predictive var_name For example, `data_pairs = {'y' : 'y_hat'}` If None, it will assume that the observed data and the posterior predictive data have the same variable name. var_names : list List of variables to be plotted. Defaults to all observed variables in the model if None. coords : dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten : list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. flatten_pp : list List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`. num_pp_samples : int The number of posterior predictive samples to plot. 
For `kind` = 'scatter' and `animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7 unless defined otherwise. Otherwise it defaults to all provided samples. random_seed : int Random number generator seed passed to numpy.random.seed to allow reproducibility of the plot. By default, no seed will be provided and the plot will change each call if a random sample is specified by `num_pp_samples`. jitter : float If kind is "scatter", jitter will add random uniform noise to the height of the ppc samples and observed data. By default 0. animated : bool Create an animation of one posterior predictive sample per frame. Defaults to False. animation_kwargs : dict Keywords passed to `animation.FuncAnimation`. legend : bool Add legend to figure. By default True. Returns ------- axes : matplotlib axes Examples -------- Plot the observed data KDE overlaid on posterior predictive KDEs. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('radon') >>> az.plot_ppc(data) Plot the overlay with empirical CDFs. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='cumulative') Use the coords and flatten parameters to plot selected variable dimensions across multiple plots. .. plot:: :context: close-figs >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[]) Plot the overlay using a stacked scatter plot that is particularly useful when the sample sizes are small. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='scatter', flatten=[], >>> coords={'observed_county': ['AITKIN', 'BELTRAMI']}) Plot random posterior predictive sub-samples. .. plot:: :context: close-figs >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
def greater_equal(lhs, rhs): return _ufunc_helper( lhs, rhs, op.broadcast_greater_equal, lambda x, y: 1 if x >= y else 0, _internal._greater_equal_scalar, _internal._lesser_equal_scalar)
Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs, otherwise return 0(false). Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray Output array of boolean values. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> (x >= 1).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (x >= y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.greater_equal(x, y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (z >= y).asnumpy() array([[ 1., 1.], [ 0., 1.]], dtype=float32)
def get_peak_number(self, sample): proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE) out, err = proc.communicate() sample["peakNumber"] = re.sub("\D.*", "", out) return sample
Counts number of peaks from a sample's peak file. :param pipelines.Sample sample: Sample object with "peaks" attribute.
def DEFINE_choice(self, name, default, choices, help, constant=False): self.AddOption( type_info.Choice( name=name, default=default, choices=choices, description=help), constant=constant)
A helper for defining choice string options.
def make_matrix(version, reserve_regions=True, add_timing=True): size = calc_matrix_size(version) row = [0x2] * size matrix = tuple([bytearray(row) for i in range(size)]) if reserve_regions: if version > 6: for i in range(6): matrix[i][-11] = 0x0 matrix[i][-10] = 0x0 matrix[i][-9] = 0x0 matrix[-11][i] = 0x0 matrix[-10][i] = 0x0 matrix[-9][i] = 0x0 for i in range(9): matrix[i][8] = 0x0 matrix[8][i] = 0x0 if version > 0: matrix[-i][8] = 0x0 matrix[8][- i] = 0x0 if add_timing: add_timing_pattern(matrix, version < 1) return matrix
\ Creates a matrix of the provided `size` (w x h) initialized with the (illegal) value 0x2. The "timing pattern" is already added to the matrix and the version and format areas are initialized with 0x0. :param int version: The (Micro) QR Code version :rtype: tuple of bytearrays
def _translate_dst_oprnd(self, operand): if isinstance(operand, ReilRegisterOperand): return self._translate_dst_register_oprnd(operand) else: raise Exception("Invalid operand type")
Translate destination operand to a SMT expression.
def separate_groups(groups, key, total): optimum, extra = compute_optimum(len(groups), total) over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total) if not extra: return over_loaded, under_loaded potential_under_loaded = [ group for group in optimal if key(group) == optimum ] potential_over_loaded = [ group for group in optimal if key(group) > optimum ] revised_under_loaded = under_loaded + potential_under_loaded revised_over_loaded = over_loaded + potential_over_loaded return ( sorted(revised_over_loaded, key=key, reverse=True), sorted(revised_under_loaded, key=key), )
Separate the group into overloaded and under-loaded groups. The revised over-loaded groups increases the choice space for future selection of most suitable group based on search criteria. For example: Given the groups (a:4, b:4, c:3, d:2) where the number represents the number of elements for each group. smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded and 'd' as under-loaded. separate-groups combines 'a' with 'b' as over-loaded, allowing to select between these two groups to transfer the element to 'd'. :param groups: list of groups :param key: function to retrieve element count from group :param total: total number of elements to distribute :returns: sorted lists of over loaded (descending) and under loaded (ascending) group
def split_by_commas(maybe_s: str) -> Tuple[str, ...]: if not maybe_s: return () parts: List[str] = [] split_by_backslash = maybe_s.split(r) for split_by_backslash_part in split_by_backslash: splitby_comma = split_by_backslash_part.split() if parts: parts[-1] += + splitby_comma[0] else: parts.append(splitby_comma[0]) parts.extend(splitby_comma[1:]) return tuple(parts)
Split a string by commas, but allow escaped commas. - If maybe_s is falsey, returns an empty tuple - Ignore backslashed commas
def combine_first(self, other): import pandas.core.computation.expressions as expressions def extract_values(arr): if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view() return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
def is_valid(self): for i in range(self.raster.RasterCount): try: checksum = self.raster.GetRasterBand(i + 1).Checksum() except RuntimeError: return False return True
Check image integrity. Tries to compute the checksum for each raster layer and returns False if this fails. See this forum entry: `How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_. Returns ------- bool is the file valid?
def topic_detail(request, slug): extra_context = { : Question.objects.published().filter(topic__slug=slug), } return object_detail(request, queryset=Topic.objects.published(), extra_context=extra_context, template_object_name=, slug=slug)
A detail view of a Topic Templates: :template:`faq/topic_detail.html` Context: topic An :model:`faq.Topic` object. question_list A list of all published :model:`faq.Question` objects that relate to the given :model:`faq.Topic`.
def update_selection(self): self.clear_boxes() self.clear_high_level_pars() if self.UPPER_LEVEL_SHOW != "specimens": self.mean_type_box.SetValue("None") coordinate_system = self.coordinates_box.GetValue() if coordinate_system == and \ len(self.Data[self.s][]) == 0: self.coordinates_box.SetStringSelection() elif coordinate_system == and \ len(self.Data[self.s][]) == 0: self.coordinates_box.SetStringSelection("specimen") if coordinate_system != self.coordinates_box.GetValue() and self.ie_open: self.ie.coordinates_box.SetStringSelection( self.coordinates_box.GetValue()) self.ie.update_editor() coordinate_system = self.coordinates_box.GetValue() self.COORDINATE_SYSTEM = coordinate_system self.update_bounds_boxes() high_level = self.level_box.GetValue() old_string = self.level_names.GetValue() new_string = old_string if high_level == : if self.s in self.Data_hierarchy[]: new_string = self.Data_hierarchy[][self.s] else: new_string = if high_level == : if self.s in self.Data_hierarchy[]: new_string = self.Data_hierarchy[][self.s] else: new_string = if high_level == : if self.s in self.Data_hierarchy[]: new_string = self.Data_hierarchy[][self.s] else: new_string = self.level_names.SetValue(new_string) if self.ie_open and new_string != old_string: self.ie.level_names.SetValue(new_string) self.ie.on_select_level_name(-1, True) self.update_PCA_box() self.generate_warning_text() self.update_warning_box() self.update_fit_boxes() self.update_mean_fit_box() self.Add_text() if self.current_fit: self.draw_figure(self.s, False) else: self.draw_figure(self.s, True) self.update_high_level_stats() self.update_GUI_with_new_interpretation()
Convenience function update display (figures, text boxes and statistics windows) with a new selection of specimen
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec: if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER: instance = self._backend.find(instance_id) return self._backend.create(instance, service_details.parameters, existing=True) raise ErrPlanUnsupported(service_details.plan_id)
Provision the new instance see openbrokerapi documentation Returns: ProvisionedServiceSpec
def dot_solve(self, y): r return np.dot(y.T, cho_solve(self._factor, y))
r""" Compute the inner product of a vector with the inverse of the covariance matrix applied to itself: .. math:: y\,K^{-1}\,y Args: y (ndarray[nsamples]): The vector :math:`y`.
def _extract_services_list_helper(services): if services is None: return {} if isinstance(services, dict): services = services.values() _s = OrderedDict() for s in services: if isinstance(s, dict) and in s: _s[s[]] = s.get(, []) if isinstance(s, str): _s[s] = [] return _s
Extract a OrderedDict of {service: [ports]} of the supplied services for use by the other functions. The services object can either be: - None : no services were passed (an empty dict is returned) - a list of strings - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - An array of [{'service': service_name, ...}, ...] @param services: see above @returns OrderedDict(service: [ports], ...)
def monitors(self, **kwargs): monitors = super(Classifier, self).monitors(**kwargs) regs = regularizers.from_kwargs(self, **kwargs) outputs, _ = self.build_graph(regs) return monitors + [(, self.losses[0].accuracy(outputs))]
Return expressions that should be computed to monitor training. Returns ------- monitors : list of (name, expression) pairs A list of named monitor expressions to compute for this network.
def blink(self, state=True): if self._blinking == state: return True elif not self.graphicsEffect(): return False else: self._blinking = state if state: self.startTimer(self.blinkInterval())
Starts or stops the blinking state for this button. This only works for when the toolbutton is in Shadowed or Colored mode. :param state | <bool> :return <bool> | success
def forward(self, data_batch, is_train=None): self._scores = data_batch.data[0] if is_train is None: is_train = self.for_training if is_train: self._labels = data_batch.label[0]
Forward computation. Here we do nothing but to keep a reference to the scores and the labels so that we can do backward computation. Parameters ---------- data_batch : DataBatch Could be anything with similar API implemented. is_train : bool Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
def get_grouped(self, go_ntsets, go_all, gosubdag, **kws): kws_grpd = {k:v for k, v in kws.items() if k in Grouped.kws_dict} kws_grpd[] = self._init_go2ntpresent(go_ntsets, go_all, gosubdag) return Grouped(gosubdag, self.godag.version, **kws_grpd)
Get Grouped object.
def _delete(self, url): req = self._session.delete(self._api_prefix + url) return self._action(req)
Wrapper around request.delete() to use the API prefix. Returns a JSON response.
def rank_width(self): rank_width = defaultdict(int) node_rank = self.node_rank() for rank in node_rank.values(): rank_width[rank] += 1 return dict(rank_width)
Returns the width of each rank in the graph. #TODO
def _build_connstr(host, port, bucket): hostlist = [] if isinstance(host, (tuple, list)): for curhost in host: if isinstance(curhost, (list, tuple)): hostlist.append(_fmthost(*curhost)) else: hostlist.append(curhost) else: hostlist.append(_fmthost(host, port)) return .format(.join(hostlist), bucket)
Converts a 1.x host:port specification to a connection string
def get_manylinux_wheel_url(self, package_name, package_version): cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), ) if not os.path.isdir(cached_pypi_info_dir): os.makedirs(cached_pypi_info_dir) return None
For a given package name, returns a link to the download URL, else returns None. Related: https://github.com/Miserlou/Zappa/issues/398 Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae This function downloads metadata JSON of `package_name` from Pypi and examines if the package has a manylinux wheel. This function also caches the JSON file so that we don't have to poll Pypi every time.
def set_version(context: Context, version=None, bump=False): if bump and version: raise TaskError() if bump: from mtp_common import VERSION version = list(VERSION) version[-1] += 1 else: try: version = list(map(int, version.split())) assert len(version) == 3 except (AttributeError, ValueError, AssertionError): raise TaskError() dotted_version = .join(map(str, version)) replacements = [ (r, % .join(map(str, version)), ), (r, % dotted_version, ), ] for search, replacement, path in replacements: with open(os.path.join(root_path, path)) as f: content = f.read() content = re.sub(search, replacement, content, flags=re.MULTILINE) with open(os.path.join(root_path, path), ) as f: f.write(content) context.debug( % dotted_version)
Updates the version of MTP-common
def _convert_date_to_dict(field_date): return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
Convert native python ``datetime.date`` object to a format supported by the API
def union(self, other, rename=False): t clash (i.e., youve explicitly chosen names to avoid clashes). If rename is True, nodes/links are relabeled such that the new "prefix" for each node is the graph name (i.e., for graph name A, node h1 is renamed A_h1). This method returns a new Topology object and does not modify either topology used for unioning. ' if rename: self.nxgraph = Topology.__relabel_graph(self.__nxgraph, self.name) other.nxgraph = Topology.__relabel_graph(other.__nxgraph, other.name) nxgraph = nx.union(self.nxgraph, other.nxgraph, name="{}_{}".format(self.name, other.name)) newtopo = Topology(nxgraph=nxgraph, name="{}_{}".format(self.name, other.name)) return newtopo
Union/add two topologies together to form a larger topology. If rename is False, the method assumes that node names don't clash (i.e., you've called addNodeLabelPrefix or you've explicitly chosen names to avoid clashes). If rename is True, nodes/links are relabeled such that the new "prefix" for each node is the graph name (i.e., for graph name A, node h1 is renamed A_h1). This method returns a new Topology object and does not modify either topology used for unioning.
def check_repository_existence(params): repodir = os.path.join(params.outdir, params.name) if os.path.isdir(repodir): raise Conflict( .format(repodir))
Check repository existence. :param argparse.Namespace params: parameters
def egress(self, envelope, http_headers, operation, binding_options): custom_headers = self._header_handler.GetHTTPHeaders() http_headers.update(custom_headers) return envelope, http_headers
Overriding the egress function to set our headers. Args: envelope: An Element with the SOAP request data. http_headers: A dict of the current http headers. operation: The SoapOperation instance. binding_options: An options dict for the SOAP binding. Returns: A tuple of the envelope and headers.
def validate_url(value): if not re.match(VIMEO_URL_RE, value) and not re.match(YOUTUBE_URL_RE, value): raise ValidationError()
Validate url.
def does_not_contain_duplicates(self): try: if len(self.val) == len(set(self.val)): return self except TypeError: raise TypeError() self._err( % self.val)
Asserts that val is iterable and does not contain any duplicate items.
def flagants(self, threshold=50): badsols = n.where( (n.median(self.amp)/self.amp > threshold) & (self.flagged == False))[0] if len(badsols): self.logger.info( % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols])) for sol in badsols: self.flagged[sol] = True
Flags solutions with amplitude more than threshold larger than median.
def set_log_type_name(self, logType, name): assert logType in self.__logTypeStdoutFlags.keys(), "logType not defined" %logType assert isinstance(name, basestring), "name must be a string" name = str(name) self.__logTypeNames[logType] = name
Set a logtype name. :Parameters: #. logType (string): A defined logging type. #. name (string): The logtype new name.
def write(self, symbol, item, metadata=None, chunker=DateChunker(), audit=None, **kwargs): if not isinstance(item, (DataFrame, Series)): raise Exception("Can only chunk DataFrames and Series") self._arctic_lib.check_quota() previous_shas = [] doc = {} meta = {} doc[SYMBOL] = symbol doc[LEN] = len(item) doc[SERIALIZER] = self.serializer.TYPE doc[CHUNKER] = chunker.TYPE doc[USERMETA] = metadata sym = self._get_symbol_info(symbol) if sym: previous_shas = set([Binary(x[SHA]) for x in self._collection.find({SYMBOL: symbol}, projection={SHA: True, : False}, )]) ops = [] meta_ops = [] chunk_count = 0 for start, end, chunk_size, record in chunker.to_chunks(item, **kwargs): chunk_count += 1 data = self.serializer.serialize(record) doc[CHUNK_SIZE] = chunk_size doc[METADATA] = {: data[METADATA][COLUMNS] if COLUMNS in data[METADATA] else } meta = data[METADATA] for i in xrange(int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)): chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])} chunk[SEGMENT] = i chunk[START] = meta[START] = start chunk[END] = meta[END] = end chunk[SYMBOL] = meta[SYMBOL] = symbol dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode()] chunk[SHA] = self._checksum(dates, chunk[DATA]) meta_ops.append(pymongo.ReplaceOne({SYMBOL: symbol, START: start, END: end}, meta, upsert=True)) if chunk[SHA] not in previous_shas: ops.append(pymongo.UpdateOne({SYMBOL: symbol, START: start, END: end, SEGMENT: chunk[SEGMENT]}, {: chunk}, upsert=True)) else: previous_shas.remove(chunk[SHA]) if ops: self._collection.bulk_write(ops, ordered=False) if meta_ops: self._mdata.bulk_write(meta_ops, ordered=False) doc[CHUNK_COUNT] = chunk_count doc[APPEND_COUNT] = 0 if previous_shas: mongo_retry(self._collection.delete_many)({SYMBOL: symbol, SHA: {: list(previous_shas)}}) mongo_retry(self._symbols.update_one)({SYMBOL: symbol}, {: doc}, upsert=True) if audit is not None: audit[] = symbol audit[] = audit[] = chunk_count 
self._audit.insert_one(audit)
Writes data from item to symbol in the database Parameters ---------- symbol: str the symbol that will be used to reference the written data item: Dataframe or Series the data to write the database metadata: ? optional per symbol metadata chunker: Object of type Chunker A chunker that chunks the data in item audit: dict audit information kwargs: optional keyword args that are passed to the chunker. Includes: chunk_size: used by chunker to break data into discrete chunks. see specific chunkers for more information about this param. func: function function to apply to each chunk before writing. Function can not modify the date column.
def clean(self): cleaned_data = super(EnterpriseCustomerAdminForm, self).clean() if in cleaned_data and not cleaned_data[]: cleaned_data[] = None return cleaned_data
Clean form fields prior to database entry. In this case, the major cleaning operation is substituting a None value for a blank value in the Catalog field.
def flatten_check(out:Tensor, targ:Tensor) -> Tensor: "Check that `out` and `targ` have the same number of elements and flatten them." out,targ = out.contiguous().view(-1),targ.contiguous().view(-1) assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}." return out,targ
Check that `out` and `targ` have the same number of elements and flatten them.
def complete(self): self._set_state(self.COMPLETED) return self.task_spec._on_complete(self)
Called by the associated task to let us know that its state has changed (e.g. from FUTURE to COMPLETED.)
def split_on_condition(seq, condition): l1, l2 = tee((condition(item), item) for item in seq) return (i for p, i in l1 if p), (i for p, i in l2 if not p)
Split a sequence into two iterables without looping twice
def parameter_values(self): for param in self.data.get(, []): cache = ElementCache(data=self.make_request(href=param)) name = .format(cache.type.title()).replace(, ) yield type(name, (SituationParameterValue,), { : cache})(name=cache.name, type=cache.type, href=param)
Parameter values for this inspection situation. This correlate to the the situation_context. :rtype: list(SituationParameterValue)
def build_standard_field(self, field_name, model_field): field_mapping = ClassLookupDict(self.serializer_field_mapping) field_class = field_mapping[model_field] field_kwargs = get_field_kwargs(field_name, model_field) if in field_kwargs: field_class = self.serializer_choice_field valid_kwargs = set(( , , , , , , , , , , , , , )) for key in list(field_kwargs.keys()): if key not in valid_kwargs: field_kwargs.pop(key) if not issubclass(field_class, ModelField): field_kwargs.pop(, None) if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField): field_kwargs.pop(, None) if postgres_fields and isinstance(model_field, postgres_fields.ArrayField): child_model_field = model_field.base_field child_field_class, child_field_kwargs = self.build_standard_field( , child_model_field ) field_kwargs[] = child_field_class(**child_field_kwargs) return field_class, field_kwargs
Create regular model fields.
def pop_header(self, hkey, ignore_error=False): if type(hkey) is not str: try: return self.headers.pop(self.hkeys.pop(hkey)) except: if not ignore_error: print("ERROR: pop_header() could not find hkey "+str(hkey)) return None else: try: hkey = self.hkeys.index(hkey) return self.headers.pop(self.hkeys.pop(hkey)) except: if not ignore_error: print("ERROR: pop_header() could not find hkey "+str(hkey)) return
This will remove and return the specified header value. Parameters ---------- hkey Header key you wish to pop. You can specify either a key string or an index. ignore_error=False Whether to quietly ignore any errors (i.e., hkey not found).
def messages(self): return int(math.floor(((self.limit.unit_value - self.level) / self.limit.unit_value) * self.limit.value))
Return remaining messages before limiting.
def _check_buffer(self, data, ctype): assert ctype in _ffi_types.values() if not isinstance(data, bytes): data = _ffi.from_buffer(data) frames, remainder = divmod(len(data), self.channels * _ffi.sizeof(ctype)) if remainder: raise ValueError("Data size must be a multiple of frame size") return data, frames
Convert buffer to cdata and check for valid size.
def realms(self, details=False): def get_realm_info(realm, realms, satellites, details=False): res = { "name": realm.get_name(), "level": realm.level, "hosts": realm.members, "hostgroups": realm.group_members, "children": {}, "satellites": { } } for child in realm.realm_members: child = realms.find_by_name(child) if not child: continue realm_infos = get_realm_info(child, realms, satellites, details=details) res[][child.get_name()] = realm_infos for sat_type in [, , , , ]: res["satellites"][sat_type + ] = [] sats = realm.get_potential_satellites_by_type(satellites, sat_type) for sat in sats: if details: res["satellites"][sat_type + ][sat.name] = sat.give_satellite_json() else: res["satellites"][sat_type + ].append(sat.name) return res if details is not False: details = bool(details) if not getattr(self.app, , None) or not getattr(self.app, , None): return {: u, : "Not yet available. Please come back later."} res = {} higher_realms = [realm for realm in self.app.conf.realms if realm.level == 0] for realm in higher_realms: res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms, self.app.dispatcher.all_daemons_links) return res
Return the realms / satellites configuration Returns an object containing the hierarchical realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict
def mavlink_packet(self, msg): type = msg.get_type() master = self.master if type in [ ]: ilock = self.get_rc_input(msg, self.interlock_channel) if ilock <= 0: self.console.set_status(, , fg=, row=4) elif ilock >= 1800: self.console.set_status(, , fg=, row=4) else: self.console.set_status(, , fg=, row=4) override = self.get_rc_input(msg, self.override_channel) if override <= 0: self.console.set_status(, , fg=, row=4) elif override >= 1800: self.console.set_status(, , fg=, row=4) else: self.console.set_status(, , fg=, row=4) zeroi = self.get_rc_input(msg, self.zero_I_channel) if zeroi <= 0: self.console.set_status(, , fg=, row=4) elif zeroi >= 1800: self.console.set_status(, , fg=, row=4) else: self.console.set_status(, , fg=, row=4) novtol = self.get_rc_input(msg, self.no_vtol_channel) if novtol <= 0: self.console.set_status(, , fg=, row=4) elif novtol >= 1800: self.console.set_status(, , fg=, row=4) else: self.console.set_status(, , fg=, row=4) if type in [ ]: rsc = self.get_pwm_output(msg, self.rsc_out_channel) if rsc <= 0: self.console.set_status(, , fg=, row=4) elif rsc <= 1200: self.console.set_status(, % rsc, fg=, row=4) elif rsc <= 1600: self.console.set_status(, % rsc, fg=, row=4) else: self.console.set_status(, % rsc, fg=, row=4) thr = self.get_pwm_output(msg, self.fwd_thr_channel) if thr <= 0: self.console.set_status(, , fg=, row=4) elif thr <= 1100: self.console.set_status(, % thr, fg=, row=4) elif thr <= 1500: self.console.set_status(, % thr, fg=, row=4) else: self.console.set_status(, % thr, fg=, row=4) if type in [ ]: rpm = msg.rpm1 if rpm < 1000: rpm_colour = elif rpm < 2000: rpm_colour = else: rpm_colour = self.console.set_status(, % rpm, fg=rpm_colour, row=4)
handle an incoming mavlink packet
def listBlockSummaries(self, block_name="", dataset="", detail=False): if bool(dataset)+bool(block_name)!=1: dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "Dataset or block_names must be specified at a time.") if block_name and isinstance(block_name, basestring): try: block_name = [str(block_name)] except: dbsExceptionHandler("dbsException-invalid-input", "Invalid block_name for listBlockSummaries. ") for this_block_name in block_name: if re.search("[*, %]", this_block_name): dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No wildcards are allowed in block_name list") if re.search("[*, %]", dataset): dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No wildcards are allowed in dataset") data = [] try: with self.dbi.connection() as conn: data = self.dbsBlockSummaryListDAO.execute(conn, block_name, dataset, detail) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc()) dbsExceptionHandler(, dbsExceptionCode[], self.logger.exception, sError) for item in data: yield item
API that returns summary information like total size and total number of events in a dataset or a list of blocks :param block_name: list block summaries for block_name(s) :type block_name: str, list :param dataset: list block summaries for all blocks in dataset :type dataset: str :param detail: list summary by block names if detail=True, default=False :type detail: str, bool :returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
def _compose_restart(services): def _restart_container(client, container): log_to_client(.format(get_canonical_container_name(container))) client.restart(container[], timeout=1) assembled_specs = get_assembled_specs() if services == []: services = [spec.name for spec in assembled_specs.get_apps_and_services()] logging.info(.format(services)) client = get_docker_client() for service in services: container = get_container_for_app_or_service(service, include_exited=True) if container is None: log_to_client(.format(service)) continue stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs) if stopped_linked_containers: log_to_client(.format( stopped_linked_containers, service)) else: _restart_container(client, container)
Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318
def launch_cif_clean(cif_filter, cif_select, group_cif_raw, group_cif_clean, group_structure, group_workchain, node, max_entries, skip_check, parse_engine, daemon): import inspect from datetime import datetime from aiida import orm from aiida.engine import launch from aiida.plugins import DataFactory, WorkflowFactory from aiida_codtools.common.cli import echo_utc from aiida_codtools.common.resources import get_default_options from aiida_codtools.common.utils import get_input_node CifData = DataFactory() CifCleanWorkChain = WorkflowFactory() local_vars = locals() launch_paramaters = {} for arg in inspect.getargspec(launch_cif_clean.callback).args: if arg in local_vars and local_vars[arg]: launch_paramaters[arg] = local_vars[arg] click.echo( * 80) click.echo(.format(datetime.utcnow().isoformat())) click.echo(.format(launch_paramaters)) click.echo( * 80) if group_cif_raw is not None: builder = orm.QueryBuilder() builder.append(orm.Group, filters={: {: group_cif_raw.pk}}, tag=) if skip_check: builder.append(CifData, with_group=, project=[]) else: submitted = orm.QueryBuilder() submitted.append(orm.WorkChainNode, tag=) submitted.append(orm.Group, filters={: {: group_workchain.pk}}, with_node=) submitted.append(orm.CifData, with_outgoing=, tag=, project=[]) submitted_nodes = set(pk for entry in submitted.all() for pk in entry) if submitted_nodes: filters = {: {: submitted_nodes}} else: filters = {} builder.append(CifData, with_group=, filters=filters, project=[]) if max_entries is not None: builder.limit(int(max_entries)) nodes = [entry[0] for entry in builder.all()] elif node is not None: nodes = [node] else: raise click.BadParameter() counter = 0 node_cif_filter_parameters = get_input_node(orm.Dict, { : True, : True, : True, }) node_cif_select_parameters = get_input_node(orm.Dict, { : True, : True, : True, : , : True, }) node_options = get_input_node(orm.Dict, get_default_options()) node_parse_engine = get_input_node(orm.Str, parse_engine) node_site_tolerance = 
get_input_node(orm.Float, 5E-4) node_symprec = get_input_node(orm.Float, 5E-3) for cif in nodes: inputs = { : cif, : cif_filter, : cif_select, : node_cif_filter_parameters, : node_cif_select_parameters, : node_options, : node_parse_engine, : node_site_tolerance, : node_symprec, } if group_cif_clean is not None: inputs[] = group_cif_clean if group_structure is not None: inputs[] = group_structure if daemon: workchain = launch.submit(CifCleanWorkChain, **inputs) echo_utc(.format(cif.pk, CifCleanWorkChain.__name__, workchain.pk)) else: echo_utc(.format(cif.pk, CifCleanWorkChain.__name__)) _, workchain = launch.run_get_node(CifCleanWorkChain, **inputs) if group_workchain is not None: group_workchain.add_nodes([workchain]) counter += 1 if max_entries is not None and counter >= max_entries: break click.echo( * 80) click.echo(.format(counter)) click.echo(.format(datetime.utcnow().isoformat())) click.echo( * 80)
Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes. It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if successful, will be added to the `group-structure` group.
def merge(self, other_roc): if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds): self.contingency_tables += other_roc.contingency_tables else: print("Input table thresholds do not match.")
Ingest the values of another DistributedROC object into this one and update the statistics inplace. Args: other_roc: another DistributedROC object.
def create_message(self, channel_id, text): baseurl = self.rest_baseurl + \ .format(channel_id) requests.post(baseurl, headers=self.headers, data=json.dumps({: text}))
Sends a message to a Discord channel or user via REST API Args: channel_id (string): ID of destingation Discord channel text (string): Content of message
def OSPFNeighborState_NeighborState(self, **kwargs): config = ET.Element("config") OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream") NeighborState = ET.SubElement(OSPFNeighborState, "NeighborState") NeighborState.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
def toVerticalPotential(Pot,R,phi=None): Pot= flatten(Pot) if _APY_LOADED: if isinstance(R,units.Quantity): if hasattr(Pot,): R= R.to(units.kpc).value/Pot._ro else: R= R.to(units.kpc).value/Pot[0]._ro if isinstance(phi,units.Quantity): phi= phi.to(units.rad).value if isinstance(Pot,list): out= [] for pot in Pot: if isinstance(pot,linearPotential): out.append(pot) elif isinstance(pot,Potential): out.append(verticalPotential(pot,R,phi=phi)) elif isinstance(pot,planarPotential): raise PotentialError("Input to cannot be a planarPotential") else: raise PotentialError("Input to is neither an RZPotential-instance or a list of such instances") return out elif isinstance(Pot,Potential): return verticalPotential(Pot,R,phi=phi) elif isinstance(Pot,linearPotential): return Pot elif isinstance(Pot,planarPotential): raise PotentialError("Input to cannot be a planarPotential") else: raise PotentialError("Input to is neither an Potential-instance or a list of such instances")
NAME: toVerticalPotential PURPOSE: convert a Potential to a vertical potential at a given R INPUT: Pot - Potential instance or list of such instances R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity) phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric OUTPUT: (list of) linearPotential instance(s) HISTORY: 2018-10-07 - Written - Bovy (UofT)
def _register_factory(self, factory_name, factory, override): if not factory_name or not is_string(factory_name): raise ValueError("A factory name must be a non-empty string") if not inspect.isclass(factory): raise TypeError( "Invalid factory class ".format(type(factory).__name__) ) with self.__factories_lock: if factory_name in self.__factories: if override: _logger.info("Overriding factory ", factory_name) else: raise ValueError( " factory already exist".format(factory_name) ) self.__factories[factory_name] = factory self._fire_ipopo_event( constants.IPopoEvent.REGISTERED, factory_name )
Registers a component factory :param factory_name: The name of the factory :param factory: The factory class object :param override: If true, previous factory is overridden, else an exception is risen if a previous factory with that name already exists :raise ValueError: The factory name already exists or is invalid :raise TypeError: Invalid factory type
def pdfdump(self, filename=None, **kargs): canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=".pdf") canvas.writePDFfile(fname) subprocess.Popen([conf.prog.pdfreader, fname+".pdf"]) else: canvas.writePDFfile(filename)
pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called.
def qs_alphabet_filter(parser, token): bits = token.split_contents() if len(bits) == 3: return AlphabetFilterNode(bits[1], bits[2]) elif len(bits) == 4: if "=" in bits[3]: key, val = bits[3].split() return AlphabetFilterNode(bits[1], bits[2], strip_params=val) else: return AlphabetFilterNode(bits[1], bits[2], template_name=bits[3]) elif len(bits) == 5: key, val = bits[4].split() return AlphabetFilterNode(bits[1], bits[2], bits[3], bits[4]) else: raise TemplateSyntaxError("%s is called with a queryset and field " "name, and optionally a template." % bits[0])
The parser/tokenizer for the queryset alphabet filter. {% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %} {% qs_alphabet_filter objects lastname myapp/template.html %} The template name is optional and uses alphafilter/alphabet.html if not specified
def do_rewind(self, line): self.print_response("Rewinding from frame %s to 0" % self.bot._frame) self.bot._frame = 0
rewind
def list_sessions(self, updated_since=None, max_results=100, skip=0, **kwargs): if self.entity and not self.requires_session: raise ValueError("This is not a sessionful entity.") message = { : updated_since or datetime.datetime.utcfromtimestamp(0), : types.AMQPInt(skip), : types.AMQPInt(max_results), } with BaseHandler(self.entity_uri, self.auth_config, debug=self.debug, **kwargs) as handler: return handler._mgmt_request_response( REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION, message, mgmt_handlers.list_sessions_op)
List session IDs. List the Session IDs with pending messages in the queue where the state of the session has been updated since the timestamp provided. If no timestamp is provided, all will be returned. If the state of a session has never been set, it will not be returned regardless of whether there are messages pending. :param updated_since: The UTC datetime from which to return updated pending Session IDs. :type updated_since: datetime.datetime :param max_results: The maximum number of Session IDs to return. Default value is 100. :type max_results: int :param skip: The page value to jump to. Default value is 0. :type skip: int :rtype: list[str] Example: .. literalinclude:: ../examples/test_examples.py :start-after: [START list_sessions_service_bus] :end-before: [END list_sessions_service_bus] :language: python :dedent: 4 :caption: Get the Ids of session which have messages pending in the queue
def mcmc(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400, stdevs=0.1, start = 0.5, **problem): if in problem: numpy.random.seed(problem[]) n_params = len(parameter_names) def like(cube): cube = numpy.array(cube) if (cube <= 1e-10).any() or (cube >= 1-1e-10).any(): return -1e100 params = transform(cube) return loglikelihood(params) start = start + numpy.zeros(n_params) stdevs = stdevs + numpy.zeros(n_params) def compute_stepwidths(chain): return numpy.std(chain, axis=0) / 3 import matplotlib.pyplot as plt plt.figure(figsize=(7, 7)) steps = numpy.array([0.1]*(n_params)) print chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / 2, adapt=True) steps = compute_stepwidths(chain) print chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nburn / 2, adapt=True) steps = compute_stepwidths(chain) print chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nsteps) chain = numpy.array(chain) i = numpy.argmax(prob) final = chain[-1] print chain = numpy.array([transform(params) for params in chain]) return dict(start=chain[-1], maximum=chain[i], seeds=[final, chain[i]], chain=chain, method=)
**Metropolis Hastings MCMC** with automatic step width adaption. Burnin period is also used to guess steps. :param nburn: number of burnin steps :param stdevs: step widths to start with
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1): r G_tp = m/(pi/4*D**2) v_l = m*(1-x)/rhol/(pi/4*D**2) Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D) fd_l = friction_factor(Re=Re_l, eD=roughness/D) dP_l = fd_l*L/D*(0.5*rhol*v_l**2) v_g = m*x/rhog/(pi/4*D**2) Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D) fd_g = friction_factor(Re=Re_g, eD=roughness/D) dP_g = fd_g*L/D*(0.5*rhog*v_g**2) X = (dP_l/dP_g)**0.5 if G_tp >= 200: phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45 else: v_lo = m/rhol/(pi/4*D**2) Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D) C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1 phi_g2 = 1 + C*X + X**2 return dP_g*phi_g2
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
def update(self, data, length=None):
    """Feed a chunk of bytes into the running digest.

    @param data - byte string to hash
    @param length - number of leading bytes of ``data`` to hash; the whole
        string is hashed when omitted
    """
    if self.digest_finalized:
        raise DigestError("No updates allowed")
    if not isinstance(data, bintype):
        raise TypeError("A byte string is expected")
    data_len = len(data)
    if length is None:
        length = data_len
    elif length > data_len:
        raise ValueError("Specified length is greater than length of data")
    # EVP_DigestUpdate returns 1 on success.
    status = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length)
    if status != 1:
        raise DigestError("Unable to update digest")
Hashes the given byte string
@param data - string to hash
@param length - if not specified, the entire string is hashed, otherwise only the first length bytes
def press_event(self):
    """The mouse press event that initiated a mouse drag, if any."""
    press = self.mouse_event.press_event
    if press is None:
        return None
    # Return a copy of this event carrying the originating press event.
    clone = self.copy()
    clone.mouse_event = press
    return clone
The mouse press event that initiated a mouse drag, if any.
def _observe_timeseries_fn(timeseries):
    """Build an observation_noise_fn that observes a Tensor timeseries."""
    def observation_noise_fn(t):
        # Noiseless observation: the distribution mean is the series value
        # at step t and the diagonal scale is identically zero.
        loc = timeseries[..., t, :]
        return tfd.MultivariateNormalDiag(loc=loc,
                                          scale_diag=tf.zeros_like(loc))
    return observation_noise_fn
Build an observation_noise_fn that observes a Tensor timeseries.
def get_ddG_results(self):
    """Parse the averaged BuildModel results and return predicted ddGs.

    A positive ddG means the mutation(s) is destabilizing, a negative
    one stabilizing.

    Returns:
        dict: Mapping of mutation group to a (ddG, ddG_sd) tuple.
    """
    # NOTE(review): the column-name string literals of this method were
    # lost (empty subscripts below); presumably an identifier column plus
    # the ddG value and its standard deviation -- restore before use.
    foldx_avg_df = self.df_mutation_ddG_avg
    foldx_avg_ddG = {}
    # Select the relevant columns and iterate over row dicts.
    results = foldx_avg_df[[, , ]].T.to_dict().values()
    for r in results:
        # The numeric index is the last token of the identifier field.
        ident = r[].split()[-1]
        ddG = r[]
        ddG_sd = r[]
        # Map the numeric index back to the originating mutation group.
        foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd)
    return foldx_avg_ddG
Parse the results from BuildModel and get the delta delta G's. A positive ddG means that the mutation(s) is destabilzing, negative means stabilizing. - highly stabilising (ΔΔG < −1.84 kcal/mol); - stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol); - slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol); - neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol); - slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol); - destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol); - highly destabilising (ΔΔG > +1.84 kcal/mol). Returns: dict: Dictionary of mutation group to predicted ddG.
def build_dir_tree(self, files):
    """Convert a flat file dict into the tree format used for storage.

    :param files: mapping of path-like name -> file-info dict
    :return: nested dict of directory nodes

    NOTE(review): the dict-key string literals below were lost (empty
    subscripts); they presumably name the file and subdirectory buckets
    of each node and the file-name key -- restore before use. Also note
    ``iteritems`` is the Python 2 API.
    """
    def helper(split_files):
        # One tree node: a bucket for files and a bucket for subdirectories.
        this_dir = { : {}, : {}}
        dirs = defaultdict(list)
        for fle in split_files:
            index = fle[0]; fileinfo = fle[1]
            if len(index) == 1:
                # Leaf: a single remaining path component is the file name.
                fileinfo[] = index[0]
                this_dir[][fileinfo[]] = fileinfo
            elif len(index) > 1:
                # Descend: group the path remainder under its first component.
                dirs[index[0]].append((index[1:], fileinfo))
        # Recurse into each collected subdirectory.
        for name,info in dirs.iteritems():
            this_dir[][name] = helper(info)
        return this_dir
    # Split each path into components, dropping the leading empty segment.
    return helper([(name.split()[1:], file_info) for name, file_info in files.iteritems()])
Convert a flat file dict into the tree format used for storage
def version(self):
    """Compute the version identifier for this functional node.

    Hashes the function's bytecode and referenced names; when the function
    has a closure and ``closure_fingerprint`` is set, a fingerprint of the
    closed-over values is folded into the hash as well.
    """
    # Callable objects expose their code via __call__; plain functions
    # expose it directly.
    try:
        code = self.func.__call__.__code__
    except AttributeError:
        code = self.func.__code__
    digest = md5()
    digest.update(code.co_code)
    digest.update(str(code.co_names).encode())
    try:
        closure = self.func.__closure__
    except AttributeError:
        return digest.hexdigest()
    if closure is None or self.closure_fingerprint is None:
        return digest.hexdigest()
    freevars = {name: cell.cell_contents
                for name, cell in zip(code.co_freevars, closure)}
    digest.update(self.closure_fingerprint(freevars).encode())
    return digest.hexdigest()
Compute the version identifier for this functional node using the func code and local names. Optionally, also allow closed-over variable values to affect the version number when closure_fingerprint is specified
def _srm(self, data):
    """Expectation-Maximization algorithm for fitting the probabilistic SRM.

    Parameters
    ----------
    data : list of 2D arrays, element i has shape=[voxels_i, samples]
        Each element in the list contains the fMRI data of one subject.

    Returns
    -------
    w : list of array, element i has shape=[voxels_i, features]
        The orthogonal transforms (mappings) W_i for each subject.
    s : array, shape=[features, samples]
        The shared response.
    """
    # NOTE(review): the logger format-string literals below were lost, and
    # the iteration loop appears truncated (it only logs) -- the EM update
    # steps presumably belong inside it. Restore before use.
    subjects = len(data)  # NOTE(review): unused here; likely consumed by the stripped update steps
    self.random_state_ = np.random.RandomState(self.rand_seed)
    # One independent RNG per subject, each seeded from the master RNG.
    random_states = [
        np.random.RandomState(self.random_state_.randint(2 ** 32))
        for i in range(len(data))]
    # Initialize the per-subject orthogonal transforms and shared response.
    w, _ = _init_w_transforms(data, self.features, random_states)
    shared_response = self._compute_shared_response(data, w)
    # Report the initial objective only when INFO logging is enabled,
    # since computing it is not free.
    if logger.isEnabledFor(logging.INFO):
        objective = self._objective_function(data, w, shared_response)
        logger.info( % objective)
    # Main loop of the algorithm.
    for iteration in range(self.n_iter):
        logger.info( % (iteration + 1))
    return w, shared_response
Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. s : array, shape=[features, samples] The shared response.
def _parse_reported_packages_from_install_output(output):
    '''
    Parses the output of "opkg install" to determine what packages would have
    been installed by an operation run with the --noaction flag.

    We are looking for lines like:
        Installing <package> (<version>) on <target>
    or
        Upgrading <package> from <oldVersion> to <version> on root

    :param output: raw stdout of the opkg invocation
    :return: dict mapping package name -> version
    '''
    reported_pkgs = {}
    # NOTE(review): the original regex literals were lost; these are
    # reconstructed from the line formats documented above -- confirm
    # against real opkg output.
    install_pattern = re.compile(
        r'^Installing\s(?P<package>.*?)\s\((?P<version>.*?)\)\son\s(?P<target>.*?)$')
    upgrade_pattern = re.compile(
        r'^Upgrading\s(?P<package>.*?)\sfrom\s(?P<oldVersion>.*?)\sto\s(?P<version>.*?)\son\s(?P<target>.*?)$')
    for line in salt.utils.itertools.split(output, '\n'):
        # Try the install form first, then the upgrade form.
        match = install_pattern.match(line)
        if match is None:
            match = upgrade_pattern.match(line)
        if match:
            reported_pkgs[match.group('package')] = match.group('version')
    return reported_pkgs
Parses the output of "opkg install" to determine what packages would have been installed by an operation run with the --noaction flag. We are looking for lines like: Installing <package> (<version>) on <target> or Upgrading <package> from <oldVersion> to <version> on root
def parse(self, fail_callback=None):
    """Parse text fields and file fields for values and files.

    :param fail_callback: optional callable invoked just before the
        request is rejected because a required field or file is missing.
    """
    # NOTE(review): the dict-key string literals (field[...] / file[...])
    # were lost below; they presumably select the argument's name and its
    # required flag -- restore before use.
    for field in self.field_arguments:
        self.values[field[]] = self.__get_value(field[])
        # Missing value on a required field aborts the request.
        if self.values[field[]] is None and field[]:
            if fail_callback is not None:
                fail_callback()
            self.__invalid_request(field[])
    for file in self.file_arguments:
        self.files[file[]] = self.__get_file(file)
        # Missing file on a required file argument aborts the request.
        if self.files[file[]] is None and file[]:
            if fail_callback is not None:
                fail_callback()
            self.__invalid_request(file[])
Parse text fields and file fields for values and files
def parse(self, input_text, syncmap):
    """
    Read from SMIL file.

    Limitations:
    1. parses only ``<par>`` elements, in order
    2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
    3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
    """
    from lxml import etree

    def read_clip_time(value):
        # hh:mm:ss.mmm when a colon is present, ss.mmm otherwise
        if ":" in value:
            return gf.time_from_hhmmssmmm(value)
        return gf.time_from_ssmmm(value)

    smil_ns = "{http://www.w3.org/ns/SMIL}"
    root = etree.fromstring(gf.safe_bytes(input_text))
    for par in root.iter(smil_ns + "par"):
        for child in par:
            if child.tag == (smil_ns + "text"):
                # The fragment identifier is the URL fragment of the src attribute.
                identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1])
            elif child.tag == (smil_ns + "audio"):
                begin = read_clip_time(child.get("clipBegin"))
                end = read_clip_time(child.get("clipEnd"))
                self._add_fragment(
                    syncmap=syncmap,
                    identifier=identifier,
                    lines=[u""],
                    begin=begin,
                    end=end
                )
Read from SMIL file. Limitations: 1. parses only ``<par>`` elements, in order 2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected) 3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
def register_func_list(self, func_and_handler):
    """Register (predicate, handler) pairs; a handler is used when its
    predicate function matches the type."""
    for entry in func_and_handler:
        predicate, handler = entry
        self._function_dispatch.register(predicate, handler)
    # Cached dispatch decisions may be stale after new registrations.
    self.dispatch.cache_clear()
register a function to determine if the handler should be used for the type
def callback_prototype(prototype):
    """Decorator to process a callback prototype.

    A callback prototype is a function whose signature includes all the
    values that will be passed by the callback API in question. The
    original function is returned with a ``prototype.adapt`` attribute
    which can be used to prepare third party callbacks.
    """
    proto_sig = signature(prototype)
    pos_names, kw_names = [], []
    for name, param in proto_sig.parameters.items():
        if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
            raise TypeError("*args/**kwargs not supported in prototypes")
        # Parameters with defaults, and keyword-only parameters, are
        # passed by keyword; everything else positionally.
        if (param.default is not Parameter.empty) \
                or (param.kind == Parameter.KEYWORD_ONLY):
            kw_names.append(name)
        else:
            pos_names.append(name)

    proto_kwargs = dict.fromkeys(kw_names)

    def adapt(callback):
        """Wrap a third-party callback so it accepts the prototype's arguments."""
        cb_sig = signature(callback)
        # Fast path: the callback already accepts everything the
        # prototype will pass — return it unchanged.
        try:
            cb_sig.bind(*pos_names, **proto_kwargs)
            return callback
        except TypeError:
            pass

        # Work out which prototype arguments the callback cannot accept.
        spare_pos = list(pos_names)
        spare_kw = dict(proto_kwargs)
        unrecognised = []
        for name, param in cb_sig.parameters.items():
            kind = param.kind
            if kind == Parameter.POSITIONAL_ONLY:
                if spare_pos:
                    spare_pos.pop(0)
                else:
                    unrecognised.append(name)
            elif kind == Parameter.POSITIONAL_OR_KEYWORD:
                if (param.default is not Parameter.empty) and (name in spare_kw):
                    spare_kw.pop(name)
                elif spare_pos:
                    spare_pos.pop(0)
                else:
                    unrecognised.append(name)
            elif kind == Parameter.VAR_POSITIONAL:
                # *args swallows every remaining positional argument.
                spare_pos = []
            elif kind == Parameter.KEYWORD_ONLY:
                if name in spare_kw:
                    spare_kw.pop(name)
                else:
                    unrecognised.append(name)
            else:
                # VAR_KEYWORD swallows every keyword argument.
                spare_kw = {}

        if unrecognised:
            raise TypeError("Function {!r} had unmatched arguments: {}".format(callback, unrecognised))

        n_positional = len(pos_names) - len(spare_pos)

        @wraps(callback)
        def adapted(*args, **kwargs):
            # Drop the positional and keyword arguments the callback
            # cannot take, then invoke it.
            args = args[:n_positional]
            for name in spare_kw:
                kwargs.pop(name)
            return callback(*args, **kwargs)

        return adapted

    prototype.adapt = adapt
    return prototype
Decorator to process a callback prototype. A callback prototype is a function whose signature includes all the values that will be passed by the callback API in question. The original function will be returned, with a ``prototype.adapt`` attribute which can be used to prepare third party callbacks.
def _make_exception(self, response):
    """Construct the exception object holding the important values
    returned by the response.

    :return: The exception instance
    :rtype: PocketException
    """
    headers = response.headers
    limit_headers = []
    # NOTE(review): the header-name string literals were lost below;
    # presumably the X-Limit-* rate-limit headers plus X-Error-Code and
    # X-Error -- restore before use.
    if in headers:
        limit_headers = [
            headers[],
            headers[],
            headers[],
            headers[],
            headers[],
            headers[]
        ]
    x_error_code = int(headers[])
    exc = PocketException
    # Authentication-related error codes map to the auth exception type.
    if x_error_code in self.auth_error_codes:
        exc = PocketAutException
    return exc(
        response.status_code,
        x_error_code,
        headers[],
        *limit_headers
    )
In case of exception, construct the exception object that holds all important values returned by the response. :return: The exception instance :rtype: PocketException
def _basis_notes_path(name, data_dir):
    """Form a path to the notes file for a basis set.

    :param name: basis set name
    :param data_dir: data directory (normalized via fix_data_dir)
    :return: path to the notes file
    """
    data_dir = fix_data_dir(data_dir)
    bs_data = _get_basis_metadata(name, data_dir)
    # NOTE(review): the metadata-key and filename-suffix string literals
    # were lost below -- restore before use.
    filebase = bs_data[]
    file_path = os.path.join(data_dir, filebase + )
    return file_path
Form a path to the notes for a basis set
def _deserialize(self, value, attr, data):
    """Deserialize the string value, stripping surrounding whitespace."""
    return super(TrimmedString, self)._deserialize(value, attr, data).strip()
Deserialize string value.
def police_priority_map_exceed_map_pri3_exceed(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config element for the ``exceed/map-pri3-exceed``
    leaf of a police-priority-map and passes it to the callback.

    Keyword Args:
        name: police-priority-map name (key element).
        map_pri3_exceed: value for the exceed/map-pri3-exceed leaf.
        callback: optional callable invoked with the config element;
            defaults to ``self._callback``.
    """
    config = ET.Element("config")
    police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
    name_key = ET.SubElement(police_priority_map, "name")
    # NOTE(review): the kwargs.pop key literals were lost in the original
    # (pop() with no argument raises TypeError at runtime); they are
    # reconstructed from the element names, following the pattern of the
    # other auto-generated methods.
    name_key.text = kwargs.pop('name')
    exceed = ET.SubElement(police_priority_map, "exceed")
    map_pri3_exceed = ET.SubElement(exceed, "map-pri3-exceed")
    map_pri3_exceed.text = kwargs.pop('map_pri3_exceed')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def plot_punchcard(df, metric=, title=, by=None):
    """Plot a day-of-week vs hour-of-day punchcard from a dataframe.

    NOTE(review): multiple string literals were lost in this function
    (the metric/title defaults, color and label strings, the subplot
    spec, day/hour tick labels); the signature is currently invalid
    syntax -- restore the literals before use. When ``by`` is None,
    ``unique_vals`` is empty and the loop body never runs, which also
    looks like a stripped one-element list.

    :param df: dataframe with hour-of-day / day-of-week columns
    :param metric: column used for marker size
    :param title: plot title
    :param by: optional column to facet by (one figure per unique value)
    """
    if not HAS_MPL:
        raise ImportError()
    if by is not None:
        unique_vals = set(df[by].values.tolist())
    else:
        unique_vals = []
    # One figure per unique facet value (or one figure total).
    for idx, val in enumerate(unique_vals):
        if by is not None:
            sub_df = df[df[by] == val]
        else:
            sub_df = df
        fig = plt.figure(figsize=(8, title and 3 or 2.5), facecolor=)
        ax = fig.add_subplot(, axisbg=)
        fig.subplots_adjust(left=0.06, bottom=0.04, right=0.98, top=0.95)
        if by is not None:
            ax.set_title(title + % (str(val), ), y=0.96).set_color()
        else:
            ax.set_title(title, y=0.96).set_color()
        ax.set_frame_on(False)
        # Marker size is driven by the chosen metric column.
        ax.scatter(sub_df[], sub_df[], s=sub_df[metric], c=, edgecolor=)
        for line in ax.get_xticklines() + ax.get_yticklines():
            line.set_alpha(0.0)
        # Draw the axis lines manually, since the frame is off.
        dist = -0.8
        ax.plot([dist, 23.5], [dist, dist], c=)
        ax.plot([dist, dist], [dist, 6.4], c=)
        ax.set_xlim(-1, 24)
        ax.set_ylim(-0.9, 6.9)
        ax.set_yticks(range(7))
        for tx in ax.set_yticklabels([, , , , , , ]):
            tx.set_color()
            tx.set_size()
        ax.set_xticks(range(24))
        for tx in ax.set_xticklabels([ % x for x in range(24)]):
            tx.set_color()
            tx.set_size()
        ax.set_aspect()
        # Only block on the final figure so all facets stay open.
        if idx + 1 == len(unique_vals):
            plt.show(block=True)
        else:
            plt.show(block=False)
Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard :param df: :param metric: :param title: :return:
def cache_name(self):
    """Used in django 1.x"""
    # Localize the accessor for the currently active language.
    localized = build_localized_fieldname(self.accessor, get_language())
    return "_%s_cache" % localized
Used in django 1.x
def _execute(self, execute_inputs, execute_outputs, backward_execution=False):
    """Calls the custom execute function of the script.py of the state."""
    self._script.build_module()
    result = self._script.execute(self, execute_inputs, execute_outputs, backward_execution)

    # Backward execution produces no outcome to resolve.
    if backward_execution:
        return

    if self.preempted:
        return Outcome(-2, "preempted")

    # The script may return either an outcome id ...
    if result in self.outcomes:
        return self.outcomes[result]
    # ... or an outcome name.
    for outcome_id, outcome in self.outcomes.items():
        if outcome.name == result:
            return self.outcomes[outcome_id]

    logger.error("Returned outcome of {0} not existing: {1}".format(self, result))
    return Outcome(-1, "aborted")
Calls the custom execute function of the script.py of the state
def length_of_national_destination_code(numobj):
    """Return the length of the national destination code (NDC) of a number.

    The NDC is the group of digits that follows the country calling code
    when the number is formatted internationally. Returns 0 when the number
    has no NDC (fewer than four groups in the formatted string).

    Arguments:
    numobj -- The PhoneNumber object to find the length of the NDC from.
    """
    # Strip any extension first, since it would otherwise appear in the
    # formatted string and corrupt the group split.
    if numobj.extension is None:
        stripped = numobj
    else:
        stripped = PhoneNumber()
        stripped.merge_from(numobj)
        stripped.extension = None

    formatted = format_number(stripped, PhoneNumberFormat.INTERNATIONAL)
    groups = re.split(NON_DIGITS_PATTERN, formatted)
    # With three or fewer groups there is no subscriber-number part
    # following the NDC, hence no NDC.
    if len(groups) <= 3:
        return 0

    if number_type(numobj) == PhoneNumberType.MOBILE:
        # For countries with a mobile token, the NDC spans the token
        # group and the following group.
        mobile_token = country_mobile_token(numobj.country_code)
        if mobile_token != U_EMPTY_STRING:
            return len(groups[2]) + len(groups[3])
    return len(groups[2])
Return length of the national destination code code for a number. Gets the length of the national destination code (NDC) from the PhoneNumber object passed in, so that clients could use it to split a national significant number into NDC and subscriber number. The NDC of a phone number is normally the first group of digit(s) right after the country calling code when the number is formatted in the international format, if there is a subscriber number part that follows. N.B.: similar to an area code, not all numbers have an NDC! An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("18002530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ndc_len = phonenumbers.length_of_national_destination_code(numobj) >>> if ndc_len > 0: ... national_destination_code = nsn[:ndc_len] ... subscriber_number = nsn[ndc_len:] ... else: ... national_destination_code = "" ... subscriber_number = nsn Refer to the unittests to see the difference between this function and length_of_geographical_area_code. Arguments: numobj -- The PhoneNumber object to find the length of the NDC from. Returns the length of NDC of the PhoneNumber object passed in, which could be zero.
def get_offset(self, envelope):
    """Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).

    Arguments:
    envelope -- coordinate extent tuple or Envelope
    """
    # NOTE: collections.Sequence is the pre-3.3 location (collections.abc
    # in modern Python) -- kept as-is to preserve behavior.
    if isinstance(envelope, collections.Sequence):
        envelope = Envelope(envelope)
    overlaps = (self.envelope.contains(envelope) or
                self.envelope.intersects(envelope))
    if not overlaps:
        raise ValueError()
    # Transform the upper-left and lower-right corners to pixel coordinates.
    ul_px, lr_px = self.affine.transform((envelope.ul, envelope.lr))
    sizes = []
    for size, origin, dest in zip(self.size, ul_px, lr_px):
        # Clamp to the raster edge; a degenerate window still spans 1 pixel.
        sizes.append((min(dest, size) - origin) or 1)
    return ul_px + tuple(sizes)
Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size). Arguments: envelope -- coordinate extent tuple or Envelope
async def get_entry(self, entry):
    """GET /api/entries/{entry}.{_format}

    Retrieve a single entry.

    :param entry: the entry ID (an integer)
    :return: data related to the requested format
    """
    # NOTE(review): the token parameter key and the URL template string
    # literals were lost below (currently invalid syntax) -- restore
    # before use.
    params = {: self.token}
    url = .format(entry=entry, ext=self.format)
    return await self.query(url, "get", **params)
GET /api/entries/{entry}.{_format} Retrieve a single entry :param entry: \w+ an integer The Entry ID :return data related to the ext
def parse_func_body(self):
    """Parse a function body: '(' param-list ')' block 'end'.

    On success, commits the lexer checkpoint and returns a tuple
    (args, body); on any failure the checkpoint is aborted and the
    failure sentinel is returned.
    """
    self.save()  # checkpoint the lexer so we can roll back on failure
    self._expected = []
    if self.next_is_rc(Tokens.OPAR, False):
        self.handle_hidden_right()
        args = self.parse_param_list()
        if args is not None:  # may legitimately be empty
            if self.next_is_rc(Tokens.CPAR, False):
                self.handle_hidden_right()
                body = self.parse_block()
                if body:
                    self._expected = []
                    # The body must be terminated by the END token.
                    token = self.next_is_rc(Tokens.END, False)
                    if token:
                        body.stop_char = token.stop
                        self.success()  # commit the checkpoint
                        return args, body
                    else:
                        self.abort()
                else:
                    self.abort()
    return self.failure()
On success, return a tuple (args, body)
def no_intersection(to_validate, constraint, violation_cfg):
    """Return the violation config when the two sets share no element.

    An empty constraint never triggers a violation. When a violation is
    reported, the violation message is formatted with the constraint.

    :param to_validate: values under validation
    :param constraint: values of which at least one must be present
    :param violation_cfg: violation config dict (message mutated in place)
    :return: violation_cfg on violation, None otherwise
    """
    if len(constraint) == 0:
        return None
    if set(constraint).intersection(to_validate):
        return None
    message = violation_cfg[Check.CFG_KEY_VIOLATION_MSG]
    violation_cfg[Check.CFG_KEY_VIOLATION_MSG] = message.format(constraint)
    return violation_cfg
Returns violation message if validated and constraint sets have no intersection :param to_validate: :param constraint: :param violation_cfg: :return:
def pack_req(cls, trd_side, order_type, price, qty, code, adjust_limit, trd_env, sec_mkt_str, acc_id, trd_mkt, conn_id):
    """Convert a user place-order request into a PLS protobuf request."""
    from futuquant.common.pb.Trd_PlaceOrder_pb2 import Request

    req = Request()
    serial_no = get_unique_id32()
    c2s = req.c2s

    # Packet identification
    c2s.packetID.serialNo = serial_no
    c2s.packetID.connID = conn_id

    # Trading header: environment, account and market
    c2s.header.trdEnv = TRD_ENV_MAP[trd_env]
    c2s.header.accID = acc_id
    c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt]

    # Order parameters
    c2s.trdSide = TRD_SIDE_MAP[trd_side]
    c2s.orderType = ORDER_TYPE_MAP[order_type]
    c2s.code = code
    c2s.qty = qty
    c2s.price = price
    c2s.adjustPrice = adjust_limit != 0
    c2s.adjustSideAndLimit = adjust_limit

    # Map the security-market string to the protobuf trade security
    # market, falling back to Unknown at each step.
    qot_mkt = MKT_MAP.get(sec_mkt_str, Qot_Common_pb2.QotMarket_Unknown)
    c2s.secMarket = QOT_MARKET_TO_TRD_SEC_MARKET_MAP.get(
        qot_mkt, Trd_Common_pb2.TrdSecMarket_Unknown)

    return pack_pb_req(req, ProtoId.Trd_PlaceOrder, conn_id, serial_no)
Convert from user request for place order to PLS request
def do_erase(self):
    """! @brief Handle 'erase' subcommand."""
    self._increase_logging(["pyocd.tools.loader", "pyocd"])

    args = self._args
    session = ConnectHelper.session_with_chosen_probe(
        project_dir=args.project_dir,
        config_file=args.config,
        user_script=args.script,
        no_config=args.no_config,
        pack=args.pack,
        unique_id=args.unique_id,
        target_override=args.target_override,
        frequency=args.frequency,
        blocking=False,
        **convert_session_options(args.options))
    # No session means no probe was selected.
    if session is None:
        sys.exit(1)

    with session:
        # Default to sector erase when no mode was given on the command line.
        erase_mode = args.erase_mode or loader.FlashEraser.Mode.SECTOR
        eraser = loader.FlashEraser(session, erase_mode)
        eraser.erase(flatten_args(args.addresses))
! @brief Handle 'erase' subcommand.