code: string (lengths 64 to 7.01k) | docstring: string (lengths 2 to 15.8k)
#vtb def delete(block_id): _url = get_root_url() try: DB.delete_processing_block(block_id) response = dict(message=, id=.format(block_id), links=dict(list=.format(_url), home=.format(_url))) return response, HTTPStatus.OK except RuntimeError as error: response = dict(error=. format(block_id), reason=str(error), links=dict(list=.format(_url), home=.format(_url))) return response, HTTPStatus.OK
Processing block detail resource.
#vtb def total_stored(self, wanted, slots=None):
    if slots is None:
        slots = self.window.slots
    wanted = make_slot_check(wanted)
    return sum(slot.amount for slot in slots if wanted(slot))
Calculates the total number of items of that type in the current window or given slot range.
Args:
    wanted: function(Slot) or Slot or itemID or (itemID, metadata)
    slots: list of slots to search; defaults to the current window's slots
#vtb def select_entry(self, core_element_id, by_cursor=True):
    for row_num, element_row in enumerate(self.list_store):
        if element_row[self.ID_STORAGE_ID] == core_element_id:
            if by_cursor:
                self.tree_view.set_cursor(row_num)
            else:
                self.tree_view.get_selection().select_path((row_num, ))
            break
Selects the row entry belonging to the given core_element_id by cursor or tree selection
#vtb def get_activities_for_objective(self, objective_id=None): if objective_id is None: raise NullArgument() url_path = construct_url(, bank_id=self._catalog_idstr, obj_id=objective_id) return objects.ActivityList(self._get_request(url_path))
Gets the activities for the given objective. In plenary mode, the returned list contains all of the activities mapped to the objective Id or an error results if an Id in the supplied list is not found or inaccessible. Otherwise, inaccessible Activities may be omitted from the list and may present the elements in any order including returning a unique set. arg: objectiveId (osid.id.Id): Id of the Objective return: (osid.learning.ActivityList) - list of enrollments raise: NotFound - objectiveId not found raise: NullArgument - objectiveId is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method is must be implemented.
#vtb def get_broadcast_date(pid): print("Extracting first broadcast date...") broadcast_etree = open_listing_page(pid + ) original_broadcast_date, = broadcast_etree.xpath( ) return original_broadcast_date
Take BBC pid (string); extract and return broadcast date as string.
#vtb def seek(self, timestamp):
    if not re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
        raise ValueError('invalid timestamp, use HH:MM:SS format')
    self.avTransport.Seek([
        ('InstanceID', 0),
        ('Unit', 'REL_TIME'),
        ('Target', timestamp)
    ])
Seek to a given timestamp in the current track, specified in the format of HH:MM:SS or H:MM:SS. Raises: ValueError: if the given timestamp is invalid.
#vtb def stream_fastq_full(fastq, threads):
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    # Farm records out to a process pool and yield per-read metrics as they arrive.
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
            yield results
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_length
#vtb def expand_recurring(number, repeat=5):
    if "[" in number:
        pattern_index = number.index("[")
        pattern = number[pattern_index + 1:-1]
        number = number[:pattern_index]
        number = number + pattern * (repeat + 1)
    return number
Expands a recurring pattern within a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) repeat: the number of times to expand the pattern. Returns: The original number with recurring pattern expanded. Example: >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3) (1, '.', 0, 9, 9, 9, 9)
#vtb def is_revision_chain_placeholder(pid): return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter( pid__did=pid ).exists()
For replicas, the PIDs referenced in revision chains are reserved for use by other replicas.
#vtb def update_ports(self, ports, id_or_uri, timeout=-1): resources = merge_default_values(ports, {: }) uri = self._client.build_uri(id_or_uri) + "/update-ports" return self._client.update(resources, uri, timeout)
Updates the interconnect ports. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. ports (list): Ports to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: The interconnect.
#vtb def qs_from_dict(qsdict, prefix=""):
    prefix = prefix + "." if prefix else ""

    def descend(qsd):
        for key, val in sorted(qsd.items()):
            if val:
                yield qs_from_dict(val, prefix + key)
            else:
                yield prefix + key

    return ",".join(descend(qsdict))
Same as dict_from_qs, but in reverse i.e. {"period": {"di": {}, "fhr": {}}} => "period.di,period.fhr"
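A round-trip sketch, assuming the qs_from_dict definition above; the output follows the docstring's own example.

nested = {"period": {"di": {}, "fhr": {}}, "status": {}}
print(qs_from_dict(nested))
# keys come out in sorted order, leaves joined with commas:
# period.di,period.fhr,status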
#vtb def plot_transaction_rate_heterogeneity(
    model,
    suptitle="Heterogeneity in Transaction Rate",
    xlabel="Transaction Rate",
    ylabel="Density",
    suptitle_fontsize=14,
    **kwargs
):
    from matplotlib import pyplot as plt

    r, alpha = model._unload_params("r", "alpha")
    rate_mean = r / alpha
    rate_var = r / alpha ** 2
    rv = stats.gamma(r, scale=1 / alpha)
    lim = rv.ppf(0.99)
    x = np.linspace(0, lim, 100)
    fig, ax = plt.subplots(1)
    # Use the suptitle argument instead of ignoring it in favor of a hardcoded string.
    fig.suptitle(suptitle, fontsize=suptitle_fontsize, fontweight="bold")
    ax.set_title("mean: {:.3f}, var: {:.3f}".format(rate_mean, rate_var))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.plot(x, rv.pdf(x), **kwargs)
    return ax
Plot the estimated gamma distribution of lambda (customers' propensities to purchase). Parameters ---------- model: lifetimes model A fitted lifetimes model, for now only for BG/NBD suptitle: str, optional Figure suptitle xlabel: str, optional Figure xlabel ylabel: str, optional Figure ylabel kwargs Passed into the matplotlib.pyplot.plot command. Returns ------- axes: matplotlib.AxesSubplot
#vtb def Ctrl(cls, key):
    element = cls._element()
    element.send_keys(Keys.CONTROL, key)
Perform a Ctrl key-combination event on the specified element.
@note: key event -> control + key
@param key: the key to combine with Ctrl, e.g. 'X'
#vtb def connect(self): try: if S3Handler.S3_KEYS: self.s3 = BotoClient(self.opt, S3Handler.S3_KEYS[0], S3Handler.S3_KEYS[1]) else: self.s3 = BotoClient(self.opt) except Exception as e: raise RetryFailure( % e)
Connect to S3 storage
#vtb def internal_energy(self, t, structure=None):
    if t == 0:
        return self.zero_point_energy(structure=structure)
    freqs = self._positive_frequencies
    dens = self._positive_densities
    coth = lambda x: 1.0 / np.tanh(x)
    wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
    e = np.trapz(freqs * coth(wd2kt) * dens, x=freqs) / 2
    e *= THZ_TO_J * const.Avogadro
    if structure:
        formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
        e /= formula_units
    return e
Phonon contribution to the internal energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used. Result in J/mol-c.
A mol-c is the abbreviation of a mole-cell, that is, the number of Avogadro times the atoms in a unit cell.
To compare with experimental data the result should be divided by the number of unit formulas in the cell.
If the structure is provided the division is performed internally and the result is in J/mol.
Args:
    t: a temperature in K
    structure: the structure of the system. If not None it will be used to determine the number of formula units
Returns:
    Phonon contribution to the internal energy
#vtb def parse_raid(rule): parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith(): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument() parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) parser.add_argument(, dest=, action=) args = clean_args(vars(parser.parse_args(rules))) if partitions: args[] = partitions parser = None return args
Parse the raid line
#vtb def reverse_whois(self, query, exclude=[], scope=, mode=None, **kwargs): return self._results(, , terms=delimited(query), exclude=delimited(exclude), scope=scope, mode=mode, **kwargs)
List of one or more terms to search for in the Whois record, as a Python list or separated with the pipe character ( | ).
#vtb def render_files(self, root=None): if root is None: tmp = os.environ.get() root = sys.path[1 if tmp and tmp in sys.path else 0] items = [] for filename in os.listdir(root): f,ext = os.path.splitext(filename) if ext in [, ]: items.append(FILE_TMPL.format( name=filename, id=filename )) return "".join(items)
Render the file path as accordions
#vtb def org_update(object_id, input_params={}, always_retry=True, **kwargs):
    return DXHTTPRequest('/%s/update' % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /org-xxxx/update API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2Fupdate
#vtb def upload_file(self, local_path, remote_path):
    logger.debug("{0}: uploading {1} to {0}:{2}".format(self.target_address, local_path, remote_path))
    try:
        sftp = paramiko.SFTPClient.from_transport(self.transport())
        sftp.put(local_path, remote_path)
        sftp.close()
    except SSHException as ex:
        logger.warning(("{0}: LiME module upload failed with exception:"
                        "{1}".format(self.target_address, ex)))
Upload a file from the local filesystem to the remote host :type local_path: str :param local_path: path of local file to upload :type remote_path: str :param remote_path: destination path of upload on remote host
#vtb def get_all_related_many_to_many_objects(opts): if django.VERSION < (1, 9): return opts.get_all_related_many_to_many_objects() else: return [r for r in opts.related_objects if r.field.many_to_many]
Django 1.8 changed meta api, see docstr in compat.get_all_related_objects() :param opts: Options instance :return: list of many-to-many relations
#vtb def fold_columns_to_rows(df, levels_from=2):
    df = df.copy()
    df.reset_index(inplace=True, drop=True)
    df = df.T
    # Unique values at each of the first `levels_from` index levels.
    a = [list(set(df.index.get_level_values(i))) for i in range(0, levels_from)]
    combinations = list(itertools.product(*a))
    names = df.index.names[:levels_from]
    concats = []
    for c in combinations:
        try:
            dfcc = df.loc[c]
        except KeyError:
            continue
        else:
            if len(dfcc.shape) == 1:
                continue
            dfcc.columns = pd.MultiIndex.from_tuples([c] * dfcc.shape[1], names=names)
            concats.append(dfcc)
    dfc = pd.concat(concats, axis=1)
    dfc.sort_index(axis=1, inplace=True)
    if dfc.index.name is None:
        dfc.index.name = df.index.names[-1]
    return dfc
Take levels from the column index and fold them down into the row index.
This destroys the existing index; existing rows will appear as columns under the new column index.
:param df:
:param levels_from: the level (inclusive) from which the column index will be folded
:return:
#vtb def update_energy(self, bypass_check=False): for outlet in self.outlets: outlet.update_energy(bypass_check)
Fetch updated energy information about devices
#vtb def mesh(**kwargs): obs_params = [] syn_params, constraints = mesh_syn(syn=False, **kwargs) obs_params += syn_params.to_list() obs_params += [SelectParameter(qualifier=, value=kwargs.get(, []), description=, choices=[])] obs_params += [SelectParameter(qualifier=, value=kwargs.get(, []), description=, choices=_mesh_columns)] return ParameterSet(obs_params), constraints
Create parameters for a new mesh dataset. Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_dataset` :parameter **kwargs: defaults for the values of any of the parameters :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly created :class:`phoebe.parameters.parameters.Parameter`s
#vtb def reads(paths, filename=, options=None, **keywords): if groupname not in f \ or not isinstance(f[groupname], h5py.Group): raise exceptions.CantReadError( \ \ + groupname + ) datas.append(utilities.read_data(f, f[groupname], targetname, options)) except: raise finally: if f is not None: f.close() return datas
Reads data from an HDF5 file (high level). High level function to read one or more pieces of data from an HDF5 file located at the paths specified in `paths` into Python types. Each path is specified as a POSIX style path where the data to read is located. There are various options that can be used to influence how the data is read. They can be passed as an already constructed ``Options`` into `options` or as additional keywords that will be used to make one by ``options = Options(**keywords)``. Paths are POSIX style and can either be given directly as ``str`` or ``bytes``, or the separated path can be given as an iterable of ``str`` and ``bytes``. Each part of a separated path is escaped using ``utilities.escape_path``. Otherwise, the path is assumed to be already escaped. Escaping is done so that targets with a part that starts with one or more periods, contain slashes, and/or contain nulls can be used without causing the wrong Group to be looked in or the wrong target to be looked at. It essentially allows one to make a Dataset named ``'..'`` or ``'a/a'`` instead of moving around in the Dataset hierarchy. Parameters ---------- paths : iterable of paths An iterable of paths to read data from. Each must be a POSIX style path where the directory name is the Group to put it in and the basename is the name to write it to. The format of paths is described in the paragraph above. filename : str, optional The name of the HDF5 file to read data from. options : Options, optional The options to use when reading. Is mutually exclusive with any additional keyword arguments given (set to ``None`` or don't provide to use them). **keywords : If `options` was not provided or was ``None``, these are used as arguments to make a ``Options``. Returns ------- datas : iterable An iterable holding the piece of data for each path in `paths` in the same order. Raises ------ exceptions.CantReadError If reading the data can't be done. See Also -------- utilities.process_path utilities.escape_path read : Reads just a single piece of data writes write Options utilities.read_data : Low level version.
#vtb def register(self, model_alias, code=, name=None, order=None, display_filter=None): model_alias = self.get_model_alias(model_alias) def wrapper(create_layout): item = TabItem( code=code, create_layout=create_layout, name=name, order=order, display_filter=display_filter ) if item in self.tabs[model_alias]: raise Exception("Tab {} already registered for model {}".format(code, model_alias)) self.tabs[model_alias].append(item) self.tabs[model_alias] = sorted(self.tabs[model_alias], key=lambda item: item.order if item.order else 999) return create_layout return wrapper
Register new tab :param model_alias: :param code: :param name: :param order: :return:
#vtb def ops_to_words(item):
    unsupp_ops = ["~=", "==="]
    supp_ops = [">=", ">", "==", "<=", "<", "!="]
    tokens = sorted(item.split(","), reverse=True)
    actual_tokens = []
    for req in tokens:
        for op in unsupp_ops:
            if req.startswith(op):
                raise RuntimeError("Unsupported version specification: {0}".format(op))
        for op in supp_ops:
            if req.startswith(op):
                actual_tokens.append(op)
                break
        else:
            # No supported operator matched; report the offending requirement.
            raise RuntimeError("Illegal comparison operator: {0}".format(req))
    if len(list(set(actual_tokens))) != len(actual_tokens):
        raise RuntimeError("Multiple comparison operators of the same type")
    if "!=" in actual_tokens:
        return (
            " and ".join([op_to_words(token) for token in tokens[:-1]])
            + " "
            + op_to_words(tokens[-1])
        )
    return " and ".join([op_to_words(token) for token in tokens])
Translate requirement specification to words.
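ops_to_words delegates to an op_to_words helper that is not part of this entry; the sketch below substitutes a minimal hypothetical mapping purely to exercise the flow.

def op_to_words(token):
    # hypothetical stand-in for the real op_to_words helper (not shown here)
    mapping = [(">=", "{} or newer"), (">", "newer than {}"), ("==", "exactly {}"),
               ("<=", "{} or older"), ("<", "older than {}"), ("!=", "anything but {}")]
    for op, fmt in mapping:
        if token.startswith(op):
            return fmt.format(token[len(op):])
    return token

print(ops_to_words(">=1.5,<2.0"))  # -> 1.5 or newer and older than 2.0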
#vtb def error(self, i: int=None) -> str: head = "[" + colors.red("error") + "]" if i is not None: head = str(i) + " " + head return head
Returns an error message
#vtb def anoteElements(ax, anotelist, showAccName=False, efilter=None, textypos=None, **kwargs): defaultstyle = {: 0.8, : dict(arrowstyle=), : -60, : } defaultstyle.update(kwargs) anote_list = [] if efilter is None: for anote in anotelist: if textypos is None: textxypos = tuple(anote[]) else: textxypos = tuple((anote[][0], textypos)) if not showAccName and anote[] in (, ): kwstyle = {k: v for k, v in defaultstyle.items()} kwstyle.pop() note_text = ax.text(anote[][][0], anote[][][1], anote[][], **kwstyle) else: note_text = ax.annotate(s=anote[], xy=anote[], xytext=textxypos, **defaultstyle) anote_list.append(note_text) else: if not isinstance(efilter, tuple): filter = tuple(efilter) for anote in anotelist: if anote[] in efilter: if textypos is None: textxypos = tuple(anote[]) else: textxypos = tuple((anote[][0], textypos)) if not showAccName and anote[] in (, ): kwstyle = {k: v for k, v in defaultstyle.items()} kwstyle.pop() note_text = ax.text(anote[][][0], anote[][][1], anote[][], **kwstyle) else: note_text = ax.annotate(s=anote[], xy=anote[], xytext=textxypos, **defaultstyle) anote_list.append(note_text) return anote_list
annotate elements to axes :param ax: matplotlib axes object :param anotelist: element annotation object list :param showAccName: tag name for accelerator tubes? default is False, show acceleration band type, e.g. 'S', 'C', 'X', or for '[S,C,X]D' for cavity :param efilter: element type filter, default is None, annotate all elements could be defined to be one type name or type name list/tuple, e.g. filter='QUAD' or filter=('QUAD', 'CSRCSBEN') :param textypos: y coordinator of annotated text string :param kwargs: alpha=0.8, arrowprops=dict(arrowstyle='->'), rotation=-60, fontsize='small' return list of annotation objects
#vtb def palette_image(self):
    if self.pimage is None:
        palette = []
        for i in range(self.NETSIZE):
            palette.extend(self.colormap[i][:3])
        # Pad the remaining slots so the palette covers all 256 entries.
        palette.extend([0] * (256 - self.NETSIZE) * 3)
        self.pimage = Image.new("P", (1, 1), 0)
        self.pimage.putpalette(palette)
    return self.pimage
PIL has a quirky interface for making a paletted image: create an image that already carries the palette, and pass it to Image.quantize. This function returns that palette image.
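The trick is plain Pillow usage and can be sketched standalone; the four-color palette below is an arbitrary example, not the NETSIZE colormap.

from PIL import Image

# A 1x1 "P"-mode image that exists only to carry a palette.
palette = [0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 255, 0]  # black, white, red, green
palette += [0] * (256 * 3 - len(palette))  # pad to 256 RGB entries
pimage = Image.new("P", (1, 1), 0)
pimage.putpalette(palette)

# Quantize any RGB image onto that fixed palette.
src = Image.new("RGB", (64, 64), (250, 10, 10))
quantized = src.quantize(palette=pimage)
print(quantized.mode, quantized.getpixel((0, 0)))  # 'P' and the nearest palette index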
#vtb def visit_console_html(self, node): if self.builder.name in (, ) and node[]: self.document._console_directive_used_flag = True uid = node[] self.body.append( % {: uid}) try: self.visit_literal_block(node) except nodes.SkipNode: pass self.body.append() self.body.append( % {: uid}) win_text = node[] highlight_args = {: True} if in node: linenos = node[] else: linenos = win_text.count() >= self.highlightlinenothreshold - 1 def warner(msg): self.builder.warn(msg, (self.builder.current_docname, node.line)) highlighted = self.highlighter.highlight_block( win_text, , warn=warner, linenos=linenos, **highlight_args ) self.body.append(highlighted) self.body.append() self.body.append() raise nodes.SkipNode else: self.visit_literal_block(node)
Generate HTML for the console directive.
#vtb def _mkfs(root, fs_format, fs_opts=None): if fs_opts is None: fs_opts = {} if fs_format in (, , ): __salt__[](root, fs_format, **fs_opts) elif fs_format in (,): __salt__[](root, **fs_opts) elif fs_format in (,): __salt__[](root, **fs_opts)
Make a filesystem using the appropriate module .. versionadded:: Beryllium
#vtb def random_indexes(max_index, subset_size=None, seed=None, rng=None):
    subst_ = np.arange(0, max_index)
    rng = ensure_rng(seed if rng is None else rng)
    rng.shuffle(subst_)
    if subset_size is None:
        subst = subst_
    else:
        subst = subst_[0:min(subset_size, max_index)]
    return subst
Random non-repeating indices.
Args:
    max_index (?):
    subset_size (None): (default = None)
    seed (None): (default = None)
    rng (RandomState): random number generator (default = None)
Returns:
    ?: subst
CommandLine:
    python -m utool.util_numpy --exec-random_indexes
Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_numpy import *  # NOQA
    >>> max_index = 10
    >>> subset_size = None
    >>> seed = None
    >>> rng = np.random.RandomState(0)
    >>> subst = random_indexes(max_index, subset_size, seed, rng)
    >>> result = ('subst = %s' % (str(subst),))
    >>> print(result)
#vtb def parse_expression(expression: str) -> Tuple[Set[str], List[CompositeAxis]]:
    identifiers = set()
    composite_axes = []
    if '.' in expression:
        if '...' not in expression:
            raise EinopsError('Expression may contain dots only inside ellipsis (...)')
        if str.count(expression, '...') != 1 or str.count(expression, '.') != 3:
            raise EinopsError('Expression may contain dots only inside ellipsis (...)')
        expression = expression.replace('...', _ellipsis)
    bracket_group = None

    def add_axis_name(x):
        if x is not None:
            if x in identifiers:
                raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(x))
            identifiers.add(x)
            if bracket_group is None:
                composite_axes.append([x])
            else:
                bracket_group.append(x)

    current_identifier = None
    for char in expression:
        if char in '() ' + _ellipsis:
            add_axis_name(current_identifier)
            current_identifier = None
            if char == _ellipsis:
                if bracket_group is not None:
                    raise EinopsError("Ellipsis can't be used inside brackets")
                composite_axes.append(_ellipsis)
                identifiers.add(_ellipsis)
            elif char == '(':
                if bracket_group is not None:
                    raise EinopsError('Brackets inside brackets are not allowed')
                bracket_group = []
            elif char == ')':
                if bracket_group is None:
                    raise EinopsError('Brackets are not balanced')
                composite_axes.append(bracket_group)
                bracket_group = None
        elif '0' <= char <= '9':
            if current_identifier is None:
                raise EinopsError("Axis name can't start with a digit")
            current_identifier += char
        elif 'a' <= char <= 'z':
            if current_identifier is None:
                current_identifier = char
            else:
                current_identifier += char
        else:
            if 'A' <= char <= 'Z':
                raise EinopsError("Only lower-case latin letters allowed in names, not '{}'".format(char))
            raise EinopsError("Unknown character '{}'".format(char))
    if bracket_group is not None:
        raise EinopsError('Brackets are not balanced in expression: "{}"'.format(expression))
    add_axis_name(current_identifier)
    return identifiers, composite_axes
Parses an indexing expression (for a single tensor). Checks uniqueness of names, checks usage of '...' (allowed only once) Returns set of all used identifiers and a list of axis groups
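Assuming the parse_expression reconstructed above (and its _ellipsis sentinel), a sample pattern decomposes like this:

identifiers, composite_axes = parse_expression('b (h w) c')
print(identifiers)     # {'b', 'h', 'w', 'c'}
print(composite_axes)  # [['b'], ['h', 'w'], ['c']]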
#vtb def invokeCompletionIfAvailable(self, requestedByUser=False): if self._qpart.completionEnabled and self._wordSet is not None: wordBeforeCursor = self._wordBeforeCursor() wholeWord = wordBeforeCursor + self._wordAfterCursor() forceShow = requestedByUser or self._completionOpenedManually if wordBeforeCursor: if len(wordBeforeCursor) >= self._qpart.completionThreshold or forceShow: if self._widget is None: model = _CompletionModel(self._wordSet) model.setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(model, forceShow): self._createWidget(model) return True else: self._widget.model().setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(self._widget.model(), forceShow): self._widget.updateGeometry() return True self._closeCompletion() return False
Invoke completion, if available. Called after text has been typed in qpart Returns True, if invoked
#vtb def _set_request_cache_if_django_cache_hit(key, django_cached_response): if django_cached_response.is_found: DEFAULT_REQUEST_CACHE.set(key, django_cached_response.value)
Sets the value in the request cache if the django cached response was a hit. Args: key (string) django_cached_response (CachedResponse)
#vtb def to_json(self): res_dict = {} def gen_dep_edge(node, edge, dep_tgt, aliases): return { : dep_tgt.address.spec, : self._edge_type(node.concrete_target, edge, dep_tgt), : len(edge.products_used), : self._used_ratio(dep_tgt, edge), : [alias.address.spec for alias in aliases], } for node in self._nodes.values(): res_dict[node.concrete_target.address.spec] = { : self._cost(node.concrete_target), : self._trans_cost(node.concrete_target), : node.products_total, : [gen_dep_edge(node, edge, dep_tgt, node.dep_aliases.get(dep_tgt, {})) for dep_tgt, edge in node.dep_edges.items()], } yield str(json.dumps(res_dict, indent=2, sort_keys=True))
Outputs the entire graph.
#vtb def run_transaction(self, command_list, do_commit=True): pass for c in command_list: if c.find(";") != -1 or c.find("\\G") != -1: raise Exception("The SQL command contains a semi-colon or \\G. This is a potential SQL injection." % c) if do_commit: sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list) else: sql = "START TRANSACTION;\n%s;" % "\n".join(command_list) return
This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur if the entity is tied to table not specified in the list of commands. Performing this as a transaction avoids the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.
#vtb def _serialize_value(self, value):
    if isinstance(value, (list, tuple, set)):
        return [self._serialize_value(v) for v in value]
    elif isinstance(value, dict):
        return dict([(k, self._serialize_value(v)) for k, v in value.items()])
    elif isinstance(value, ModelBase):
        return value._serialize()
    elif isinstance(value, datetime.date):
        return value.isoformat()
    else:
        return value
Called by :py:meth:`._serialize` to serialise an individual value.
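The recursion is easier to see in isolation; this standalone sketch mirrors the same walk, minus the ModelBase branch that needs the surrounding class.

import datetime

def serialize_value(value):
    # same shape as _serialize_value above, without the ModelBase case
    if isinstance(value, (list, tuple, set)):
        return [serialize_value(v) for v in value]
    if isinstance(value, dict):
        return {k: serialize_value(v) for k, v in value.items()}
    if isinstance(value, datetime.date):
        return value.isoformat()
    return value

print(serialize_value({"when": datetime.date(2020, 1, 2), "tags": ("a", "b")}))
# {'when': '2020-01-02', 'tags': ['a', 'b']}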
#vtb def latcyl(radius, lon, lat):
    radius = ctypes.c_double(radius)
    lon = ctypes.c_double(lon)
    lat = ctypes.c_double(lat)
    r = ctypes.c_double()
    lonc = ctypes.c_double()
    z = ctypes.c_double()
    libspice.latcyl_c(radius, lon, lat, ctypes.byref(r), ctypes.byref(lonc), ctypes.byref(z))
    return r.value, lonc.value, z.value
Convert from latitudinal coordinates to cylindrical coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latcyl_c.html :param radius: Distance of a point from the origin. :type radius: :param lon: Angle of the point from the XZ plane in radians. :param lat: Angle of the point from the XY plane in radians. :return: (r, lonc, z) :rtype: tuple
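The conversion itself is simple trigonometry; a pure-Python cross-check (no SPICE toolkit needed) under the standard latitudinal convention:

import math

def latcyl_pure(radius, lon, lat):
    # latitudinal (radius, lon, lat) -> cylindrical (r, lonc, z):
    # project onto the XY plane for r, keep the longitude, take the height as z
    r = radius * math.cos(lat)
    z = radius * math.sin(lat)
    return r, lon, z

print(latcyl_pure(1.0, 0.0, math.pi / 4))  # (~0.7071, 0.0, ~0.7071)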
#vtb def _pull_out_perm_rhs(rest, rhs, out_port, in_port): in_im, rhs_red = rhs._factor_rhs(in_port) return (Feedback.create( SeriesProduct.create(*rest), out_port=out_port, in_port=in_im) << rhs_red)
Similar to :func:`_pull_out_perm_lhs` but on the RHS of a series product self-feedback.
#vtb def postinit(self, value, conversion=None, format_spec=None): self.value = value self.conversion = conversion self.format_spec = format_spec
Do some setup after initialisation. :param value: The value to be formatted into the string. :type value: NodeNG :param conversion: The type of formatting to be applied to the value. :type conversion: int or None :param format_spec: The formatting to be applied to the value. :type format_spec: JoinedStr or None
#vtb def spiro_image(R, r, r_, resolution=2*PI/1000, spins=50, size=[32, 32]): x, y = give_dots(200, r, r_, spins=20) xy = np.array([x, y]).T xy = np.array(np.around(xy), dtype=np.int64) xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) & (xy[:, 0] < 250) & (xy[:, 1] < 250)] xy = xy + 250 img = np.ones([500, 500], dtype=np.uint8) img[:] = 255 img[xy[:, 0], xy[:, 1]] = 0 img = misc.imresize(img, size) fimg = img / 255.0 return fimg
Create image with given Spirograph parameters using numpy and scipy.
#vtb def match(self, pattern, context=None): matches = [] regex = pattern if regex == : regex = regex = re.compile(regex) for choice in self.choices(context): if regex.search(choice): matches.append(choice) return matches
This method returns a (possibly empty) list of strings that match the regular expression ``pattern`` provided. You can also provide a ``context`` as described above. This method calls ``choices`` to get a list of all possible choices and then filters the list by performing a regular expression search on each choice using the supplied ``pattern``.
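Filtering choices with re.search, the way match does, looks like this in isolation (the choices list is made up for the demo):

import re

choices = ["alpha", "beta", "alphabet", "gamma"]  # stand-in for self.choices(context)
pattern = re.compile(r"^alpha")
matches = [c for c in choices if pattern.search(c)]
print(matches)  # ['alpha', 'alphabet']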
#vtb def get_formset(self, request, obj=None, **kwargs): data = super().get_formset(request, obj, **kwargs) if obj: data.form.base_fields[].initial = request.user.id return data
Default user to the current version owner.
#vtb def r_q_send(self, msg_dict): no_pickle_keys = self.invalid_dict_pickle_keys(msg_dict) if no_pickle_keys == []: self.r_q.put(msg_dict) else: hash_func = md5() hash_func.update(str(msg_dict)) dict_hash = str(hash_func.hexdigest())[-7:] linesep = os.linesep sys.stderr.write( "{0} {1}r_q_send({2}) Can{7}{4} {5}{7}{6}{0}{2}{0}{3}{0}']: {1}{2}{3}{4}".format( key, Fore.GREEN, repr(msg_dict.get(key)), Style.RESET_ALL, linesep, ) )
Send message dicts through r_q, and throw explicit errors for pickle problems
#vtb def assert_not_equal(first, second, msg_fmt="{msg}"):
    if first == second:
        msg = "{!r} == {!r}".format(first, second)
        fail(msg_fmt.format(msg=msg, first=first, second=second))
Fail if first equals second, as determined by the '==' operator.
>>> assert_not_equal(5, 8)
>>> assert_not_equal(-7, -7.0)
Traceback (most recent call last):
    ...
AssertionError: -7 == -7.0
The following msg_fmt arguments are supported:
* msg - the default error message
* first - the first argument
* second - the second argument
#vtb def guest_stop(self, userid, **kwargs): requestData = "PowerVM " + userid + " off" if in kwargs.keys() and kwargs[]: requestData += + str(kwargs[]) if in kwargs.keys() and kwargs[]: requestData += + str(kwargs[]) with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData)
Power off VM.
#vtb def from_irc(self, irc_nickname=None, irc_password=None): if have_bottom: from .findall import IrcListener bot = IrcListener(irc_nickname=irc_nickname, irc_password=irc_password) results = bot.loop.run_until_complete(bot.collect_data()) bot.loop.close() self.update(results) else: return(False)
Connect to the IRC channel and find all servers presently connected. Slow; takes 30+ seconds but authoritative and current. OBSOLETE.
#vtb def register_frontend_media(request, media):
    if not hasattr(request, '_fluent_contents_frontend_media'):
        request._fluent_contents_frontend_media = Media()
    add_media(request._fluent_contents_frontend_media, media)
Add a :class:`~django.forms.Media` class to the current request. This will be rendered by the ``render_plugin_media`` template tag.
#vtb def _parse_call_args(self,*args,**kwargs): interp= kwargs.get(,self._useInterp) if len(args) == 5: raise IOError("Must specify phi for streamdf") elif len(args) == 6: if kwargs.get(,False): if isinstance(args[0],(int,float,numpy.float32,numpy.float64)): out= numpy.empty((6,1)) else: out= numpy.empty((6,len(args[0]))) for ii in range(6): out[ii,:]= args[ii] return out else: return self._approxaA(*args,interp=interp) elif isinstance(args[0],Orbit): o= args[0] return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(), interp=interp) elif isinstance(args[0],list) and isinstance(args[0][0],Orbit): R, vR, vT, z, vz, phi= [], [], [], [], [], [] for o in args[0]: R.append(o.R()) vR.append(o.vR()) vT.append(o.vT()) z.append(o.z()) vz.append(o.vz()) phi.append(o.phi()) return self._approxaA(numpy.array(R),numpy.array(vR), numpy.array(vT),numpy.array(z), numpy.array(vz),numpy.array(phi), interp=interp)
Helper function to parse the arguments to the __call__ and related functions, return [6,nobj] array of frequencies (:3) and angles (3:)
#vtb def change_node_subscriptions(self, jid, node, subscriptions_to_set): iq = aioxmpp.stanza.IQ( type_=aioxmpp.structs.IQType.SET, to=jid, payload=pubsub_xso.OwnerRequest( pubsub_xso.OwnerSubscriptions( node, subscriptions=[ pubsub_xso.OwnerSubscription( jid, subscription ) for jid, subscription in subscriptions_to_set ] ) ) ) yield from self.client.send(iq)
Update the subscriptions at a node. :param jid: Address of the PubSub service. :type jid: :class:`aioxmpp.JID` :param node: Name of the node to modify :type node: :class:`str` :param subscriptions_to_set: The subscriptions to set at the node. :type subscriptions_to_set: :class:`~collections.abc.Iterable` of tuples consisting of the JID to (un)subscribe and the subscription level to use. :raises aioxmpp.errors.XMPPError: as returned by the service `subscriptions_to_set` must be an iterable of pairs (`jid`, `subscription`), where the `jid` indicates the JID for which the `subscription` is to be set.
#vtb def image_preprocessing(image_buffer, bbox, train, thread_id=0): if bbox is None: raise ValueError() image = decode_jpeg(image_buffer) height = FLAGS.image_size width = FLAGS.image_size if train: image = distort_image(image, height, width, bbox, thread_id) else: image = eval_image(image, height, width) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image
Decode and preprocess one image for evaluation or training. Args: image_buffer: JPEG encoded string Tensor bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] where each coordinate is [0, 1) and the coordinates are arranged as [ymin, xmin, ymax, xmax]. train: boolean thread_id: integer indicating preprocessing thread Returns: 3-D float Tensor containing an appropriately scaled image Raises: ValueError: if user does not provide bounding box
#vtb def __create_url_node_for_content(self, content, content_type, url=None, modification_time=None): loc = url if loc is None: loc = urljoin(self.url_site, self.context.get().format(**content.url_format)) lastmod = None if modification_time is not None: lastmod = modification_time.strftime() else: if content is not None: if getattr(content, , None) is not None: lastmod = getattr(content, ).strftime() elif getattr(content, , None) is not None: lastmod = getattr(content, ).strftime() output = "<loc>{}</loc>".format(loc) if lastmod is not None: output += "\n<lastmod>{}</lastmod>".format(lastmod) output += "\n<changefreq>{}</changefreq>".format(self.settings.get().get(content_type)) output += "\n<priority>{:.2f}</priority>".format(self.settings.get().get(content_type)) return self.template_url.format(output)
Creates the required <url> node for the sitemap xml. :param content: the content class to handle :type content: pelican.contents.Content | None :param content_type: the type of the given content to match settings.EXTENDED_SITEMAP_PLUGIN :type content_type; str :param url; if given, the URL to use instead of the url of the content instance :type url: str :param modification_time: the modification time of the url, will be used instead of content date if given :type modification_time: datetime.datetime | None :returns: the text node :rtype: str
#vtb def _nextSequence(cls, name=None): if not name: name = cls._sqlSequence if not name: curs = cls.cursor() curs.execute("SELECT nextval()" % name) value = curs.fetchone()[0] curs.close() return value
Return a new sequence number for insertion in self._sqlTable. Note that if your sequences are not named tablename_primarykey_seq (ie. for table 'blapp' with primary key 'john_id', sequence name blapp_john_id_seq) you must give the full sequence name as an optional argument to _nextSequence)
#vtb def from_time( year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None ): def str_or_stars(i, length): if i is None: return "*" * length else: return str(i).rjust(length, "0") wmi_time = "" wmi_time += str_or_stars(year, 4) wmi_time += str_or_stars(month, 2) wmi_time += str_or_stars(day, 2) wmi_time += str_or_stars(hours, 2) wmi_time += str_or_stars(minutes, 2) wmi_time += str_or_stars(seconds, 2) wmi_time += "." wmi_time += str_or_stars(microseconds, 6) if timezone is None: wmi_time += "+" else: try: int(timezone) except ValueError: wmi_time += "+" else: if timezone >= 0: wmi_time += "+" else: wmi_time += "-" timezone = abs(timezone) wmi_time += str_or_stars(timezone, 3) return wmi_time
Convenience wrapper to take a series of date/time elements and return a WMI time of the form `yyyymmddHHMMSS.mmmmmm+UUU`. All elements may be int, string or omitted altogether. If omitted, they will be replaced in the output string by a series of stars of the appropriate length. :param year: The year element of the date/time :param month: The month element of the date/time :param day: The day element of the date/time :param hours: The hours element of the date/time :param minutes: The minutes element of the date/time :param seconds: The seconds element of the date/time :param microseconds: The microseconds element of the date/time :param timezone: The timeezone element of the date/time :returns: A WMI datetime string of the form: `yyyymmddHHMMSS.mmmmmm+UUU`
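Because omitted elements become stars, a partially specified call is deterministic; assuming the from_time above:

print(from_time(year=2024, month=1))
# 202401********.******+***   (day through seconds starred, timezone omitted)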
#vtb def normalizeGlyphHeight(value): if not isinstance(value, (int, float)): raise TypeError("Glyph height must be an :ref:`type-int-float`, not " "%s." % type(value).__name__) return value
Normalizes glyph height. * **value** must be a :ref:`type-int-float`. * Returned value is the same type as the input value.
#vtb def input(self, input, song): try: cmd = getattr(self, self.CMD_MAP[input][1]) except (IndexError, KeyError): return self.screen.print_error( "Invalid command {!r}!".format(input)) cmd(song)
Input callback, handles key presses
#vtb def run_process(self, slug, inputs): def export_files(value): if isinstance(value, str) and os.path.isfile(value): print("export {}".format(value)) elif isinstance(value, dict): for item in value.values(): export_files(item) elif isinstance(value, list): for item in value: export_files(item) export_files(inputs) print(.format(json.dumps({: slug, : inputs}, separators=(, ))))
Run a new process from a running process.
#vtb def run_pipes(executable, input_path, output_path, more_args=None, properties=None, force_pydoop_submitter=False, hadoop_conf_dir=None, logger=None, keep_streams=False): if logger is None: logger = utils.NullLogger() if not hdfs.path.exists(executable): raise IOError("executable %s not found" % executable) if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS): raise IOError("input path %s not found" % input_path) if properties is None: properties = {} properties.setdefault(, ) properties.setdefault(, ) if force_pydoop_submitter: use_pydoop_submit = True else: use_pydoop_submit = False ver = pydoop.hadoop_version_info() if ver.has_security(): if ver.is_cdh_mrv2() and hdfs.default_is_local(): raise RuntimeError("mrv2 on local fs not supported yet") use_pydoop_submit = hdfs.default_is_local() args = [ "-program", executable, "-input", input_path, "-output", output_path, ] if more_args is not None: args.extend(more_args) if use_pydoop_submit: submitter = "it.crs4.pydoop.pipes.Submitter" pydoop_jar = pydoop.jar_path() args.extend(("-libjars", pydoop_jar)) return run_class(submitter, args, properties, classpath=pydoop_jar, logger=logger, keep_streams=keep_streams) else: return run_mapred_cmd("pipes", args=args, properties=properties, hadoop_conf_dir=hadoop_conf_dir, logger=logger, keep_streams=keep_streams)
Run a pipes command. ``more_args`` (after setting input/output path) and ``properties`` are passed to :func:`run_cmd`. If not specified otherwise, this function sets the properties ``mapreduce.pipes.isjavarecordreader`` and ``mapreduce.pipes.isjavarecordwriter`` to ``"true"``. This function works around a bug in Hadoop pipes that affects versions of Hadoop with security when the local file system is used as the default FS (no HDFS); see https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those set-ups, the function uses Pydoop's own pipes submitter application. You can force the use of Pydoop's submitter by passing the argument force_pydoop_submitter=True.
#vtb def get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_received(self, **kwargs): config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop() remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name_key.text = kwargs.pop() lldp_pdu_received = ET.SubElement(lldp_neighbor_detail, "lldp-pdu-received") lldp_pdu_received.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
#vtb def get_schedule(self, ehr_username, start_date, changed_since, include_pix, other_user=, end_date=, appointment_types=None, status_filter=): if not start_date: raise ValueError() if end_date: start_date = % (start_date, end_date) if not changed_since: changed_since = magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_GET_SCHEDULE, app_name=self._app_name, user_id=ehr_username, token=self._token.token, parameter1=start_date, parameter2=changed_since, parameter3=include_pix, parameter4=other_user, parameter5=appointment_types, parameter6=status_filter) response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic) result = self._get_results_or_raise_if_magic_invalid( magic, response, TouchWorksMagicConstants.RESULT_GET_SCHEDULE) return result
invokes TouchWorksMagicConstants.ACTION_GET_SCHEDULE action :return: JSON response
#vtb def get_last_modified_datetime(dir_path=os.path.dirname(__file__)): max_mtime = 0 for root, dirs, files in os.walk(dir_path): for f in files: p = os.path.join(root, f) try: max_mtime = max(max_mtime, os.stat(p).st_mtime) except FileNotFoundError: pass return datetime.utcfromtimestamp(max_mtime)
Return datetime object of latest change in kerncraft module directory.
#vtb def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
    img_rows = 32
    img_cols = 32
    nb_classes = 10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    if tf.keras.backend.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    x_train = x_train[train_start:train_end, :, :, :]
    y_train = y_train[train_start:train_end, :]
    x_test = x_test[test_start:test_end, :]
    y_test = y_test[test_start:test_end, :]
    return x_train, y_train, x_test, y_test
Preprocess CIFAR10 dataset :return:
#vtb def to_size(value, convert_to_human=True): value = from_size(value) if value is None: value = if isinstance(value, Number) and value > 1024 and convert_to_human: v_power = int(math.floor(math.log(value, 1024))) v_multiplier = math.pow(1024, v_power) v_size_float = float(value) / v_multiplier if v_size_float == int(v_size_float): value = "{:.0f}{}".format( v_size_float, zfs_size[v_power-1], ) else: for v_precision in ["{:.2f}{}", "{:.1f}{}", "{:.0f}{}"]: v_size = v_precision.format( v_size_float, zfs_size[v_power-1], ) if len(v_size) <= 5: value = v_size break return value
Convert python int (bytes) to zfs size NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114
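The unit math is ordinary log-1024 bucketing; a standalone sketch with an assumed suffix table (the real zfs_size table lives elsewhere in the module):

import math

zfs_size = ["K", "M", "G", "T", "P", "E", "Z"]  # assumed suffix table

def to_human(value):
    if value <= 1024:
        return str(value)
    power = int(math.floor(math.log(value, 1024)))
    scaled = float(value) / math.pow(1024, power)
    suffix = zfs_size[power - 1]
    if scaled == int(scaled):
        return "{:.0f}{}".format(scaled, suffix)
    for fmt in ("{:.2f}{}", "{:.1f}{}", "{:.0f}{}"):
        text = fmt.format(scaled, suffix)
        if len(text) <= 5:
            return text
    return text

print(to_human(1536))        # 1.50K
print(to_human(1073741824))  # 1G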
#vtb def address_to_scripthash(address: str) -> UInt160: AddressVersion = 23 data = b58decode(address) if len(data) != 25: raise ValueError() if data[0] != AddressVersion: raise ValueError() checksum_data = data[:21] checksum = hashlib.sha256(hashlib.sha256(checksum_data).digest()).digest()[:4] if checksum != data[21:]: raise Exception() return UInt160(data=data[1:21])
Just a helper method
#vtb def remove_cable_distributor(self, cable_dist): if cable_dist in self.cable_distributors() and isinstance(cable_dist, MVCableDistributorDing0): self._cable_distributors.remove(cable_dist) if self._graph.has_node(cable_dist): self._graph.remove_node(cable_dist)
Removes a cable distributor from _cable_distributors if existing
#vtb def kappa_statistic(self):
    if self.population() == 0:
        return float('NaN')
    random_accuracy = (
        (self._tn + self._fp) * (self._tn + self._fn)
        + (self._fn + self._tp) * (self._fp + self._tp)
    ) / self.population() ** 2
    return (self.accuracy() - random_accuracy) / (1 - random_accuracy)
Return κ statistic.
The κ statistic is defined as:
:math:`\kappa = \frac{accuracy - random~ accuracy}{1 - random~ accuracy}`
The κ statistic compares the performance of the classifier relative to the performance of a random classifier.
:math:`\kappa` = 0 indicates performance identical to random.
:math:`\kappa` = 1 indicates perfect predictive success.
:math:`\kappa` = -1 indicates perfect predictive failure.
Returns
-------
float
    The κ statistic of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.kappa_statistic()
0.5344129554655871
#vtb def percentile(values, percent):
    N = sorted(values)
    if not N:
        return None
    k = (len(N) - 1) * percent
    f = int(math.floor(k))
    c = int(math.ceil(k))
    if f == c:
        return N[int(k)]
    # Interpolate between the two surrounding values.
    d0 = N[f] * (c - k)
    d1 = N[c] * (k - f)
    return d0 + d1
PERCENTILE WITH INTERPOLATION RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
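Interpolation at work, assuming the percentile function above:

print(percentile([1, 2, 3, 4], 0.5))   # 2.5  (k = 1.5 falls between indexes 1 and 2)
print(percentile([1, 2, 3, 4], 0.25))  # 1.75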
#vtb def get_comments_content_object(parser, token): keywords = token.contents.split() if len(keywords) != 5: raise template.TemplateSyntaxError( " tag takes exactly 2 arguments" % (keywords[0],)) if keywords[1] != : raise template.TemplateSyntaxError( "first argument to tag must be " % (keywords[0],)) if keywords[3] != : raise template.TemplateSyntaxError( "first argument to tag must be " % (keywords[0],)) return GetCommentsContentObject(keywords[2], keywords[4])
Get a limited set of comments for a given object. Defaults to a limit of 5. Setting the limit to -1 disables limiting. usage: {% get_comments_content_object for form_object as variable_name %}
#vtb def get_open_orders(self, market=None): return self._api_query(path_dict={ API_V1_1: , API_V2_0: }, options={: market, : market} if market else None, protection=PROTECTION_PRV)
Get all orders that you currently have opened. A specific market can be requested. Endpoint: 1.1 /market/getopenorders 2.0 /key/market/getopenorders :param market: String literal for the market (ie. BTC-LTC) :type market: str :return: Open orders info in JSON :rtype : dict
#vtb def create_markdown_cell(block): kwargs = {: block[], : block[]} markdown_cell = nbbase.new_markdown_cell(**kwargs) return markdown_cell
Create a markdown cell from a block.
#vtb def clusterQueues(self): servers = yield self.getClusterServers() queues = {} for sname in servers: qs = yield self.get( % sname) uuid = yield self.get( % sname) qs = json.loads(qs) for q in qs: if q not in queues: queues[q] = [] queues[q].append({: sname, : uuid}) defer.returnValue(queues)
Return a dict of queues in cluster and servers running them
#vtb def parse_from_array(arr): syn_set = SynonymSet() for synonyms in arr: _set = set() for synonym in synonyms: _set.add(synonym) syn_set.add_set(_set) return syn_set
Parse 2d array into synonym set Every array inside arr is considered a set of synonyms
#vtb def fetch_all_messages(self, conn, directory, readonly):
    conn.select(directory, readonly)
    message_data = []
    typ, data = conn.search(None, 'ALL')
    for num in data[0].split():
        typ, data = conn.fetch(num, '(RFC822)')
        for response_part in data:
            if isinstance(response_part, tuple):
                email_parser = email.parser.BytesFeedParser()
                email_parser.feed(response_part[1])
                msg = email_parser.close()
                body = self.get_body(msg)
                subject = self.get_subject(msg)
                message_data.append((subject, body))
    return message_data
Fetches all messages at @conn from @directory. Params: conn IMAP4_SSL connection directory The IMAP directory to look for readonly readonly mode, true or false Returns: List of subject-body tuples
#vtb def save_files(self, nodes): metrics = {"Opened": 0, "Cached": 0} for node in nodes: file = node.file if self.__container.get_editor(file): if self.__container.save_file(file): metrics["Opened"] += 1 self.__uncache(file) else: cache_data = self.__files_cache.get_content(file) if cache_data is None: LOGGER.warning( "!> {0} | file doesn{1}t exists in files cache!".format(self.__class__.__name__, file)) self.__container.engine.notifications_manager.notify( "{0} | opened file(s) and cached file(s) saved!".format(self.__class__.__name__, metrics["Opened"], metrics["Cached"]))
Saves user defined files using give nodes. :param nodes: Nodes. :type nodes: list :return: Method success. :rtype: bool
#vtb def ext_pillar(minion_id, pillar, **kwargs): filter_out_source_path_option(kwargs) set_inventory_base_uri_default(__opts__, kwargs) return reclass_ext_pillar(minion_id, pillar, **kwargs) except TypeError as e: if in six.text_type(e): arg = six.text_type(e).split()[-1] raise SaltInvocationError( + arg) else: raise except KeyError as e: if in six.text_type(e): raise SaltInvocationError( ) else: raise except ReclassException as e: raise SaltInvocationError(.format(e))
Obtain the Pillar data from **reclass** for the given ``minion_id``.
#vtb def namedbuffer(buffer_name, fields_spec): if not len(buffer_name): raise ValueError() if not len(fields_spec): raise ValueError() fields = [ field for field in fields_spec if not isinstance(field, Pad) ] if any(field.size_bytes < 0 for field in fields): raise ValueError() if any(len(field.name) < 0 for field in fields): raise ValueError() names_fields = { field.name: field for field in fields } if in names_fields: raise ValueError() if any(count > 1 for count in Counter(field.name for field in fields).values()): raise ValueError() fields_format = + .join(field.format_string for field in fields_spec) size = sum(field.size_bytes for field in fields_spec) names_slices = compute_slices(fields_spec) sorted_names = sorted(names_fields.keys()) @staticmethod def get_bytes_from(buffer_, name): slice_ = names_slices[name] return buffer_[slice_] def __init__(self, data): if len(data) < size: raise InvalidProtocolMessage( .format(size), ) object.__setattr__(self, , data) def __getattribute__(self, name): if name in names_slices: slice_ = names_slices[name] field = names_fields[name] data = object.__getattribute__(self, ) value = data[slice_] if field.encoder: value = field.encoder.decode(value) return value if name == : return object.__getattribute__(self, ) raise AttributeError def __setattr__(self, name, value): if name in names_slices: slice_ = names_slices[name] field = names_fields[name] if field.encoder: field.encoder.validate(value) value = field.encoder.encode(value, field.size_bytes) length = len(value) if length > field.size_bytes: msg = .format( length=length, attr=name, ) raise ValueError(msg) elif length < field.size_bytes: pad_size = field.size_bytes - length pad_value = b * pad_size value = pad_value + value data = object.__getattribute__(self, ) if isinstance(value, str): value = value.encode() data[slice_] = value else: super(self.__class__, self).__setattr__(name, value) def __repr__(self): return .format(buffer_name) def __len__(self): return size def __dir__(self): return sorted_names attributes = { : __init__, : (,), : __getattribute__, : __setattr__, : __repr__, : __len__, : __dir__, : fields_spec, : fields_format, : size, : get_bytes_from, } return type(buffer_name, (), attributes)
Class factory, returns a class to wrap a buffer instance and expose the data as fields. The field spec specifies how many bytes should be used for a field and what is the encoding / decoding function.
#vtb def input_yn(conf_mess): ui_erase_ln() ui_print(conf_mess) with term.cbreak(): input_flush() val = input_by_key() return bool(val.lower() == )
Print Confirmation Message and Get Y/N response from user.
#vtb def get_domain(self): if self.domain is None: return np.array([self.points.min(axis=0), self.points.max(axis=0)]) return self.domain
:returns: opposite vertices of the bounding prism for this object. :rtype: ndarray([min], [max])
#vtb def check_query(state, query, error_msg=None, expand_msg=None): if error_msg is None: error_msg = "Running `{{query}}` after your submission generated an error." if expand_msg is None: expand_msg = "The autograder verified the result of running `{{query}}` against the database. " msg_kwargs = {"query": query} has_no_error(state) _msg = state.build_message(error_msg, fmt_kwargs=msg_kwargs) with dbconn(state.solution_conn) as conn: _ = runQuery(conn, state.solution_code) sol_res = runQuery(conn, query) if sol_res is None: raise ValueError("Solution failed: " + _msg) with dbconn(state.student_conn) as conn: _ = runQuery(conn, state.student_code) stu_res = runQuery(conn, query) if stu_res is None: state.do_test(_msg) return state.to_child( append_message={"msg": expand_msg, "kwargs": msg_kwargs}, student_result=stu_res, solution_result=sol_res, )
Run arbitrary queries against to the DB connection to verify the database state. For queries that do not return any output (INSERTs, UPDATEs, ...), you cannot use functions like ``check_col()`` and ``is_equal()`` to verify the query result. ``check_query()`` will rerun the solution query in the transaction prepared by sqlbackend, and immediately afterwards run the query specified in ``query``. Next, it will also run this query after rerunning the student query in a transaction. Finally, it produces a child state with these results, that you can then chain off of with functions like ``check_column()`` and ``has_equal_value()``. Args: query: A SQL query as a string that is executed after the student query is re-executed. error_msg: if specified, this overrides the automatically generated feedback message in case the query generated an error. expand_msg: if specified, this overrides the automatically generated feedback message that is prepended to feedback messages that are thrown further in the SCT chain. :Example: Suppose we are checking whether an INSERT happened correctly: :: INSERT INTO company VALUES (2, 'filip', 28, 'sql-lane', 42) We can write the following SCT: :: Ex().check_query('SELECT COUNT(*) AS c FROM company').has_equal_value()
#vtb def fixed_string(self, data=None): old = self.fixed if data != None: new = self._decode_input_string(data) if len(new) <= 16: self.fixed = new else: raise yubico_exception.InputError() return old
The fixed string is used to identify a particular Yubikey device. The fixed string is referred to as the 'Token Identifier' in OATH-HOTP mode. The length of the fixed string can be set between 0 and 16 bytes. Tip: This can also be used to extend the length of a static password.
#vtb def path_regex(self): try: path = % urljoin(self.monthly_build_list_regex, self.builds[self.build_index]) if self.application in APPLICATIONS_MULTI_LOCALE \ and self.locale != : path = % urljoin(path, self.locale) return path except Exception: folder = urljoin(self.base_url, self.monthly_build_list_regex) raise errors.NotFoundError("Specified sub folder cannot be found", folder)
Return the regex for the path to the build folder.
#vtb def get_F_y(fname=, y=[]): f = open(fname,) data = json.load(f) f.close() occurr = [] for cell_type in y: occurr += [data[][cell_type][]] return list(np.array(occurr)/np.sum(occurr))
Extract frequency of occurrences of those cell types that are modeled. The data set contains cell types that are not modeled (TCs etc.) The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1
#vtb def open_package(locals=None, dr=None): if locals is None: locals = caller_locals() try: return op(locals[]) except KeyError: package_name = None build_package_dir = None source_package = None if dr is None: dr = getcwd() for i, e in enumerate(walk_up(dr)): intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE, IPYNB_METATAB_FILE]) & set(e[2]) if intr: source_package = join(e[0], list(intr)[0]) p = op(source_package) package_name = p.find_first_value("Root.Name") if not package_name: raise PackageError("Source package in {} does not have root.Name term".format(e[0])) if PACKAGE_PREFIX in e[1]: build_package_dir = join(e[0], PACKAGE_PREFIX) break if i > 2: break if build_package_dir and package_name and exists(join(build_package_dir, package_name)): built_package = join(build_package_dir, package_name) try: return op(built_package) except RowGeneratorError as e: pass if source_package: return op(source_package) raise PackageError("Failed to find package, either in locals() or above dir ".format(dr))
Try to open a package with the metatab_doc variable, which is set when a Notebook is run as a resource. If that does not exist, try the local _packages directory
#vtb def custom_callback(self, view_func): @wraps(view_func) def decorated(*args, **kwargs): plainreturn, data = self._process_callback() if plainreturn: return data else: return view_func(data, *args, **kwargs) self._custom_callback = decorated return decorated
Wrapper function to use a custom callback. The custom OIDC callback will get the custom state field passed in with redirect_to_auth_server.
#vtb def save_data_files(vr, bs, prefix=None, directory=None): filename = .format(prefix) if prefix else directory = directory if directory else filename = os.path.join(directory, filename) if bs.is_metal(): zero = vr.efermi else: zero = bs.get_vbm()[] with open(filename, ) as f: header = f.write(header) for band in bs.bands[Spin.up]: for d, e in zip(bs.distance, band): f.write(.format(d, e - zero)) f.write() if bs.is_spin_polarized: for band in bs.bands[Spin.down]: for d, e in zip(bs.distance, band): f.write(.format(d, e - zero)) f.write() return filename
Write the band structure data files to disk. Args: vr (`Vasprun`): Pymatgen `Vasprun` object. bs (`BandStructureSymmLine`): Calculated band structure. prefix (`str`, optional): Prefix for data file. directory (`str`, optional): Directory in which to save the data. Returns: The filename of the written data file.
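A hedged usage sketch, assuming a completed line-mode VASP calculation; the file name and prefix are illustrative, and the exact output name depends on the reconstructed literals above: ::

    from pymatgen.io.vasp.outputs import Vasprun

    vr = Vasprun('vasprun.xml')
    bs = vr.get_band_structure(line_mode=True)
    fname = save_data_files(vr, bs, prefix='mos2', directory='data')
    print(fname)   # e.g. data/mos2_band.dat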
#vtb def plot_rebit_prior(prior, rebit_axes=REBIT_AXES,
        n_samples=2000, true_state=None, true_size=250,
        force_mean=None, legend=True, mean_color_index=2):
    # assumed literals (rcParams key, label strings, marker, facecolor and
    # legend location); the originals were stripped during extraction
    palette = plt.rcParams['axes.prop_cycle'].by_key()['color']
    plot_rebit_modelparams(prior.sample(n_samples),
        c=palette[0],
        label='Prior',
        rebit_axes=rebit_axes
    )
    if true_state is not None:
        plot_rebit_modelparams(true_state,
            c=palette[1],
            label='True state', marker='*', s=true_size,
            rebit_axes=rebit_axes
        )
    if hasattr(prior, '_mean') or force_mean is not None:
        mean = force_mean if force_mean is not None else prior._mean
        plot_rebit_modelparams(
            prior._basis.state_to_modelparams(mean)[None, :],
            edgecolors=palette[mean_color_index],
            s=250, facecolors='none', linewidth=3,
            label='Mean',
            rebit_axes=rebit_axes
        )
    plot_decorate_rebits(prior.basis, rebit_axes=rebit_axes)
    if legend:
        plt.legend(loc='lower left', ncol=3, scatterpoints=1)
Plots rebit states drawn from a given prior. :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over rebit states to plot. :param list rebit_axes: List containing indices for the :math:`x` and :math:`z` axes. :param int n_samples: Number of samples to draw from the prior. :param np.ndarray true_state: State to be plotted as a "true" state for comparison.
#vtb def nearest_overlap(self, overlap, bins):
    bins_overlap = overlap * bins
    if bins_overlap % 2 != 0:
        bins_overlap = math.ceil(bins_overlap / 2) * 2
        overlap = bins_overlap / bins
        # assumed message text; the original literal was stripped
        logger.warning(
            'Overlap adjusted to {} so that the number of '
            'overlapping bins is even'.format(overlap))
    return overlap
Return the nearest overlap/crop factor based on the number of bins.
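For example, with ``overlap=0.3`` and ``bins=10`` the raw overlap spans 3 bins (odd), so it is rounded up to 4 bins and the method returns 0.4; the same arithmetic as a standalone sketch: ::

    import math

    overlap, bins = 0.3, 10
    bins_overlap = overlap * bins                        # 3.0 -> odd bin count
    if bins_overlap % 2 != 0:
        bins_overlap = math.ceil(bins_overlap / 2) * 2   # -> 4
    print(bins_overlap / bins)                           # 0.4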
#vtb def ft2file(self, **kwargs):
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    # assumed keys ('dataset', 'fullpath'); the original string literals
    # were stripped during extraction
    kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    self._replace_none(kwargs_copy)
    localpath = NameFactory.ft2file_format.format(**kwargs_copy)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
Return the name of the input FT2 file list.
#vtb def _get_parseable_methods(cls):
    # assumed placeholder format; the original logging argument was stripped
    _LOG.debug("Retrieving parseable methods for '%s'", cls.__name__)
    init_parser = None
    methods_to_parse = {}
    for name, obj in vars(cls).items():
        # Only methods decorated with @create_parser carry a 'parser'
        # attribute; skip everything else to avoid an AttributeError.
        if not callable(obj) or not hasattr(obj, "parser"):
            continue
        if name == "__init__":
            init_parser = obj.parser
        else:
            methods_to_parse[obj.__name__] = obj.parser
    return (init_parser, methods_to_parse)
Return all methods of cls that are parseable i.e. have been decorated by '@create_parser'. Args: cls: the class currently being decorated Note: classmethods will not be included as they can only be referenced once the class has been defined Returns: a 2-tuple with the parser of the __init__ method if any and a dict of the form {'method_name': associated_parser}
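A minimal sketch of the returned shape, assuming a ``create_parser`` decorator that attaches a ``parser`` attribute as described above; the class and its methods are illustrative: ::

    class Calculator:
        @create_parser
        def __init__(self, precision: int):
            self.precision = precision

        @create_parser
        def add(self, a: int, b: int):
            return a + b

    init_parser, methods = _get_parseable_methods(Calculator)
    # init_parser -> parser built from __init__
    # methods     -> {'add': <parser for add>}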
#vtb def p_namespace(self, p):
    # the 'namespace' keyword check and the error message are assumed;
    # the original string literals were stripped during extraction
    if p[1] == 'namespace':
        doc = None
        if len(p) > 4:
            doc = p[5]
        p[0] = AstNamespace(
            self.path, p.lineno(1), p.lexpos(1), p[2], doc)
    else:
        raise ValueError('Expected namespace keyword')
namespace : KEYWORD ID NL | KEYWORD ID NL INDENT docsection DEDENT
#vtb def write_block_data(self, address, register, value): return self.smbus.write_block_data(address, register, value)
SMBus Block Write: i2c_smbus_write_block_data() ================================================ The opposite of the Block Read command, this writes up to 32 bytes to a device, to a designated register that is specified through the Comm byte. The amount of data is specified in the Count byte. S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
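A hedged usage sketch with the ``smbus`` package; the bus number, device address, and register are illustrative: ::

    import smbus

    bus = smbus.SMBus(1)          # I2C bus 1, common on Raspberry Pi
    addr, reg = 0x48, 0x10        # device address and Comm (register) byte
    bus.write_block_data(addr, reg, [0x01, 0x02, 0x03])  # Count byte (3) is sent automatically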
#vtb def generate(self):
    # NOTE: the parameter keys below ('weights', 'smax') are assumptions;
    # the original dictionary keys were lost in extraction
    dims = len(self._params['weights'])
    part = creator.Particle(
        [random.uniform(-1, 1) for _ in range(dims)])
    part.speed = [
        random.uniform(-self._params['smax'], self._params['smax'])
        for _ in range(dims)]
    part.smin = -self._params['smax']
    part.smax = self._params['smax']
    part.ident = None
    part.neighbours = None
    return part
Generates a particle using the creator function. Notes ----- Position and speed are uniformly randomly seeded within allowed bounds. The particle also has speed limit settings taken from global values. Returns ------- particle object
#vtb def _recurse(data, obj): content = _ContentManager() for child in obj.get_children(): if isinstance(child, mpl.spines.Spine): continue if isinstance(child, mpl.axes.Axes): ax = axes.Axes(data, child) if ax.is_colorbar: continue if data["extra axis options [base]"]: ax.axis_options.extend(data["extra axis options [base]"]) data["current mpl axes obj"] = child data["current axes"] = ax data, children_content = _recurse(data, child) if data["add axis environment"]: content.extend( ax.get_begin_code() + children_content + [ax.get_end_code(data)], 0 ) else: content.extend(children_content, 0) if data["show_info"]: print("=========================================================") print("These would have been the properties of the environment:") print("".join(ax.get_begin_code()[1:])) print("=========================================================") elif isinstance(child, mpl.lines.Line2D): data, cont = line2d.draw_line2d(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.image.AxesImage): data, cont = img.draw_image(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.patches.Patch): data, cont = patch.draw_patch(data, child) content.extend(cont, child.get_zorder()) elif isinstance( child, (mpl.collections.PatchCollection, mpl.collections.PolyCollection) ): data, cont = patch.draw_patchcollection(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.collections.PathCollection): data, cont = path.draw_pathcollection(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.collections.LineCollection): data, cont = line2d.draw_linecollection(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.collections.QuadMesh): data, cont = qmsh.draw_quadmesh(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, mpl.legend.Legend): data = legend.draw_legend(data, child) if data["legend colors"]: content.extend(data["legend colors"], 0) elif isinstance(child, (mpl.text.Text, mpl.text.Annotation)): data, cont = text.draw_text(data, child) content.extend(cont, child.get_zorder()) elif isinstance(child, (mpl.axis.XAxis, mpl.axis.YAxis)): pass else: warnings.warn( "matplotlib2tikz: Don't know how to handle object {}.".format( type(child) ) ) return data, content.flatten()
Iterates over all children of the current object, gathers the contents contributing to the resulting PGFPlots file, and returns those.
#vtb def is_in_range(self, values, unit=None, raise_exception=True):
    self._is_numeric(values)
    if unit is None or unit == self.units[0]:
        minimum = self.min
        maximum = self.max
    else:
        # the 'self' key and the error message are assumed; the original
        # string literals were stripped during extraction
        namespace = {'self': self}
        self.is_unit_acceptable(unit, True)
        min_statement = "self._{}_to_{}(self.min)".format(
            self._clean(self.units[0]), self._clean(unit))
        max_statement = "self._{}_to_{}(self.max)".format(
            self._clean(self.units[0]), self._clean(unit))
        minimum = eval(min_statement, namespace)
        maximum = eval(max_statement, namespace)
    for value in values:
        if value < minimum or value > maximum:
            if not raise_exception:
                return False
            else:
                raise ValueError(
                    '{} should be between {} and {}. Got {}'.format(
                        self.__class__.__name__, self.min, self.max, value
                    )
                )
    return True
Check if a list of values is within physically/mathematically possible range. Args: values: A list of values. unit: The unit of the values. If not specified, the default metric unit will be assumed. raise_exception: Set to True to raise an exception if not in range.
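A hedged usage sketch with a hypothetical Celsius-based ``Temperature`` data type (the type name and unit symbol are illustrative): ::

    temp = Temperature()                    # hypothetical data type
    temp.is_in_range([20.5, 25.0])          # True if all values fall within min/max
    temp.is_in_range([68, 77], unit='F',
                     raise_exception=False) # min/max are converted to F before comparing;
                                            # returns False instead of raising when out of range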
#vtb def user(
    state, host, name,
    present=True, home=None, shell=None, group=None, groups=None,
    public_keys=None, delete_keys=False, ensure_home=True,
    system=False, uid=None,
):
    users = host.fact.users or {}
    user = users.get(name)

    if groups is None:
        groups = []

    if home is None:
        # assumed format string; the original literal was stripped
        home = '/home/{0}'.format(name)

    # User exists but is not wanted?
    if not present:
        if user:
            # assumed command string; the original literal was stripped
            yield 'userdel {0}'.format(name)
        return

    # NOTE: the user creation/modification commands, the home-directory
    # handling and the code that builds ``keys_file``/``filename`` for the
    # authorized_keys section were lost in extraction; the ``if delete_keys``
    # guard below is a reconstruction of the orphaned if/else that remained.
    if delete_keys:
        # Overwrite authorized_keys with exactly the given keys
        yield files.put(
            state, host, keys_file, filename,
            user=name, group=name, mode=600,
        )
    else:
        # Ensure the file exists and each key is present as a line
        yield files.file(
            state, host, filename,
            user=name, group=name, mode=600,
        )
        for key in public_keys:
            yield files.line(
                state, host, filename, key,
            )
Add/remove/update system users & their ssh `authorized_keys`. + name: name of the user to ensure + present: whether this user should exist + home: the users home directory + shell: the users shell + group: the users primary group + groups: the users secondary groups + public_keys: list of public keys to attach to this user, ``home`` must be specified + delete_keys: whether to remove any keys not specified in ``public_keys`` + ensure_home: whether to ensure the ``home`` directory exists + system: whether to create a system account Home directory: When ``ensure_home`` or ``public_keys`` are provided, ``home`` defaults to ``/home/{name}``.
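A hedged usage sketch in deploy style, where ``state`` and ``host`` are supplied by pyinfra; the user name, groups, and key material are illustrative: ::

    user(
        state, host,
        name='deploy',
        shell='/bin/bash',
        groups=['sudo'],
        public_keys=['ssh-ed25519 AAAA... deploy@example'],  # illustrative key
        ensure_home=True,
    )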
#vtb def factory(cls, registry): cls_name = str(cls.__name__) MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry}) return MyMetricsHandler
Returns a dynamic MetricsHandler class tied to the passed registry.
#vtb def _reorderForPreference(themeList, preferredThemeName): for theme in themeList: if preferredThemeName == theme.themeName: themeList.remove(theme) themeList.insert(0, theme) return
Re-order the input themeList according to the preferred theme. Returns None.
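A tiny sketch with stand-in theme objects; ``SimpleNamespace`` is used purely for illustration, since any object with a ``themeName`` attribute works: ::

    from types import SimpleNamespace

    themes = [SimpleNamespace(themeName='light'), SimpleNamespace(themeName='dark')]
    _reorderForPreference(themes, 'dark')
    print([t.themeName for t in themes])   # ['dark', 'light'] -- preferred theme moved to front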
#vtb def page_sequence(n_sheets: int, one_based: bool = True) -> List[int]: n_pages = calc_n_virtual_pages(n_sheets) assert n_pages % 4 == 0 half_n_pages = n_pages // 2 firsthalf = list(range(half_n_pages)) secondhalf = list(reversed(range(half_n_pages, n_pages))) sequence = [] top = True for left, right in zip(secondhalf, firsthalf): if not top: left, right = right, left sequence += [left, right] top = not top if one_based: sequence = [x + 1 for x in sequence] log.debug("{} sheets => page sequence {!r}", n_sheets, sequence) return sequence
Generates the final page sequence from the starting number of sheets.
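Assuming ``calc_n_virtual_pages`` yields four virtual pages per sheet, a single sheet produces the classic booklet imposition 4, 1, 2, 3: pages 4 and 1 share the front of the sheet, pages 2 and 3 the back: ::

    print(page_sequence(1))   # [4, 1, 2, 3]
    print(page_sequence(2))   # [8, 1, 2, 7, 6, 3, 4, 5]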