code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def create_key(key_name, save_path, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: key = conn.create_key_pair(key_name) log.debug("the key to return is : %s", key) key.save(save_path) return key.material except boto.exception.BotoServerError as e: log.debug(e) return False
Creates a key and saves it to a given path. Returns the private key. CLI Example: .. code-block:: bash salt myminion boto_ec2.create_key mykey /root/
def save(self, designName=""): self.try_stateful_function( ss.SAVING, ss.READY, self.do_save, designName)
Save the current design to file
def registerInternalRouters(app): urlPath = CONF.get("endpointRoot", "flask-profiler") fp = Blueprint( , __name__, url_prefix="/" + urlPath, static_folder="static/dist/", static_url_path=) @fp.route("/".format(urlPath)) @auth.login_required def index(): return fp.send_static_file("index.html") @fp.route("/api/measurements/".format(urlPath)) @auth.login_required def filterMeasurements(): args = dict(request.args.items()) measurements = collection.filter(args) return jsonify({"measurements": list(measurements)}) @fp.route("/api/measurements/grouped".format(urlPath)) @auth.login_required def getMeasurementsSummary(): args = dict(request.args.items()) measurements = collection.getSummary(args) return jsonify({"measurements": list(measurements)}) @fp.route("/api/measurements/<measurementId>".format(urlPath)) @auth.login_required def getContext(measurementId): return jsonify(collection.get(measurementId)) @fp.route("/api/measurements/timeseries/".format(urlPath)) @auth.login_required def getRequestsTimeseries(): args = dict(request.args.items()) return jsonify({"series": collection.getTimeseries(args)}) @fp.route("/api/measurements/methodDistribution/".format(urlPath)) @auth.login_required def getMethodDistribution(): args = dict(request.args.items()) return jsonify({ "distribution": collection.getMethodDistribution(args)}) @fp.route("/db/dumpDatabase") @auth.login_required def dumpDatabase(): response = jsonify({ "summary": collection.getSummary()}) response.headers["Content-Disposition"] = "attachment; filename=dump.json" return response @fp.route("/db/deleteDatabase") @auth.login_required def deleteDatabase(): response = jsonify({ "status": collection.truncate()}) return response @fp.after_request def x_robots_tag_header(response): response.headers[] = return response app.register_blueprint(fp)
These are the endpoints which are used to display measurements in the flask-profiler dashboard. Note: these should be defined after wrapping user defined endpoints via wrapAppEndpoints() :param app: Flask application instance :return:
def _parse_pool_transaction_file( ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators, ledger_size=None): for _, txn in ledger.getAllTxn(to=ledger_size): if get_type(txn) == NODE: txn_data = get_payload_data(txn) nodeName = txn_data[DATA][ALIAS] clientStackName = nodeName + CLIENT_STACK_SUFFIX nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \ if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \ else None cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \ if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \ else None if nHa: nodeReg[nodeName] = HA(*nHa) if cHa: cliNodeReg[clientStackName] = HA(*cHa) try: key_type = verkey = cryptonymToHex(str(txn_data[TARGET_NYM])) key_type = cryptonymToHex(get_from(txn)) except ValueError: logger.exception( .format(key_type)) exit(.format(key_type)) nodeKeys[nodeName] = verkey services = txn_data[DATA].get(SERVICES) if isinstance(services, list): if VALIDATOR in services: activeValidators.add(nodeName) else: activeValidators.discard(nodeName)
helper function for parseLedgerForHaAndKeys
def RunValidationFromOptions(feed, options): if options.performance: return ProfileRunValidationOutputFromOptions(feed, options) else: return RunValidationOutputFromOptions(feed, options)
Validate feed, run in profiler if in options, and return an exit code.
def discard(self, changeset_id: uuid.UUID) -> None: self._validate_changeset(changeset_id) self.journal.pop_changeset(changeset_id)
Throws away all journaled data starting at the given changeset
def textalign(text, maxlength, align=): if align == : return text elif align == or align == : spaces = * (int((maxlength - len(text)) / 2)) elif align == : spaces = (maxlength - len(text)) else: raise ValueError("Invalid alignment specified.") return spaces + text
Align Text When Given Full Length
def integrate(self,t,pot,method=,dt=None): if hasattr(self,): delattr(self,) if hasattr(self,): delattr(self,) thispot= RZToplanarPotential(pot) self.t= nu.array(t) self._pot= thispot self.orbit, msg= _integrateROrbit(self.vxvv,thispot,t,method,dt) return msg
NAME: integrate PURPOSE: integrate the orbit INPUT: t - list of times at which to output (0 has to be in this!) pot - potential instance or list of instances method= 'odeint' for scipy's odeint 'leapfrog' for a simple leapfrog implementation 'leapfrog_c' for a simple leapfrog implementation in C 'rk4_c' for a 4th-order Runge-Kutta integrator in C 'rk6_c' for a 6-th order Runge-Kutta integrator in C 'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest) dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize OUTPUT: error message number (get the actual orbit using getOrbit() HISTORY: 2010-07-20
def INDEX_OF_CP(string_expression, substring_expression, start=None, end=None): res = [string_expression, substring_expression] if start is not None: res.append(start) if end is not None: res.append(end) return {: res}
Searches a string for an occurence of a substring and returns the UTF-8 code point index (zero-based) of the first occurence. If the substring is not found, returns -1. https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/ for more details :param string_expression: The string or expression of string :param substring_expression: The string or expression of substring :param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search. :param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search. :return: Aggregation operator
def getFieldsForActiveJobsOfType(self, jobType, fields=[]): dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = .join([] + dbFields) with ConnectionFactory.get() as conn: query = \ \ \ \ \ % (dbFieldsStr, self.jobsTableName, self.modelsTableName) conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType]) return conn.cursor.fetchall()
Helper function for querying the models table including relevant job info where the job type matches the specified jobType. Only records for which there is a matching jobId in both tables is returned, and only the requested fields are returned in each result, assuming that there is not a conflict. This function is useful, for example, in querying a cluster for a list of actively running production models (according to the state of the client jobs database). jobType must be one of the JOB_TYPE_XXXX enumerations. Parameters: ---------------------------------------------------------------- jobType: jobType enum fields: list of fields to return Returns: List of tuples containing the jobId and requested field values
def _is_updated(old_conf, new_conf): changed = {} new_conf = _json_to_unicode(salt.utils.json.loads( salt.utils.json.dumps(new_conf, ensure_ascii=False))) old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False)) for key, value in old_conf.items(): oldval = six.text_type(value).lower() if key in new_conf: newval = six.text_type(new_conf[key]).lower() if oldval == or oldval == : oldval = if key in new_conf and newval != oldval: changed[key] = {: oldval, : newval} return changed
Compare the API results to the current statefile data
def field_value(self, value): if not self.is_array: return self.field_type(value) if isinstance(value, (list, tuple, set)): return [self.field_type(item) for item in value] return self.field_type(value)
Validate against NodeType.
def get_monitor_value(self): "Pick the monitored value." if self.monitor== and len(self.learn.recorder.losses) == 0: return None elif len(self.learn.recorder.val_losses) == 0: return None values = {:self.learn.recorder.losses[-1].cpu().numpy(), :self.learn.recorder.val_losses[-1]} if values[] is None: return if self.learn.recorder.metrics: for m, n in zip(self.learn.recorder.metrics[-1],self.learn.recorder.names[3:-1]): values[n] = m if values.get(self.monitor) is None: warn(f) return values.get(self.monitor)
Pick the monitored value.
def ndcg(truth, recommend, k=None): if k is None: k = len(recommend) def idcg(n_possible_truth): res = 0. for n in range(n_possible_truth): res += 1. / np.log2(n + 2) return res dcg = 0. for n, r in enumerate(recommend[:k]): if r not in truth: continue dcg += 1. / np.log2(n + 2) res_idcg = idcg(np.min([truth.size, k])) if res_idcg == 0.: return 0. return dcg / res_idcg
Normalized Discounted Cumulative Grain (NDCG). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: NDCG.
def bind(self, source, dest, destmeth): self.connect(source, getattr(dest, destmeth))
Guild compatible version of :py:meth:`connect`. This allows Pyctools compound components to be used in `Guild <https://github.com/sparkslabs/guild>`_ pipelines.
def will_set(self, topic, payload=None, qos=0, retain=False): if topic is None or len(topic) == 0: raise ValueError() if qos<0 or qos>2: raise ValueError() if isinstance(payload, str): self._will_payload = payload.encode() elif isinstance(payload, bytearray): self._will_payload = payload elif isinstance(payload, int) or isinstance(payload, float): self._will_payload = str(payload) elif payload is None: self._will_payload = None else: raise TypeError() self._will = True self._will_topic = topic.encode() self._will_qos = qos self._will_retain = retain
Set a Will to be sent by the broker in case the client disconnects unexpectedly. This must be called before connect() to have any effect. topic: The topic that the will message should be published on. payload: The message to send as a will. If not given, or set to None a zero length message will be used as the will. Passing an int or float will result in the payload being converted to a string representing that number. If you wish to send a true int/float, use struct.pack() to create the payload you require. qos: The quality of service level to use for the will. retain: If set to true, the will message will be set as the "last known good"/retained message for the topic. Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has zero string length.
def Close(self): if self.locked and self.CheckLease() == 0: raise LockError("Can not update lease that has already expired.") self._WriteAttributes() if self.locked: self.transaction.Release() if self.parent: self.parent.Close() self.mode = ""
Close and destroy the object. This is similar to Flush, but does not maintain object validity. Hence the object should not be interacted with after Close(). Raises: LockError: The lease for this object has expired.
def rle_decode(mask_rle:str, shape:Tuple[int,int])->NPArrayMask: "Return an image array from run-length encoded string `mask_rle` with `shape`." s = mask_rle.split() starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])] starts -= 1 ends = starts + lengths img = np.zeros(shape[0]*shape[1], dtype=np.uint) for low, up in zip(starts, ends): img[low:up] = 1 return img.reshape(shape)
Return an image array from run-length encoded string `mask_rle` with `shape`.
def retired(self): def gen(): import csv import re from datetime import datetime from pkg_resources import resource_filename with open(resource_filename(__package__, )) as rf: rtd = list(csv.reader(rf, delimiter=))[1:] rc = [r[0] for r in rtd] for i, _, _, m, s, d in rtd: d = datetime.strptime(d, ) if not m: m = re.findall(, s) if m: m = [m] if isinstance(m, str) else m yield i, (d, [self.get(part3=x) for x in m if x not in rc], s) else: yield i, (d, [], s) yield , self.get(part3=) return dict(gen())
Function for generating retired languages. Returns a dict('code', (datetime, [language, ...], 'description')).
def add_mvn(self, name, input_name, output_name, across_channels = True, normalize_variance = True, epsilon = 1e-5): spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.mvn spec_layer_params.acrossChannels = across_channels spec_layer_params.normalizeVariance = normalize_variance spec_layer_params.epsilon = epsilon
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. across_channels: boolean If False, each channel plane is normalized separately If True, mean/variance is computed across all C, H and W dimensions normalize_variance: boolean If False, only mean subtraction is performed. epsilon: float small bias to avoid division by zero. See Also -------- add_l2_normalize, add_lrn
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel): time = self.timefunc() + delay return self.enterabs(time, priority, action, argument, kwargs)
A variant that specifies the time as a relative time. This is actually the more commonly used interface.
def _alignment(elist, flist, e2f, f2e): neighboring = {(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)} e2f = set(e2f) f2e = set(f2e) m = len(elist) n = len(flist) alignment = e2f.intersection(f2e) while True: set_len = len(alignment) for e_word in range(1, m+1): for f_word in range(1, n+1): if (e_word, f_word) in alignment: for (e_diff, f_diff) in neighboring: e_new = e_word + e_diff f_new = f_word + f_diff if not alignment: if (e_new, f_new) in e2f.union(f2e): alignment.add((e_new, f_new)) else: if ((e_new not in list(zip(*alignment))[0] or f_new not in list(zip(*alignment))[1]) and (e_new, f_new) in e2f.union(f2e)): alignment.add((e_new, f_new)) if set_len == len(alignment): break for e_word in range(1, m+1): for f_word in range(1, n+1): if not alignment: if (e_word, f_word) in e2f.union(f2e): alignment.add((e_word, f_word)) else: if ((e_word not in list(zip(*alignment))[0] or f_word not in list(zip(*alignment))[1]) and (e_word, f_word) in e2f.union(f2e)): alignment.add((e_word, f_word)) return alignment
elist, flist wordlist for each language e2f translatoin alignment from e to f alignment is [(e, f)] f2e translatoin alignment from f to e alignment is [(e, f)] return alignment: {(f, e)} flist ----------------- e | | l | | i | | s | | t | | -----------------
def fix_header(pofile): ( , ), ( , ), (, ), ) for src, dest in fixes: header = header.replace(src, dest) pofile.header = header
Replace default headers with edX headers
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0): return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
Compares two parameter values. :param a: First parameter :param b: Second parameter :param rel_tol: Relative tolerance :param abs_tol: Absolute tolerance :return: Boolean telling whether or not the parameters are close enough to be the same
def create_widget(self): d = self.declaration style = d.style or self.window = PopupWindow(self.get_context(), None, 0, style) self.showing = False
Create the underlying widget. A dialog is not a subclass of view, hence we don't set name as widget or children will try to use it as their parent.
def revoke_role(self, role_name, principal_name, principal_type): self.send_revoke_role(role_name, principal_name, principal_type) return self.recv_revoke_role()
Parameters: - role_name - principal_name - principal_type
def get_default_config(self): config = super(EndecaDgraphCollector, self).get_default_config() config.update({ : , : , : 8080, : 1, }) return config
Returns the default collector settings
def get_registry_value(key, default=None): registry = queryUtility(IRegistry) return registry.get(key, default)
Gets the utility for IRegistry and returns the value for the key passed in. If there is no value for the key passed in, returns default value :param key: the key in the registry to look for :param default: default value if the key is not registered :return: value in the registry for the key passed in
def parse_size_name(type_name): if in type_name: raise ArgumentError("There should not be a space in config variable type specifier", specifier=type_name) variable = False count = 1 base_type = type_name if type_name[-1] == : variable = True start_index = type_name.find() if start_index == -1: raise ArgumentError("Could not find matching [ for ] character", specifier=type_name) count = int(type_name[start_index+1:-1], 0) base_type = type_name[:start_index] matched_type = TYPE_CODES.get(base_type) if matched_type is None: raise ArgumentError("Could not find base type name", base_type=base_type, type_string=type_name) base_size = struct.calcsize("<%s" % matched_type) total_size = base_size*count return total_size, base_size, matched_type, variable
Calculate size and encoding from a type name. This method takes a C-style type string like uint8_t[10] and returns - the total size in bytes - the unit size of each member (if it's an array) - the scruct.{pack,unpack} format code for decoding the base type - whether it is an array.
def encrypt_to_file(contents, filename): if not filename.endswith(): raise ValueError("%s does not end with .enc" % filename) key = Fernet.generate_key() fer = Fernet(key) encrypted_file = fer.encrypt(contents) with open(filename, ) as f: f.write(encrypted_file) return key
Encrypts ``contents`` and writes it to ``filename``. ``contents`` should be a bytes string. ``filename`` should end with ``.enc``. Returns the secret key used for the encryption. Decrypt the file with :func:`doctr.travis.decrypt_file`.
def _call_yum(args, **kwargs): params = {: , : False, : salt.utils.environment.get_module_environment(globals())} params.update(kwargs) cmd = [] if salt.utils.systemd.has_scope(__context__) and __salt__[](, True): cmd.extend([, ]) cmd.append(_yum()) cmd.extend(args) return __salt__[](cmd, **params)
Call yum/dnf.
def filter_data(self, min_len, max_len): logging.info(f) initial_len = len(self.src) filtered_src = [] for src in self.src: if min_len <= len(src) <= max_len: filtered_src.append(src) self.src = filtered_src filtered_len = len(self.src) logging.info(f)
Preserves only samples which satisfy the following inequality: min_len <= sample sequence length <= max_len :param min_len: minimum sequence length :param max_len: maximum sequence length
def clone(id, path): data_source = get_data_object(id, use_data_config=False) if not data_source: if in id: floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.") sys.exit() if path: if in id: resource_type = resource_id = data_source.id else: resource_type = try: experiment = ExperimentClient().get(normalize_job_name(id, use_config=False)) except FloydException: experiment = ExperimentClient().get(id) resource_id = experiment.id data_url = "{}/api/v1/download/artifacts/{}/{}?is_dir=true&path={}".format(floyd.floyd_host, resource_type, resource_id, path) else: data_url = "{}/api/v1/resources/{}?content=true&download=true".format(floyd.floyd_host, data_source.resource_id) DataClient().download_tar(url=data_url, untar=True, delete_after_untar=True)
- Download all files in a dataset or from a Job output Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/ Using /output will download the files that are saved at the end of the job. Note: This will download the files that are saved at the end of the job. - Download a directory from a dataset or from Job output Specify the path to a directory and download all its files and subdirectories. Eg: --path models/checkpoint1
def remove_useless(self): if not self.is_contextfree(): raise ValueError("grammar must be context-free") by_lhs = collections.defaultdict(list) by_rhs = collections.defaultdict(list) for [lhs], rhs in self.rules: by_lhs[lhs].append((lhs, rhs)) for y in rhs: if y in self.nonterminals: by_rhs[y].append((lhs, rhs)) agenda = collections.deque([self.start]) reachable = set() while len(agenda) > 0: x = agenda.popleft() if x in reachable: continue reachable.add(x) for _, rhs in by_lhs[x]: for y in rhs: if y in by_lhs: agenda.append(y) agenda = collections.deque() productive = set() for [lhs], rhs in self.rules: if all(y not in self.nonterminals for y in rhs): agenda.append(lhs) while len(agenda) > 0: y = agenda.popleft() if y in productive: continue productive.add(y) for lhs, rhs in by_rhs[y]: if all(y not in self.nonterminals or y in productive for y in rhs): agenda.append(lhs) g = Grammar() g.set_start(self.start) for [lhs], rhs in self.rules: if (lhs in reachable & productive and all(y not in self.nonterminals or y in reachable & productive for y in rhs)): g.add_rule([lhs], rhs) return g
Returns a new grammar containing just useful rules.
def load_project(cls, fname, auto_update=None, make_plot=True, draw=False, alternative_axes=None, main=False, encoding=None, enable_post=False, new_fig=True, clear=None, **kwargs): from pkg_resources import iter_entry_points def get_ax_base(name, alternatives): ax_base = next(iter(obj(arr_name=name).axes), None) if ax_base is None: ax_base = next(iter(obj(arr_name=alternatives).axes), None) if ax_base is not None: alternatives.difference_update(obj(ax=ax_base).arr_names) return ax_base pwd = kwargs.pop(, None) if isinstance(fname, six.string_types): with open(fname, ) as f: pickle_kws = {} if not encoding else {: encoding} d = pickle.load(f, **pickle_kws) pwd = pwd or os.path.dirname(fname) else: d = dict(fname) pwd = pwd or getcwd() for ep in iter_entry_points(, name=): patches = ep.load() for arr_d in d.get().values(): plotter_cls = arr_d.get(, {}).get() if plotter_cls is not None and plotter_cls in patches: patches[plotter_cls](arr_d[], d.get(, {})) fig_map = {} if alternative_axes is None: for fig_dict in six.itervalues(d.get(, {})): orig_num = fig_dict.get() or 1 fig_map[orig_num] = _ProjectLoader.load_figure( fig_dict, new_fig=new_fig).number elif not isinstance(alternative_axes, dict): alternative_axes = cycle(iter(alternative_axes)) obj = cls.from_dict(d[], pwd=pwd, **kwargs) if main: obj = project(None, obj) axes = {} arr_names = obj.arr_names sharex = defaultdict(set) sharey = defaultdict(set) for arr, (arr_name, arr_dict) in zip( obj, filter(lambda t: t[0] in arr_names, six.iteritems(d[]))): if not arr_dict.get(): continue plot_dict = arr_dict[] plotter_cls = getattr( import_module(plot_dict[][0]), plot_dict[][1]) ax = None if alternative_axes is not None: if isinstance(alternative_axes, dict): ax = alternative_axes.get(arr.arr_name) else: ax = next(alternative_axes, None) if ax is None and in plot_dict: already_opened = plot_dict[].get( , set()).intersection(axes) if already_opened: ax = axes[next(iter(already_opened))] else: plot_dict[].pop(, None) 
plot_dict[][] = fig_map[ plot_dict[].get() or 1] if plot_dict[].get(): sharex[plot_dict[].pop()].add( arr.psy.arr_name) if plot_dict[].get(): sharey[plot_dict[].pop()].add( arr.psy.arr_name) axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes( plot_dict[]) plotter_cls( arr, make_plot=False, draw=False, clear=False, ax=ax, project=obj.main, enable_post=enable_post, **plot_dict[]) for key, names in sharex.items(): ax_base = get_ax_base(key, names) if ax_base is not None: ax_base.get_shared_x_axes().join( ax_base, *obj(arr_name=names).axes) for ax in obj(arr_name=names).axes: ax._sharex = ax_base for key, names in sharey.items(): ax_base = get_ax_base(key, names) if ax_base is not None: ax_base.get_shared_y_axes().join( ax_base, *obj(arr_name=names).axes) for ax in obj(arr_name=names).axes: ax._sharey = ax_base for arr in obj.with_plotter: shared = d[][arr.psy.arr_name][].get(, {}) for key, arr_names in six.iteritems(shared): arr.psy.plotter.share(obj(arr_name=arr_names).plotters, keys=[key]) if make_plot: for plotter in obj.plotters: plotter.reinit( draw=False, clear=clear or ( clear is None and plotter_cls._get_sample_projection() is not None)) if draw is None: draw = rcParams[] if draw: obj.draw() if rcParams[]: obj.show() if auto_update is None: auto_update = rcParams[] if not main: obj._main = gcp(True) obj.main.extend(obj, new_name=True) obj.no_auto_update = not auto_update scp(obj) return obj
Load a project from a file or dict This classmethod allows to load a project that has been stored using the :meth:`save_project` method and reads all the data and creates the figures. Since the data is stored in external files when saving a project, make sure that the data is accessible under the relative paths as stored in the file `fname` or from the current working directory if `fname` is a dictionary. Alternatively use the `alternative_paths` parameter or the `pwd` parameter Parameters ---------- fname: str or dict The string might be the path to a file created with the :meth:`save_project` method, or it might be a dictionary from this method %(InteractiveBase.parameters.auto_update)s %(Project._add_data.parameters.make_plot)s %(InteractiveBase.start_update.parameters.draw)s alternative_axes: dict, None or list alternative axes instances to use - If it is None, the axes and figures from the saving point will be reproduced. - a dictionary should map from array names in the created project to matplotlib axes instances - a list should contain axes instances that will be used for iteration main: bool, optional If True, a new main project is created and returned. Otherwise (by default default) the data is added to the current main project. encoding: str The encoding to use for loading the project. If None, it is automatically determined by pickle. Note: Set this to ``'latin1'`` if using a project created with python2 on python3. enable_post: bool If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is enabled and post processing scripts are allowed. Do only set this parameter to ``True`` if you know you can trust the information in `fname` new_fig: bool If True (default) and `alternative_axes` is None, new figures are created if the figure already exists %(Project._add_data.parameters.clear)s pwd: str or None, optional Path to the working directory from where the data can be imported. 
If None and `fname` is the path to a file, `pwd` is set to the directory of this file. Otherwise the current working directory is used. %(ArrayList.from_dict.parameters.no_d|pwd)s Other Parameters ---------------- %(ArrayList.from_dict.parameters)s Returns ------- Project The project in state of the saving point
def turnstile_command(conf_file, command, arguments=[], channel=None, debug=False): conf = config.Config(conf_file=conf_file) db = conf.get_database() control_channel = conf[].get(, ) command = command.lower() ts_conv = False if command == : if arguments: channel = arguments[0] else: channel = str(uuid.uuid4()) arguments = [channel] if len(arguments) < 2: arguments.append(time.time()) ts_conv = True arguments = arguments[:2] if debug: cmd = [command] + arguments print >>sys.stderr, ("Issuing command: %s" % .join(cmd)) database.command(db, control_channel, command, *arguments) if not channel: return if debug: formatted = pprint.pformat(msg) print >>sys.stderr, "Received message: %s" % formatted if (msg[] not in (, ) or msg[] != channel): continue count += 1 response = msg[].split() if ts_conv and response[0] == : try: rtt = (time.time() - float(response[2])) * 100 response.append( % rtt) except Exception: pass print "Response % 5d: %s" % (count, .join(response)) except KeyboardInterrupt: pass
Issue a command to all running control daemons. :param conf_file: Name of the configuration file. :param command: The command to execute. Note that 'ping' is handled specially; in particular, the "channel" parameter is implied. (A random value will be used for the channel to listen on.) :param arguments: A list of arguments for the command. Note that the colon character (':') cannot be used. :param channel: If not None, specifies the name of a message channel to listen for responses on. Will wait indefinitely; to terminate the listening loop, use the keyboard interrupt sequence. :param debug: If True, debugging messages are emitted while sending the command.
def create_email(self, name, subject, html, text=): return self.create_template(name, subject, html, text)
[DECPRECATED] API call to create an email
def _raw_pack(key_handle, flags, data): return struct.pack(, key_handle, flags, len(data)) + data
Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command.
def _stop_ubridge_capture(self, adapter_number): vnet = "ethernet{}.vnet".format(adapter_number) if vnet not in self._vmx_pairs: raise VMwareError("vnet {} not in VMX file".format(vnet)) if not self._ubridge_hypervisor: raise VMwareError("Cannot stop the packet capture: uBridge is not running") yield from self._ubridge_send("bridge stop_capture {name}".format(name=vnet))
Stop a packet capture in uBridge. :param adapter_number: adapter number
def revnet_cifar_base(): hparams = revnet_base() hparams.num_channels_init_block = 32 hparams.first_batch_norm = [False, True, True] hparams.init_stride = 1 hparams.init_kernel_size = 3 hparams.init_maxpool = False hparams.strides = [1, 2, 2] hparams.batch_size = 128 hparams.weight_decay = 1e-4 hparams.learning_rate = 0.1 hparams.learning_rate_cosine_cycle_steps = 5000 return hparams
Tiny hparams suitable for CIFAR/etc.
def normalize_path(path, base_path=u, is_dir=None): u path = posixpath.normpath(path) base_path = posixpath.normpath(base_path) if len(base_path) == 0: raise ValueError(u) if base_path[-1] != u: base_path += u if path.startswith(base_path): path = u + posixpath.relpath(path, base_path) elif path.startswith(u): raise ValueError(u.format(path, base_path)) if is_dir is None: return path elif is_dir and path[-1:] != u: return path + u elif not is_dir and path[-1:] == u: return path[:-1] return path
u""" Normalize a path to use it with a gitmatch pattern. This ensures that the separators are forward slashes. If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The path root is then changed to be based of `base_path`. :type path: text_type :param path: A POSIX path to normalize :type base_path: text_type :param base_path: A POSIX path to the base directory, `path` must be inside `base_path`. :type is_dir: text_type :param is_dir: If `true`, adds a trailing slash. If `false` removes any trailing slash. If `None`, keeps the current ending. :return:
def getSingleVisualProperty(self, visualProperty, verbose=None): response=api(url=self.___url++str(visualProperty)+, method="GET", verbose=verbose, parse_params=False) return response
Return the Visual Property specified by the `visualProperty` parameter. Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html) :param visualProperty: ID of the Visual Property :param verbose: print more :returns: 200: successful operation
def find_match_command(self, rule):
    """Return a matching (possibly munged) command, if found in rule.

    Three match modes are tried in order: prefix match ("allow_trailing"
    style flag), regex match, then exact list equality. Returns a dict
    wrapping the matched command list, or None implicitly on no match.
    """
    # NOTE(review): dictionary keys and log-message literals were stripped
    # during extraction (`rule[]`, `rule.get()`, `{: ...}`); restore them
    # before use.
    command_string = rule[]
    command_list = command_string.split()
    self.logdebug( % (command_list, self.original_command_list))
    if rule.get():
        self.logdebug( )
        # Prefix match: rule command must equal the head of the original.
        if (self.original_command_list[:len(command_list)] == command_list):
            self.logdebug()
            return {: self.original_command_list}
        else:
            self.logdebug()
    elif rule.get():
        # Regex match against the raw original command string.
        if re.search(command_string, self.original_command_string):
            return {: self.original_command_list}
    elif command_list == self.original_command_list:
        # Exact match on the tokenized command.
        return {: command_list}
Return a matching (possibly munged) command, if found in rule.
def nac_v(msg):
    """Calculate NACv, Navigation Accuracy Category - Velocity.

    Args:
        msg (string): 28-char hexadecimal message string, TC = 19.

    Returns:
        tuple: (HFOMr, VFOMr) 95% horizontal / vertical accuracy bounds
        for velocity; ``uncertainty.NA`` when the NACv code is unknown.

    Raises:
        RuntimeError: if the message typecode is not 19.
    """
    tc = typecode(msg)
    if tc != 19:
        raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
    msgbin = common.hex2bin(msg)
    # NACv is encoded in message bits 43-45 (0-indexed slice 42:45).
    NACv = common.bin2int(msgbin[42:45])
    try:
        # NOTE(review): the lookup keys were stripped during extraction
        # (presumably 'HFOMr' / 'VFOMr'); restore before use.
        HFOMr = uncertainty.NACv[NACv][]
        VFOMr = uncertainty.NACv[NACv][]
    except KeyError:
        # Unknown NACv code: report "not available" for both bounds.
        HFOMr, VFOMr = uncertainty.NA, uncertainty.NA
    return HFOMr, VFOMr
Calculate NACv, Navigation Accuracy Category - Velocity Args: msg (string): 28 bytes hexadecimal message string, TC = 19 Returns: int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
def create_server(cloud, **kwargs):
    """Create a new instance on the given cloud provider.

    Dispatches to the provider-specific helper based on ``cloud``;
    any extra keyword arguments are forwarded unchanged.

    Raises:
        ValueError: if ``cloud`` is not a recognised provider name.
    """
    # NOTE(review): the provider-name literals were stripped during
    # extraction (the branches dispatch to ec2/rackspace/gce helpers, so
    # the comparisons were presumably 'ec2', 'rackspace', 'gce' — confirm).
    if cloud == :
        _create_server_ec2(**kwargs)
    elif cloud == :
        _create_server_rackspace(**kwargs)
    elif cloud == :
        _create_server_gce(**kwargs)
    else:
        raise ValueError("Unknown cloud type: {}".format(cloud))
Create a new instance
def permission_required_with_ajax(perm, login_url=None):
    """Decorator checking that the user has permission *perm*.

    Redirects to the log-in page when the check fails, but returns a
    special response for ajax requests (see
    ``user_passes_test_with_ajax``). Usage mirrors Django's
    ``permission_required``::

        @permission_required_with_ajax('polls.can_vote', login_url='/loginpage/')
        def my_view(request):
            ...
    """
    def _has_permission(user):
        # Delegate the actual check to the auth backend.
        return user.has_perm(perm)

    return user_passes_test_with_ajax(_has_permission, login_url=login_url)
Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary, but returns a special response for ajax requests. See :meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`. Usage is the same as :meth:`django.contrib.auth.decorators.permission_required` :: @permission_required_with_ajax('polls.can_vote', login_url='/loginpage/') def my_view(request): ...
def get_website_endpoint(self):
    """Return the fully qualified hostname for accessing this bucket as a
    website.

    Does NOT validate that the bucket is actually configured for website
    hosting. Built from: bucket name, region-specific website endpoint,
    and the connection host's domain suffix.
    """
    # NOTE(review): the join/split separator literals were stripped during
    # extraction (presumably '.' for hostname parts); restore before use.
    l = [self.name]
    l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
    # Last two labels of the connection host, e.g. "amazonaws.com".
    l.append(.join(self.connection.host.split()[-2:]))
    return .join(l)
Returns the fully qualified hostname to use is you want to access this bucket as a website. This doesn't validate whether the bucket has been correctly configured as a website or not.
def batch_predict(training_dir, prediction_input_file, output_dir, mode,
                  batch_size=16, shard_files=True, output_format=, cloud=False):
    """Blocking version of batch_predict. See documentation of
    batch_predict_async.

    Submits the async job with identical arguments, waits for completion,
    then prints the final job state.
    """
    # NOTE(review): the default for `output_format` and the print prefix
    # string were stripped during extraction; restore before use.
    job = batch_predict_async(
        training_dir=training_dir,
        prediction_input_file=prediction_input_file,
        output_dir=output_dir,
        mode=mode,
        batch_size=batch_size,
        shard_files=shard_files,
        output_format=output_format,
        cloud=cloud)
    # Block until the asynchronous job finishes.
    job.wait()
    print( + str(job.state))
Blocking version of batch_predict. See documentation of batch_predict_async.
def create_snapshot(self, volume, name=None, description=None, force=False):
    """Create a snapshot of *volume*, with optional name and description.

    Snapshots are normally refused while the volume is attached; pass
    ``force=True`` to override that default behavior. Delegates to the
    snapshot manager.
    """
    snapshot_args = {
        "volume": volume,
        "name": name,
        "description": description,
        "force": force,
    }
    return self._snapshot_manager.create(**snapshot_args)
Creates a snapshot of the volume, with an optional name and description. Normally snapshots will not happen if the volume is attached. To override this default behavior, pass force=True.
def clear_selection(self):
    """Clear the text cursor's selection in the attached editor."""
    cursor = self._editor.textCursor()
    cursor.clearSelection()
    # Write the modified cursor back so the editor reflects the change.
    self._editor.setTextCursor(cursor)
Clears text cursor selection.
def import_authors(self, tree):
    """Retrieve all the authors used in posts and convert each to a new
    or existing author; return the mapping {wordpress_author: author}.
    """
    # NOTE(review): XPath expressions, format strings and the post-type
    # literal were stripped during extraction; restore before use.
    self.write_out(self.style.STEP())
    post_authors = set()
    # First pass: collect the distinct author names appearing on posts.
    for item in tree.findall():
        post_type = item.find( % WP_NS).text
        if post_type == :
            post_authors.add(item.find( ).text)
    self.write_out( % len(post_authors))
    authors = {}
    # Second pass: map each name either to the configured default author
    # or to a migrated author record.
    for post_author in post_authors:
        if self.default_author:
            authors[post_author] = self.default_author
        else:
            authors[post_author] = self.migrate_author(
                post_author.replace(, ))
    return authors
Retrieve all the authors used in posts and convert it to new or existing author and return the conversion.
def set(self, name, value):
    """Store the given variable/value pair for later retrieval.

    :type name: string
    :param name: The name of the variable.
    :type value: object
    :param value: The value of the variable.
    """
    # Lazily create the backing dict on first use.
    store = self.vars if self.vars is not None else {}
    store[name] = value
    self.vars = store
Stores the given variable/value in the object for later retrieval. :type name: string :param name: The name of the variable. :type value: object :param value: The value of the variable.
def arc(self, radius, initial_angle, final_angle, number_of_points=0.01,
        max_points=199, final_width=None, final_distance=None, layer=0,
        datatype=0):
    """Add a curved section to the path.

    ``number_of_points`` may be an int (vertex count) or a float
    (curvature resolution, from which the count is derived). Polygons
    with more than ``max_points`` vertices are fractured into pieces
    (GDSII allows at most 199 vertices per polygon). ``final_width`` /
    ``final_distance`` linearly interpolate width and inter-path
    distance along the arc. Returns ``self`` for chaining.
    """
    warn = True
    # Arc center, derived from the current endpoint and initial angle.
    cx = self.x - radius * numpy.cos(initial_angle)
    cy = self.y - radius * numpy.sin(initial_angle)
    # Advance the path endpoint to the end of the arc.
    self.x = cx + radius * numpy.cos(final_angle)
    self.y = cy + radius * numpy.sin(final_angle)
    # New travel direction is tangent to the arc at the final angle;
    # sign depends on sweep direction.
    if final_angle > initial_angle:
        self.direction = final_angle + numpy.pi * 0.5
    else:
        self.direction = final_angle - numpy.pi * 0.5
    old_w = self.w
    old_distance = self.distance
    if final_width is not None:
        # Path width is stored as a half-width internally.
        self.w = final_width * 0.5
    if final_distance is not None:
        self.distance = final_distance
    if isinstance(number_of_points, float):
        # Float means "resolution": derive a vertex count from arc length
        # at the outermost radius.
        number_of_points = 2 * int(
            abs((final_angle - initial_angle) *
                (radius + max(old_distance, self.distance) *
                 (self.n - 1) * 0.5 + max(old_w, self.w)) /
                number_of_points) + 0.5) + 2
    number_of_points = max(number_of_points, 3)
    # Fracture into pieces so each polygon stays under max_points.
    pieces = int(numpy.ceil(number_of_points / float(max_points)))
    number_of_points = number_of_points // pieces
    # Interpolate width, distance and angle across the pieces.
    widths = numpy.linspace(old_w, self.w, pieces + 1)
    distances = numpy.linspace(old_distance, self.distance, pieces + 1)
    angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
    if (self.w != 0) or (old_w != 0):
        for jj in range(pieces):
            for ii in range(self.n):
                self.polygons.append(numpy.zeros((number_of_points, 2)))
                # Center radius of the ii-th parallel path for this piece,
                # at its end (r0) and start (old_r0).
                r0 = radius + ii * distances[jj + 1] - (
                    self.n - 1) * distances[jj + 1] * 0.5
                old_r0 = radius + ii * distances[jj] - (
                    self.n - 1) * distances[jj] * 0.5
                # First half of the vertices traces the outer edge
                # forward; second half traces the inner edge backward.
                pts2 = number_of_points // 2
                pts1 = number_of_points - pts2
                ang = numpy.linspace(angles[jj], angles[jj + 1], pts1)
                rad = numpy.linspace(old_r0 + widths[jj],
                                     r0 + widths[jj + 1], pts1)
                self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx
                self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy
                # Zero-width ends collapse two vertices into one; shift
                # the split point accordingly.
                if widths[jj + 1] == 0:
                    pts1 -= 1
                    pts2 += 1
                if widths[jj] == 0:
                    self.polygons[-1][:pts1 - 1, :] = numpy.array(
                        self.polygons[-1][1:pts1, :])
                    pts1 -= 1
                    pts2 += 1
                ang = numpy.linspace(angles[jj + 1], angles[jj],
                                     pts2)
                rad = numpy.linspace(r0 - widths[jj + 1],
                                     old_r0 - widths[jj], pts2)
                # Inner edge crossing the center: warn once about a
                # possibly self-intersecting polygon.
                if (rad[0] <= 0 or rad[-1] <= 0) and warn:
                    warnings.warn(
                        "[GDSPY] Path arc with width larger than radius "
                        "created: possible self-intersecting polygon.",
                        stacklevel=2)
                    warn = False
                self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx
                self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy
            self.length += abs((angles[jj + 1] - angles[jj]) * radius)
            # Layer/datatype lists shorter than the path count are
            # repeated cyclically.
            if isinstance(layer, list):
                self.layers.extend(
                    (layer * (self.n // len(layer) + 1))[:self.n])
            else:
                self.layers.extend(layer for _ in range(self.n))
            if isinstance(datatype, list):
                self.datatypes.extend(
                    (datatype * (self.n // len(datatype) + 1))[:self.n])
            else:
                self.datatypes.extend(datatype for _ in range(self.n))
    return self
Add a curved section to the path. Parameters ---------- radius : number Central radius of the section. initial_angle : number Initial angle of the curve (in *radians*). final_angle : number Final angle of the curve (in *radians*). number_of_points : integer or float If integer: number of vertices that form the object (polygonal approximation). If float: approximate curvature resolution. The actual number of points is automatically calculated. max_points : integer if ``number_of_points > max_points``, the element will be fractured in smaller polygons with at most ``max_points`` each. final_width : number If set, the paths of this segment will have their widths linearly changed from their current value to this one. final_distance : number If set, the distance between paths is linearly change from its current value to this one along this segment. layer : integer, list The GDSII layer numbers for the elements of each path. If the number of layers in the list is less than the number of paths, the list is repeated. datatype : integer, list The GDSII datatype for the elements of each path (between 0 and 255). If the number of datatypes in the list is less than the number of paths, the list is repeated. Returns ------- out : ``Path`` This object. Notes ----- The GDSII specification supports only a maximum of 199 vertices per polygon.
def remove_external_data_field(tensor, field_key):
    """Remove a field from a Tensor's external_data key-value store.

    Modifies the tensor object in place. All entries whose ``key`` equals
    ``field_key`` are removed.

    @params
    tensor: Tensor object from which the value will be removed
    field_key: The key of the field to be removed
    """
    # Iterate indices in reverse: the original forward enumerate-and-delete
    # shifted the remaining elements left after each `del`, skipping the
    # element immediately following a match (e.g. two adjacent matching
    # fields left one behind). Reverse iteration deletes safely.
    for i in range(len(tensor.external_data) - 1, -1, -1):
        if tensor.external_data[i].key == field_key:
            del tensor.external_data[i]
Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed
def err(self, output, newline=True):
    """Output an error string to the console (stderr)."""
    # Route through click with err=True so the text lands on stderr.
    click.echo(output, err=True, nl=newline)
Outputs an error string to the console (stderr).
def delete_collection_cluster_role_binding(self, **kwargs):
    """Delete a collection of ClusterRoleBinding.

    Synchronous by default; pass async_req=True for an asynchronous
    request returning a thread whose ``.get()`` yields the result.
    Returns a V1Status.
    """
    # NOTE(review): the kwargs key literals were stripped during extraction
    # (generated k8s clients use '_return_http_data_only' and 'async_req'
    # here — confirm and restore).
    kwargs[] = True
    if kwargs.get():
        # Asynchronous path: return the thread directly.
        return self.delete_collection_cluster_role_binding_with_http_info(**kwargs)
    else:
        # Synchronous path: unwrap the data from the http-info tuple.
        (data) = self.delete_collection_cluster_role_binding_with_http_info(**kwargs)
        return data
delete collection of ClusterRoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_cluster_role_binding(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
def ToStream(value):
    """Serialize the given `value` to an array of bytes.

    Args:
        value (neo.IO.Mixins.SerializableMixin): object extending
            SerializableMixin.

    Returns:
        bytes: not hexlified
    """
    stream = StreamManager.GetStream()
    # Serialize straight into a writer over the pooled stream.
    value.Serialize(BinaryWriter(stream))
    data = stream.getvalue()
    # Return the pooled stream before handing back the bytes.
    StreamManager.ReleaseStream(stream)
    return data
Serialize the given `value` to a an array of bytes. Args: value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin. Returns: bytes: not hexlified
def describe_vpc_peering_connection(name, region=None, key=None, keyid=None, profile=None):
    """Return any VPC peering connection id(s) for the given peering
    connection name.

    Only connections in the ``active``, ``pending-acceptance`` or
    ``provisioning`` state are returned (per the module docs).

    :param name: The string name for this VPC peering connection
    :param region: The aws region to use
    :param key: Your aws key
    :param keyid: The key id associated with this aws account
    :param profile: The profile to use
    :return: dict mapping a result key to the list of connection ids
    """
    conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
    # NOTE(review): the dict key literal was stripped during extraction;
    # restore before use.
    return {
        : _get_peering_connection_ids(name, conn)
    }
Returns any VPC peering connection id(s) for the given VPC peering connection name. VPC peering connection ids are only returned for connections that are in the ``active``, ``pending-acceptance`` or ``provisioning`` state. .. versionadded:: 2016.11.0 :param name: The string name for this VPC peering connection :param region: The aws region to use :param key: Your aws key :param keyid: The key id associated with this aws account :param profile: The profile to use :return: dict CLI Example: .. code-block:: bash salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc # Specify a region salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2
def echo(msg, *args, **kwargs):
    """Wrap click.echo: handle formatting and encoding safety.

    Pops click-specific options out of kwargs, then formats the remaining
    args/kwargs into the message via str.format.
    """
    # NOTE(review): the kwargs key literals were stripped during extraction
    # (presumably 'file', 'nl', 'err', 'color' to match the click.echo
    # parameters below); restore before use.
    file = kwargs.pop(, None)
    nl = kwargs.pop(, True)
    err = kwargs.pop(, False)
    color = kwargs.pop(, None)
    # Remaining args/kwargs are treated as format parameters.
    msg = safe_unicode(msg).format(*args, **kwargs)
    click.echo(msg, file=file, nl=nl, err=err, color=color)
Wraps click.echo, handles formatting and check encoding
def send(self, messages):
    """Send an SMS message, or a list of SMS messages.

    Builds the XML payload, posts it, and maps each response element back
    to its source message via wrapper_id. Returns a single SMSResponse
    when one message was sent, otherwise a list of SMSResponse.
    """
    # NOTE(review): string literals (XML tag names, encoding, dict keys)
    # were stripped throughout this block during extraction; restore them
    # before use.
    tmpSms = SMS(to=, message=)
    # Accept a single SMS by wrapping it in a list (type compared by name).
    if str(type(messages)) == str(type(tmpSms)):
        messages = [messages]
    xml_root = self.__init_xml()
    wrapper_id = 0
    # NOTE(review): wrapper_id is never incremented in this loop, so every
    # message gets id 0 — looks like a latent bug; confirm against the
    # upstream clockwork client.
    for m in messages:
        m.wrapper_id = wrapper_id
        msg = self.__build_sms_data(m)
        sms = etree.SubElement(xml_root, )
        for sms_element in msg:
            element = etree.SubElement(sms, sms_element)
            element.text = msg[sms_element]
    response = clockwork_http.request(SMS_URL, etree.tostring(xml_root, encoding=))
    response_data = response[]
    data_etree = etree.fromstring(response_data)
    # A top-level error element means the whole request failed.
    err_desc = data_etree.find()
    if err_desc is not None:
        raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find().text)
    results = []
    for sms in data_etree:
        # Match each response element back to the message it answers.
        matching_sms = next((s for s in messages if str(s.wrapper_id) == sms.find().text), None)
        new_result = SMSResponse(
            sms = matching_sms,
            id =  if sms.find() is None else sms.find().text,
            error_code = 0 if sms.find() is None else sms.find().text,
            error_message =  if sms.find() is None else sms.find().text,
            # NOTE(review): `.text == 0` compares a string to an int and is
            # always False — probably should compare to '0' or cast; confirm.
            success = True if sms.find() is None else (sms.find().text == 0)
        )
        results.append(new_result)
    # Single send returns the bare response rather than a 1-element list.
    if len(results) > 1:
        return results
    return results[0]
Send a SMS message, or an array of SMS messages
def itin(self):
    """Generate a random United States Individual Taxpayer Identification
    Number (ITIN).

    ITINs are nine-digit numbers formatted AAA-GG-SSSS where the area is
    900-999 and the group digit pair excludes 89 and 93.
    """
    area = self.random_int(min=900, max=999)
    serial = self.random_int(min=0, max=9999)

    # Valid group codes: 70-99 excluding 89 and 93.
    valid_groups = [g for g in range(70, 100) if g not in (89, 93)]
    group = random.choice(valid_groups)

    return "%03d-%02d-%04d" % (area, group, serial)
Generate a random United States Individual Taxpayer Identification Number (ITIN). An United States Individual Taxpayer Identification Number (ITIN) is a tax processing number issued by the Internal Revenue Service. It is a nine-digit number that always begins with the number 9 and has a range of 70-88 in the fourth and fifth digit. Effective April 12, 2011, the range was extended to include 900-70-0000 through 999-88-9999, 900-90-0000 through 999-92-9999 and 900-94-0000 through 999-99-9999. https://www.irs.gov/individuals/international-taxpayers/general-itin-information
def ingest(topic, text, **kwargs):
    """Ingest the given text for the topic, storing it (plus any extra
    metadata kwargs) in the markovify collection.

    Raises:
        ValueError: if ``text`` is empty/falsy.
    """
    # NOTE(review): the error-message prefix and the dict key literals were
    # stripped during extraction; restore before use.
    if not text:
        raise ValueError( + topic)
    data = {: topic, : text.strip()}
    # Extra keyword arguments become additional document fields.
    data.update(kwargs)
    db.markovify.insert(data)
Ingest the given text for the topic
def start(self, callback, rate=SENSOR_DELAY_NORMAL):
    """Start listening to sensor events.

    Parameters
    ----------
    callback: Callable
        Invoked with one argument, the sensor data dict, on each event.
    rate: Integer
        Update rate; one of the Sensor.SENSOR_DELAY values.

    Returns
    -------
    result: Future
        Resolves to whether the register call completed.
    """
    manager = self.manager
    if not manager:
        raise RuntimeError(
            "Cannot start a sensor without a SensorManager!")
    # Wire the callback before registering so no events are missed.
    self.onSensorChanged.connect(callback)
    return manager.registerListener(self.getId(), self, rate)
Start listening to sensor events. Sensor event data depends on the type of sensor this listener was created for. Parameters ---------- callback: Callable A callback that takes one argument that will be passed the sensor data. Sensor data is a dict with data based on the type of sensor. rate: Integer How fast to update. One of the Sensor.SENSOR_DELAY values Returns ------- result: Future A future that resolves to whether the register call completed.
def _debug_log(self, msg):
    """Write a debug message to stderr, but only when debug=True."""
    if not self.debug:
        return
    # NOTE(review): the format-string literal was stripped during
    # extraction; restore before use.
    sys.stderr.write(.format(msg))
Debug log messages if debug=True
def getLaneChangeState(self, vehID, direction):
    """getLaneChangeState(string, int) -> (int, int)

    Return the lane change state for the vehicle in the given direction.
    """
    conn = self._connection
    # Compose the TraCI request: one typed integer payload (1 type byte +
    # 4 value bytes).
    conn._beginMessage(tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE,
                       vehID, 1 + 4)
    conn._string += struct.pack("!Bi", tc.TYPE_INTEGER, direction)
    result = conn._checkResult(tc.CMD_GET_VEHICLE_VARIABLE,
                               tc.CMD_CHANGELANE, vehID)
    # Unpack (count, type, value, type, value); keep only the two values.
    return result.read("!iBiBi")[2::2]
getLaneChangeState(string, int) -> (int, int) Return the lane change state for the vehicle
def start(self, phase, stage, **kwargs):
    """Start a new routine, stage or phase.

    Returns a ProgressSection bound to this object, the current session
    and logger; extra kwargs are forwarded to the section.
    """
    section = ProgressSection(self, self._session, phase, stage,
                              self._logger, **kwargs)
    return section
Start a new routine, stage or phase
def calloc(self, sim_nmemb, sim_size):
    """A somewhat faithful implementation of libc `calloc`.

    :param sim_nmemb: the number of elements to allocate
    :param sim_size: the size of each element (in bytes)
    :returns: the address of the allocation, or a NULL pointer if the
              allocation failed

    Subclasses must override this; the base class only raises.
    """
    func_name = self.calloc.__func__.__name__
    class_name = self.__class__.__name__
    raise NotImplementedError(
        "%s not implemented for %s" % (func_name, class_name))
A somewhat faithful implementation of libc `calloc`. :param sim_nmemb: the number of elements to allocate :param sim_size: the size of each element (in bytes) :returns: the address of the allocation, or a NULL pointer if the allocation failed
def handle_one_request(self):
    """Handle a single HTTP request.

    You normally don't need to override this method; see the class
    __doc__ string for information on how to handle specific HTTP
    commands such as GET and POST.
    """
    try:
        # 65537 = max request line (64 KiB) + 1, so an over-long line is
        # detectable below.
        self.raw_requestline = self.rfile.readline(65537)
        if len(self.raw_requestline) > 65536:
            # NOTE(review): the right-hand-side string literals were
            # stripped during extraction (presumably empty strings '').
            self.requestline = 
            self.request_version = 
            self.command = 
            # 414: Request-URI Too Long.
            self.send_error(414)
            return
        if not self.raw_requestline:
            # Client closed the connection.
            self.close_connection = 1
            return
        if not self.parse_request():
            # parse_request already sent an error response.
            return
        # NOTE(review): the method-name prefix literal was stripped here
        # (conventionally 'do_' + command, e.g. do_GET).
        mname =  + self.command
        if not hasattr(self, mname):
            self.send_error(501, "Unsupported method (%r)" % self.command)
            return
        method = getattr(self, mname)
        method()
        # Send the response now, instead of on the next request.
        self.wfile.flush()
    except socket.timeout:
        # Treat a read timeout like a dropped connection.
        self.log_error("Request timed out: %r", sys.exc_info()[1])
        self.close_connection = 1
        return
Handle a single HTTP request. You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST.
def reverse(self, *args, **kwargs):
    """Look up a path template by name and fill in the provided parameters.

    Example:
        >>> handler = lambda: None  # just a bogus handler
        >>> router = PathRouter(('post', '/posts/{slug}', handler))
        >>> router.reverse('post', slug='my-post')
        '/posts/my-post'
    """
    # Exactly one positional argument is accepted: the template name.
    (template_name,) = args
    template = self._templates[template_name]
    return template.fill(**kwargs)
Look up a path by name and fill in the provided parameters. Example: >>> handler = lambda: None # just a bogus handler >>> router = PathRouter(('post', '/posts/{slug}', handler)) >>> router.reverse('post', slug='my-post') '/posts/my-post'
def main():
    """Input asteroid family, filter type, and image type to query SSOIS.

    Requires exactly one of --family or --member; prints a usage message
    otherwise.
    """
    # NOTE(review): several argparse string literals (description, default
    # filter, choices, short option aliases, help texts) were stripped
    # during extraction; restore before use. Note also the Python-2 print
    # statement at the bottom — this module targets Python 2.
    parser = argparse.ArgumentParser(description=)
    parser.add_argument("--filter",
                        action="store",
                        default=,
                        dest="filter",
                        choices=[, ],
                        help="Passband: default is r.")
    parser.add_argument("--family", ,
                        action="store",
                        default=None,
                        help=)
    parser.add_argument("--member", ,
                        action="store",
                        default=None,
                        help=)
    args = parser.parse_args()
    # Exactly one of family/member must be provided.
    if args.family != None and args.member == None:
        get_family_info(str(args.family), args.filter)
    elif args.family == None and args.member != None:
        get_member_info(str(args.member), args.filter)
    else:
        print "Please input either a family or single member name"
Input asteroid family, filter type, and image type to query SSOIS
def to_csc(self):
    """Convert the dataset's train/test matrices to scipy Compressed
    Sparse Column format, in place."""
    for attr in ("_X_train", "_X_test"):
        setattr(self, attr, csc_matrix(getattr(self, attr)))
Convert Dataset to scipy's Compressed Sparse Column matrix.
def unlink(self):
    """Unlink the shared memory, dispatching on the host OS."""
    os_name = os.name
    if os_name == "posix":
        self.__linux_unlink__()
        return
    if os_name == "nt":
        self.__windows_unlink__()
        return
    # Anything other than posix/nt is unsupported.
    raise HolodeckException("Currently unsupported os: " + os_name)
unlinks the shared memory
def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
    """Update the snapshots dict with current snapshots.

    dataset : string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots; e.g. 'test' will result in snapshots
        being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration
    """
    # NOTE(review): the __salt__ function names, separator literals and
    # dict keys were stripped during extraction; restore before use.
    for snap in sorted(__salt__[](dataset, **{: True, : 1, : }).keys()):
        # Name portion after the '@' separator of the full snapshot id.
        snap_name = snap[snap.index()+1:]
        # Skip snapshots that don't carry our prefix.
        if not snap_name.startswith(.format(prefix)):
            continue
        snap_holds = __salt__[](snap)
        if not snap_holds:
            # No holds: snapshot is a deletion candidate.
            snapshots[].append(snap)
        for hold in snap_holds:
            # Bucket held snapshots under their configured hold name.
            if hold in snapshots[].keys():
                snapshots[hold].append(snap)
    return snapshots
Update snapshots dict with current snapshots dataset: string name of filesystem or volume prefix : string prefix for the snapshots e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm' snapshots : OrderedDict preseeded OrderedDict with configuration
def queue_actions(self, source, actions, event_args=None):
    """Queue a list of ``actions`` for processing from ``source``.

    Temporarily attaches ``event_args`` to the source for the duration of
    the trigger, then clears it. Triggers an aura refresh afterwards.
    """
    source.event_args = event_args
    result = self.trigger_actions(source, actions)
    source.event_args = None
    return result
Queue a list of \a actions for processing from \a source. Triggers an aura refresh afterwards.
def execute(self, correlation_id, args):
    """Execute the command via the interceptor chain.

    Args:
        correlation_id: a unique correlation/transaction id
        args: command arguments

    Returns:
        the execution result.
    """
    # NOTE(review): `_next` is not defined in this block — it is either a
    # module-level continuation or was lost during extraction; confirm its
    # origin before relying on this code.
    return self._intercepter.execute(_next, correlation_id, args)
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason.
def to_joint_gaussian(self):
    """Return the equivalent joint Gaussian distribution for this linear
    Gaussian Bayesian network.

    Means and variances are accumulated in topological order so parent
    moments are available when each node is processed; covariances are
    then filled by symmetry / the linear-coefficient recursion.
    (Section 7.2, Koller & Friedman, Probabilistic Graphical Models.)

    Returns
    -------
    GaussianDistribution: equivalent joint Gaussian for the network.
    """
    # NOTE(review): in networkx >= 2, topological_sort returns a generator
    # and the index-based access below would fail; this code presumably
    # targets networkx 1.x — confirm before upgrading.
    variables = nx.topological_sort(self)
    mean = np.zeros(len(variables))
    covariance = np.zeros((len(variables), len(variables)))
    # First pass (topological order): node means and variances from the
    # linear CPDs: mu_i = beta_0 + sum_k beta_k * mu_parent_k.
    for node_idx in range(len(variables)):
        cpd = self.get_cpds(variables[node_idx])
        mean[node_idx] = sum([coeff * mean[variables.index(parent)]
                              for coeff, parent in
                              zip(cpd.beta_vector, cpd.evidence)]) + cpd.beta_0
        covariance[node_idx, node_idx] = sum(
            [coeff * coeff * covariance[variables.index(parent),
                                        variables.index(parent)]
             for coeff, parent in zip(cpd.beta_vector,
                                      cpd.evidence)]) + cpd.variance
    # Second pass: off-diagonal covariances. Use symmetry when the mirror
    # entry is already known, otherwise expand cov(i, j) through j's
    # parents: cov(i, j) = sum_k beta_k * cov(i, parent_k).
    for node_i_idx in range(len(variables)):
        for node_j_idx in range(len(variables)):
            if covariance[node_j_idx, node_i_idx] != 0:
                covariance[node_i_idx, node_j_idx] = covariance[node_j_idx,
                                                                node_i_idx]
            else:
                cpd_j = self.get_cpds(variables[node_j_idx])
                covariance[node_i_idx, node_j_idx] = sum(
                    [coeff * covariance[node_i_idx, variables.index(parent)]
                     for coeff, parent in zip(cpd_j.beta_vector,
                                              cpd_j.evidence)])
    return GaussianDistribution(variables, mean, covariance)
The linear Gaussian Bayesian Networks are an alternative representation for the class of multivariate Gaussian distributions. This method returns an equivalent joint Gaussian distribution. Returns ------- GaussianDistribution: An equivalent joint Gaussian distribution for the network. Reference --------- Section 7.2, Example 7.3, Probabilistic Graphical Models, Principles and Techniques Examples -------- >>> from pgmpy.models import LinearGaussianBayesianNetwork >>> from pgmpy.factors.continuous import LinearGaussianCPD >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')]) >>> cpd1 = LinearGaussianCPD('x1', [1], 4) >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1']) >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2']) >>> model.add_cpds(cpd1, cpd2, cpd3) >>> jgd = model.to_joint_gaussian() >>> jgd.variables ['x1', 'x2', 'x3'] >>> jgd.mean array([[ 1. ], [-4.5], [ 8.5]]) >>> jgd.covariance array([[ 4., 2., -2.], [ 2., 5., -5.], [-2., -5., 8.]])
def put(self, message, indent=0):
    """Print ``message`` to the device with an indent, colorized per the
    indent-based color configuration."""
    # Color is selected by (indent + indent % 2), falling back to the
    # level-0 color and finally the default color.
    color = self._colors_conf.get(indent + indent % 2,
                                  self._colors_conf.get(0,
                                                        self._default_color))
    # NOTE(review): the indent character and color-reset key literals were
    # stripped during extraction (` * indent` and `self._colors[]`);
    # restore before use.
    for chunk in [ * indent, self._colors[color], message, self._colors[]]:
        self._device.write(str(chunk))
    self._device.write(os.linesep)
    self._device.flush()
Print message with an indent. :param message: :param indent: :return:
def mapreads(data, sample, nthreads, force):
    """Attempt to map reads to the reference sequence.

    Reads the derep/edit fastq files for ``sample`` and maps each read to
    the reference with smalt or bwa. Mapped reads are filtered, sorted and
    indexed into a BAM; unmapped reads are dumped back to fastq so they
    re-enter the de novo pipeline (paired unmapped reads are merged at the
    end).
    """
    # NOTE(review): several paramsdict keys and one open() mode string were
    # stripped during extraction (`data.paramsdict[]`, `open(..., )`,
    # `if  in data.paramsdict["datatype"]`); restore before use.
    LOGGER.info("Entering mapreads(): %s %s", sample.name, nthreads)
    derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
    sample.files.dereps = [derepfile]
    mumapfile = sample.files.unmapped_reads
    umap1file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap1.fastq")
    umap2file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap2.fastq")
    # Paired data: split the merged derep file back into R1/R2 inputs.
    if "pair" in data.paramsdict["datatype"]:
        sample.files.split1 = os.path.join(data.dirs.edits,
                                           sample.name+"-split1.fastq")
        sample.files.split2 = os.path.join(data.dirs.edits,
                                           sample.name+"-split2.fastq")
        sample.files.dereps = [sample.files.split1, sample.files.split2]
        split_merged_reads(sample.files.dereps, derepfile)
    # cmd1: the aligner invocation (smalt or bwa mem).
    if "smalt" in data._hackersonly["aligner"]:
        cmd1 = [ipyrad.bins.smalt, "map", "-f", "sam",
                "-n", str(max(1, nthreads)),
                "-y", str(data.paramsdict[]),
                "-o", os.path.join(data.dirs.refmapping, sample.name+".sam"),
                "-x",
                data.paramsdict[]
                ] + sample.files.dereps
        cmd1_stdout = sps.PIPE
        cmd1_stderr = sps.STDOUT
    else:
        cmd1 = [ipyrad.bins.bwa, "mem",
                "-t", str(max(1, nthreads)),
                "-M",
                data.paramsdict[]
                ] + sample.files.dereps
        # Optional extra bwa arguments from the hackers config; inserted
        # reversed so their original order is preserved at position 2.
        try:
            bwa_args = data._hackersonly["bwa_args"].split()
            bwa_args.reverse()
            for arg in bwa_args:
                cmd1.insert(2, arg)
        except KeyError:
            pass
        # bwa writes SAM on stdout; capture it to a file.
        cmd1_stdout = open(os.path.join(data.dirs.refmapping,
                                        sample.name+".sam"), )
        cmd1_stderr = None
    # cmd2: split mapped vs unmapped (-F 0x904 drops unmapped/secondary/
    # supplementary from the kept stream; -U collects the rest).
    cmd2 = [ipyrad.bins.samtools, "view", "-b", "-F", "0x904",
            "-U", os.path.join(data.dirs.refmapping,
                               sample.name+"-unmapped.bam"),
            os.path.join(data.dirs.refmapping, sample.name+".sam")]
    # cmd3: coordinate-sort the mapped BAM.
    cmd3 = [ipyrad.bins.samtools, "sort",
            "-T", os.path.join(data.dirs.refmapping, sample.name+".sam.tmp"),
            "-O", "bam",
            "-o", sample.files.mapped_reads]
    # cmd4: index the sorted BAM.
    cmd4 = [ipyrad.bins.samtools, "index", sample.files.mapped_reads]
    # cmd5: dump unmapped reads back to fastq.
    cmd5 = [ipyrad.bins.samtools, "bam2fq", "-v 45",
            os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")]
    if  in data.paramsdict["datatype"]:
        # Paired data: extra aligner/view/bam2fq flags.
        if "smalt" in data._hackersonly["aligner"]:
            cmd1.insert(2, "pe")
            cmd1.insert(2, "-l")
        else:
            pass
        # Keep only properly paired reads (-f 0x3).
        cmd2.insert(2, "0x3")
        cmd2.insert(2, "-f")
        # Write unmapped mates to separate R1/R2 files.
        cmd5.insert(2, umap1file)
        cmd5.insert(2, "-1")
        cmd5.insert(2, umap2file)
        cmd5.insert(2, "-2")
    else:
        # Single-end: all unmapped reads go to one file.
        cmd5.insert(2, mumapfile)
        cmd5.insert(2, "-0")
    # Run the aligner.
    LOGGER.debug(" ".join(cmd1))
    proc1 = sps.Popen(cmd1, stderr=cmd1_stderr, stdout=cmd1_stdout)
    try:
        error1 = proc1.communicate()[0]
    except KeyboardInterrupt:
        proc1.kill()
    if proc1.returncode:
        raise IPyradWarningExit(error1)
    # view | sort pipeline.
    LOGGER.debug(" ".join(cmd2))
    proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
    LOGGER.debug(" ".join(cmd3))
    proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE,
                      stdin=proc2.stdout)
    error3 = proc3.communicate()[0]
    if proc3.returncode:
        raise IPyradWarningExit(error3)
    proc2.stdout.close()
    # Index the sorted mapped BAM.
    LOGGER.debug(" ".join(cmd4))
    proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE)
    error4 = proc4.communicate()[0]
    if proc4.returncode:
        raise IPyradWarningExit(error4)
    # Dump unmapped reads back to fastq.
    LOGGER.debug(" ".join(cmd5))
    proc5 = sps.Popen(cmd5, stderr=sps.STDOUT, stdout=sps.PIPE)
    error5 = proc5.communicate()[0]
    if proc5.returncode:
        raise IPyradWarningExit(error5)
    if  in data.paramsdict["datatype"]:
        # Merge the unmapped read pairs for the de novo pipeline.
        LOGGER.info("Merging unmapped reads {} {}".format(umap1file,
                                                          umap2file))
        merge_pairs_after_refmapping(data,
                                     [(umap1file, umap2file)],
                                     mumapfile)
Attempt to map reads to reference sequence. This reads in the fasta files (samples.files.edits), and maps each read to the reference. Unmapped reads are dropped right back in the de novo pipeline. Reads that map successfully are processed and pushed downstream and joined with the rest of the data post muscle_align. Mapped reads end up in a sam file.
def symbolic(self, A):
    """Return the symbolic factorization of sparse matrix ``A``.

    Dispatches on ``self.sparselib`` (umfpack or klu); returns None
    implicitly for any other library name.
    """
    # NOTE(review): the library-name literals were stripped during
    # extraction (presumably 'umfpack' and 'klu' to match the branches).
    if self.sparselib == :
        return umfpack.symbolic(A)
    elif self.sparselib == :
        return klu.symbolic(A)
Return the symbolic factorization of sparse matrix ``A`` Parameters ---------- sparselib Library name in ``umfpack`` and ``klu`` A Sparse matrix Returns ------- The symbolic factorization of ``A``
def find_rings(self, including=None):
    """Find ring structures in the MoleculeGraph.

    :param including: list of site indices. If not None, only rings
        containing at least one of the specified sites are returned;
        by default all rings are returned.
    :return: list of rings, each as a list of (from_index, to_index)
        edge tuples closing the cycle.
    """
    # Cycle search runs on a directed copy of the undirected graph.
    directed = self.graph.to_undirected().to_directed()
    # Keep only genuine rings (length > 2), then deduplicate cycles that
    # contain the same node set regardless of traversal order.
    seen_node_sets = []
    unique_cycles = []
    for candidate in nx.simple_cycles(directed):
        if len(candidate) <= 2:
            continue
        key = sorted(candidate)
        if key not in seen_node_sets:
            seen_node_sets.append(key)
            unique_cycles.append(candidate)
    # Optionally restrict to cycles touching the requested site indices.
    if including is None:
        selected = unique_cycles
    else:
        selected = []
        for site in including:
            for candidate in unique_cycles:
                if site in candidate and candidate not in selected:
                    selected.append(candidate)
    # Convert each node cycle to its closing edge list, pairing each node
    # with its predecessor (wrapping around at the start).
    ring_edges = []
    for candidate in selected:
        ring_edges.append(
            list(zip(candidate[-1:] + candidate[:-1], candidate)))
    return ring_edges
Find ring structures in the MoleculeGraph. :param including: list of site indices. If including is not None, then find_rings will only return those rings including the specified sites. By default, this parameter is None, and all rings will be returned. :return: dict {index:cycle}. Each entry will be a ring (cycle, in graph theory terms) including the index found in the Molecule. If there is no cycle including an index, the value will be an empty list.
def empirical(X):
    """Compute empirical covariance as a baseline estimator.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data matrix, one sample per row.

    Returns
    -------
    (cov, precision) : tuple of ndarray
        The empirical covariance X.T @ X / n_samples and its inverse.
    """
    print("Empirical")
    # Fix: the original referenced an undefined global `n_samples`;
    # derive it from the data itself.
    n_samples = X.shape[0]
    cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov)
Compute empirical covariance as baseline estimator.
def _generate(self):
    """Generate the set of consecutive patterns.

    Partitions [0, n) into n/w consecutive windows of width w and stores
    each window (as a set of indices) in self._patterns keyed by window
    index.
    """
    # NOTE(review): Python 2 code — `xrange` and integer `/` division.
    # Porting to Python 3 requires range() and `n // w`.
    n = self._n
    w = self._w
    # Only a single integer width is supported here.
    assert type(w) is int, "List for w not supported"
    for i in xrange(n / w):
        pattern = set(xrange(i * w, (i+1) * w))
        self._patterns[i] = pattern
Generates set of consecutive patterns.
def help(project, task, step, variables):
    """Run a help step: build and yield the help text for a task.

    The task name comes from the step's args, falling back to a variable.
    Yields a task_not_found event (and stops) when the task is unknown.
    """
    # NOTE(review): the f-string literals, separator strings and the
    # variables[] key were stripped during extraction; restore before use.
    task_name = step.args or variables[]
    try:
        task = project.find_task(task_name)
    except NoSuchTaskError as e:
        # Report near-miss suggestions, then abort this task.
        yield events.task_not_found(task_name, e.similarities)
        raise StopTask
    # Assemble the help text: header, description, and variable list.
    text = f
    text += 
    text += task.description
    text += 
    text += .format(.join(task.variables))
    yield events.help_output(text)
Run a help step.
def _lookup_proxmox_task(upid):
    """Retrieve the (latest) logs and status for a UPID.

    Can be used to verify whether a task has completed. Returns the
    matching task dict, or False when no task with that UPID exists.
    """
    # NOTE(review): the log format strings and the query() arguments were
    # stripped during extraction; restore before use.
    log.debug(, upid)
    tasks = query(, )
    if tasks:
        for task in tasks:
            # Match on the task's unique process id.
            if task[] == upid:
                log.debug(, task)
                return task
    return False
Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed.
def set_default_headers(self, *args, **kwargs):
    """Set the default headers for all requests."""
    # NOTE(review): the header name/value literals were stripped during
    # extraction (three headers are set, likely CORS/Server/Content-Type
    # style defaults — confirm against the upstream handler).
    self.set_header(, )
    self.set_header(, )
    self.set_header(, )
Set the default headers for all requests.
def get_subordinate_clauses(tiger_docgraph):
    """Given a document graph of a TIGER syntax tree, return all node IDs
    of nodes representing subordinate clause constituents.

    Parameters
    ----------
    tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph
        document graph from which subordinate clauses will be extracted

    Returns
    -------
    subord_clause_nodes : list(str)
        node IDs of nodes directly dominating subordinate clauses
    """
    # NOTE(review): the edge-attribute name, its value list and the source
    # category literal were stripped during extraction; restore before use.
    subord_clause_rels = \
        dg.select_edges_by_attribute(
            tiger_docgraph, attribute=, value=[, , ])
    subord_clause_nodes = []
    for src_id, target_id in subord_clause_rels:
        src_cat = tiger_docgraph.node[src_id].get()
        # Keep non-token targets whose source has the clause category.
        if src_cat ==  and not dg.istoken(tiger_docgraph, target_id):
            subord_clause_nodes.append(target_id)
    return subord_clause_nodes
given a document graph of a TIGER syntax tree, return all node IDs of nodes representing subordinate clause constituents. Parameters ---------- tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph document graph from which subordinate clauses will be extracted Returns ------- subord_clause_nodes : list(str) list of node IDs of nodes directly dominating subordinate clauses
def extractfile(self, member):
    """Extract a member from the archive as a file object.

    `member' may be a filename or an RPMInfo object. The file-like object
    is read-only and provides the following methods: read(), readline(),
    readlines(), seek() and tell()
    """
    # Accept either a filename or an RPMInfo; resolve names to members.
    if not isinstance(member, RPMInfo):
        member = self.getmember(member)
    # Read-only window over the member's byte range in the archive data.
    return _SubFile(self.data_file, member.file_start, member.size)
Extract a member from the archive as a file object. `member' may be a filename or an RPMInfo object. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell()
def with_updated_configuration(self, options=None, attribute_options=None):
    """Returns a context in which this representer is updated with the
    given options and attribute options.

    Delegates to the underlying mapping's own
    ``with_updated_configuration``.
    """
    mapping = self._mapping
    return mapping.with_updated_configuration(
        options=options,
        attribute_options=attribute_options)
Returns a context in which this representer is updated with the given options and attribute options.
def copy(self, deep=True, data=None):
    """Returns a copy of this object.

    `deep` is ignored since data is stored in the form of pandas.Index,
    which is already immutable. Dimensions, attributes and encodings are
    always copied. Use `data` to create a new object with the same
    structure as original but entirely new data.

    Parameters
    ----------
    deep : bool, optional
        Deep is always ignored.
    data : array_like, optional
        Data to use in the new object. Must have same shape as original.

    Returns
    -------
    object : Variable
        New object with dimensions, attributes, encodings, and optionally
        data copied from original.
    """
    if data is None:
        # Reuse the existing (immutable) data directly.
        new_data = self._data
    else:
        new_data = as_compatible_data(data)
        if new_data.shape != self.shape:
            raise ValueError("Data shape {} must match shape of object {}"
                             .format(new_data.shape, self.shape))
    cls = type(self)
    return cls(self.dims, new_data, self._attrs, self._encoding,
               fastpath=True)
Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Deep is always ignored. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original.
def show_page(self, course):
    """Prepares and shows the course page.

    Renders the course-unavailable page when the course is closed to the
    current user; otherwise gathers the user's recent submissions,
    per-task visibility/grade data, and the overall weighted course grade,
    and renders the course template.
    """
    username = self.user_manager.session_username()

    if not self.user_manager.course_is_open_to_user(course, lti=False):
        return self.template_helper.get_renderer().course_unavailable()
    else:
        tasks = course.get_tasks()
        # Last 5 submissions of this user restricted to this course's tasks.
        last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
        for submission in last_submissions:
            # NOTE(review): the key literal below was stripped during
            # extraction -- almost certainly "taskid"; confirm upstream.
            submission["taskname"] = tasks[submission[]].get_name(self.user_manager.session_language())

        tasks_data = {}
        user_tasks = self.database.user_tasks.find({"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})

        is_admin = self.user_manager.has_staff_rights_on_course(course, username)
        # tasks_score = [earned weighted score, total possible weight]
        tasks_score = [0.0, 0.0]

        for taskid, task in tasks.items():
            # A task is visible once its access window has started, or
            # always for course staff.
            tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False, "grade": 0.0}
            tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0

        for user_task in user_tasks:
            tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"]
            tasks_data[user_task["taskid"]]["grade"] = user_task["grade"]
            # Only tasks visible to the user contribute to the earned score.
            weighted_score = user_task["grade"]*tasks[user_task["taskid"]].get_grading_weight()
            tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0

        # Course grade as a rounded percentage of the total visible weight.
        course_grade = round(tasks_score[0]/tasks_score[1]) if tasks_score[1] > 0 else 0
        tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language())
        user_info = self.database.users.find_one({"username": username})

        return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks, tasks_data, course_grade, tag_list)
Prepares and shows the course page
def fit(self, X, y=None, **kwargs):
    """Fit the wrapped estimator and record its learned classes.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    kwargs: keyword arguments passed to Scikit-Learn API.

    Returns
    -------
    self : instance
        Returns the instance of the classification score visualizer
    """
    # Fix: forward **kwargs to the underlying estimator as the docstring
    # promises; previously they were silently dropped.
    self.estimator.fit(X, y, **kwargs)
    # Mirror the estimator's learned classes unless they were preset.
    if self.classes_ is None:
        self.classes_ = self.estimator.classes_
    return self
Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values kwargs: keyword arguments passed to Scikit-Learn API. Returns ------- self : instance Returns the instance of the classification score visualizer
def _DrawHours(self):
    """Generates svg to show a vertical hour and sub-hour grid

    Returns:
      A string containing a polyline tag for each grid line, e.g.
      ' <polyline class="FullHour" points="20,0 ..." '
    """
    # NOTE(review): the SVG template literals inside the append calls were
    # stripped during extraction (polyline markup for full/sub-hour lines
    # and the hour-label text markup); restore from upstream.
    tmpstrs = []
    # Walk the drawable width in sub-hour grid steps; the 20px offset is
    # presumably the left/top margin of the chart -- confirm upstream.
    for i in range(0, self._gwidth, self._min_grid):
        if i % self._hour_grid == 0:
            # Full-hour grid line plus its hour label (wraps modulo 24).
            tmpstrs.append( \
                 % (i + .5 + 20, 20, i + .5 + 20, self._gheight))
            tmpstrs.append( % (i + 20, 20, (i / self._hour_grid + self._offset) % 24))
        else:
            # Sub-hour grid line, no label.
            tmpstrs.append( \
                 % (i + .5 + 20, 20, i + .5 + 20, self._gheight))
    return "".join(tmpstrs)
Generates svg to show a vertical hour and sub-hour grid

Returns:
  A string containing a polyline tag for each grid line, e.g.
  ' <polyline class="FullHour" points="20,0 ..." '
def merge_dicts(dict1, dict2, append_lists=False):
    """Merge the second dict into the first.

    Not intended to merge list of dicts. ``dict1`` is modified in place;
    on conflicts, values from ``dict2`` win (dicts are merged recursively).

    :param dict1: destination dict, updated in place.
    :param dict2: source dict whose entries are merged in.
    :param append_lists: If true, instead of clobbering a list with the new
        value, append all of the new values onto the original list.
    """
    for key, value in dict2.items():
        if isinstance(value, dict):
            # Recurse only when both sides hold dicts.  Fix: previously a
            # non-dict value in dict1 under the same key caused a crash in
            # the recursive call; now it is simply replaced.  (The old
            # `key in dict2` check was redundant while iterating dict2.)
            if isinstance(dict1.get(key), dict):
                merge_dicts(dict1[key], value, append_lists)
            else:
                dict1[key] = value
        elif isinstance(value, list) and append_lists:
            if isinstance(dict1.get(key), list):
                # Build the additions first so duplicates *within* value are
                # preserved exactly as before (membership is tested against
                # the original list, not the growing one).
                additions = [item for item in value if item not in dict1[key]]
                dict1[key].extend(additions)
            else:
                dict1[key] = value
        else:
            dict1[key] = value
Merge the second dict into the first Not intended to merge list of dicts. :param append_lists: If true, instead of clobbering a list with the new value, append all of the new values onto the original list.
def dispatch_request(self):
    """Does the request dispatching.  Matches the URL and returns the
    return value of the view or error handler.  This does not have to
    be a response object. In order to convert the return value to a
    proper response object, call :func:`make_response`.

    .. versionchanged:: 0.7
       This no longer does the exception handling, this code was
       moved to the new :meth:`full_dispatch_request`.
    """
    req = _request_ctx_stack.top.request
    if req.routing_exception is not None:
        self.raise_routing_exception(req)
    rule = req.url_rule
    # Fix: the attribute name and method literal had been stripped; these
    # are restored from the upstream Flask implementation.  If we provide
    # automatic OPTIONS for this URL and the request came with the OPTIONS
    # method, reply automatically.
    if getattr(rule, 'provide_automatic_options', False) \
            and req.method == 'OPTIONS':
        return self.make_default_options_response()
    # Otherwise dispatch to the handler registered for that endpoint.
    return self.view_functions[rule.endpoint](**req.view_args)
Does the request dispatching. Matches the URL and returns the return value of the view or error handler. This does not have to be a response object. In order to convert the return value to a proper response object, call :func:`make_response`. .. versionchanged:: 0.7 This no longer does the exception handling, this code was moved to the new :meth:`full_dispatch_request`.
def parametrize(params):
    """Return list of params as params.

    >>> parametrize(['a'])
    'a'
    >>> parametrize(['a', 'b'])
    'a[b]'
    >>> parametrize(['a', 'b', 'c'])
    'a[b][c]'
    """
    # First element is bare; every following element is wrapped in brackets.
    pieces = [str(params[0])]
    pieces.extend("[%s]" % p for p in params[1:])
    return "".join(pieces)
Return list of params as params. >>> parametrize(['a']) 'a' >>> parametrize(['a', 'b']) 'a[b]' >>> parametrize(['a', 'b', 'c']) 'a[b][c]'
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies
    '''
    # NOTE(review): several literals were stripped during extraction (the
    # pkgdb function-name format strings, the formula keys -- presumably
    # 'dependencies'/'optional'/'recommended' -- and the split separator).
    # Additionally, ``dep_formula`` used in the loop below is undefined in
    # this excerpt; restore both from upstream before use.
    pkg_info = self.pkgdb[.format(self.db_prov)](formula_def[])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    # can_has: resolvable deps -> available package; cant_has: unresolvable.
    can_has = {}
    cant_has = []
    if  in formula_def and formula_def[] is None:
        formula_def[] = 
    for dep in formula_def.get(, ).split():
        dep = dep.strip()
        if not dep:
            continue
        if self.pkgdb[.format(self.db_prov)](dep):
            # Dependency is already installed; nothing to do.
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get(, ).split()
    recommended = formula_def.get(, ).split()

    # Transitively resolve dependencies of everything we can install.
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Recurse into the dependency's own formula and fold its results in.
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
Return a list of packages which need to be installed, to resolve all dependencies
def _write(self, dap_index, transfer_count, transfer_request, transfer_data):
    """Write one or more commands.

    Queues ``transfer_count`` DAP transfers into the current command
    packet, sending packets over the link whenever one fills up or cannot
    accept the next request.  For read requests a ``_Transfer`` object is
    created and appended to ``self._transfer_list`` so results can be
    collected later; it is returned to the caller (``None`` for writes).
    """
    assert dap_index == 0  # only DAP index 0 is supported by this path
    assert isinstance(transfer_count, six.integer_types)
    assert isinstance(transfer_request, six.integer_types)
    assert transfer_data is None or len(transfer_data) > 0

    # Create the transfer bookkeeping object up front for reads only.
    transfer = None
    if transfer_request & READ:
        transfer = _Transfer(self, dap_index, transfer_count,
                             transfer_request, transfer_data)
        self._transfer_list.append(transfer)

    cmd = self._crnt_cmd
    is_read = transfer_request & READ  # NOTE(review): computed but unused
    size_to_transfer = transfer_count
    trans_data_pos = 0
    while size_to_transfer > 0:
        # How many of the remaining transfers fit in the current packet?
        size = cmd.get_request_space(size_to_transfer, transfer_request,
                                     dap_index)

        # Current packet cannot take this request at all: flush it and
        # retry with a fresh packet.
        if size == 0:
            if LOG_PACKET_BUILDS:
                self._logger.debug("_write: send packet [size==0]")
            self._send_packet()
            cmd = self._crnt_cmd
            continue

        # Slice out just the data covered by this packet's share.
        if transfer_data is None:
            data = None
        else:
            data = transfer_data[trans_data_pos:trans_data_pos + size]
        cmd.add(size, transfer_request, data, dap_index)
        size_to_transfer -= size
        trans_data_pos += size

        # Packet filled exactly: send it and continue with a fresh one.
        if cmd.get_full():
            if LOG_PACKET_BUILDS:
                self._logger.debug("_write: send packet [full]")
            self._send_packet()
            cmd = self._crnt_cmd

    # Unless transfers are deferred, push everything out now.
    if not self._deferred_transfer:
        self.flush()

    return transfer
Write one or more commands
def Split(cls, extended_path_mask):
    """
    Splits the given path into their components: recursive, dirname,
    in_filters and out_filters

    :param str extended_path_mask: The "extended path mask" to split

    :rtype: tuple(bool,bool,str,list(str),list(str))
    :returns:
        Returns the extended path 5 components:
        - The tree-recurse flag
        - The flat-recurse flag
        - The actual path
        - A list of masks to include
        - A list of masks to exclude
    """
    import os.path

    # NOTE(review): the character-set literals (recursion prefix markers),
    # the filter separator passed to split(), and the exclusion prefix
    # tested with startswith() (presumably '!') were stripped during
    # extraction; restore from upstream.
    r_tree_recurse = extended_path_mask[0] in 
    r_flat_recurse = extended_path_mask[0] in 

    r_dirname, r_filters = os.path.split(extended_path_mask)
    if r_tree_recurse:
        # Drop the leading recursion marker from the directory part.
        r_dirname = r_dirname[1:]

    filters = r_filters.split()
    r_in_filters = [i for i in filters if not i.startswith()]
    r_out_filters = [i[1:] for i in filters if i.startswith()]

    return r_tree_recurse, r_flat_recurse, r_dirname, r_in_filters, r_out_filters
Splits the given path into their components: recursive, dirname, in_filters and out_filters :param str: extended_path_mask: The "extended path mask" to split :rtype: tuple(bool,bool,str,list(str),list(str)) :returns: Returns the extended path 5 components: - The tree-recurse flag - The flat-recurse flag - The actual path - A list of masks to include - A list of masks to exclude
def quaternion(self):
    """:obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout."""
    # transformations returns xyzw order; rotate the scalar part (w) from
    # the last position to the front.
    xyzw = transformations.quaternion_from_matrix(self.matrix)
    return np.roll(xyzw, 1)
:obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
def get_sub_node(self, node):
    """Extract node from document if desired.

    If a typed subnode is present, records its mapped type on ``self.type``
    (via ``MIMEMAP``) and descends into the inner node before returning.
    """
    # NOTE(review): the find() selectors and the attrs[] key (presumably a
    # MIME-type attribute) were stripped during extraction; restore from
    # upstream.
    subnode = node.find()
    if subnode:
        mimetype = subnode.attrs[]
        # Map the declared MIME type onto our internal type identifier.
        self.type = MIMEMAP[mimetype]
        node = node.find()
    return node
Extract node from document if desired.