code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def get_meas_los(self, user_lo_config): try: _m_los = self.default_meas_los.copy() except KeyError: raise PulseError() for channel, lo_freq in user_lo_config.meas_lo_dict().items(): _m_los[channel.index] = lo_freq if _m_los == self.default_meas_los: return None return _m_los
Embed default meas LO frequencies from backend and format them to list object. If configured lo frequency is the same as default, this method returns `None`. Args: user_lo_config (LoConfig): A dictionary of LOs to format. Returns: list: A list of meas LOs. Raises: PulseError: when LO frequencies are missing.
def cmd_log(self, reopen=False, rotate=False):
    # Manage uWSGI log handling: optionally reopen the log file (e.g. after
    # third-party rotation) and/or trigger the built-in log rotation.
    # NOTE(review): the byte-string command literals were stripped from this
    # dump (`cmd = b` / `cmd += b` reference an undefined name `b` here);
    # they were presumably master-FIFO command bytes -- restore from the
    # original source before use.
    cmd = b
    if reopen:
        cmd += b
    if rotate:
        cmd += b
    return self.send_command(cmd)
Allows managing of uWSGI log related stuff :param bool reopen: Reopen log file. Could be required after third party rotation. :param bool rotate: Trigger built-in log rotation.
def random_pairs_with_replacement(n, shape, random_state=None):
    """Draw ``n`` random record-pair indices, with replacement.

    ``random_state`` may be ``None``, a seed, or an existing
    ``np.random.RandomState`` instance.
    """
    rng = random_state
    if not isinstance(rng, np.random.RandomState):
        rng = np.random.RandomState(rng)
    n_max = max_pairs(shape)
    if n_max <= 0:
        raise ValueError()
    # Sample flat indices, then map them back to pair coordinates.
    drawn = rng.randint(0, n_max, n)
    if len(shape) != 1:
        return np.unravel_index(drawn, shape)
    return _map_tril_1d_on_2d(drawn, shape[0])
make random record pairs
def invert(self, invert=True):
    """Invert the image channels in place according to *invert*.

    If *invert* is a tuple/list, inversion is applied elementwise per
    channel; otherwise every channel is inverted when *invert* is truthy.
    'Inverting' maps black to white and vice versa (value -> 1 - value),
    it does not negate the values.
    """
    elementwise = isinstance(invert, (tuple, list))
    if elementwise and len(invert) != len(self.channels):
        raise ValueError(
            "Number of channels and invert components differ.")
    logger.debug("Applying invert with parameters %s", str(invert))
    if elementwise:
        for idx, channel in enumerate(self.channels):
            if invert[idx]:
                self.channels[idx] = 1 - channel
    elif invert:
        for idx, channel in enumerate(self.channels):
            self.channels[idx] = 1 - channel
Inverts all the channels of an image according to *invert*. If invert is a tuple or a list, elementwise inversion is performed; otherwise all channels are inverted if *invert* is true (default). Note: 'inverting' means that black becomes white, and vice versa, not that the values are negated!
def execute(self, timeout=None):
    # Execute all currently queued batch commands in one POST request.
    # NOTE(review): the logging format strings were stripped from this dump
    # (`logger.debug( % ...)` is not valid Python); restore them from the
    # original source before use.
    logger.debug( % len(self._commands))
    auth = self._build_http_auth()
    headers = self._build_request_headers()
    logger.debug( % headers)
    logger.debug( % len(self._commands))
    path = self._build_request_path(self.BATCH_ENDPOINT)
    data = json.dumps(self._commands, cls=self._json_encoder)
    r = requests.post(
        path,
        auth=auth,
        headers=headers,
        data=data,
        timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)
    )
    # The queue is cleared even when the server rejects the batch.
    self._commands = []
    logger.debug( % r.status_code)
    try:
        logger.debug( % r.json())
    except:
        # NOTE(review): bare except silences everything (including
        # KeyboardInterrupt); presumably only JSON decode errors were meant.
        logger.debug( % r.content)
    return r
Execute all currently queued batch commands
def get(self):
    """Generate and return the requested ebook (epub or mobi) from a URL
    or a local docx file, tagging it with source metadata."""
    self.log.debug()
    if self.format == "epub":
        # The source is either a remote URL or a local docx document.
        if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
            ebook = self._url_to_epub()
        elif ".docx" in self.urlOrPath:
            ebook = self._docx_to_epub()
        # NOTE(review): when neither branch matches, `ebook` is unbound and
        # the tag() call below raises NameError -- confirm inputs are
        # validated upstream.
    if self.format == "mobi":
        # Mobi generation goes through an intermediate epub.
        if self.urlOrPath[:4] == "http" or self.urlOrPath[:4] == "www.":
            epub = self._url_to_epub()
        elif ".docx" in self.urlOrPath:
            epub = self._docx_to_epub()
        # NOTE(review): `epub` is unbound here when neither branch matches
        # (NameError rather than the apparent intent of returning None).
        if not epub:
            return None
        ebook = self._epub_to_mobi(
            epubPath=epub,
            deleteEpub=False
        )
    # Stamp the generated file with provenance metadata.
    tag(
        log=self.log,
        filepath=ebook,
        tags=False,
        rating=False,
        wherefrom=self.url
    )
    self.log.debug()
    return ebook
*get the ebook object* **Return:** - ``ebook`` **Usage:** See class docstring for usage
def handle_class(signature_node, module, object_name, cache):
    """Style ``autoclass`` entries.

    Caches the class's classified attributes and prefixes the signature of
    abstract classes with an emphasised ``abstract`` marker.
    """
    class_ = getattr(module, object_name, None)
    if class_ is None:
        return
    # Always (re)record the classified attributes for this class.
    entry = cache.setdefault(class_, {})
    entry.update((attr.name, attr) for attr in inspect.classify_class_attrs(class_))
    if inspect.isabstract(class_):
        emphasis = nodes.emphasis("abstract ", "abstract ", classes=["property"])
        signature_node.insert(0, emphasis)
Styles ``autoclass`` entries. Adds ``abstract`` prefix to abstract classes.
def mulmod(computation: BaseComputation) -> None:
    """Modular multiplication opcode: push ``(left * right) % mod``.

    Per EVM semantics, a modulus of zero yields zero instead of raising.
    """
    left, right, mod = computation.stack_pop(num_items=3,
                                             type_hint=constants.UINT256)
    result = 0 if mod == 0 else (left * right) % mod
    computation.stack_push(result)
Modulo Multiplication
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Detect volumes in every disk and yield them.

    Delegates to :func:`Disk.init_volumes` per disk; call after
    :func:`mount_disks`.

    :rtype: generator
    """
    for disk in self.disks:
        logger.info("Mounting volumes in {0}".format(disk))
        detected = disk.init_volumes(single, only_mount, skip_mount,
                                     swallow_exceptions=swallow_exceptions)
        for volume in detected:
            yield volume
Detects volumes (as volume system or as single volume) in all disks and yields the volumes. This calls :func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`. :rtype: generator
def num_or_str(x):
    """Coerce *x* to an int or float when possible; otherwise return it
    as a stripped string.

    >>> num_or_str('42')
    42
    >>> num_or_str(' 42x ')
    '42x'
    """
    if isnumber(x):
        return x
    # Try the narrower numeric type first, then fall back.
    for cast in (int, float):
        try:
            return cast(x)
        except ValueError:
            pass
    return str(x).strip()
The argument is a string; convert to a number if possible, or strip it. >>> num_or_str('42') 42 >>> num_or_str(' 42x ') '42x'
def run(self):
    """Route broker messages until a SHUTDOWN message is received.

    Single dispatch loop: each multipart message carries its type in
    ``msg[1]``; the branches below implement the broker protocol.
    """
    while True:
        # Block until at least one message is available.
        if not self.task_socket.poll(-1):
            continue
        msg = self.task_socket.recv_multipart()
        msg_type = msg[1]
        if self.debug:
            # Record queue depths, and dump a partial snapshot at most
            # every TIME_BETWEEN_PARTIALDEBUG seconds.
            self.stats.append((time.time(),
                               msg_type,
                               len(self.unassigned_tasks),
                               len(self.available_workers)))
            if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
                self.writeDebug("debug/partial-{0}".format(
                    round(time.time(), -1)
                ))
                self.lastDebugTs = time.time()
        if msg_type == TASK:
            # New task: hand it to an idle worker, or queue it.
            task_id = msg[2]
            task = msg[3]
            self.logger.debug("Received task {0}".format(task_id))
            try:
                address = self.available_workers.popleft()
            except IndexError:
                self.unassigned_tasks.append((task_id, task))
            else:
                self.logger.debug("Sent {0}".format(task_id))
                self.task_socket.send_multipart([address, TASK, task])
                self.assigned_tasks[address].add(task_id)
        elif msg_type == REQUEST:
            # Worker asks for work: serve a queued task or mark it idle.
            address = msg[0]
            try:
                task_id, task = self.unassigned_tasks.popleft()
            except IndexError:
                self.available_workers.append(address)
            else:
                self.logger.debug("Sent {0}".format(task_id))
                self.task_socket.send_multipart([address, TASK, task])
                self.assigned_tasks[address].add(task_id)
        elif msg_type == STATUS_REQ:
            # Report whether a task is assigned, queued here, or unknown.
            self.pruneAssignedTasks()
            address = msg[0]
            task_id = msg[2]
            if any(task_id in x for x in self.assigned_tasks.values()):
                status = STATUS_GIVEN
            elif task_id in (x[0] for x in self.unassigned_tasks):
                status = STATUS_HERE
            else:
                status = STATUS_NONE
            self.task_socket.send_multipart([
                address,
                STATUS_ANS,
                task_id,
                status
            ])
        elif msg_type == STATUS_DONE:
            # Worker finished a task; forget the assignment.
            address = msg[0]
            task_id = msg[2]
            try:
                self.assigned_tasks[address].discard(task_id)
            except KeyError:
                pass
        elif msg_type == STATUS_UPDATE:
            # Full refresh of a worker's assigned-task set.
            address = msg[0]
            try:
                tasks_ids = pickle.loads(msg[2])
            except:
                # NOTE(review): bare except hides everything, not just
                # unpickling errors.
                self.logger.error("Could not unpickle status update message.")
            else:
                self.assigned_tasks[address] = tasks_ids
                self.status_times[address] = time.time()
        elif msg_type == REPLY:
            # Forward a result to its destination, appending the origin.
            self.logger.debug("Relaying")
            destination = msg[-1]
            origin = msg[0]
            self.task_socket.send_multipart([destination] + msg[1:] + [origin])
        elif msg_type == VARIABLE:
            # Shared-variable update: store it and broadcast on info socket.
            address = msg[4]
            value = msg[3]
            key = msg[2]
            self.shared_variables[address].update(
                {key: value},
            )
            self.info_socket.send_multipart([VARIABLE,
                                             key,
                                             value,
                                             address])
        elif msg_type == INIT:
            # New peer: apply its config, send back ours plus shared state.
            address = msg[0]
            try:
                self.processConfig(pickle.loads(msg[2]))
            except pickle.PickleError:
                continue
            self.task_socket.send_multipart([
                address,
                pickle.dumps(self.config, pickle.HIGHEST_PROTOCOL),
                pickle.dumps(self.shared_variables, pickle.HIGHEST_PROTOCOL),
            ])
            self.task_socket.send_multipart([
                address,
                pickle.dumps(self.cluster_available, pickle.HIGHEST_PROTOCOL),
            ])
        elif msg_type == CONNECT:
            try:
                connect_brokers = pickle.loads(msg[2])
            except pickle.PickleError:
                self.logger.error("Could not understand CONNECT message.")
                continue
            self.logger.info("Connecting to other brokers...")
            self.addBrokerList(connect_brokers)
        elif msg_type == SHUTDOWN:
            self.logger.debug("SHUTDOWN command received.")
            self.shutdown()
            break
Redirects messages until a shutdown message is received.
async def set_config(cls, name: str, value):
    """Set a configuration value in MAAS.

    Consult your MAAS server for recognised settings; the handler expects
    single-item lists for both the name and the value.
    """
    handler = cls._handler
    return await handler.set_config(name=[name], value=[value])
Set a configuration value in MAAS. Consult your MAAS server for recognised settings. Alternatively, use the pre-canned functions also defined on this object.
def is_java_project(self):
    """Whether the project's main binary is a Java archive.

    The answer (``isinstance(self.arch, ArchSoot)``) is computed once and
    cached on the instance.
    """
    cached = self._is_java_project
    if cached is None:
        cached = isinstance(self.arch, ArchSoot)
        self._is_java_project = cached
    return cached
Indicates if the project's main binary is a Java Archive.
def to_instants_dataframe(self, sql_ctx):
    """Transpose this TimeSeriesRDD into a DataFrame of instants.

    Each row of the result is a horizontal slice at one time; each column
    corresponds to a key from one of the original rows.
    """
    scala_sql_ctx = sql_ctx._ssql_ctx
    java_df = self._jtsrdd.toInstantsDataFrame(scala_sql_ctx, -1)
    return DataFrame(java_df, sql_ctx)
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time. This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column is a key from one of the rows in the TimeSeriesRDD.
def summary(dataset_uri, format):
    """Report summary information about a dataset (JSON or plain text)."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    # Reaches into the private _admin_metadata mapping for provenance info.
    creator_username = dataset._admin_metadata["creator_username"]
    frozen_at = dataset._admin_metadata["frozen_at"]
    num_items = len(dataset.identifiers)
    tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
                    for i in dataset.identifiers])
    if format == "json":
        # NOTE(review): the JSON template string literals were stripped from
        # this dump (bare `,` / `.format(...)` entries are not valid Python);
        # restore them from the original source.
        json_lines = [
            ,
            .format(dataset.name),
            .format(dataset.uuid),
            .format(creator_username),
            .format(num_items),
            .format(tot_size),
            .format(frozen_at),
            ,
        ]
        formatted_json = "\n".join(json_lines)
        colorful_json = pygments.highlight(
            formatted_json,
            pygments.lexers.JsonLexer(),
            pygments.formatters.TerminalFormatter())
        click.secho(colorful_json, nl=False)
    else:
        # Plain-text key/value output with green values.
        info = [
            ("name", dataset.name),
            ("uuid", dataset.uuid),
            ("creator_username", creator_username),
            ("number_of_items", str(num_items)),
            ("size", sizeof_fmt(tot_size).strip()),
            ("frozen_at", date_fmt(frozen_at)),
        ]
        for key, value in info:
            click.secho("{}: ".format(key), nl=False)
            click.secho(value, fg="green")
Report summary information about a dataset.
def validate_file(file_type, file_path):
    """Validate a file against the schema for *file_type*.

    Parameters
    ----------
    file_type : str
        Type of file to read ('component', 'element', 'table', 'references').
    file_path
        Full path to the file to be validated.

    Raises
    ------
    RuntimeError
        If *file_type* is not valid (no schema exists).
    ValidationError
        If the file does not pass validation.
    FileNotFoundError
        If *file_path* does not exist.
    """
    parsed = fileio._read_plain_json(file_path, False)
    validate_data(file_type, parsed)
Validates a file against a schema Parameters ---------- file_type : str Type of file to read. May be 'component', 'element', 'table', or 'references' file_path: Full path to the file to be validated Raises ------ RuntimeError If the file_type is not valid (and/or a schema doesn't exist) ValidationError If the given file does not pass validation FileNotFoundError If the file given by file_path doesn't exist
def kld(d1, d2):
    """Return the Kullback-Leibler Divergence of ``d1`` from ``d2``.

    Both distributions are flattened first; the divergence is computed
    in base 2.
    """
    p, q = flatten(d1), flatten(d2)
    return entropy(p, q, 2.0)
Return the Kullback-Leibler Divergence (KLD) between two distributions. Args: d1 (np.ndarray): The first distribution. d2 (np.ndarray): The second distribution. Returns: float: The KLD of ``d1`` from ``d2``.
def cmd_tool(args=None):
    """Command line tool for plotting and viewing info on GuppiRaw files."""
    from argparse import ArgumentParser
    parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
    # NOTE(review): the argument-name/help string literals were stripped from
    # this dump (`parser.add_argument(, ...)` is not valid Python); they were
    # presumably a positional 'filename' and an output-directory option --
    # restore from the original source.
    parser.add_argument(, type=str, help=)
    parser.add_argument(, dest=, type=str, default=, help=)
    args = parser.parse_args()
    r = GuppiRaw(args.filename)
    r.print_stats()
    # Output files share the input's basename, placed in args.outdir.
    bname = os.path.splitext(os.path.basename(args.filename))[0]
    bname = os.path.join(args.outdir, bname)
    r.plot_histogram(filename="%s_hist.png" % bname)
    r.plot_spectrum(filename="%s_spec.png" % bname)
Command line tool for plotting and viewing info on guppi raw files
def run(self):
    """Run all linters and report results.

    Returns:
        bool: **True** if all checks were successful, **False** otherwise.
    """
    with util.timed_block() as t:
        files = self._collect_files()
    log.info("Collected <33>{} <32>files in <33>{}s".format(
        len(files), t.elapsed_s
    ))
    if self.verbose:
        for p in files:
            log.info(" <0>{}", p)
    if not files:
        return self.allow_empty
    with util.timed_block() as t:
        results = self._run_checks(files)
    log.info("Code checked in <33>{}s", t.elapsed_s)
    # A check failed if any of its return codes is non-zero.
    failures = [(name, retcodes) for name, retcodes in results.items()
                if any(code != 0 for code in retcodes)]
    for name, retcodes in failures:
        log.err("<35>{} <31>failed with: <33>{}".format(
            name, retcodes
        ))
    return not failures
Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise.
def get_os_version_package(pkg, fatal=True):
    """Derive the OpenStack version number from an installed package."""
    codename = get_os_codename_package(pkg, fatal=fatal)
    if not codename:
        return None
    # NOTE(review): the package-name literal was stripped from this dump
    # (`if in pkg:` is not valid Python); it presumably tested for 'swift'.
    if in pkg:
        vers_map = SWIFT_CODENAMES
        # Swift maps codename -> list of versions; return the most recent.
        for cname, version in six.iteritems(vers_map):
            if cname == codename:
                return version[-1]
    else:
        vers_map = OPENSTACK_CODENAMES
        for version, cname in six.iteritems(vers_map):
            if cname == codename:
                return version
Derive OpenStack version number from an installed package.
def _release_info(jsn,VERSION):
    # Print information about a particular package release (Python 2 code:
    # note the `print` statements).
    # NOTE(review): the JSON key string literals were stripped from this dump
    # (`jsn[][VERSION][0]` / `release_point[]` are not valid Python);
    # restore them from the original source before use.
    try:
        release_point = jsn[][VERSION][0]
    except KeyError:
        # ANSI red/bold error text when the release does not exist.
        print "\033[91m\033[1mError: Release not found."
        exit(1)
    python_version = release_point[]
    filename = release_point[]
    md5 = release_point[]
    download_url_for_release = release_point[]
    download_num_for_release = release_point[]
    download_size_for_release = _sizeof_fmt(int(release_point[]))
    # NOTE(review): the print format string was stripped here as well.
    print %(md5,python_version,download_url_for_release,\
        download_num_for_release,download_size_for_release,filename)
Gives information about a particular package version.
def _parse_qualimap_globals_inregion(table):
    """Retrieve the 'Mapped reads' metrics from the targeted-region table."""
    out = {}
    for row in table.find_all("tr"):
        cells = row.find_all("td")
        col, val = [cell.text for cell in cells]
        if col == "Mapped reads":
            out.update(_parse_num_pct("%s (in regions)" % col, val))
    return out
Retrieve metrics from the global targeted region table.
def modularity_louvain_und(W, gamma=1, hierarchy=False, seed=None): s global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- ci : Nx1 np.ndarray refined community affiliation vector. If hierarchical output enabled, it is an NxH np.ndarray instead with multiple iterations Q : float optimized modularity metric. If hierarchical output enabled, becomes an Hx1 array of floats instead. Notes ----- Ci and Q may vary from run to run, due to heuristics in the algorithm. Consequently, it may be worth to compare multiple runs. Modularity Infinite Loop Style B. Please contact the developer with this error.Modularity Infinite Loop Style C. Please contact the developer with this error.') flag = False for i in rng.permutation(n): ma = m[i] - 1 dQ = ((Knm[i, :] - Knm[i, ma] + W[i, i]) - gamma * k[i] * (Km - Km[ma] + k[i]) / s) dQ[ma] = 0 max_dq = np.max(dQ) if max_dq > 1e-10: j = np.argmax(dQ) Knm[:, j] += W[:, i] Knm[:, ma] -= W[:, i] Km[j] += k[i] Km[ma] -= k[i] m[i] = j + 1 flag = True _, m = np.unique(m, return_inverse=True) m += 1 h += 1 ci.append(np.zeros((n0,))) for i in range(n): ci[h][np.where(ci[h - 1] == i + 1)] = m[i] n = np.max(m) W1 = np.zeros((n, n)) for i in range(n): for j in range(i, n): wp = np.sum(W[np.ix_(m == i + 1, m == j + 1)]) W1[i, j] = wp W1[j, i] = wp W = W1 q.append(0) q[h] = np.trace(W) / s - gamma * np.sum(np.dot(W / s, W / s)) if q[h] - q[h - 1] < 1e-10: break ci = np.array(ci, dtype=int) if hierarchy: ci = ci[1:-1] q = q[1:-1] return ci, q else: return ci[h - 1], q[h - 1]
The optimal community structure is a subdivision of the network into nonoverlapping groups of nodes in a way that maximizes the number of within-group edges, and minimizes the number of between-group edges. The modularity is a statistic that quantifies the degree to which the network may be subdivided into such clearly delineated groups. The Louvain algorithm is a fast and accurate community detection algorithm (as of writing). The algorithm may also be used to detect hierarchical community structure. Parameters ---------- W : NxN np.ndarray undirected weighted/binary connection matrix gamma : float resolution parameter. default value=1. Values 0 <= gamma < 1 detect larger modules while gamma > 1 detects smaller modules. hierarchy : bool Enables hierarchical output. Default value=False seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- ci : Nx1 np.ndarray refined community affiliation vector. If hierarchical output enabled, it is an NxH np.ndarray instead with multiple iterations Q : float optimized modularity metric. If hierarchical output enabled, becomes an Hx1 array of floats instead. Notes ----- Ci and Q may vary from run to run, due to heuristics in the algorithm. Consequently, it may be worthwhile to compare multiple runs.
def visible_fields(self):
    """Return the reduced set of visible fields to output from the form.

    Respects the configured ``fields`` list and excludes everything in
    ``exclude_fields``. When no ``fields`` were configured, all visible
    fields minus the excluded ones are returned.

    :return: List of bound field instances or empty tuple.
    """
    visible = self.form.visible_fields()
    # Fall back to every visible field when no explicit list was given.
    wanted = self.render_fields or [field.name for field in visible]
    allowed = [name for name in wanted if name not in self.exclude_fields]
    return [field for field in visible if field.name in allowed]
Returns the reduced set of visible fields to output from the form. This method respects the provided ``fields`` configuration _and_ excludes all fields from the ``exclude`` configuration. If no ``fields`` were provided when configuring this fieldset, all visible fields minus the excluded fields will be returned. :return: List of bound field instances or empty tuple.
def map_to_openapi_type(self, *args):
    """Decorator to register an OpenAPI mapping for a custom field.

    ``*args`` can be either a ``(type, format)`` pair, or a single core
    marshmallow field type whose existing mapping is reused.
    """
    if len(args) == 1 and args[0] in self.field_mapping:
        mapping = self.field_mapping[args[0]]
    elif len(args) == 2:
        mapping = args
    else:
        raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")

    def decorator(field_type):
        # Register and hand the class back unchanged.
        self.field_mapping[field_type] = mapping
        return field_type

    return decorator
Decorator to set mapping for custom fields. ``*args`` can be: - a pair of the form ``(type, format)`` - a core marshmallow field type (in which case we reuse that type's mapping)
def calc_hamiltonian(self, mass, omega_array): Kappa_t= mass*omega_array**2 self.E_pot = 0.5*Kappa_t*self.position_data**2 self.E_kin = 0.5*mass*(_np.insert(_np.diff(self.position_data), 0, (self.position_data[1]-self.position_data[0]))*self.SampleFreq)**2 self.Hamiltonian = self.E_pot + self.E_kin return self.Hamiltonian
Calculates the standard (pot+kin) Hamiltonian of your system. Parameters ---------- mass : float The mass of the particle in kg omega_array : array array which represents omega at every point in your time trace and should therefore have the same length as self.position_data Requirements ------------ self.position_data : array Already filtered for the degree of freedom of intrest and converted into meters. Returns ------- Hamiltonian : array The calculated Hamiltonian
def addKeyword(self, keyword, weight):
    """Add a relevant keyword to the topic page.

    @param keyword: keyword or phrase to be added
    @param weight: importance of the keyword (typically in range 1 - 50)
    """
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"keyword": keyword, "wgt": weight}
    self.topicPage["keywords"].append(entry)
add a relevant keyword to the topic page @param keyword: keyword or phrase to be added @param weight: importance of the provided keyword (typically in range 1 - 50)
def get_ordered_entries(self, queryset=False):
    """Order categories by rank, computed as avg views * avg rating of each
    category's entries; categories without entries are excluded.

    The result is stored on ``self.queryset`` (sorted descending by
    ``last_rank``) and returned.
    """
    self.queryset = queryset if queryset else EntryCategory.objects.all()
    if self.queryset:
        for category in self.queryset:
            entries = category.get_entries()
            if not entries:
                # Drop empty categories from the final result.
                self.queryset = self.queryset.exclude(pk=category.pk)
                continue
            views = [entry.amount_of_views for entry in entries]
            ratings = [entry.rating() for entry in entries]
            avg_views = fsum(views) / len(views)
            avg_rating = fsum(ratings) / len(ratings)
            category.last_rank = avg_views * avg_rating
            category.save()
        self.queryset = sorted(self.queryset, key=lambda c: c.last_rank,
                               reverse=True)
    return self.queryset
Custom ordering. First we get the average views and rating for the categories's entries. Second we created a rank by multiplying both. Last, we sort categories by this rank from top to bottom. Example: - Cat_1 - Entry_1 (500 Views, Rating 2) - Entry_2 (200 Views, Rating -4) - Entry_3 (100 Views, Rating 3) - Cat_2 - Entry_1 (200 Views, Rating 7) - Entry_2 (50 Views, Rating 2) Result: Cat_1 has a rank by: 88.88 (avg. views: 266.66, avg. rating: 0.33) Cat_2 has a rank by: 562.5 (avg. views: 125, avg. rating: 4.5) Cat_2 will be displayed at the top. The algorithm is quality-oriented, as you can see.
def broadcast(self, fromUserId, objectName, content, pushContent=None, pushData=None, os=None):
    """Send a broadcast (SYSTEM conversation) message to every registered
    user of the application; offline users matching the push conditions
    receive a Push notification. Rate limited server-side (per the original
    docstring: once per hour, three times per day)."""
    # Response-shape description returned alongside the raw API result.
    # (Field descriptions are runtime data and kept verbatim.)
    desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    # NOTE(review): the HTTP-method/action string literals were stripped from
    # this dump (`method=(, , )` / `action=` are not valid Python); restore
    # them from the original source before use.
    r = self.call_api(
        method=(, , ),
        action=,
        params={
            "fromUserId": fromUserId,
            "objectName": objectName,
            "content": content,
            "pushContent": pushContent,
            "pushData": pushData,
            "os": os
        })
    return Response(r, desc)
发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送 Push 信息,单条消息最大 128k,会话类型为 SYSTEM。每小时只能发送 1 次,每天最多发送 3 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param txtMessage:文本消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知.(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param os:针对操作系统发送 Push,值为 iOS 表示对 iOS 手机用户发送 Push ,为 Android 时表示对 Android 手机用户发送 Push ,如对所有用户发送 Push 信息,则不需要传 os 参数。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
def delete_untagged(collector, **kwargs):
    """Find untagged (``<none>:<none>``) docker images and remove them."""
    configuration = collector.configuration
    docker_api = configuration["harpoon"].docker_api
    untagged = [image for image in docker_api.images()
                if image["RepoTags"] == ["<none>:<none>"]]
    if not untagged:
        log.info("Didn't find any untagged images to delete!")
        return
    for image in untagged:
        image_id = image["Id"]
        log.info("Deleting untagged image\thash=%s", image_id)
        try:
            docker_api.remove_image(image["Id"])
        except DockerAPIError as error:
            # Best-effort: log the failure and keep deleting the rest.
            log.error("Failed to delete image\thash=%s\terror=%s", image_id, error)
Find the untagged images and remove them
def _get_site_class(self, vs30, mmi_mean): if vs30[0] < 180: c1 = 1.0 c2 = -0.25 d = 0.5 elif vs30[0] >= 180 and vs30[0] <= 360: c1 = 0.5 c2 = -0.125 d = 0.25 elif vs30[0] > 360 and vs30[0] <= 760: c1 = 0. c2 = 0. d = 0. elif vs30[0] > 760 and vs30[0] <= 1500: c1 = -0.5 c2 = 0.125 d = -0.25 elif vs30[0] > 1500: c1 = -1.0 c2 = 0.25 d = -0.5 S = np.zeros_like(vs30) for i in range(vs30.size): if mmi_mean[i] <= 7.0: S[i] += c1 elif mmi_mean[i] > 7 and mmi_mean[i] < 9.5: S[i] += c1 - d * (mmi_mean[i] - 7.0) else: S[i] += c2 return S
Return site class flag for: Class E - Very Soft Soil vs30 < 180 Class D - Deep or Soft Soil vs30 >= 180 and vs30 <= 360 Class C - Shallow Soil vs30 > 360 and vs30 <= 760 Class B - Rock vs30 > 760 and vs30 <= 1500 Class A - Strong Rock vs30 > 1500 The S site class is equal to S = c1 if MMI <= 7 S = c1 - d *(MMI - 7.0) if 7<MMI<9.5 S = c2 if MMI >= 9.5
def make_report(self, outcome):
    """Build the failure report as two notebooks and serve an nbdime
    diff-web view of reference cells vs. test cells."""
    failures = self.getreports()
    if not failures:
        return
    # Summarise each failing report in the terminal output first.
    for rep in failures:
        msg = self._getfailureheadline(rep)
        lines = rep.longrepr.splitlines()
        if len(lines) > 1:
            self.section(msg, lines[1])
        self._outrep_summary(rep)
    tmpdir = tempfile.mkdtemp()
    try:
        # NOTE(review): the notebook filename literals were stripped from
        # this dump -- `os.path.join(tmpdir, )` joins nothing, so ref_file
        # and test_file would both be tmpdir itself; restore the filenames
        # from the original source.
        ref_file = os.path.join(tmpdir, )
        test_file = os.path.join(tmpdir, )
        with io.open(ref_file, "w", encoding="utf8") as f:
            nbformat.write(self.nb_ref, f)
        with io.open(test_file, "w", encoding="utf8") as f:
            nbformat.write(self.nb_test, f)
        # Serve the diff UI; port=0 picks a free port and the callback
        # opens the browser once it is known.
        run_server(
            port=0,
            cwd=tmpdir,
            closable=True,
            on_port=lambda port: browse(
                port, ref_file, test_file, None))
    finally:
        shutil.rmtree(tmpdir)
Make report in form of two notebooks. Use nbdime diff-web to present the difference between reference cells and test cells.
def currentpath(self) -> str:
    """Absolute path of the current working directory, i.e. ``basepath``
    joined with ``currentdir``."""
    parts = (self.basepath, self.currentdir)
    return os.path.join(*parts)
Absolute path of the current working directory. >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> from hydpy import repr_, TestIO >>> with TestIO(): ... filemanager.currentdir = 'testdir' ... repr_(filemanager.currentpath) # doctest: +ELLIPSIS '...hydpy/tests/iotesting/projectname/basename/testdir'
def from_analysis_period(cls, analysis_period, clearness=1, daylight_savings_indicator=):
    # Initialize an OriginalClearSkyCondition from an analysis_period.
    # NOTE(review): the default value for daylight_savings_indicator was
    # stripped from this dump (`=)` is not valid Python); presumably a
    # short string such as 'No' -- restore from the original source.
    _check_analysis_period(analysis_period)
    return cls(analysis_period.st_month, analysis_period.st_day, clearness,
               daylight_savings_indicator)
Initialize a OriginalClearSkyCondition from an analysis_period
def parse_model_table_file(path, f):
    """Parse a file as a list of model reactions, yielding reaction IDs."""
    for line in f:
        # NOTE(review): the separator/comparison string literals were
        # stripped from this dump (`line.partition()` needs an argument and
        # `if line == :` is not valid Python); presumably a '#'-comment
        # split and an empty-line check -- restore from the original source.
        line, _, comment = line.partition()
        line = line.strip()
        if line == :
            continue
        yield line
Parse a file as a list of model reactions Yields reactions IDs. Path can be given as a string or a context.
async def get_entry(config, url):
    """Fetch an entry by URL, using the cache for conditional requests.

    Returns a 3-tuple ``(current, previous, updated)``.
    """
    # NOTE(review): the cache-key string literals were stripped from this
    # dump (`config.cache.get( , url, ...)` is not valid Python); restore
    # them from the original source before use.
    previous = config.cache.get(
        , url, schema_version=SCHEMA_VERSION) if config.cache else None
    # Reuse the cached entry's validators for a conditional GET.
    headers = previous.caching if previous else None
    request = await utils.retry_get(config, url, headers=headers)
    if not request or not request.success:
        LOGGER.error("Could not get entry %s: %d",
                     url,
                     request.status if request else -1)
        return None, previous, False
    if request.cached:
        # Not modified: the cached copy is still current.
        return previous, previous, False
    current = Entry(request)
    if config.cache:
        config.cache.set(, url, current)
    # Updated when there was no previous entry or content/status changed.
    return current, previous, (not previous
                               or previous.digest != current.digest
                               or previous.status != current.status)
Given an entry URL, return the entry Arguments: config -- the configuration url -- the URL of the entry Returns: 3-tuple of (current, previous, updated)
def _stellingwerf_pdm_worker(task):
    """Parallel worker computing the Stellingwerf PDM theta statistic.

    Parameters
    ----------
    task : tuple
        ``(times, mags, errs, frequency, binsize, minbin)``.

    Returns
    -------
    float
        Theta at the given frequency, or NaN when the calculation fails.
    """
    times, mags, errs, frequency, binsize, minbin = task
    try:
        return stellingwerf_pdm_theta(times, mags, errs, frequency,
                                      binsize=binsize, minbin=minbin)
    except Exception:
        # Any failure maps to NaN for this frequency.
        return npnan
This is a parallel worker for the function below. Parameters ---------- task : tuple This is of the form below:: task[0] = times task[1] = mags task[2] = errs task[3] = frequency task[4] = binsize task[5] = minbin Returns ------- theta_pdm : float The theta value at the specified frequency. nan if the calculation fails.
def _init_params_default(self):
    """Default parameter initialization.

    Mean-imputes missing values in Y, takes the empirical (co)variance,
    splits it evenly across the random effects, and assigns it to each
    trait covariance function.
    """
    imputed = self.Y.copy()
    missing = sp.isnan(imputed)
    imputed[missing] = imputed[~missing].mean()
    if self.P == 1:
        C = sp.array([[imputed.var()]])
    else:
        C = sp.cov(imputed.T)
    C /= float(self.n_randEffs)
    for ti in range(self.n_randEffs):
        self.getTraitCovarFun(ti).setCovariance(C)
Internal method for default parameter initialization
def get_shape(kind=,x=None,y=None,x0=None,y0=None,x1=None,y1=None,span=0,color=,dash=,width=1,
              fillcolor=None,fill=False,opacity=1,xref=,yref=):
    """Return a plotly shape dict (line, rect or circle)."""
    # NOTE(review): many string literals were stripped from this dump --
    # keyword defaults (`kind=,`), dict keys (`{ :x0, ...}`) and the
    # shape-type comparisons (`if kind==:`) are not valid Python. Restore
    # them from the original source before use.
    # Resolve x0/x1: a single `x` means x0 == x1; all-missing spans the
    # full paper axis (0..1).
    if x1 is None:
        if x0 is None:
            if x is None:
                xref=
                x0=0
                x1=1
            else:
                x0=x1=x
        else:
            x1=x0
    else:
        x
    # Same resolution for the y coordinates.
    if y1 is None:
        if y0 is None:
            if y is None:
                yref=
                y0=0
                y1=1
            else:
                y0=y1=y
        else:
            y1=y0
    shape = {
        :x0,
        :y0,
        :x1,
        :y1,
        : {
            :normalize(color),
            :width,
            :dash
        },
        :xref,
        :yref
    }
    # Map the requested kind onto plotly's shape type.
    if kind==:
        shape[]=
    elif kind==:
        shape[]=
    elif kind==:
        shape[]=
    else:
        raise Exception("Invalid or unkown shape type : {0}".format(kind))
    # Fill (not meaningful for plain lines): default to the line color.
    if (fill or fillcolor) and kind!=:
        fillcolor = color if not fillcolor else fillcolor
        fillcolor=to_rgba(normalize(fillcolor),opacity)
        shape[]=fillcolor
    return shape
Returns a plotly shape Parameters: ----------- kind : string Shape kind line rect circle x : float x values for the shape. This assumes x0=x1 x0 : float x0 value for the shape x1 : float x1 value for the shape y : float y values for the shape. This assumes y0=y1 y0 : float y0 value for the shape y1 : float y1 value for the shape color : string color for shape line dash : string line style solid dash dashdot dot width : int line width fillcolor : string shape fill color fill : bool If True then fill shape If not fillcolor then the line color will be used opacity : float [0,1] opacity of the fill xref : string Sets the x coordinate system which this object refers to 'x' 'paper' 'x2' etc yref : string Sets the y coordinate system which this object refers to 'y' 'paper' 'y2' etc
def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None):
    """Ensure the SAML provider with the specified name is absent.

    .. versionadded:: 2016.11.0
    """
    # NOTE(review): the dict-key, salt-function-name and message string
    # literals were stripped from this dump (`ret = {: name, ...}` and
    # `__salt__[]` are not valid Python); they were presumably the usual
    # state-return keys ('name'/'result'/'comment'/'changes') and
    # boto_iam module calls -- restore from the original source.
    ret = {: name, : True, : , : {}}
    provider = __salt__[](region=region, key=key, keyid=keyid, profile=profile)
    if not provider:
        # Already absent: nothing to do.
        ret[] = .format(name)
        return ret
    if __opts__[]:
        # Test mode: report what would happen without deleting.
        ret[] = .format(name)
        ret[] = None
        return ret
    deleted = __salt__[](name, region=region, key=key, keyid=keyid, profile=profile)
    if deleted is not False:
        ret[] = .format(name)
        ret[][] = name
        return ret
    ret[] = False
    ret[] = .format(name)
    return ret
.. versionadded:: 2016.11.0 Ensure the SAML provider with the specified name is absent. name (string) The name of the SAML provider. saml_metadata_document (string) The xml document of the SAML provider. region (string) Region to connect to. key (string) Secret key to be used. keyid (string) Access key to be used. profile (dict) A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
def redirect_to_assignment_override_for_group(self, group_id, assignment_id):
    """Redirect to the assignment override for a group.

    Responds with a redirect to the override for the given group, if any
    (404 otherwise).
    """
    path = {"group_id": group_id, "assignment_id": assignment_id}
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    url = "/api/v1/groups/{group_id}/assignments/{assignment_id}/override".format(**path)
    return self.generic_request("GET", url, data=data, params=params, no_data=True)
Redirect to the assignment override for a group. Responds with a redirect to the override for the given group, if any (404 otherwise).
def mouse_click(self, widget, event=None):
    """Shift-/control-aware mouse-button handling for the TreeView.

    Implements multi-selection semantics on button-press events; returning
    True stops further propagation (avoids interference from special cell
    renderers).

    :param widget: object which is the source of the event
    :param event: event generated by the mouse click
    :rtype: bool
    """
    if event.type == Gdk.EventType.BUTTON_PRESS:
        pthinfo = self.tree_view.get_path_at_pos(int(event.x), int(event.y))
        # Plain right-click: move the cursor (unless clicking inside the
        # current multi-selection) and open the context menu.
        if not bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) \
                and not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and \
                event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
            if pthinfo is not None:
                model, paths = self._tree_selection.get_selected_rows()
                if pthinfo[0] not in paths:
                    self.tree_view.set_cursor(pthinfo[0])
                    self._last_path_selection = pthinfo[0]
                else:
                    # Clicked inside the selection: keep it unchanged.
                    pass
                self.on_right_click_menu()
                return True
        # Right-click with shift/control held: swallow the event.
        if (bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) or \
                bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK)) and \
                event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
            return True
        # Plain left-click: remember the row as the range-selection anchor.
        if not bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
            if pthinfo is not None:
                self._last_path_selection = pthinfo[0]
        # Shift+left-click: select the contiguous range from the anchor to
        # the clicked row (in either direction).
        if bool(event.get_state() & Gdk.ModifierType.SHIFT_MASK) and event.get_button()[1] == 1:
            model, paths = self._tree_selection.get_selected_rows()
            if paths and pthinfo and pthinfo[0]:
                if self._last_path_selection[0] <= pthinfo[0][0]:
                    new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]+1))
                else:
                    new_row_ids_selected = list(range(self._last_path_selection[0], pthinfo[0][0]-1, -1))
                self._tree_selection.unselect_all()
                for path in new_row_ids_selected:
                    self._tree_selection.select_path(path)
                return True
            else:
                if pthinfo and pthinfo[0]:
                    self._last_path_selection = pthinfo[0]
        # Ctrl+left-click: toggle the clicked row in the selection.
        if bool(event.get_state() & Gdk.ModifierType.CONTROL_MASK) and event.get_button()[1] == 1:
            model, paths = self._tree_selection.get_selected_rows()
            if paths and pthinfo and pthinfo[0]:
                if pthinfo[0] in paths:
                    self._tree_selection.unselect_path(pthinfo[0])
                else:
                    self._tree_selection.select_path(pthinfo[0])
                return True
            elif pthinfo and pthinfo[0]:
                self._tree_selection.select_path(pthinfo[0])
                return True
    elif event.type == Gdk.EventType._2BUTTON_PRESS:
        # Double click delegates to the dedicated handler.
        self._handle_double_click(event)
Implements shift- and control-key handling features for mouse button press events. The method implements a fully defined mouse pattern to use shift- and control-key for multi-selection in a TreeView and a ListStore as model. It avoids problems caused by special renderer types like the text combo renderer by stopping the callback handler from continuing with notifications. :param Gtk.Object widget: Object which is the source of the event :param Gtk.Event event: Event generated by mouse click :rtype: bool
def collection(self, collection_id):
    """Create a sub-collection underneath the current document.

    Args:
        collection_id (str): The sub-collection identifier (sometimes
            referred to as the "kind").

    Returns:
        The child collection reference from the client.
    """
    extended_path = self._path + (collection_id,)
    return self._client.collection(*extended_path)
Create a sub-collection underneath the current document. Args: collection_id (str): The sub-collection identifier (sometimes referred to as the "kind"). Returns: ~.firestore_v1beta1.collection.CollectionReference: The child collection.
def shutdown(self, msg, args): self.log.info("Received shutdown from %s", msg.user.username) self._bot.runnable = False return "Shutting down..."
Causes the bot to gracefully shutdown.
def printImportedNames(self): for module in self.listModules(): print("%s:" % module.modname) print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
Produce a report of imported names.
def with_arg_count(self, count): exp = self._get_current_call() exp.expected_arg_count = count return self
Set the last call to expect an exact argument count. I.E.:: >>> auth = Fake('auth').provides('login').with_arg_count(2) >>> auth.login('joe_user') # forgot password Traceback (most recent call last): ... AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
def trigger(cls, streams): items = [stream.contents.items() for stream in set(streams)] union = [kv for kvs in items for kv in kvs] klist = [k for k, _ in union] key_clashes = set([k for k in klist if klist.count(k) > 1]) if key_clashes: clashes = [] dicts = [dict(kvs) for kvs in items] for clash in key_clashes: values = set(d[clash] for d in dicts if clash in d) if len(values) > 1: clashes.append((clash, values)) if clashes: msg = .join([ % (k, v) for k, v in clashes]) print( % msg) subscriber_precedence = defaultdict(list) for stream in streams: stream._on_trigger() for precedence, subscriber in stream._subscribers: subscriber_precedence[precedence].append(subscriber) sorted_subscribers = sorted(subscriber_precedence.items(), key=lambda x: x[0]) subscribers = util.unique_iterator([s for _, subscribers in sorted_subscribers for s in subscribers]) with triggering_streams(streams): for subscriber in subscribers: subscriber(**dict(union)) for stream in streams: with util.disable_constant(stream): if stream.transient: stream.reset()
Given a list of streams, collect all the stream parameters into a dictionary and pass it to the union set of subscribers. Passing multiple streams at once to trigger can be useful when a subscriber may be set multiple times across streams but only needs to be called once.
def mdr_conditional_entropy(X, Y, labels, base=2): return conditional_entropy(_mdr_predict(X, Y, labels), labels, base=base)
Calculates the MDR conditional entropy, H(XY|labels), in the given base MDR conditional entropy is calculated by combining variables X and Y into a single MDR model then calculating the entropy of the resulting model's predictions conditional on the provided labels. Parameters ---------- X: array-like (# samples) An array of values corresponding to one feature in the MDR model Y: array-like (# samples) An array of values corresponding to one feature in the MDR model labels: array-like (# samples) The class labels corresponding to features X and Y base: integer (default: 2) The base in which to calculate MDR conditional entropy Returns ---------- mdr_conditional_entropy: float The MDR conditional entropy calculated according to the equation H(XY|labels) = H(XY,labels) - H(labels)
def fragment6(pkt, fragSize): pkt = pkt.copy() if IPv6ExtHdrFragment not in pkt: return [pkt] return [] s = raw(pkt) if len(s) <= fragSize: return [pkt] fragPart = pkt[IPv6ExtHdrFragment].payload tmp = raw(IPv6(src="::1", dst="::1") / fragPart) fragPartLen = len(tmp) - 40 fragPartStr = s[-fragPartLen:] nh = pkt[IPv6ExtHdrFragment].nh fragHeader = pkt[IPv6ExtHdrFragment] del fragHeader.payload unfragPartLen = len(s) - fragPartLen - 8 unfragPart = pkt del pkt[IPv6ExtHdrFragment].underlayer.payload lastFragSize = fragSize - unfragPartLen - 8 innerFragSize = lastFragSize - (lastFragSize % 8) if lastFragSize <= 0 or innerFragSize == 0: warning("Provided fragment size value is too low. " + "Should be more than %d" % (unfragPartLen + 8)) return [unfragPart / fragHeader / fragPart] remain = fragPartStr res = [] fragOffset = 0 fragId = random.randint(0, 0xffffffff) if fragHeader.id is not None: fragId = fragHeader.id fragHeader.m = 1 fragHeader.id = fragId fragHeader.nh = nh while True: if (len(remain) > lastFragSize): tmp = remain[:innerFragSize] remain = remain[innerFragSize:] fragHeader.offset = fragOffset fragOffset += (innerFragSize // 8) if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp) res.append(tempo) else: fragHeader.offset = fragOffset fragHeader.m = 0 if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart / fragHeader / conf.raw_layer(load=remain) res.append(tempo) break return res
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected maximum size of fragments (MTU). The list of packets is returned. If packet does not contain an IPv6ExtHdrFragment class, it is returned in result list.
def add_to_results(self, data, label, results): raise NotImplementedError( .format( self.__class__.__name__ ) )
responsible for updating the running `results` variable with the data from this queryset/serializer combo
def MeetsConditions(knowledge_base, source): source_conditions_met = True os_conditions = ConvertSupportedOSToConditions(source) if os_conditions: source.conditions.append(os_conditions) for condition in source.conditions: source_conditions_met &= artifact_utils.CheckCondition( condition, knowledge_base) return source_conditions_met
Check conditions on the source.
def voxel_count(dset,p=None,positive_only=False,mask=None,ROI=None): all if p: dset = nl.thresh(dset,p,positive_only) else: if positive_only: dset = nl.calc(dset,) count = 0 devnull = open(os.devnull,"w") if mask: cmd = [,,,, ] cmd += [,str(mask),str(dset)] out = subprocess.check_output(cmd,stderr=devnull).split() if len(out)<4: return 0 rois = [int(x.replace(,)) for x in out[1].strip()[1:].split()] counts = [int(x.replace(,)) for x in out[3].strip().split()] count_dict = None if ROI==None: ROI = rois if ROI==: count_dict = {} ROI = rois else: if not isinstance(ROI,list): ROI = [ROI] for r in ROI: if r in rois: roi_count = counts[rois.index(r)] if count_dict!=None: count_dict[r] = roi_count else: count += roi_count else: cmd = [, , , , str(dset)] count = int(subprocess.check_output(cmd,stderr=devnull).strip()) if count_dict: return count_dict return count
returns the number of non-zero voxels :p: threshold the dataset at the given *p*-value, then count :positive_only: only count positive values :mask: count within the given mask :ROI: only use the ROI with the given value (or list of values) within the mask if ROI is 'all' then return the voxel count of each ROI as a dictionary
def poll_integration_information_for_waiting_integration_alerts(): if not polling_integration_alerts: return logger.debug("Polling information for waiting integration alerts") for integration_alert in polling_integration_alerts: configured_integration = integration_alert.configured_integration integration = configured_integration.integration polling_duration = integration.polling_duration if get_current_datetime_utc() - integration_alert.send_time > polling_duration: logger.debug("Polling duration expired for integration alert %s", integration_alert) integration_alert.status = IntegrationAlertStatuses.ERROR_POLLING.name else: integration_alert.status = IntegrationAlertStatuses.IN_POLLING.name poll_integration_alert_data(integration_alert)
Poll integration information for every integration alert that is currently waiting on a result.
def _get_bucket_region(self, bucket_name): if self._region: return self._region region = if bucket_name in self._region_map: region = self._region_map[bucket_name] else: region = self._get_bucket_location(bucket_name) self._region_map[bucket_name] = region return region
Get region based on the bucket name. :param bucket_name: Bucket name for which region will be fetched. :return: Region of bucket name.
def snapshots(): try: return _("collector").Inspector(cachedir=__opts__[], piddir=os.path.dirname(__opts__[])).db.list() except InspectorSnapshotException as err: raise CommandExecutionError(err) except Exception as err: log.error(_get_error_message(err)) raise Exception(err)
List current description snapshots. CLI Example: .. code-block:: bash salt myminion inspector.snapshots
def htmlFormat(output, pathParts = (), statDict = None, query = None): statDict = statDict or scales.getStats() if query: statDict = runQuery(statDict, query) _htmlRenderDict(pathParts, statDict, output)
Formats as HTML, writing to the given object.
def area_uri(self, area_uuid): if area_uuid not in self.areas: raise UploadException("I donuri'])
Return the URI for an Upload Area :param area_uuid: UUID of area for which we want URI :return: Upload Area URI object :rtype: UploadAreaURI :raises UploadException: if area does not exist
def kill_log_monitor(self, check_alive=True): self._kill_process_type( ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive)
Kill the log monitor. Args: check_alive (bool): Raise an exception if the process was already dead.
def write_temp_file(text=""): with NamedTemporaryFile(mode=, suffix=, delete=False) \ as tempfile: tempfile.write(text) return tempfile.name
Create a new temporary file and write some initial text to it. :param text: the text to write to the temp file :type text: str :returns: the file name of the newly created temp file :rtype: str
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None, set_identifier=None): self._halt_if_already_deleted() return self._add_record(AAAAResourceRecordSet, **values)
Creates an AAAA record attached to this hosted zone. :param str name: The fully qualified name of the record to add. :param list values: A list of value strings for the record. :keyword int ttl: The time-to-live of the record (in seconds). :keyword int weight: *For weighted record sets only*. Among resource record sets that have the same combination of DNS name and type, a value that determines what portion of traffic for the current resource record set is routed to the associated location. Ranges from 0-255. :keyword str region: *For latency-based record sets*. The Amazon EC2 region where the resource that is specified in this resource record set resides. :keyword str set_identifier: *For weighted and latency resource record sets only*. An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. 1-128 chars. :rtype: tuple :returns: A tuple in the form of ``(rrset, change_info)``, where ``rrset`` is the newly created AAAAResourceRecordSet instance.
def get_graph_by_ids(self, network_ids: List[int]) -> BELGraph: if len(network_ids) == 1: return self.get_graph_by_id(network_ids[0]) log.debug(, network_ids) graphs = self.get_graphs_by_ids(network_ids) log.debug(, network_ids) rv = union(graphs) return rv
Get a combine BEL Graph from a list of network identifiers.
def kremove(self, key, value=None): for item in self: if value is None: item.pop(key, None) else: try: item[key].remove(value) if len(item[key]) == 1: item[key] = item[key].pop() except KeyError: pass except AttributeError: if item[key] == value: item.pop(key)
Removes the given key/value from all elements. If value is not specified, the whole key is removed. If value is not None and the key is present but with a different value, or if the key is not present, silently passes.
def sasl_mechanism(name, secure, preference = 50): def decorator(klass): klass._pyxmpp_sasl_secure = secure klass._pyxmpp_sasl_preference = preference if issubclass(klass, ClientAuthenticator): _register_client_authenticator(klass, name) elif issubclass(klass, ServerAuthenticator): _register_server_authenticator(klass, name) else: raise TypeError("Not a ClientAuthenticator" " or ServerAuthenticator class") return klass return decorator
Class decorator generator for `ClientAuthenticator` or `ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl mechanism registry. :Parameters: - `name`: SASL mechanism name - `secure`: if the mechanims can be considered secure - `True` if it can be used over plain-text channel - `preference`: mechanism preference level (the higher the better) :Types: - `name`: `unicode` - `secure`: `bool` - `preference`: `int`
def show_stories(self, raw=False, limit=None): show_stories = self._get_stories(, limit) if raw: show_stories = [story.raw for story in show_stories] return show_stories
Returns list of item ids of latest Show HN stories Args: limit (int): specifies the number of stories to be returned. raw (bool): Flag to indicate whether to transform all objects into raw json. Returns: `list` object containing ids of Show HN stories.
def burstColumn(self, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn): start = self.cellsPerColumn * column cellsForColumn = [cellIdx for cellIdx in xrange(start, start + self.cellsPerColumn) if cellIdx not in self.deadCells] return self._burstColumn( self.connections, self._random, self.lastUsedIterationForSegment, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn, self.numActivePotentialSynapsesForSegment, self.iteration, self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement, self.permanenceDecrement, self.maxSegmentsPerCell, self.maxSynapsesPerSegment, learn)
Activates all of the cells in an unpredicted active column, chooses a winner cell, and, if learning is turned on, learns on one segment, growing a new segment if necessary. @param column (int) Index of bursting column. @param columnMatchingSegments (iter) Matching segments in this column, or None if there aren't any. @param prevActiveCells (list) Active cells in `t-1`. @param prevWinnerCells (list) Winner cells in `t-1`. @param learn (bool) Whether or not learning is enabled. @return (tuple) Contains: `cells` (iter), `winnerCell` (int),
def get_provider(vm_=None): if vm_ is None: provider = __active_provider_name__ or else: provider = vm_.get(, ) if in provider: prov_comps = provider.split() provider = prov_comps[0] return provider
Extract the provider name from vm
def add_note(self, note): for n in self.selected_tracks: self.tracks[n] + note
Add a note to the selected tracks. Everything container.Track supports in __add__ is accepted.
def getTaskInfo(self, task_id, **kwargs): kwargs[] = True taskinfo = yield self.call(, task_id, **kwargs) task = Task.fromDict(taskinfo) if task: task.connection = self defer.returnValue(task)
Load all information about a task and return a custom Task class. Calls "getTaskInfo" XML-RPC (with request=True to get the full information.) :param task_id: ``int``, for example 12345 :returns: deferred that when fired returns a Task (Munch, dict-like) object representing this Koji task, or none if no task was found.
def attr(*args, **kwargs): ctx = dom_tag._with_contexts[_get_thread_context()] if ctx and ctx[-1]: dicts = args + (kwargs,) for d in dicts: for attr, value in d.items(): ctx[-1].tag.set_attribute(*dom_tag.clean_pair(attr, value)) else: raise ValueError()
Set attributes on the current active tag context
def write_summary_cnts_goobjs(self, goobjs): cnts = self.get_cnts_levels_depths_recs(goobjs) self._write_summary_cnts(cnts)
Write summary of level and depth counts for active GO Terms.
def edit_message_live_location(latitude, longitude, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None, **kwargs): if not chat_id and not message_id and not inline_message_id: raise ValueError("Must specify chat_id and message_id or inline_message_id") if (chat_id and not message_id) or (not chat_id and message_id): raise ValueError("Must specify chat_id and message_id together") params = dict( latitude=latitude, longitude=longitude ) params.update( _clean_params( chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, reply_markup=reply_markup, ) ) return TelegramBotRPCRequest(, params=params, on_result=Message.from_result, **kwargs)
Use this method to edit live location messages sent by the bot or via the bot (for inline bots). A location can be edited until its live_period expires or editing is explicitly disabled by a call to stopMessageLiveLocation. On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned. :param latitude: Latitude of location. :param longitude: Longitude of location. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param message_id: Required if inline_message_id is not specified. Identifier of the sent message :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard, instructions to hide keyboard or to force a reply from the user. :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :type chat_id: int or str :type latitude: float :type longitude: float :type message_id: Integer :type inline_message_id: string :type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply :returns: On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned. :rtype: TelegramBotRPCRequest or Bool
def destroy_iam(app=, env=, **_): session = boto3.Session(profile_name=env) client = session.client() generated = get_details(env=env, app=app) generated_iam = generated.iam() app_details = collections.namedtuple(, generated_iam.keys()) details = app_details(**generated_iam) LOG.debug(, details) resource_action( client, action=, log_format=, GroupName=details.group, UserName=details.user) resource_action(client, action=, log_format=, UserName=details.user) resource_action(client, action=, log_format=, GroupName=details.group) resource_action( client, action=, log_format= , InstanceProfileName=details.profile, RoleName=details.role) resource_action( client, action=, log_format=, InstanceProfileName=details.profile) role_policies = [] try: role_policies = resource_action( client, action=, log_format=, RoleName=details.role)[] except TypeError: LOG.info(, details.role) for policy in role_policies: resource_action( client, action=, log_format= , RoleName=details.role, PolicyName=policy) attached_role_policies = [] try: attached_role_policies = resource_action( client, action=, log_format=, RoleName=details.role)[] except TypeError: LOG.info(, details.role) for policy in attached_role_policies: resource_action( client, action=, log_format= , RoleName=details.role, PolicyArn=policy[]) resource_action(client, action=, log_format=, RoleName=details.role)
Destroy IAM Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment, i.e. dev, stage, prod. Returns: True upon successful completion.
def convertAndMake(converter, handler): def convertAction(loc, value): return handler(loc, converter(value)) return convertAction
Convert with location.
def _offset_to_min(utc_offset): match = re.match(r"^([+-])?(\d\d)(\d\d)$", utc_offset) if not match: raise SaltInvocationError("Invalid UTC offset") sign = -1 if match.group(1) == else 1 hours_offset = int(match.group(2)) minutes_offset = int(match.group(3)) total_offset = sign * (hours_offset * 60 + minutes_offset) return total_offset
Helper function that converts the utc offset string into number of minutes offset. Input is in form "[+-]?HHMM". Example valid inputs are "+0500" "-0300" and "0800". These would return -300, 180, 480 respectively.
def resume(self, trigger_duration=0): if trigger_duration != 0: self._mq.send("t%d" % trigger_duration, True, type=1) else: self._mq.send("r", True, type=1) self._paused = False
Resumes pulse capture after an optional trigger pulse.
def add(self, data, name=None): if name is None: n = len(self.data) while "Series %d"%n in self.data: n += 1 name = "Series %d"%n self.data[name] = data return name
Appends a new column of data to the data source. Args: data (seq) : new data to add name (str, optional) : column name to use. If not supplied, generate a name of the form "Series ####" Returns: str: the column name used
def iterate_presentation_files(path=None, excludes=None, includes=None): if includes is None: includes = [] if excludes is None: excludes = [] includes_pattern = r.join([fnmatch.translate(x) for x in includes]) or r excludes_pattern = r.join([fnmatch.translate(x) for x in excludes]) or r includes_re = re.compile(includes_pattern) excludes_re = re.compile(excludes_pattern) def included(root, name): full_path = os.path.join(root, name) if includes_re.match(full_path): return True return (not specials_re.match(name) and not excludes_re.match(full_path)) for root, dirs, files in os.walk(path): dirs[:] = [d for d in dirs if included(root, d)] files = [f for f in files if included(root, f)] for f in files: yield os.path.relpath(os.path.join(root, f), path)
Iterates the repository presentation files relative to 'path', not including themes. Note that 'includes' take priority.
def prop_symbols(x): "Return a list of all propositional symbols in x." if not isinstance(x, Expr): return [] elif is_prop_symbol(x.op): return [x] else: return list(set(symbol for arg in x.args for symbol in prop_symbols(arg)))
Return a list of all propositional symbols in x.
def getPaths(urlOrPaths): s a directory, it walks the directory and then finds all file paths in it, and ads them too. If its a URL it just adds it to the path. :param urlOrPaths: the url or path to be scanned :return: ``list`` of paths ' if isinstance(urlOrPaths, basestring): urlOrPaths = [urlOrPaths] paths = [] for eachUrlOrPaths in urlOrPaths: if os.path.isdir(eachUrlOrPaths): for root, directories, filenames in walk(eachUrlOrPaths): for filename in filenames: paths.append(os.path.join(root,filename)) else: paths.append(eachUrlOrPaths) return paths
Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's a directory, it walks the directory and then finds all file paths in it, and ads them too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path. :param urlOrPaths: the url or path to be scanned :return: ``list`` of paths
def read_lua_file(dotted_module, path=None, context=None): path = path or DEFAULT_LUA_PATH bits = dotted_module.split() bits[-1] += name = os.path.join(path, *bits) with open(name) as f: data = f.read() if context: data = data.format(context) return data
Load lua script from the stdnet/lib/lua directory
def view_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/views api_path = "/api/v2/views/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/views#show-view
def _keep_this(self, name): for keep_name in self.keep: if name == keep_name: return True return False
Return True if there are to be no modifications to name.
def make_caption(self, caption): if not hasattr(self, "caption"): self(caption=Caption()) return self.caption.empty()(caption)
Adds/Substitutes the table's caption.
def send_location(self, peer: Peer, latitude: float, longitude: float, reply: int=None, on_success: callable=None, reply_markup: botapi.ReplyMarkup=None): pass
Send location to peer. :param peer: Peer to send message to. :param latitude: Latitude of the location. :param longitude: Longitude of the location. :param reply: Message object or message_id to reply to. :param on_success: Callback to call when call is complete. :type reply: int or Message
def tisbod(ref, body, et): ref = stypes.stringToCharP(ref) body = ctypes.c_int(body) et = ctypes.c_double(et) retmatrix = stypes.emptyDoubleMatrix(x=6, y=6) libspice.tisbod_c(ref, body, et, retmatrix) return stypes.cMatrixToNumpy(retmatrix)
Return a 6x6 matrix that transforms states in inertial coordinates to states in body-equator-and-prime-meridian coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tisbod_c.html :param ref: ID of inertial reference frame to transform from. :type ref: str :param body: ID code of body. :type body: int :param et: Epoch of transformation. :type et: float :return: Transformation (state), inertial to prime meridian. :rtype: 6x6-Element Array of floats
def load(obj, env=None, silent=None, key=None): client = get_client(obj) env_list = _get_env_list(obj, env) for env in env_list: path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env]).replace("//", "/") data = client.read(path) if data: data = data.get("data", {}).get("data", {}) try: if data and key: value = parse_conf_data(data.get(key), tomlfy=True) if value: obj.logger.debug( "vault_loader: loading by key: %s:%s (%s:%s)", key, "****", IDENTIFIER, path, ) obj.set(key, value) elif data: obj.logger.debug( "vault_loader: loading: %s (%s:%s)", list(data.keys()), IDENTIFIER, path, ) obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True) except Exception as e: if silent: if hasattr(obj, "logger"): obj.logger.error(str(e)) return False raise
Reads and loads in to "settings" a single key or all keys from vault :param obj: the settings instance :param env: settings env default='DYNACONF' :param silent: if errors should raise :param key: if defined load a single key, else load all in env :return: None
def __copyfile(source, destination): logger.info("copyfile: %s -> %s" % (source, destination)) try: __create_destdir(destination) shutil.copy(source, destination) return True except Exception as e: logger.error( "copyfile: %s -> %s failed! Error: %s", source, destination, e ) return False
Copy data and mode bits ("cp source destination"). The destination may be a directory. Args: source (str): Source file (file to copy). destination (str): Destination file or directory (where to copy). Returns: bool: True if the operation is successful, False otherwise.
def find_session(self, session_name): if not isinstance(session_name, basestring): raise TypeError("session_name can only be an instance of type basestring") sessions = self._call("findSession", in_p=[session_name]) sessions = [IGuestSession(a) for a in sessions] return sessions
Finds guest sessions by their friendly name and returns an interface array with all found guest sessions. in session_name of type str The session's friendly name to find. Wildcards like ? and * are allowed. return sessions of type :class:`IGuestSession` Array with all guest sessions found matching the name specified.
def get_posix(self, i): index = i.index value = [] try: c = next(i) if c != : raise ValueError() else: value.append(c) c = next(i) if c == : value.append(c) c = next(i) while c != : if c not in _PROPERTY: raise ValueError() if c not in _PROPERTY_STRIP: value.append(c) c = next(i) value.append(c) c = next(i) if c != or not value: raise ValueError() value.append(c) except Exception: i.rewind(i.index - index) value = [] return .join(value) if value else None
Get POSIX.
def pad_sentences(sentences, padding_word="</s>"): sequence_length = max(len(x) for x in sentences) padded_sentences = [] for i, sentence in enumerate(sentences): num_padding = sequence_length - len(sentence) new_sentence = sentence + [padding_word] * num_padding padded_sentences.append(new_sentence) return padded_sentences
Pads all sentences to the same length. The length is defined by the longest sentence. Returns padded sentences.
def EMetaclass(cls): superclass = cls.__bases__ if not issubclass(cls, EObject): sclasslist = list(superclass) if object in superclass: index = sclasslist.index(object) sclasslist.insert(index, EObject) sclasslist.remove(object) else: sclasslist.insert(0, EObject) superclass = tuple(sclasslist) orig_vars = cls.__dict__.copy() slots = orig_vars.get() if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop(, None) orig_vars.pop(, None) return MetaEClass(cls.__name__, superclass, orig_vars)
Class decorator for creating PyEcore metaclass.
def export_dse_home_in_dse_env_sh(self): sstableloader with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh: buf = dse_env_sh.readlines() with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file: for line in buf: out_file.write(line) if line == " out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n")
Due to the way CCM lays out files, separating the repository from the node(s) confs, the `dse-env.sh` script of each node needs to have its DSE_HOME var set and exported. Since DSE 4.5.x, the stock `dse-env.sh` file includes a commented-out place to do exactly this, intended for installers. Basically: read in the file, write it back out and add the two lines. 'sstableloader' is an example of a node script that depends on this, when used in a CCM-built cluster.
def postprocess(self): assert self.postscript envmod.setup() envmod.module(, ) cmd = .format(script=self.postscript) cmd = shlex.split(cmd) rc = sp.call(cmd) assert rc == 0,
Submit a postprocessing script after collation
def check_support_ucannet(cls, hw_info_ex): return cls.check_is_systec(hw_info_ex) and \ cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 3, 8)
Checks whether the module supports the usage of USB-CANnetwork driver. :param HardwareInfoEx hw_info_ex: Extended hardware information structure (see method :meth:`get_hardware_info`). :return: True when the module does support the usage of the USB-CANnetwork driver, otherwise False. :rtype: bool
def img2ascii(img_path, ascii_path, ascii_char="*", pad=0): if len(ascii_char) != 1: raise Exception("ascii_char has to be single character.") image = Image.open(img_path).convert("L") matrix = np.array(image) matrix[np.where(matrix >= 128)] = 255 matrix[np.where(matrix < 128)] = 0 lines = list() for vector in matrix: line = list() for i in vector: line.append(" " * pad) if i: line.append(" ") else: line.append(ascii_char) lines.append("".join(line)) with open(ascii_path, "w") as f: f.write("\n".join(lines))
Convert an image to ascii art text. Suppose we have an image like that: .. image:: images/rabbit.png :align: left Put some codes:: >>> from weatherlab.math.img2waveform import img2ascii >>> img2ascii(r"testdata\img2waveform\rabbit.png", ... r"testdata\img2waveform\asciiart.txt", pad=0) Then you will see this in asciiart.txt:: ****** *** *** **** ** ** ********* ** ** *** *** ** * ** ** ** ** ** ** ** * *** * * ** ** ** ** * ** ** ** * ** * * ** ** * ** ** * ** ** * ** ** * * ** ** * ** * ** ** ** ** ** ** * ** ** ** * * ** ** * ** * ** * ** * * ** ** * * ** * * * ** ** * * ** ** * ** ** ** ** ** * ** ** ** * * ** ** * * ** ** * * ** * * ** ** * * ** * ** * ** * ** * ** * ** ** ** ** * ** ** ** * ** ** ** ** ** ** * ** ** ** ** * ** ** ** ** ** ** * ** ******* * ** ******* ** ** ** ** * ** ** *** * **** *** *** *** ** **** ** *** ** *** ** ** ** ** * ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** * ** * ** ** * ** ** * ** * ** ** * ** * ** ** ** ** ** ** ** ** ** ** ** ** *** *** ** * **** **** ** * *** **** ** ** ** ** * ** * ** * * ** ** ** ** * * ** ** ** ** ** ** * ** ** ** ** ** ** ** *** ** ** ** ****** *** *** ****** ** *** * *** *** *** *** *** *** **** **** ******** ******* *** ********** ******** *** ** *** ************ ********** *** * *** ** * **** *********************** *** ** *** ** * ** **** ** ******* * *** ***** *** **** * * ***** ********** * **** * * ** ** *** * * ** * ******************************* * *** * ** ** ***** * *** ********** ** ** ********** *** ** *** ** * ***** ** * ***** ** ** ***** * * ** * ** *** *** ************ ** ****** ** * * ** ** ** * ** *** ** ******* * * ** ** ** **** * ** * ** * **** ** ** *** *** ******* ****** * ** * *** ***** *** ** ***** ** ** ** * * ***** ************************************ * **** * ** *** ** ** *********************************************** *** *** *** ** ****************************************** **** ** ** ** **** ** ** ******************************************** ** * ** ** ****** ** 
******************************************** ** * *** ** ***** *********************************************** ** **** * *** ****************************** **************** ********* ** ** *************************************** * * * ***** * ** ** ********************************************** *** * * ** ** *********************************** ******* ** * ** ** ***************************************** *** ** * *** ** * ********************************************** ** ** ****** ************************************************ ** *** **** *********************************************** ******** ** *********************************************** **** *** ** ******************************************* ** *** ** ***** ****** * * * * * ******** *** ** ** *** *** * * **** **** **** * ** ** * *** ** *** **** * * ** **** * *** ******** * *** ***** ***** ** ** ** ** *** ** *** ***** ******* * * ** * ** ******** *************** * ******************* ****************************** *** *** ********* ** ** * ** ** * ** ** * ** ** * ** ** * ** ** ** ** ** ****** * ** ********* ************************************* ********** :param img_path: the image file path :type img_path: str :param ascii_path: the output ascii text file path :type ascii_path: str :param pad: how many space been filled in between two pixels :type pad: int
def verifies( self, hash, signature ):
    """Return True if *signature* is a valid ECDSA signature of *hash*.

    :param hash: integer digest of the signed message
        (parameter name shadows the builtin ``hash``; kept for API
        compatibility)
    :param signature: object with integer components ``r`` and ``s``
    :returns: True when the signature verifies against ``self.point``,
        False otherwise
    """
    curve_gen = self.generator
    order = curve_gen.order()
    r, s = signature.r, signature.s
    # Both signature components must lie in the interval [1, order-1].
    if not (1 <= r <= order - 1):
        return False
    if not (1 <= s <= order - 1):
        return False
    # Standard ECDSA verification: w = s^-1 mod n,
    # then check x-coordinate of u1*G + u2*Q against r.
    w = numbertheory.inverse_mod( s, order )
    u1 = ( hash * w ) % order
    u2 = ( r * w ) % order
    point = u1 * curve_gen + u2 * self.point
    return ( point.x() % order ) == r
Verify that *signature* is a valid ECDSA signature of *hash*. Return True if the signature is valid, False otherwise.
def _detect_start_end(true_values): neg = zeros((1), dtype=) int_values = asarray(concatenate((neg, true_values[:-1], neg)), dtype=) cross_threshold = diff(int_values) event_starts = where(cross_threshold == 1)[0] event_ends = where(cross_threshold == -1)[0] if len(event_starts): events = vstack((event_starts, event_ends)).T else: events = None return events
From ndarray of bool values, return intervals of True values. Parameters ---------- true_values : ndarray (dtype='bool') array with bool values Returns ------- ndarray (dtype='int') N x 2 matrix with starting and ending times.
def isPythonFile(filename):
    """Return True if filename points to a Python file.

    A file qualifies when its name ends in ``.py``, or when its first
    line is a python shebang (matched by ``PYTHON_SHEBANG_REGEX``).

    :param filename: path of the file to inspect
    :type filename: str
    :returns: truthy when the file looks like Python source
    """
    if filename.endswith('.py'):
        return True

    # Editor/backup leftovers are never treated as Python files.
    if filename.endswith("~"):
        return False

    # Only sniff the first few bytes for a shebang line.
    max_bytes = 128

    try:
        # Read as bytes: PYTHON_SHEBANG_REGEX is expected to be a bytes
        # pattern and this avoids decoding errors on arbitrary files.
        with open(filename, 'rb') as f:
            text = f.read(max_bytes)
            if not text:
                return False
    except IOError:
        # Unreadable / missing file: not a Python file.
        return False

    first_line = text.splitlines()[0]
    return PYTHON_SHEBANG_REGEX.match(first_line)
Return True if filename points to a Python file.
def reftrack_status_data(rt, role):
    """Return the data for the status

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the status
    :rtype: depending on role
    :raises: None
    """
    status = rt.status()
    # Only display/edit roles carry data; all other roles yield None.
    if role not in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        return None
    return status or "Not in scene!"
Return the data for the status :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the status :rtype: depending on role :raises: None
def needs_encode(obj):
    """Return True when *obj* cannot be stored in Mongo as-is.

    Atomic values (members of the module-level ``atomic_types``) round
    trip unchanged and need no encoding.  Lists and dicts are inspected
    recursively.  Mongo rejects dicts whose keys are not of a type in
    the module-level ``valid_key_types``, so such dicts need encoding
    too.  Any other type (tuple, set, custom classes, ...) needs
    encoding.

    Examples::

        >>> needs_encode([1, [2, 3]])
        False
        >>> needs_encode(set())
        True
        >>> needs_encode({1: 2})   # non-string key
        True
    """
    obtype = type(obj)
    if obtype in atomic_types:
        return False
    if obtype is list:
        return any(needs_encode(i) for i in obj)
    if obtype is dict:
        # A dict needs encoding when any key has an invalid type or
        # any value itself needs encoding.
        return any(type(k) not in valid_key_types or needs_encode(v)
                   for (k, v) in obj.items())
    # Everything else (tuple, set, custom objects) does not round trip.
    return True
>>> from re import compile >>> atomics = (True, 1, 1.0, '', None, compile(''), datetime.now(), b'') >>> any(needs_encode(i) for i in atomics) False >>> needs_encode([1, 2, 3]) False >>> needs_encode([]) False >>> needs_encode([1, [2, 3]]) False >>> needs_encode({}) False >>> needs_encode({'1': {'2': 3}}) False >>> needs_encode({'1': [2]}) False >>> needs_encode(b'1') False Objects that don't round trip need encoding:: >>> needs_encode(tuple()) True >>> needs_encode(set()) True >>> needs_encode([1, [set()]]) True >>> needs_encode({'1': {'2': set()}}) True Mongo rejects dicts with non-string keys so they need encoding too:: >>> needs_encode({1: 2}) True >>> needs_encode({'1': {None: True}}) True