code: string, lengths 51 to 2.38k
docstring: string, lengths 4 to 15.2k
def all(cls):
    account = cls.info()
    creditusage = cls.creditusage()
    if not creditusage:
        return account
    left = account['credits'] / creditusage
    years, hours = divmod(left, 365 * 24)
    months, hours = divmod(hours, 31 * 24)
    days, hours = divmod(hours, 24)
    account.update({'credit_usage': creditusage, 'left': (years, months, days, hours)})
    return account
Get all information about this account
def _compute_site_scaling(self, C, vs30):
    site_term = np.zeros(len(vs30), dtype=float)
    site_term[vs30 < 760.0] = C["e"]
    return site_term
Returns the site scaling term as a simple coefficient
def fit(self, data, labels, **kwargs):
    self._som.train(data, **kwargs)
    bmus, q_error, t_error = self.bmus_with_errors(data)
    self.quant_error = q_error
    self.topog_error = t_error
    self._bmus = bmus
    self._calibrate(data, labels)
Train the SOM on the data and calibrate itself. After training, `self.quant_error` and `self.topog_error` are set. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param labels: the labels associated with data :type labels: iterable :param \**kwargs: optional parameters for :meth:`train`
def handle(self, handler_name, request, suffix=''):
    return self.runtime.handle(self, handler_name, request, suffix)
Handle `request` with this block's runtime.
def should_obfuscate_filename(self):
    for pattern in self.args.hide_file_names:
        try:
            compiled = re.compile(pattern, re.IGNORECASE)
            if compiled.search(self.entity):
                return True
        except re.error as ex:
            log.warning(u('Regex error ({msg}) for hide_file_names pattern: {pattern}').format(
                msg=u(ex),
                pattern=u(pattern),
            ))
    return False
Returns True if hide_file_names is true or the entity file path matches one in the list of obfuscated file paths.
async def _make_connection(self):
    return await aioredis.create_redis(
        'redis://{}:{}'.format(
            self._redis_params.get('host', 'localhost'),
            self._redis_params.get('port', 6379)
        ),
        db=int(self._redis_params.get('db', 1))
    )
Construct a connection to Redis.
def sg_seek_streamer(self, index, force, value):
    force = bool(force)
    err = self.sensor_graph.acknowledge_streamer(index, value, force)
    return [err]
Acknowledge a streamer.
def _bind_method(self, name, unconditionally=False):
    exists = self.run_func('exist', name)['result'] in [2, 3, 5]
    if not unconditionally and not exists:
        raise AttributeError("'Matlab' object has no attribute '%s'" % name)
    method_instance = MatlabFunction(weakref.ref(self), name)
    method_instance.__name__ = name
    if sys.version.startswith('3'):
        method = types.MethodType(method_instance, weakref.ref(self))
    else:
        method = types.MethodType(method_instance, weakref.ref(self), _Session)
    setattr(self, name, method)
    return getattr(self, name)
Generate a Matlab function and bind it to the instance. This is where the magic happens. When an unknown attribute of the Matlab class is requested, it is assumed to be a call to a Matlab function, and is generated and bound to the instance. This works because getattr() falls back to __getattr__ only if no attributes of the requested name can be found through normal routes (__getattribute__, __dict__, class tree). _bind_method first checks whether the requested name is a callable Matlab function before generating a binding. Parameters ---------- name : str The name of the Matlab function to call e.g. 'sqrt', 'sum', 'svd', etc unconditionally : bool, optional Bind the method without performing checks. Used to bootstrap methods that are required and known to exist Returns ------- MatlabFunction A reference to a newly bound MatlabFunction instance if the requested name is determined to be a callable function Raises ------ AttributeError: if the requested name is not a callable Matlab function
def load_trajectory(name, format=None, skip=1):
    df = datafile(name, format=format)
    ret = {}
    t, coords = df.read('trajectory', skip=skip)
    boxes = df.read('boxes')
    ret['t'] = t
    ret['coords'] = coords
    ret['boxes'] = boxes
    return ret
Read a trajectory from a file. .. seealso:: `chemlab.io.datafile`
def _serve_forever_wrapper(self, _srv, poll_interval=0.1):
    self.logger.info('Opening tunnel: {0} <> {1}'.format(
        address_to_str(_srv.local_address),
        address_to_str(_srv.remote_address))
    )
    _srv.serve_forever(poll_interval)
    self.logger.info('Tunnel: {0} <> {1} released'.format(
        address_to_str(_srv.local_address),
        address_to_str(_srv.remote_address))
    )
Wrapper for the server created for an SSH forward
def ctrl_transfer(self, dev_handle, bmRequestType, bRequest, wValue, wIndex, data, timeout):
    _not_implemented(self.ctrl_transfer)
Perform a control transfer on the endpoint 0. The direction of the transfer is inferred from the bmRequestType field of the setup packet. dev_handle is the value returned by the open_device() method. bmRequestType, bRequest, wValue and wIndex are the same fields of the setup packet. data is an array object; for OUT requests it contains the bytes to transmit in the data stage, and for IN requests it is the buffer to hold the data read. The number of bytes requested to transmit or receive is equal to the length of the array times the data.itemsize field. The timeout parameter specifies a time limit to the operation in milliseconds. Return the number of bytes written (for OUT transfers) or the data read (for IN transfers), as an array.array object.
def get_subscriptions(self, limit=100, offset=0, params={}):
    url = self.SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset)
    for key, value in params.items():
        if key == 'ids':
            value = ",".join(value)
        url += '&%s=%s' % (key, value)
    connection = Connection(self.token)
    connection.set_url(self.production, url)
    return connection.get_request()
Get all subscriptions
def read_hdf5_array(source, path=None, array_type=Array):
    dataset = io_hdf5.find_dataset(source, path=path)
    attrs = dict(dataset.attrs)
    try:
        attrs['channel'] = _unpickle_channel(attrs['channel'])
    except KeyError:
        pass
    for key in attrs:
        if isinstance(attrs[key], bytes):
            attrs[key] = attrs[key].decode('utf-8')
    return array_type(dataset[()], **attrs)
Read an `Array` from the given HDF5 object Parameters ---------- source : `str`, :class:`h5py.HLObject` path to HDF file on disk, or open `h5py.HLObject`. path : `str` path in HDF hierarchy of dataset. array_type : `type` desired return type
def request_done(self, request):
    if self._requests is None:
        return
    assert request == self._requests[0], "Unexpected request done"
    del self._requests[0]
    if request.persistent:
        if self._requests:
            self._requests[0].activate()
    else:
        self.transport.loseConnection()
Called by the active request when it is done writing
def symbolize(flt: float) -> sympy.Symbol:
    try:
        ratio = rationalize(flt)
        res = sympy.simplify(ratio)
    except ValueError:
        ratio = rationalize(flt / np.pi)
        res = sympy.simplify(ratio) * sympy.pi
    return res
Attempt to convert a real number into a simpler symbolic representation. Returns: A sympy Symbol. (Convert to a string with str(sym) or to LaTeX with sympy.latex(sym).) Raises: ValueError: If the float cannot be simplified
def start_wsgi_server(port, addr='', registry=REGISTRY):
    app = make_wsgi_app(registry)
    httpd = make_server(addr, port, app, handler_class=_SilentHandler)
    t = threading.Thread(target=httpd.serve_forever)
    t.daemon = True
    t.start()
Starts a WSGI server for prometheus metrics as a daemon thread.
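A minimal usage sketch, assuming this is the prometheus-client style exporter entry point (the import path and metric names are illustrative, not from the source):

import time
from prometheus_client import Counter, start_wsgi_server  # assumed import path

requests_total = Counter('requests_total', 'Total requests handled')
start_wsgi_server(8000)      # serve the metrics page on port 8000 in a daemon thread
while True:
    requests_total.inc()     # the exporter keeps serving while the main thread works
    time.sleep(1)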
def decode_html(html):
    if isinstance(html, unicode):
        return html
    match = CHARSET_META_TAG_PATTERN.search(html)
    if match:
        declared_encoding = match.group(1).decode("ASCII")
        with ignored(LookupError):
            return html.decode(declared_encoding, "ignore")
    with ignored(UnicodeDecodeError):
        return html.decode("utf8")
    text = TAG_MARK_PATTERN.sub(to_bytes(" "), html)
    diff = text.decode("utf8", "ignore").encode("utf8")
    sizes = len(diff), len(text)
    if abs(len(text) - len(diff)) < max(sizes) * 0.01:
        return html.decode("utf8", "ignore")
    encoding = "utf8"
    encoding_detector = chardet.detect(text)
    if encoding_detector["encoding"]:
        encoding = encoding_detector["encoding"]
    return html.decode(encoding, "ignore")
Converts a byte stream containing an HTML page into Unicode. Tries to guess the character encoding from the meta tag or by using the "chardet" library.
def consecutive_ones_property(sets, universe=None):
    if universe is None:
        universe = set()
        for S in sets:
            universe |= set(S)
    tree = PQ_tree(universe)
    try:
        for S in sets:
            tree.reduce(S)
        return tree.border()
    except IsNotC1P:
        return None
Check the consecutive ones property. :param list sets: a list of subsets of the ground set. :param groundset: the set of all elements; by default it is the union of the given sets :returns: a list ordering the ground set such that every given set appears consecutively, or None if there is no solution. :complexity: O(len(groundset) * len(sets)) :disclaimer: an optimal implementation would have complexity O(len(groundset) + len(sets) + sum(map(len, sets))), and there are more recent, simpler algorithms for this problem.
def translate_labels(val):
    if not isinstance(val, dict):
        if not isinstance(val, list):
            val = split(val)
        new_val = {}
        for item in val:
            if isinstance(item, dict):
                if len(item) != 1:
                    raise SaltInvocationError('Invalid label(s)')
                key = next(iter(item))
                val = item[key]
            else:
                try:
                    key, val = split(item, '=', 1)
                except ValueError:
                    key = item
                    val = ''
            if not isinstance(key, six.string_types):
                key = six.text_type(key)
            if not isinstance(val, six.string_types):
                val = six.text_type(val)
            new_val[key] = val
        val = new_val
    return val
Can either be a list of label names, or a list of name=value pairs. The API can accept either a list of label names or a dictionary mapping names to values, so the value we translate will be different depending on the input.
def filter(args):
    p = OptionParser(filter.__doc__)
    p.add_option("--less", default=False, action="store_true",
                 help="filter the sizes < certain cutoff [default: >=]")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    fastafile, cutoff = args
    try:
        cutoff = int(cutoff)
    except ValueError:
        sys.exit(not p.print_help())
    f = Fasta(fastafile, lazy=True)
    fw = must_open(opts.outfile, "w")
    for name, rec in f.iteritems_ordered():
        if opts.less and len(rec) >= cutoff:
            continue
        if (not opts.less) and len(rec) < cutoff:
            continue
        SeqIO.write([rec], fw, "fasta")
        fw.flush()
    return fw.name
%prog filter fastafile 100 Filter the FASTA file to contain records with size >= or <= certain cutoff.
def get_as_nullable_parameters(self, key):
    value = self.get_as_nullable_map(key)
    return Parameters(value) if value is not None else None
Converts a map element into a Parameters object, or returns None if conversion is not possible. :param key: a key of the element to get. :return: Parameters value of the element, or None if conversion is not supported.
def blast(args):
    p = OptionParser(blast.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    btabfile, = args
    btab = Btab(btabfile)
    for b in btab:
        print(b.blastline)
%prog blast btabfile Convert to BLAST -m8 format.
def to_mongo(self):
    d = copy.copy(self._fields)
    for k, v in self._slices.items():
        d[k] = {'$slice': v}
    return d
Translate projection to MongoDB query form. :return: Dictionary to put into a MongoDB JSON query :rtype: dict
def isfile(self, path, follow_symlinks=True):
    return self._is_of_type(path, S_IFREG, follow_symlinks)
Determine if path identifies a regular file. Args: path: Path to filesystem object. Returns: `True` if path points to a regular file (following symlinks). Raises: TypeError: if path is None.
def fetch(self, raise_exc=True):
    self._request(GET, raise_exc=raise_exc)
    self.fetched = True
    return self.state.copy()
Performs a GET request to the uri of this navigator
def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None,
                             frequency_num=None, status=None):
    conn = Qubole.agent(version=Cluster.api_version)
    data = {}
    if s3_location is not None:
        data["s3_location"] = s3_location
    if frequency_unit is not None:
        data["frequency_unit"] = frequency_unit
    if frequency_num is not None:
        data["frequency_num"] = frequency_num
    if status is not None:
        data["status"] = status
    return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data)
Update the snapshot schedule
def migrate(belstr: str) -> str:
    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
    return migrate_ast(bo.ast).to_string()
Migrate BEL 1 to 2.0.0 Args: bel: BEL 1 Returns: bel: BEL 2
def atanh(x):
    if isinstance(x, UncertainFunction):
        mcpts = np.arctanh(x._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.arctanh(x)
Inverse hyperbolic tangent
def prob(self, pw):
    tokens = self.pcfgtokensofw(pw)
    S, tokens = tokens[0], tokens[1:]
    l = len(tokens)
    assert l % 2 == 0, "Expecting even number of tokens!. got {}".format(tokens)
    p = float(self._T.get(S, 0.0)) / sum(v for k, v in self._T.items('__S__'))
    for i, t in enumerate(tokens):
        f = self._T.get(t, 0.0)
        if f == 0:
            return 0.0
        if i < l / 2:
            p /= f
        else:
            p *= f
    return p
Return the probability of pw under the Weir PCFG model. P[{S -> L2D1Y3, L2 -> 'ab', D1 -> '1', Y3 -> '!@#'}]
def connect(self, protocolFactory):
    deferred = self._startProcess()
    deferred.addCallback(self._connectRelay, protocolFactory)
    deferred.addCallback(self._startRelay)
    return deferred
Starts a process and connects a protocol to it.
def absolute_magnitude(distance_modulus, g, r, prob=None):
    V = g - 0.487 * (g - r) - 0.0249
    flux = np.sum(10**(-(V - distance_modulus) / 2.5))
    Mv = -2.5 * np.log10(flux)
    return Mv
Calculate the absolute magnitude from a set of bands
def _dict_to_obj(self, d):
    if JsonEncoder.TYPE_ID not in d:
        return d
    type_name = d.pop(JsonEncoder.TYPE_ID)
    if type_name in _TYPE_NAME_TO_DECODER:
        decoder = _TYPE_NAME_TO_DECODER[type_name]
        return decoder(d)
    else:
        raise TypeError("Invalid type %s.", type_name)
Converts a dictionary decoded from JSON into a Python object.
def devserver_cmd(argv=sys.argv[1:]):
    arguments = docopt(devserver_cmd.__doc__, argv=argv)
    initialize_config()
    app.run(
        host=arguments['--host'],
        port=int(arguments['--port']),
        debug=int(arguments['--debug']),
    )
Serve the web API for development. Usage: pld-devserver [options] Options: -h --help Show this screen. --host=<host> The host to use [default: 0.0.0.0]. --port=<port> The port to use [default: 5000]. --debug=<debug> Whether or not to use debug mode [default: 0].
def canonical_new_peer_list(self, peers_to_add):
    new_peers = list(set(self.new_peers + peers_to_add))
    random.shuffle(new_peers)
    tmp = []
    for peer in new_peers:
        tmp.append(self.canonical_peer(peer))
    new_peers = tmp
    if self.my_hostport in new_peers:
        new_peers.remove(self.my_hostport)
    return new_peers
Make a list of canonical new peers, using self.new_peers and the given peers to add. Return a shuffled list of canonicalized host:port strings.
def qteUpdateLogSlot(self):
    log = self.logHandler.fetch(start=self.qteLogCnt)
    self.qteLogCnt += len(log)
    if not len(log):
        return
    # Collapse consecutive identical messages into (entry, repeat count) pairs.
    log_pruned = []
    last_entry = log[0]
    num_rep = -1
    for cur_entry in log:
        if last_entry.msg == cur_entry.msg:
            num_rep += 1
        else:
            log_pruned.append([last_entry, num_rep])
            num_rep = 0
            last_entry = cur_entry
    log_pruned.append([cur_entry, num_rep])
    log_formatted = ""
    for cur_entry in log_pruned:
        log_formatted += self.qteFormatMessage(cur_entry[0], cur_entry[1])
        log_formatted += '\n'
    self.qteText.insertHtml(log_formatted)
    self.qteMoveToEndOfBuffer()
    if self.qteAutoActivate:
        self.qteAutoActivate = False
        self.qteMain.qteMakeAppletActive(self)
Fetch and display the next batch of log messages.
def add_alias(agent, prefix, alias):
    return _broadcast(agent, AddMappingManager, RecordType.record_CNAME, prefix, alias)
Adds an alias mapping with a contract. It has high latency but gives some kind of guarantee.
def info_hash_base32(self):
    if getattr(self, '_data', None):
        return b32encode(sha1(bencode(self._data['info'])).digest())
    else:
        raise exceptions.TorrentNotGeneratedException
Returns the base32 info hash of the torrent. Useful for generating magnet links. .. note:: ``generate()`` must be called first.
def write_content(self, content, destination):
    directory = os.path.dirname(destination)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    with io.open(destination, 'w', encoding='utf-8') as f:
        f.write(content)
    return destination
Write given content to destination path. It will create the needed directory structure first if it contains directories that do not already exist. Args: content (str): Content to write to target file. destination (str): Destination path for target file. Returns: str: Path where target file has been written.
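A brief usage sketch; the object name and paths are illustrative (any object exposing the write_content method above would do), showing that intermediate directories are created on demand:

# Hypothetical usage of the method above.
path = builder.write_content(u"body { color: red; }", "build/css/app.css")
print(path)  # "build/css/app.css"; the build/css/ directories are created if missing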
def all_errors(self, joiner="; "):
    parts = []
    for pname, errs in self.errors.items():
        for err in errs:
            parts.append("{0}: {1}".format(pname, err))
    return joiner.join(parts)
Returns a string representation of all errors recorded for the instance.
def safe_type(self, data, tree):
    if not isinstance(data, list):
        name = self.__class__.__name__
        msg = "did not pass validation against callable: %s" % name
        reason = 'expected a list but got %s' % safe_repr(data)
        raise Invalid(self.schema, tree, reason=reason, pair='value', msg=msg)
Make sure that the incoming data complies with the class type we are expecting it to be. In this case, classes that inherit from this base class expect data to be of type ``list``.
def read_git_commit_timestamp_for_file(filepath, repo_path=None):
    repo = git.repo.base.Repo(path=repo_path, search_parent_directories=True)
    head_commit = repo.head.commit
    for commit in head_commit.iter_parents(filepath):
        return commit.committed_datetime
    raise IOError('File {} not found'.format(filepath))
Obtain the timestamp for the most recent commit to a given file in a Git repository. Parameters ---------- filepath : `str` Repository-relative path for a file. repo_path : `str`, optional Path to the Git repository. Leave as `None` to use the current working directory. Returns ------- commit_timestamp : `datetime.datetime` The datetime of the most recent commit to the given file. Raises ------ IOError Raised if the ``filepath`` does not exist in the Git repository.
def compute_K_analytical(self, spacing):
    assert isinstance(spacing, Number)
    K = geometric_factors.compute_K_analytical(self.data, spacing)
    self.data = geometric_factors.apply_K(self.data, K)
    fix_sign_with_K(self.data)
Assuming an equal electrode spacing, compute the K-factor over a homogeneous half-space. For more complex grids, please refer to the module: reda.utils.geometric_factors Parameters ---------- spacing: float Electrode spacing
def sync_files(self, dataset_key):
    try:
        self._datasets_api.sync(*(parse_dataset_key(dataset_key)))
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
Trigger synchronization process to update all dataset files linked to source URLs. :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.sync_files('username/test-dataset') # doctest: +SKIP
def detect(self, text):
    t = text.encode("utf-8")
    reliable, index, top_3_choices = cld2.detect(t, bestEffort=False)
    if not reliable:
        self.reliable = False
        reliable, index, top_3_choices = cld2.detect(t, bestEffort=True)
        if not self.quiet:
            if not reliable:
                raise UnknownLanguage("Try passing a longer snippet of text")
            else:
                logger.warning("Detector is not able to detect the language reliably.")
    self.languages = [Language(x) for x in top_3_choices]
    self.language = self.languages[0]
    return self.language
Decide which language is used to write the text. The method tries first to detect the language with high reliability. If that is not possible, the method switches to best effort strategy. Args: text (string): A snippet of text, the longer it is the more reliable we can detect the language used to write the text.
def keep_only_update_source_in_field(field, root, head, update):
    update_sources = {source.lower() for source in get_value(thaw(update), '.'.join([field, 'source']), [])}
    if len(update_sources) != 1:
        return root, head, update
    source = update_sources.pop()
    if field in root:
        root = root.set(field, remove_elements_with_source(source, root[field]))
    if field in head:
        head = head.set(field, remove_elements_with_source(source, head[field]))
    return root, head, update
Remove elements from root and head where ``source`` matches the update. This is useful if the update needs to overwrite all elements with the same source. .. note:: If the update doesn't contain exactly one source in ``field``, the records are returned with no modifications. Args: field (str): the field to filter out. root (pmap): the root record, whose ``field`` will be cleaned. head (pmap): the head record, whose ``field`` will be cleaned. update (pmap): the update record, from which the ``source`` is read. Returns: tuple: ``(root, head, update)`` with some elements filtered out from ``root`` and ``head``.
def results(self, *args, **kwargs):
    def worker():
        kwargs['page'] = 1
        while True:
            response = self.client(*args, **kwargs)
            if isinstance(response, list):
                yield response
                break
            elif _is_page(response):
                yield response['results']
                if response['next']:
                    kwargs['page'] += 1
                else:
                    break
            else:
                raise NoResultsError(response)
    return itertools.chain.from_iterable(worker())
Return an iterator with all pages of data. Raises NoResultsError with the response if there is unexpected data.
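A usage sketch of how such a paginating wrapper is typically consumed; the object and endpoint names are hypothetical, not from the source:

# Hypothetical usage: 'api' wraps a paginated HTTP client; results() flattens all pages.
for item in api.results('tasks', status='open'):
    print(item['id'])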
def ContainsNone(self, *values):
    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_NONE')
    return self._query_builder
Sets the type of the WHERE clause as "contains none". Args: *values: The values to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
def get_module_attr(module_filename, module_attr, namespace=None):
    if namespace is None:
        namespace = {}
    module_filename = os.path.abspath(module_filename)
    namespace['__file__'] = module_filename
    module_dir = os.path.dirname(module_filename)
    old_cwd = os.getcwd()
    old_sys_path = sys.path[:]
    try:
        os.chdir(module_dir)
        sys.path.append(module_dir)
        with open(module_filename, 'r') as mf:
            exec(compile(mf.read(), module_filename, 'exec'), namespace)
        return namespace[module_attr]
    finally:
        os.chdir(old_cwd)
        sys.path = old_sys_path
Get an attribute from a module. This uses exec to load the module with a private namespace, and then plucks and returns the given attribute from that module's namespace. Note that, while this method doesn't have any explicit unit tests, it is tested implicitly by the doctor's own documentation. The Sphinx build process will fail to generate docs if this does not work. :param str module_filename: Path to the module to execute (e.g. "../src/app.py"). :param str module_attr: Attribute to pluck from the module's namespace. (e.g. "app"). :param dict namespace: Optional namespace. If one is not passed, an empty dict will be used instead. Note that this function mutates the passed namespace, so you can inspect a passed dict after calling this method to see how the module changed it. :returns: The attribute from the module. :raises KeyError: if the module doesn't have the given attribute.
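A short usage sketch built on the docstring's own example; the module path and attribute name are illustrative:

# Hypothetical usage: pluck a Flask-style `app` object out of ../src/app.py.
app = get_module_attr('../src/app.py', 'app')

# Passing a namespace lets you inspect everything the module defined as a side effect.
ns = {}
app = get_module_attr('../src/app.py', 'app', namespace=ns)
print(sorted(ns.keys()))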
def __identify_user(self, username, csrf):
    data = {
        "username": username,
        "csrf": csrf,
        "apiClient": "WEB",
        "bindDevice": "false",
        "skipLinkAccount": "false",
        "redirectTo": "",
        "skipFirstUse": "",
        "referrerId": "",
    }
    r = self.post("/login/identifyUser", data)
    if r.status_code == requests.codes.ok:
        result = r.json()
        new_csrf = getSpHeaderValue(result, CSRF_KEY)
        auth_level = getSpHeaderValue(result, AUTH_LEVEL_KEY)
        return (new_csrf, auth_level)
    return (None, None)
Returns reusable CSRF code and the auth level as a 2-tuple
def _import_plugins(self):
    if self.detected:
        return
    self.scanLock.acquire()
    if self.detected:
        return
    try:
        import_apps_submodule("content_plugins")
        self.detected = True
    finally:
        self.scanLock.release()
Internal function, ensure all plugin packages are imported.
def _vertex_net_cost(vertex, v2n, placements, has_wrap_around_links, machine):
    total_cost = 0.0
    for net in v2n[vertex]:
        total_cost += _net_cost(net, placements, has_wrap_around_links, machine)
    return total_cost
Get the total cost of the nets connected to the given vertex. Parameters ---------- vertex The vertex whose nets we're interested in. v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...} placements : {vertex: (x, y), ...} has_wrap_around_links : bool machine : :py:class:`rig.place_and_route.Machine` Returns ------- float
def abort_now():
    if os.name == 'posix':
        import signal
        os.kill(os.getpid(), signal.SIGTERM)
        time.sleep(1)
        os.kill(os.getpid(), signal.SIGKILL)
    elif os.name == 'nt':
        os.abort()
    else:
        os._exit(3)
Force exit of current process without cleanup.
def warning(message):
    import lltk.config as config
    if config['warnings']:
        try:
            from termcolor import colored
        except ImportError:
            def colored(message, color):
                return message
        print colored('@LLTK-WARNING: ' + message, 'red')
Prints a message if warning mode is enabled.
def search(self, id_list: List, negated_classes: List, limit: Optional[int], method: Optional) -> List[SimResult]:
    raise NotImplementedError
Given an input list of classes or individuals, provides a ranking of similar profiles
def mimf_ferrario(mi):
    mf = -0.00012336*mi**6 + 0.003160*mi**5 - 0.02960*mi**4 + \
         0.12350*mi**3 - 0.21550*mi**2 + 0.19022*mi + 0.46575
    return mf
Curvature MiMf from Ferrario et al. 2005MNRAS.361.1131.
def GetOptions(self):
    if self._options:
        return self._options
    from google.protobuf import descriptor_pb2
    try:
        options_class = getattr(descriptor_pb2, self._options_class_name)
    except AttributeError:
        raise RuntimeError('Unknown options class name %s!' % (self._options_class_name))
    self._options = options_class()
    return self._options
Retrieves descriptor options. This method returns the options set or creates the default options for the descriptor.
def actualize_source_type(self, sources, prop_set):
    assert is_iterable_typed(sources, VirtualTarget)
    assert isinstance(prop_set, property_set.PropertySet)
    result = []
    for i in sources:
        scanner = None
        if i.type():
            scanner = b2.build.type.get_scanner(i.type(), prop_set)
        r = i.actualize(scanner)
        result.append(r)
    return result
Helper for 'actualize_sources'. For each passed source, actualizes it with the appropriate scanner. Returns the actualized virtual targets.
def patch_wave_header(body):
    length = len(body)
    padded = length + length % 2
    total = WAVE_HEADER_LENGTH + padded
    header = copy.copy(WAVE_HEADER)
    header[4:8] = bytearray(struct.pack('<I', total))
    header += bytearray(struct.pack('<I', length))
    data = header + body
    if length != padded:
        data = data + bytearray([0])
    return data
Patch a WAVE header onto the given wave body. :param body: the wave content body; it should be a bytearray.
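A small sketch of how the helper might be called, assuming the module-level WAVE_HEADER constants exist as used above; the silent 16-bit samples are illustrative:

import struct

# Hypothetical usage: wrap 100 silent 16-bit PCM samples in a WAVE header.
body = bytearray(struct.pack('<100h', *([0] * 100)))
wave_bytes = patch_wave_header(body)
with open('silence.wav', 'wb') as f:
    f.write(wave_bytes)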
def mifare_classic_read_block(self, block_number):
    response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
                                  params=[0x01, MIFARE_CMD_READ, block_number & 0xFF],
                                  response_length=17)
    if response[0] != 0x00:
        return None
    return response[1:]
Read a block of data from the card. Block number should be the block to read. If the block is successfully read a bytearray of length 16 with data starting at the specified block will be returned. If the block is not read then None will be returned.
def pull_full_properties(self):
    full_properties = self.manager.session.get(self._uri)
    self._properties = dict(full_properties)
    self._properties_timestamp = int(time.time())
    self._full_properties = True
Retrieve the full set of resource properties and cache them in this object. Authorization requirements: * Object-access permission to this resource. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def thread(self, value: str):
    if value is not None and not isinstance(value, str):
        raise TypeError("'thread' MUST be a string")
    self._thread = value
Set thread id of the message Args: value (str): the thread id
def cli_login(self, password='', captcha='', email_code='', twofactor_code='', language='english'):
    while True:
        try:
            return self.login(password, captcha, email_code, twofactor_code, language)
        except (LoginIncorrect, CaptchaRequired) as exp:
            email_code = twofactor_code = ''
            if isinstance(exp, LoginIncorrect):
                prompt = ("Enter password for %s: " if not password
                          else "Invalid password for %s. Enter password: ")
                password = getpass(prompt % repr(self.username))
            if isinstance(exp, CaptchaRequired):
                prompt = "Solve CAPTCHA at %s\nCAPTCHA code: " % self.captcha_url
                captcha = _cli_input(prompt)
            else:
                captcha = ''
        except EmailCodeRequired:
            prompt = ("Enter email code: " if not email_code
                      else "Incorrect code. Enter email code: ")
            email_code, twofactor_code = _cli_input(prompt), ''
        except TwoFactorCodeRequired:
            prompt = ("Enter 2FA code: " if not twofactor_code
                      else "Incorrect code. Enter 2FA code: ")
            email_code, twofactor_code = '', _cli_input(prompt)
Generates CLI prompts to perform the entire login process :param password: password, if it wasn't provided on instance init :type password: :class:`str` :param captcha: text response for captcha challenge :type captcha: :class:`str` :param email_code: email code for steam guard :type email_code: :class:`str` :param twofactor_code: 2FA code for steam guard :type twofactor_code: :class:`str` :param language: select language for steam web pages (sets language cookie) :type language: :class:`str` :return: a session on success and :class:`None` otherwise :rtype: :class:`requests.Session`, :class:`None` .. code:: python In [3]: user.cli_login() Enter password for 'steamuser': Solve CAPTCHA at https://steamcommunity.com/login/rendercaptcha/?gid=1111111111111111111 CAPTCHA code: 123456 Invalid password for 'steamuser'. Enter password: Solve CAPTCHA at https://steamcommunity.com/login/rendercaptcha/?gid=2222222222222222222 CAPTCHA code: abcdef Enter 2FA code: AB123 Out[3]: <requests.sessions.Session at 0x6fffe56bef0>
def get_installed_extension(name, user=None, host=None, port=None, maintenance_db=None,
                            password=None, runas=None):
    return installed_extensions(user=user,
                                host=host,
                                port=port,
                                maintenance_db=maintenance_db,
                                password=password,
                                runas=runas).get(name, None)
Get info about an installed postgresql extension CLI Example: .. code-block:: bash salt '*' postgres.get_installed_extension plpgsql
def resource_filename(package_or_requirement, resource_name):
    if pkg_resources.resource_exists(package_or_requirement, resource_name):
        return pkg_resources.resource_filename(package_or_requirement, resource_name)
    path = _search_in_share_folders(package_or_requirement, resource_name)
    if path:
        return path
    raise RuntimeError("Resource {} not found in {}".format(package_or_requirement, resource_name))
Similar to pkg_resources.resource_filename, but if the resource is not found via pkg_resources it also looks in a predefined list of paths in order to find the resource :param package_or_requirement: the module in which the resource resides :param resource_name: the name of the resource :return: the path to the resource :rtype: str
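A usage sketch; the package and resource names are illustrative, not from the source:

# Hypothetical usage: locate a data file shipped with 'mypackage', falling back to share/ dirs.
path = resource_filename('mypackage', 'data/defaults.yaml')
with open(path) as fh:
    print(fh.read())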
def _CreateDynamicDisplayAdSettings(media_service, opener):
    image = _CreateImage(media_service, opener, 'https://goo.gl/dEvQeF')
    logo = {
        'type': 'IMAGE',
        'mediaId': image['mediaId'],
        'xsi_type': 'Image'
    }
    dynamic_settings = {
        'landscapeLogoImage': logo,
        'pricePrefix': 'as low as',
        'promoText': 'Free shipping!',
        'xsi_type': 'DynamicSettings',
    }
    return dynamic_settings
Creates settings for dynamic display ad. Args: media_service: a SudsServiceProxy instance for AdWords's MediaService. opener: an OpenerDirector instance. Returns: The dynamic display ad settings.
def get_lib_from(search_directory, lib_extension='.so'):
    for root, dirs, files in walk(search_directory):
        for file in files:
            if file.endswith(lib_extension):
                print('get_lib_from: {}\n\t- {}'.format(
                    search_directory, join(root, file)))
                return join(root, file)
    return None
Scan directories recursively until finding any file with the given extension. The default extension to search is ``.so``.
def execute(self, sql, *args, **kwargs):
    try:
        self.cursor.execute(sql, *args)
    except self.sqlite3.InterfaceError, msg:
        raise self.sqlite3.InterfaceError(unicode(msg) + '\nTry converting types or pickling.')
    rows = self.cursor.fetchall()
    self.__commit_if_necessary(kwargs)
    if None == self.cursor.description:
        return None
    else:
        colnames = [d[0].decode('utf-8') for d in self.cursor.description]
        rawdata = [OrderedDict(zip(colnames, row)) for row in rows]
        return rawdata
Run raw SQL on the database, and receive relaxing output. This is sort of the foundational method that most of the others build on.
def set_direct(self, address_value_dict):
    with self._lock:
        for address, value in address_value_dict.items():
            self._validate_write(address)
            if address in self._state:
                self._state[address].set_result(result=value)
            else:
                fut = _ContextFuture(address=address)
                self._state[address] = fut
                fut.set_result(result=value)
Called in the context manager's set method to either overwrite the value for an address, or create a new future and immediately set a value in the future. Args: address_value_dict (dict of str:bytes): The unique full addresses with bytes to set at that address. Raises: AuthorizationException
def reorder(self, dst_order, arr, src_order=None):
    if dst_order is None:
        dst_order = self.viewer.rgb_order
    if src_order is None:
        src_order = self.rgb_order
    if src_order != dst_order:
        arr = trcalc.reorder_image(dst_order, arr, src_order)
    return arr
Reorder the output array to match that needed by the viewer.
def execute_once(self, string):
    for rule in self.rules:
        if rule[0] in string:
            pos = string.find(rule[0])
            self.last_rule = rule
            return string[:pos] + rule[1] + string[pos + len(rule[0]):]
    self.last_rule = None
    return string
Execute only one rule.
def exit(self):
    self.client.flush()
    self.client.close()
    super(Export, self).exit()
Close the Kafka export module.
def resolve(self, notes=None):
    self.set_status(self._redmine.ISSUE_STATUS_ID_RESOLVED, notes=notes)
Save all changes and resolve this issue
def force_directed(self, defaultEdgeWeight=None, defaultNodeMass=None,
                   defaultSpringCoefficient=None, defaultSpringLength=None, EdgeAttribute=None,
                   isDeterministic=None, maxWeightCutoff=None, minWeightCutoff=None, network=None,
                   NodeAttribute=None, nodeList=None, numIterations=None, singlePartition=None,
                   Type=None, verbose=None):
    network = check_network(self, network, verbose=verbose)
    PARAMS = set_param(['defaultEdgeWeight', 'defaultNodeMass', 'defaultSpringCoefficient',
                        'defaultSpringLength', 'EdgeAttribute', 'isDeterministic', 'maxWeightCutoff',
                        'minWeightCutoff', 'network', 'NodeAttribute', 'nodeList', 'numIterations',
                        'singlePartition', 'Type'],
                       [defaultEdgeWeight, defaultNodeMass,
                        defaultSpringCoefficient, defaultSpringLength, EdgeAttribute, isDeterministic,
                        maxWeightCutoff, minWeightCutoff, network, NodeAttribute, nodeList, numIterations,
                        singlePartition, Type])
    response = api(url=self.__url + "/force-directed", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
Execute the Prefuse Force Directed Layout on a network :param defaultEdgeWeight (string, optional): The default edge weight to consider, default is 0.5 :param defaultNodeMass (string, optional): Default Node Mass, in numeric value :param defaultSpringCoefficient (string, optional): Default Spring Coefficient, in numeric value :param defaultSpringLength (string, optional): Default Spring Length, in numeric value :param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown :param isDeterministic (string, optional): Force deterministic layouts (slower); boolean values only, true or false; defaults to false :param maxWeightCutoff (string, optional): The maximum edge weight to consider, default to the Double.MAX value :param minWeightCutoff (string, optional): The minimum edge weight to consider, numeric values, default is 0 :param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network. :param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown :param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values. :param numIterations (string, optional): Number of Iterations, in numeric value :param singlePartition (string, optional): Don't partition graph before layout; boolean values only, true or false; defaults to false :param Type (string, optional): How to interpret weight values; must be one of Heuristic, -Log(value), 1 - normalized value and normalized value. Defaults to Heuristic = ['Heuristic', '-Log(value)', '1 - normalized value', 'normalized value']
def multivariate_hypergeometric_expval(n, m):
    m = np.asarray(m, float)
    return n * (m / m.sum())
Expected value of the multivariate hypergeometric distribution. Parameters: - `n` : Number of draws. - `m` : Number of items in each category.
def uninstall_hook(ctx):
    try:
        lint_config = ctx.obj[0]
        hooks.GitHookInstaller.uninstall_commit_msg_hook(lint_config)
        hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
        click.echo(u"Successfully uninstalled gitlint commit-msg hook from {0}".format(hook_path))
        ctx.exit(0)
    except hooks.GitHookInstallerError as e:
        click.echo(ustr(e), err=True)
        ctx.exit(GIT_CONTEXT_ERROR_CODE)
Uninstall gitlint commit-msg hook.
def bbox_flip(bbox, d, rows, cols):
    if d == 0:
        bbox = bbox_vflip(bbox, rows, cols)
    elif d == 1:
        bbox = bbox_hflip(bbox, rows, cols)
    elif d == -1:
        bbox = bbox_hflip(bbox, rows, cols)
        bbox = bbox_vflip(bbox, rows, cols)
    else:
        raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d))
    return bbox
Flip a bounding box either vertically, horizontally or both depending on the value of `d`. Raises: ValueError: if value of `d` is not -1, 0 or 1.
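A quick sketch of the three flip modes; the box coordinates and image size are illustrative, and a normalized (x_min, y_min, x_max, y_max) box is assumed, in the style of the function above:

bbox = (0.1, 0.2, 0.4, 0.5)
vflipped = bbox_flip(bbox, d=0, rows=256, cols=256)   # vertical flip
hflipped = bbox_flip(bbox, d=1, rows=256, cols=256)   # horizontal flip
both = bbox_flip(bbox, d=-1, rows=256, cols=256)      # both axes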
def time_estimate(self, duration, **kwargs):
    path = '%s/%s/time_estimate' % (self.manager.path, self.get_id())
    data = {'duration': duration}
    return self.manager.gitlab.http_post(path, post_data=data, **kwargs)
Set an estimated time of work for the object. Args: duration (str): Duration in human format (e.g. 3h30) **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTimeTrackingError: If the time tracking update cannot be done
def compare_config(self):
    if self.ssh_connection is False:
        self._open_ssh()
    self.ssh_device.exit_config_mode()
    diff = self.ssh_device.send_command("show config diff")
    return diff.strip()
Netmiko is being used to obtain config diffs because pan-python doesn't support the needed command.
def _init_grps(code2nt):
    seen = set()
    seen_add = seen.add
    groups = [nt.group for nt in code2nt.values()]
    return [g for g in groups if not (g in seen or seen_add(g))]
Return list of groups in same order as in code2nt
def connect(self, signal, slot, transform=None, condition=None):
    if not signal in self.signals:
        print("WARNING: {0} is trying to connect a slot to an undefined signal: {1}".format(
            self.__class__.__name__, str(signal)))
        return
    if not hasattr(self, 'connections'):
        self.connections = {}
    connection = self.connections.setdefault(signal, {})
    connection = connection.setdefault(condition, {})
    connection[slot] = transform
Defines a connection between this objects signal and another objects slot signal: the signal this class will emit, to cause the slot method to be called receiver: the object containing the slot method to be called slot: the slot method to call transform: an optional value override to pass into the slot method as the first variable condition: only call the slot if the value emitted matches the required value or calling required returns True
def InternalExchange(self, cmd, payload_in):
    self.logger.debug('payload: ' + str(list(payload_in)))
    payload = bytearray()
    payload[:] = payload_in
    for _ in range(2):
        self.InternalSend(cmd, payload)
        ret_cmd, ret_payload = self.InternalRecv()
        if ret_cmd == UsbHidTransport.U2FHID_ERROR:
            if ret_payload == UsbHidTransport.ERR_CHANNEL_BUSY:
                time.sleep(0.5)
                continue
            raise errors.HidError('Device error: %d' % int(ret_payload[0]))
        elif ret_cmd != cmd:
            raise errors.HidError('Command mismatch!')
        return ret_payload
    raise errors.HidError('Device Busy. Please retry')
Sends and receives a message from the device.
def __learn_oneself(self):
    if not self.__parent_path or not self.__text_nodes:
        raise Exception("This error occurred because the step constructor "
                        "had insufficient textnodes or it had empty string "
                        "for its parent xpath")
    self.tnodes_cnt = len(self.__text_nodes)
    self.ttl_strlen = sum([len(tnode) for tnode in self.__text_nodes])
    self.avg_strlen = self.ttl_strlen / self.tnodes_cnt
calculate cardinality, total and average string length
async def open(self) -> '_BaseAgent':
    LOGGER.debug('_BaseAgent.open >>>')
    await self.wallet.open()
    LOGGER.debug('_BaseAgent.open <<<')
    return self
Context manager entry; open wallet. For use when keeping agent open across multiple calls. :return: current object
def variance_larger_than_standard_deviation(x):
    y = np.var(x)
    return y > np.sqrt(y)
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x being larger than 1 :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: bool
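A self-contained check of the property the feature encodes (the variance exceeds the standard deviation exactly when the variance exceeds 1); the sample arrays are illustrative:

import numpy as np

x = np.array([0.1, 0.2, 0.15, 0.18])   # low-spread series: variance < 1
print(np.var(x) > np.sqrt(np.var(x)))  # False

y = np.array([1.0, 5.0, -3.0, 8.0])    # high-spread series: variance > 1
print(np.var(y) > np.sqrt(np.var(y)))  # True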
def getPlaintextLen(self, ciphertext):
    completeCiphertextHeader = (len(ciphertext) >= 16)
    if completeCiphertextHeader is False:
        raise RecoverableDecryptionError('Incomplete ciphertext header.')
    ciphertext_header = ciphertext[:16]
    L = self._ecb_enc_K1.decrypt(ciphertext_header)
    padding_expected = '\x00\x00\x00\x00'
    padding_actual = L[-8:-4]
    validPadding = (padding_actual == padding_expected)
    if validPadding is False:
        raise UnrecoverableDecryptionError('Invalid padding: ' + padding_actual)
    message_length = fte.bit_ops.bytes_to_long(L[-8:])
    msgLenNonNegative = (message_length >= 0)
    if msgLenNonNegative is False:
        raise UnrecoverableDecryptionError('Negative message length.')
    return message_length
Given a ``ciphertext`` with a valid header, returns the length of the plaintext payload.
def _create_mappings(self, spec):
    ret = dict(zip(set(spec.fields), set(spec.fields)))
    ret.update(dict([(n, s.alias) for n, s in spec.fields.items() if s.alias]))
    return ret
Create property name map based on aliases.
def get_predecessors(self, head):
    frozen_head = frozenset(head)
    if frozen_head not in self._predecessors:
        return set()
    return set(self._predecessors[frozen_head].values())
Given a head set of nodes, get a list of edges of which the node set is the head of each edge. :param head: set of nodes that correspond to the heads of some (possibly empty) set of edges. :returns: set -- hyperedge_ids of the hyperedges that have the given set as their head.
def _remove_existing(self):
    for key in self._keys:
        if key in os.environ:
            LOG.debug('%r: removing old key %r', self, key)
            del os.environ[key]
    self._keys = []
When a change is detected, remove keys that existed in the old file.
def read_stack_frame(self, structure, offset=0):
    aProcess = self.get_process()
    stackData = aProcess.read_structure(self.get_fp() + offset, structure)
    return tuple([stackData.__getattribute__(name) for (name, type) in stackData._fields_])
Reads the stack frame of the thread. @type structure: ctypes.Structure @param structure: Structure of the stack frame. @type offset: int @param offset: Offset from the frame pointer to begin reading. The frame pointer is the same returned by the L{get_fp} method. @rtype: tuple @return: Tuple of elements read from the stack frame. The type of each element matches the types in the stack frame structure.
def save_callback(sender, instance, created, update_fields, **kwargs):
    if validate_instance(instance):
        status = 'add' if created is True else 'change'
        change = ''
        if status == 'change' and 'al_chl' in instance.__dict__.keys():
            changelog = instance.al_chl.modification
            change = ' to following changed: {}'.format(changelog)
        processor(status, sender, instance, update_fields, addition=change)
Save object & link logging entry
def magnetic_lat(inc):
    rad = old_div(np.pi, 180.)
    paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad)
    return paleo_lat
returns magnetic latitude from inclination
def as_list(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        if isinstance(response, Response):
            return response
        return as_json_list(
            response,
            **_serializable_params(request.args, check_groupby=True))
    return wrapper
A decorator used to return a JSON response of a list of model objects. It expects the decorated function to return a list of model instances. It then converts the instances to dicts and serializes them into a json response Examples: >>> @app.route('/api') ... @as_list ... def list_customers(): ... return Customer.all()
def pseudosample(x):
    BXs = []
    for k in range(len(x)):
        ind = random.randint(0, len(x) - 1)
        BXs.append(x[ind])
    return BXs
draw a bootstrap sample of x
def repair_url(url):
    url = url.strip('\n')
    if not re.match(r"^http", url):
        url = "http://" + url
    if "?" in url:
        url, _ = url.split('?')
    if not url.endswith("/"):
        return url + "/"
    else:
        return url
Fixes URL. @param url: url to repair. @param out: instance of StandardOutput as defined in this lib. @return: Newline characters are stripped from the URL string. If the url string parameter does not start with http, it prepends http:// If the url string parameter does not end with a slash, appends a slash. If the url contains a query string, it gets removed.
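Illustrative inputs and the outputs the rules above produce:

print(repair_url("example.com"))                 # "http://example.com/"
print(repair_url("http://example.com/a?x=1"))    # "http://example.com/a/"
print(repair_url("https://example.com/path/"))   # "https://example.com/path/"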
def open(self, auto_commit=None, schema=None):
    if schema is None:
        schema = self.schema
    ac = auto_commit if auto_commit is not None else schema.auto_commit
    exe = ExecutionContext(self.path, schema=schema, auto_commit=ac)
    if not os.path.isfile(self.path) or os.path.getsize(self.path) == 0:
        getLogger().warning("DB does not exist at {}. Setup is required.".format(self.path))
        if schema is not None and schema.setup_files:
            for file_path in schema.setup_files:
                getLogger().debug("Executing script file: {}".format(file_path))
                exe.cur.executescript(self.read_file(file_path))
        if schema.setup_scripts:
            for script in schema.setup_scripts:
                exe.cur.executescript(script)
    return exe
Create a context to execute queries
def beforeSummaryReport(self, event):
    self.prof.disable()
    stats = pstats.Stats(self.prof, stream=event.stream).sort_stats(self.sort)
    event.stream.writeln(nose2.util.ln('Profiling results'))
    stats.print_stats()
    if self.pfile:
        stats.dump_stats(self.pfile)
    if self.cachegrind:
        visualize(self.prof.getstats())
Output profiling results
def build_grouped_ownership_map(table, key_from_row, value_from_row, group_key):
    grouped_rows = groupby(
        group_key,
        sa.select(table.c).execute().fetchall(),
    )
    return {
        key: _build_ownership_map_from_rows(
            rows,
            key_from_row,
            value_from_row,
        )
        for key, rows in grouped_rows.items()
    }
Builds a dict mapping group keys to maps of keys to lists of OwnershipPeriods, from a db table.
def add_aggregate(self, name, data_fac):
    @self.add_target(name)
    def wrap(outdir, c):
        return data_fac()
Add an aggregate target to this nest. Since nests added after the aggregate can access the construct returned by the factory function value, it can be mutated to provide additional values for use when the decorated function is called. To do something with the aggregates, you must :meth:`SConsWrap.pop` nest levels created between addition of the aggregate and then can add any normal targets you would like which take advantage of the targets added to the data structure. :param name: Name for the target in the nest :param data_fac: a nullary factory function which will be called immediately for each of the current control dictionaries and stored in each dictionary with the given name as in :meth:`SConsWrap.add_target`.
def _ps(osdata):
    grains = {}
    bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
    if osdata['os'] in bsd_choices:
        grains['ps'] = 'ps auxwww'
    elif osdata['os_family'] == 'Solaris':
        grains['ps'] = '/usr/ucb/ps auxwww'
    elif osdata['os'] == 'Windows':
        grains['ps'] = 'tasklist.exe'
    elif osdata.get('virtual', '') == 'openvzhn':
        grains['ps'] = (
            'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" '
            '/proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") '
            '| awk \'{ $7=\"\"; print }\''
        )
    elif osdata['os_family'] == 'AIX':
        grains['ps'] = '/usr/bin/ps auxww'
    elif osdata['os_family'] == 'NILinuxRT':
        grains['ps'] = 'ps -o user,pid,ppid,tty,time,comm'
    else:
        grains['ps'] = 'ps -efHww'
    return grains
Return the ps grain
def get_join_parameters(join_kwargs):
    by = join_kwargs.get('by', None)
    suffixes = join_kwargs.get('suffixes', ('_x', '_y'))
    if isinstance(by, tuple):
        left_on, right_on = by
    elif isinstance(by, list):
        by = [x if isinstance(x, tuple) else (x, x) for x in by]
        left_on, right_on = (list(x) for x in zip(*by))
    else:
        left_on, right_on = by, by
    return left_on, right_on, suffixes
Convenience function to determine the columns to join the right and left DataFrames on, as well as any suffixes for the columns.
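A few illustrative calls showing how each `by` form is normalized by the function above (the column names are hypothetical):

print(get_join_parameters({'by': 'id'}))                  # ('id', 'id', ('_x', '_y'))
print(get_join_parameters({'by': ('lhs_id', 'rhs_id')}))  # ('lhs_id', 'rhs_id', ('_x', '_y'))
print(get_join_parameters({'by': ['a', ('b', 'c')],
                           'suffixes': ('_l', '_r')}))    # (['a', 'b'], ['a', 'c'], ('_l', '_r'))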