Dataset columns: Unnamed: 0 (int64, 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k)
389,000
def fetch_request_token(self, oauth_request): try: token = self._get_token(oauth_request, 'request') except Error: version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) try: callback = self.get_callback(oauth_request) except Error: callback = None self._check_signature(oauth_request, consumer, None) token = self.data_store.fetch_request_token(consumer, callback) return token
Processes a request_token request and returns the request token on success.
389,001
async def send_chat_action(self, chat_id: typing.Union[base.Integer, base.String], action: base.String) -> base.Boolean: payload = generate_payload(**locals()) result = await self.request(api.Methods.SEND_CHAT_ACTION, payload) return result
Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). We only recommend using this method when a response from the bot will take a noticeable amount of time to arrive. Source: https://core.telegram.org/bots/api#sendchataction :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param action: Type of action to broadcast :type action: :obj:`base.String` :return: Returns True on success :rtype: :obj:`base.Boolean`
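A minimal usage sketch for the method above, assuming an aiogram-style Bot instance; the token and chat id are placeholders, not values from the source.

import asyncio
from aiogram import Bot

async def notify_typing(bot: Bot, chat_id: int):
    # Show "typing..." in the client while the real reply is being prepared.
    await bot.send_chat_action(chat_id, "typing")

# asyncio.run(notify_typing(Bot(token="<TOKEN>"), 123456789))  # hypothetical values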
389,002
def proximal_convex_conj_l2(space, lam=1, g=None): prox_l2 = proximal_l2(space, lam=lam, g=g) return proximal_convex_conj(prox_l2)
r"""Proximal operator factory of the convex conj of the l2-norm/distance. Function for the proximal operator of the convex conjugate of the functional F where F is the l2-norm (or distance to g, if given):: F(x) = lam ||x - g||_2 with x and g elements in ``space``, scaling factor lam, and given data g. Parameters ---------- space : `LinearSpace` Domain of F(x). Needs to be a Hilbert space. That is, have an inner product (`LinearSpace.inner`). lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional An element in ``space``. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- Most problems are forumlated for the squared norm/distance, in that case use the `proximal_convex_conj_l2_squared` instead. The :math:`L_2`-norm/distance :math:`F` is given by is given by .. math:: F(x) = \lambda \|x - g\|_2 The convex conjugate :math:`F^*` of :math:`F` is given by .. math:: F^*(y) = \begin{cases} 0 & \text{if } \|y-g\|_2 \leq \lambda, \\ \infty & \text{else.} \end{cases} For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F^*` is given by the projection onto the set of :math:`y` satisfying :math:`\|y-g\|_2 \leq \lambda`, i.e., by .. math:: \mathrm{prox}_{\sigma F^*}(y) = \begin{cases} \lambda \frac{y - g}{\|y - g\|} & \text{if } \|y-g\|_2 > \lambda, \\ y & \text{if } \|y-g\|_2 \leq \lambda \end{cases} Note that the expression is independent of :math:`\sigma`. See Also -------- proximal_l2 : proximal without convex conjugate proximal_convex_conj_l2_squared : proximal for squared norm/distance
389,003
def resolve_profile(name, include_expired=False, include_name_record=False, hostport=None, proxy=None): assert hostport or proxy, name_rec = get_name_record(name, include_history=False, include_expired=include_expired, include_grace=False, proxy=proxy, hostport=hostport) if in name_rec: log.error("Failed to get name record for {}: {}".format(name, name_rec[])) return {: .format(name_rec[]), : name_rec.get(, 500)} if in name_rec and name_rec[]: log.error("Name {} is in the grace period".format(name)) return {: .format(name), : name_rec.get(, 404)} if not in name_rec: log.error("Name record for {} has no zone file hash".format(name)) return {: .format(name), : 404} zonefile_hash = name_rec[] zonefile_res = get_zonefiles(hostport, [zonefile_hash], proxy=proxy) if in zonefile_res: log.error("Failed to get zone file for {} for name {}: {}".format(zonefile_hash, name, zonefile_res[])) return {: .format(name), : 404} zonefile_txt = zonefile_res[][zonefile_hash] log.debug("Got {}-byte zone file {}".format(len(zonefile_txt), zonefile_hash)) try: zonefile_data = blockstack_zones.parse_zone_file(zonefile_txt) zonefile_data = dict(zonefile_data) assert in zonefile_data if len(zonefile_data[]) == 0: return {: .format(zonefile_hash, name), : 404} except Exception as e: if BLOCKSTACK_TEST: log.exception(e) return {: .format(zonefile_hash, name), : 404} urls = [uri[] for uri in zonefile_data[]] for url in urls: jwt = get_JWT(url, address=str(name_rec[])) if not jwt: continue if not in jwt[]: log.warning("No field in payload for {}".format(url)) continue profile_data = jwt[][] public_key = str(jwt[][][]) pubkeys = [virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.decompress(public_key)), virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.compress(public_key))] if name_rec[] == pubkeys[0].address(): public_key = pubkeys[0].to_hex() else: public_key = pubkeys[1].to_hex() ret = { : profile_data, : zonefile_txt, : public_key, } if include_name_record: ret[] = name_rec return ret log.error("No zone file URLs resolved to a JWT with the public key whose address is {}".format(name_rec[])) return {: , : 404}
Resolve a name to its profile. This is a multi-step process: 1. get the name record 2. get the zone file 3. parse the zone file to get its URLs (if it's not well-formed, then abort) 4. fetch and authenticate the JWT at each URL (abort if there are none) 5. extract the profile JSON and return that, along with the zone file and public key Return {'profile': ..., 'zonefile': ..., 'public_key': ...['name_rec': ...]} on success Return {'error': ...} on error
389,004
def nolist(self, account): assert callable(self.blockchain.account_whitelist) return self.blockchain.account_whitelist(account, lists=[], account=self)
Remove another account from any list of this account
389,005
def is_file(cls, file): peeked_data = wpull.string.printable_bytes( wpull.util.peek_file(file)).lower() if b in peeked_data: return VeryFalse if re.search(br, peeked_data): return True
Return whether the file is likely CSS.
389,006
def _derive_stereographic(): from sympy import symbols, atan2, acos, rot_axis1, rot_axis3, Matrix x_c, y_c, z_c, x, y, z = symbols('x_c y_c z_c x y z') around_z = atan2(x_c, y_c) around_x = acos(-z_c) v = Matrix([x, y, z]) xo, yo, zo = rot_axis1(around_x) * rot_axis3(-around_z) * v xp = xo / (1 - zo) yp = yo / (1 - zo) return xp, yp
Compute the formulae to cut-and-paste into the routine below.
389,007
def _read_regex(ctx: ReaderContext) -> Pattern: s = _read_str(ctx, allow_arbitrary_escapes=True) try: return langutil.regex_from_str(s) except re.error: raise SyntaxError(f"Unrecognized regex pattern syntax: {s}")
Read a regex reader macro from the input stream.
389,008
def process_request(self, num_pending, ports, request_blocking=False): if any(port in self._step_requests for port in ports): port = [x for x in ports if x in self._step_requests][0] self._worker_backend_socket.send_pyobj( self._step_requests.pop(port)) self._n_processed += 1 self.report(f) self._claimed_ports.remove(port) if ports[0] in self._last_pending_time: self._last_pending_time.pop(ports[0]) elif any(port in self._claimed_ports for port in ports): self._worker_backend_socket.send_pyobj({}) self.report(f) elif any(port in self._blocking_ports for port in ports): self._max_workers -= 1 env.logger.debug( f ) for port in ports: if port in self._blocking_ports: self._blocking_ports.remove(port) if port in self._available_ports: self._available_ports.remove(port) self._worker_backend_socket.send_pyobj(None) self._num_workers -= 1 self.report(f) elif self._substep_requests: msg = self._substep_requests.pop() self._worker_backend_socket.send_pyobj(msg) self._n_processed += 1 self.report(f) for port in ports: if port in self._available_ports: self._available_ports.remove(port) if port in self._last_pending_time: self._last_pending_time.pop(port) elif request_blocking: self._worker_backend_socket.send_pyobj({}) return ports[0] elif num_pending == 0 and ports[ 0] in self._last_pending_time and time.time( ) - self._last_pending_time[ports[0]] > 5: for port in ports: if port in self._available_ports: self._available_ports.remove(port) self._worker_backend_socket.send_pyobj(None) self._num_workers -= 1 self.report(f) self._last_pending_time.pop(ports[0]) else: if num_pending == 0 and ports[0] not in self._last_pending_time: self._last_pending_time[ports[0]] = time.time() self._available_ports.add(ports[0]) self._worker_backend_socket.send_pyobj({}) ports = tuple(ports) if (ports, num_pending) not in self._last_pending_msg or time.time( ) - self._last_pending_msg[(ports, num_pending)] > 1.0: self.report( f) self._last_pending_msg[(ports, num_pending)] = time.time()
port is the open port at the worker; num_pending is the number of pending requests on the worker's stack. A non-zero num_pending means the worker is waiting on something while looking for a new job, so it should not be killed.
389,009
def invokeRunnable(self): runnable = self.runnable if runnable is None: self.deleteFromStore() else: try: self.running = True newTime = runnable.run() finally: self.running = False self._rescheduleFromRun(newTime)
Run my runnable, and reschedule or delete myself based on its result. Must be run in a transaction.
389,010
def gene_counts(self): return { gene_name: len(group) for (gene_name, group) in self.groupby_gene_name().items() }
Returns number of elements overlapping each gene name. Expects the derived class (VariantCollection or EffectCollection) to have an implementation of groupby_gene_name.
389,011
def on_start(self): logger.info() if self.config[] == 0: logger.info() return self.in_future.report_status()
Runs when the actor is started and schedules a status update
389,012
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx): import scipy.linalg.lapack npt = bd_all.shape[0] n = bd_idx.shape[1] kvalues = np.zeros(npt) sigmasq = np.zeros(npt) for i in np.nonzero(~mask)[0]: b_selector = bd_idx[i] bd = bd_all[i] a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1]))) a = a_all[a_selector[:, None], a_selector] if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_value = False zero_index = None b = np.zeros((n+1, 1)) b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd) if zero_value: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = scipy.linalg.solve(a, b) kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector]) sigmasq[i] = - x[:, 0].dot(b[:, 0]) return kvalues, sigmasq
Solves the kriging system by looping over all specified points. Uses only a certain number of closest points. Not very memory intensive, but the loop is done in pure Python.
389,013
def stop_archive(self, archive_id): response = requests.post(self.endpoints.archive_url(archive_id) + '/stop', headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout) if response.status_code < 300: return Archive(self, response.json()) elif response.status_code == 403: raise AuthError() elif response.status_code == 404: raise NotFoundError("Archive not found") elif response.status_code == 409: raise ArchiveError("Archive is not in started state") else: raise RequestError("An unexpected error occurred", response.status_code)
Stops an OpenTok archive that is being recorded. Archives automatically stop recording after 90 minutes or when all clients have disconnected from the session being archived. @param [String] archive_id The archive ID of the archive you want to stop recording. :rtype: The Archive object corresponding to the archive being stopped.
389,014
def get(self, sid): return ReservationContext( self._version, workspace_sid=self._solution['workspace_sid'], task_sid=self._solution['task_sid'], sid=sid, )
Constructs a ReservationContext :param sid: The sid :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
389,015
def _controller(self): def server_controller(cmd_id, cmd_body, _): if not self.init_logginig: head = + str( self.kvstore.rank) + logging.basicConfig(level=logging.DEBUG, format=head) self.init_logginig = True if cmd_id == 0: try: optimizer = pickle.loads(cmd_body) except: raise self.kvstore.set_optimizer(optimizer) else: print("server %d, unknown command (%d, %s)" % ( self.kvstore.rank, cmd_id, cmd_body)) return server_controller
Return the server controller.
389,016
def to_json(self): return { 'location': self.location.to_json(), 'design_days': [des_d.to_json() for des_d in self.design_days] }
Convert the Design Day to a dictionary.
389,017
def run_parse(self): parsedset = {} parsedset['data'] = [] for log in self.input_files: parsemodule = self.parse_modules[self.args.parser] try: if self.args.tzone: parsemodule.tzone = self.args.tzone except NameError: pass parsedset['data'].append(parsemodule.parse_file(log)) self.data_set = parsedset del(parsedset)  # 'data' key reconstructed; the literal was lost in the dump
Parse one or more log files
389,018
def _load_cpp4(self, filename): ccp4 = CCP4.CCP4() ccp4.read(filename) grid, edges = ccp4.histogramdd() self.__init__(grid=grid, edges=edges, metadata=self.metadata)
Initializes Grid from a CCP4 file.
389,019
def compute_region_border(start, end): cells = defaultdict(Cell) start_row = row_number(start) end_row = row_number(end) if end % 0x10 == 0: end_row -= 1 if start_row == end_row: for i in range(start, end): cells[i].top = True else: for i in range(start, row_end_index(start) + 1): cells[i].top = True if start_row != end_row: next_row_start = row_start_index(start) + 0x10 for i in range(next_row_start, next_row_start + column_number(start)): cells[i].top = True if start_row == end_row: for i in range(start, end): cells[i].bottom = True else: for i in range(row_start_index(end), end): cells[i].bottom = True if start_row != end_row: prev_row_end = row_end_index(end) - 0x10 for i in range(prev_row_end - (0x10 - column_number(end) - 1), prev_row_end + 1): cells[i].bottom = True if start_row == end_row: cells[start].left = True else: second_row_start = row_start_index(start) + 0x10 for i in range(second_row_start, row_start_index(end) + 0x10, 0x10): cells[i].left = True if start_row != end_row: cells[start].left = True if start_row == end_row: cells[end - 1].right = True else: penultimate_row_end = row_end_index(end) - 0x10 for i in range(row_end_index(start), penultimate_row_end + 0x10, 0x10): cells[i].right = True if start_row != end_row: cells[end - 1].right = True cells.default_factory = None return cells
given the buffer start and end indices of a range, compute the border edges that should be drawn to enclose the range. this function currently assumes 0x10 length rows. the result is a dictionary from buffer index to Cell instance. the Cell instance has boolean properties "top", "bottom", "left", and "right" that describe if a border should be drawn on that side of the cell view. :rtype: Mapping[int, CellT]
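A hedged usage sketch for the border helper above; it assumes the function and its Cell type are importable from the same module, and the byte range is illustrative.

# Hypothetical example: border around bytes 0x08..0x27 of a hex view (0x10 bytes per row).
cells = compute_region_border(0x08, 0x28)
for index in sorted(cells):
    cell = cells[index]
    sides = [s for s in ("top", "bottom", "left", "right") if getattr(cell, s)]
    if sides:
        print(hex(index), ",".join(sides))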
389,020
def gene_names(self): return self.ensembl.gene_names_at_locus( self.contig, self.start, self.end)
Return names of all genes which overlap this variant. Calling this method is significantly cheaper than calling `Variant.genes()`, which has to issue many more queries to construct each Gene object.
389,021
def ldr(scatterer, h_pol=True): Z = scatterer.get_Z() if h_pol: return (Z[0,0] - Z[0,1] + Z[1,0] - Z[1,1]) / \ (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1]) else: return (Z[0,0] + Z[0,1] - Z[1,0] - Z[1,1]) / \ (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
Linear depolarization ratio (LDR) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), return LDR_h. If False, return LDR_v. Returns: The LDR.
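A short usage sketch, assuming a pytmatrix-style Scatterer; the wavelength, refractive index, and axis ratio below are illustrative values, not from the source.

from pytmatrix.tmatrix import Scatterer

scatterer = Scatterer(radius=2.0, wavelength=6.5, m=complex(1.5, 0.5), axis_ratio=1.0 / 0.6)
print("LDR_h:", ldr(scatterer))               # horizontal polarization (default)
print("LDR_v:", ldr(scatterer, h_pol=False))  # vertical polarization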
389,022
def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ret = {: name, : {}, : None, : } if not in __salt__: ret[] = False ret[] = \ return ret if not pkgs and isinstance(pkgs, list): ret[] = True ret[] = return ret if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] if in kwargs: del kwargs[] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and in targets: return targets elif not isinstance(targets, dict): ret[] = False ret[] = \ .format(targets) return ret if __opts__[]: summary = .join(targets) ret[] = \ .format(summary) return ret try: pkg_ret = __salt__[](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret[] = True ret[].update(pkg_ret) except CommandExecutionError as exc: ret = {: name, : False} if exc.info: ret[] = exc.info.get(, {}) ret[] = exc.strerror_without_changes else: ret[] = {} ret[] = \ .format(exc) return ret new_pkgs = __salt__[](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = .join([_get_desired_pkg(x, targets) for x in failed]) ret[] = False ret[] = \ .format(summary) if not ret[] and not ret[]: ret[] = True ret[] = \ .format(.join(targets)) return ret
.. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository"
389,023
def canonicalize(message): if message.is_multipart() or message.get('content-transfer-encoding') != 'binary': return mime_to_bytes(message, 0).replace(b'\r\n', b'\n').replace(b'\r', b'\n').replace(b'\n', b'\r\n') else: message_header = '' message_body = message.get_payload(decode=True) for k, v in message.items(): message_header += '{}: {}\r\n'.format(k, v) message_header += '\r\n' return message_header.encode() + message_body  # lost string literals reconstructed (header key and CRLF normalization assumed)
Function to convert an email Message to standard format string :param message: email.Message to be converted to standard string :return: the standard representation of the email message in bytes
389,024
def __deserialize_model(self, data, klass): if not klass.swagger_types: return data kwargs = {} for attr, attr_type in iteritems(klass.swagger_types): if data is not None \ and klass.attribute_map[attr] in data \ and isinstance(data, (list, dict)): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) return instance
Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object.
389,025
def get_billing_report(self, month, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_billing_report_with_http_info(month, **kwargs) else: (data) = self.get_billing_report_with_http_info(month, **kwargs) return data  # keys reconstructed from the docstring and the usual swagger-codegen pattern
Get billing report. # noqa: E501 Fetch the billing report generated for the currently authenticated commercial non-subtenant account. Billing reports for subtenant accounts are included in their aggregator's billing report response. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report?month=2018-07 -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_billing_report(month, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str month: Queried year and month of billing report. (required) :return: ReportResponse If the method is called asynchronously, returns the request thread.
389,026
def total(self): feats = imap(lambda name: self[name], self._counters()) return sum(chain(*map(lambda mset: map(abs, mset.values()), feats)))
Returns sum of all counts in all features that are multisets.
389,027
def execute_prepared_cql3_query(self, itemId, values, consistency): self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_execute_prepared_cql3_query(itemId, values, consistency) return d
Parameters: - itemId - values - consistency
389,028
def a_stays_connected(ctx): ctx.ctrl.connected = True ctx.device.connected = False return True
Stay connected.
389,029
def exit(self, status=EXIT_OK, message=None): if not self.parser: self.parser = argparse.ArgumentParser() if self.msg_on_error_only: if status != EXIT_OK: self.parser.exit(status, None) else: self.parser.exit(status, message)
Terminate the script.
389,030
def load(self, filepath): for attributeLines in oboTermParser(filepath): oboTerm = _attributeLinesToDict(attributeLines) if oboTerm['id'] not in self.oboTerms: self.oboTerms[oboTerm['id']] = oboTerm else: oldOboTerm = self.oboTerms[oboTerm['id']] oldTermIsObsolete = _termIsObsolete(oldOboTerm) newTermIsObsolete = _termIsObsolete(oboTerm) if oldTermIsObsolete and not newTermIsObsolete: self.oboTerms[oboTerm['id']] = oboTerm else: assert oldTermIsObsolete or newTermIsObsolete
Import '[Term]' entries from an .obo file.
389,031
def unionByName(self, other): return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
Returns a new :class:`DataFrame` containing union of rows in this and another frame. This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. The difference between this function and :func:`union` is that this function resolves columns by name (not by position): >>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"]) >>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"]) >>> df1.unionByName(df2).show() +----+----+----+ |col0|col1|col2| +----+----+----+ | 1| 2| 3| | 6| 4| 5| +----+----+----+
389,032
def process_event(self, c): if c == "\x04": sys.exit() elif c in key_directions: self.move_entity(self.player, *vscale(self.player.speed, key_directions[c])) else: return "try arrow keys, w, a, s, d, or ctrl-D (you pressed %r)" % c return self.tick()  # the empty comparison is assumed to be ctrl-D ("\x04"), per the docstring
Returns a message from tick() to be displayed if game is over
389,033
def _establish_tunnel(self, connection, address): host = .format(address[0]) if in address[0] else address[0] port = address[1] request = RawRequest(, .format(host, port)) self.add_auth_header(request) stream = Stream(connection, keep_alive=True) _logger.debug() yield from stream.write_request(request) _logger.debug() response = yield from stream.read_response() if response.status_code != 200: debug_file = io.BytesIO() _logger.debug() yield from stream.read_body(request, response, file=debug_file) debug_file.seek(0) _logger.debug(ascii(debug_file.read())) if response.status_code == 200: connection.tunneled = True else: raise NetworkError( .format(response.status_code, wpull.string.printable_str(response.reason)) )
Establish a TCP tunnel. Coroutine.
389,034
def to_type(self, tokens): result = [] name_tokens = [] reference = pointer = array = False inside_array = False empty_array = True templated_tokens = [] def add_type(): if not name_tokens: return names = [] modifiers = [] for t in name_tokens: if keywords.is_keyword(t.name): modifiers.append(t.name) else: names.append(t.name) name = .join(names) templated_types = self.to_type(templated_tokens) result.append(Type(name_tokens[0].start, name_tokens[-1].end, name, templated_types, modifiers, reference, pointer, array)) del name_tokens[:] del templated_tokens[:] i = 0 end = len(tokens) while i < end: token = tokens[i] if token.name == : inside_array = False if empty_array: pointer = True else: array = True elif inside_array: empty_array = False elif token.name == : templated_tokens, i = self._get_template_end(tokens, i + 1) continue elif token.name == or token.name == : add_type() reference = pointer = array = False empty_array = True elif token.name == : pointer = True elif token.name == : reference = True elif token.name == : inside_array = True elif token.name != : name_tokens.append(token) i += 1 add_type() return result
Convert [Token,...] to [Class(...), ] useful for base classes. For example, code like class Foo : public Bar<x, y> { ... }; the "Bar<x, y>" portion gets converted to an AST. Returns: [Class(...), ...]
389,035
def get_vexrc(options, environ): if options.config and not os.path.exists(options.config): raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config)) filename = options.config or os.path.expanduser('~/.vexrc') vexrc = config.Vexrc.from_file(filename, environ) return vexrc
Get a representation of the contents of the config file. :returns: a Vexrc instance.
389,036
def set_mode(self, mode): if not mode in [self.STRICT_MIN_LENGTH, self.DROP_TRAILING_SILENCE, self.STRICT_MIN_LENGTH | self.DROP_TRAILING_SILENCE, 0]: raise ValueError("Wrong value for mode") self._mode = mode self._strict_min_length = (mode & self.STRICT_MIN_LENGTH) != 0 self._drop_tailing_silence = (mode & self.DROP_TRAILING_SILENCE) != 0
:Parameters: `mode` : *(int)* New mode, must be one of: - `StreamTokenizer.STRICT_MIN_LENGTH` - `StreamTokenizer.DROP_TRAILING_SILENCE` - `StreamTokenizer.STRICT_MIN_LENGTH | StreamTokenizer.DROP_TRAILING_SILENCE` - `0` See `StreamTokenizer.__init__` for more information about the mode.
389,037
def fromBban(bban): countryCode = "XE" remainder = mod9710(iso13616Prepare(countryCode + "00" + bban)) checkDigit = ("0" + str(98 - remainder))[-2:] return Iban(countryCode + checkDigit + bban)
Convert the passed BBAN to an IBAN for this country specification. Please note that <i>"generation of the IBAN shall be the exclusive responsibility of the bank/branch servicing the account"</i>. This method implements the preferred algorithm described in http://en.wikipedia.org/wiki/International_Bank_Account_Number#Generating_IBAN_check_digits @method fromBban @param {String} bban the BBAN to convert to IBAN @returns {Iban} the IBAN object
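A hedged usage sketch for the conversion above; the BBAN string is an illustrative placeholder, and the Iban constructor/str behaviour is assumed from the surrounding code.

# Hypothetical example: derive the "XE" IBAN (with computed check digits) from a BBAN.
iban = Iban.fromBban("7338O073KYGTZF89W300")
print(iban)  # expected to start with "XE", the two check digits, then the BBAN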
389,038
def _run_cmd_line_code(self): if self.code_to_run: line = self.code_to_run try: self.log.info("Running code given at command line (c=): %s" % line) self.shell.run_cell(line, store_history=False) except: self.log.warn("Error in executing line in user namespace: %s" % line) self.shell.showtraceback() elif self.file_to_run: fname = self.file_to_run try: self._exec_file(fname) except: self.log.warn("Error in executing file in user namespace: %s" % fname) self.shell.showtraceback()
Run code or file specified at the command-line
389,039
def to_routing_header(params): if sys.version_info[0] < 3: return urlencode(params).replace("%2F", "/") return urlencode( params, safe="/", )
Returns a routing header string for the given request parameters. Args: params (Mapping[str, Any]): A dictionary containing the request parameters used for routing. Returns: str: The routing header string.
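A short example of the helper above; the parameter names are illustrative, not taken from a specific API.

params = {"name": "projects/my-project/instances/my-instance", "force": True}
print(to_routing_header(params))
# -> name=projects/my-project/instances/my-instance&force=True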
389,040
def add_file_arg(self, filename): self.__arguments.append(filename) if filename not in self.__input_files: self.__input_files.append(filename)
Add a file argument to the executable. Arguments are appended after any options and their order is guaranteed. Also adds the file name to the list of required input data for this job. @param filename: file to add as argument.
389,041
def get_vnetwork_vms_input_last_rcvd_instance(self, **kwargs): config = ET.Element("config") get_vnetwork_vms = ET.Element("get_vnetwork_vms") config = get_vnetwork_vms input = ET.SubElement(get_vnetwork_vms, "input") last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance") last_rcvd_instance.text = kwargs.pop('last_rcvd_instance') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
389,042
def check_prompt_code(response): num_code = response.find("div", {"jsname": "EKvSSd"}) if num_code: print("numerical code for prompt: {}".format(num_code.string))
Sometimes there is an additional numerical code on the response page that needs to be selected on the prompt from a list of multiple choice. Print it if it's there.
389,043
def _generate_null_hocr(output_hocr, output_sidecar, image): from PIL import Image im = Image.open(image) w, h = im.size with open(output_hocr, 'w', encoding="utf-8") as f: f.write(HOCR_TEMPLATE.format(w, h)) with open(output_sidecar, 'w', encoding='utf-8') as f: f.write('')  # file modes/encodings restored; the sidecar text literal was lost in the dump
Produce a .hocr file that reports no text detected on a page that is the same size as the input image.
389,044
def get_activity_photos(self, activity_id, size=None, only_instagram=False): params = {} if not only_instagram: params['photo_sources'] = 'true' if size is not None: params['size'] = size result_fetcher = functools.partial(self.protocol.get, '/activities/{id}/photos', id=activity_id, **params) return BatchedResultsIterator(entity=model.ActivityPhoto, bind_client=self, result_fetcher=result_fetcher)  # endpoint path and parameter keys reconstructed from the docstring
Gets the photos from an activity. http://strava.github.io/api/v3/photos/ :param activity_id: The activity for which to fetch photos. :type activity_id: int :param size: the requested size of the activity's photos. URLs for the photos will be returned that best match the requested size. If not included, the smallest size is returned :type size: int :param only_instagram: Parameter to preserve legacy behavior of only returning Instagram photos. :type only_instagram: bool :return: An iterator of :class:`stravalib.model.ActivityPhoto` objects. :rtype: :class:`BatchedResultsIterator`
389,045
def transfer_owner(self, new_owner: Address) -> TxReceipt: tx_hash = self.registry.functions.transferOwner(new_owner).transact() return self.w3.eth.waitForTransactionReceipt(tx_hash)
Transfers ownership of this registry instance to the given ``new_owner``. Only the ``owner`` is allowed to transfer ownership. * Parameters: * ``new_owner``: The address of the new owner.
389,046
def readline(self): b = super(PtyProcessUnicode, self).readline() return self.decoder.decode(b, final=False)
Read one line from the pseudoterminal, and return it as unicode. Can block if there is nothing to read. Raises :exc:`EOFError` if the terminal was closed.
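A minimal usage sketch, assuming the standard ptyprocess spawn API; the command is a placeholder.

from ptyprocess import PtyProcessUnicode

proc = PtyProcessUnicode.spawn(["echo", "hello"])
print(proc.readline().rstrip())  # blocks until one full line arrives; raises EOFError once the pty closes
proc.close()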
389,047
def _set_source(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("src_interface_type src_interface_name destination dst_interface_type dst_interface_name",source.source, yang_name="source", rest_name="source", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: None, u: None}}), is_container=, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__source = t if hasattr(self, ): self._set()
Setter method for source, mapped from YANG variable /acl_mirror/source (list) If this variable is read-only (config: false) in the source YANG file, then _set_source is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source() directly.
389,048
def _refresh_resource_index(self, resource): if self._resource_config(resource, , True): self.elastic(resource).indices.refresh(self._resource_index(resource))
Refresh index for given resource. :param resource: resource name
389,049
def load_code(self): a = self.load_int() b = self.load_int() key = get_keys(a, b) padsize = (b + 15) & ~0xf intsize = padsize/4 data = self.bufstr[self.bufpos:self.bufpos+padsize] data = list(struct.unpack( % intsize, data)) tea_decipher(data, key) self.bufpos += padsize obj = xmarshal._FastUnmarshaller(struct.pack( % intsize, *data)) code = obj.load_code() co_code = patch(code.co_code) if PYTHON3: return Code2Compat(code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags, co_code, code.co_consts, code.co_names, code.co_varnames, code.co_filename, code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars) else: return types.CodeType(code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags, co_code, code.co_consts, code.co_names, code.co_varnames, code.co_filename, code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars)
Returns a Python code object like xdis.unmarshal.load_code(), but here we decrypt the data in self.bufstr. That is: * calculate the TEA key, * decrypt self.bufstr, * create and return a Python code-object
389,050
async def main(): async with ClientSession() as websession: try: client = Client(websession) await client.load_local(, , websession) for controller in client.controllers.values(): print() print(.format(controller.name)) print(.format(controller.mac)) print(.format(controller.api_version)) print( .format( controller.software_version)) print( .format( controller.hardware_version)) print() print() data = await controller.diagnostics.current() print(.format(data[])) print(.format(data[])) print() print() for parser in await controller.parsers.current(): print(parser[]) print() print() for program in await controller.programs.all( include_inactive=True): print( .format( program[], program[])) print() print() program_1 = await controller.programs.get(1) print( "Program 1startTimeNEXT RUN TIMESProgram zone_1[], zone_1[])) print() print() print(await controller.zones.start(1, 3)) await asyncio.sleep(3) print() print() print(await controller.zones.stop(1)) except RainMachineError as err: print(err)
Run.
389,051
def override (overrider_id, overridee_id): assert isinstance(overrider_id, basestring) assert isinstance(overridee_id, basestring) __overrides.setdefault(overrider_id, []).append(overridee_id)
Make generator 'overrider-id' be preferred to 'overridee-id'. If, when searching for generators that could produce a target of certain type, both those generators are among viable generators, the overridden generator is immediately discarded. The overridden generators are discarded immediately after computing the list of viable generators, before running any of them.
389,052
def export_pages(root_page, export_unpublished=False): pages = Page.objects.descendant_of(root_page, inclusive=True).order_by('path').specific() if not export_unpublished: pages = pages.filter(live=True) page_data = [] exported_paths = set() for (i, page) in enumerate(pages): parent_path = page.path[:-(Page.steplen)] if i == 0 or (parent_path in exported_paths): page_data.append({'content': json.loads(page.to_json()), 'app_label': page.content_type.app_label, 'model': page.content_type.model}) exported_paths.add(page.path) return {'pages': page_data}  # everything after the parent_path assignment was truncated in the dump; the remainder is reconstructed to match the docstring (skip pages whose parent was not exported, return the collected page data)
Create a JSON definition of part of a site's page tree starting from root_page and descending into its descendants. By default only published pages are exported. If a page is unpublished it and all its descendants are pruned even if some of those descendants are themselves published. This ensures that there are no orphan pages when the subtree is created in the destination site. If export_unpublished=True the root_page and all its descendants are included.
389,053
def _calculate_mapping_reads(items, work_dir, input_backs=None): out_file = os.path.join(work_dir, "mapping_reads.txt") if not utils.file_exists(out_file): lines = [] for data in items: count = 0 for line in subprocess.check_output([ "samtools", "idxstats", dd.get_align_bam(data)]).decode().split("\n"): if line.strip(): count += int(line.split("\t")[2]) lines.append("%s\t%s" % (dd.get_sample_name(data), count)) with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("\n".join(lines) + "\n") if input_backs: for input_back in input_backs: with open(input_back) as in_handle: for line in in_handle: if len(line.split()) == 2: out_handle.write(line) return out_file
Calculate read counts from samtools idxstats for each sample. Optionally moves over pre-calculated mapping counts from a background file.
389,054
def rebuild(self, recreate=True, force=False, **kwargs): "Recreate (if needed) the wx_obj and apply new properties" needs_rebuild = any([isinstance(spec, (StyleSpec, InitSpec)) for spec_name, spec in self._meta.specs.items() if spec_name in kwargs]) if needs_rebuild and recreate or force: if DEBUG: print "rebuilding window!" self.__init__(**kwargs) else: if DEBUG: print "just setting attr!" for name, value in kwargs.items(): setattr(self, name, value)
Recreate (if needed) the wx_obj and apply new properties
389,055
def compare_version(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] return (normalize(version1) > normalize(version2))-(normalize(version1) < normalize(version2))  # pattern reconstructed: strip trailing ".0" groups before comparing
Compares two versions.
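A few illustrative calls, assuming the trailing-".0" normalization reconstructed above; the return value is 1, 0, or -1, like a classic cmp().

assert compare_version("1.10", "1.9") == 1
assert compare_version("1.1", "1.2") == -1
assert compare_version("1.2", "1.2.0") == 0  # trailing ".0" segments are ignored by the normalization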
389,056
def write_antenna(page, args, seg_plot=None, grid=False, ipn=False): from pylal import antenna page.h3() page.add() page.h3.close() th = [] td = [] th2 = [] td2 = [] ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)] if ipn: antenna_ifo = {} ra = [] dec = [] search_file = open( % args.grb_name) for line in search_file: ra.append(line.split()[0]) dec.append(line.split()[1]) for ifo in ifos: antenna_ifo[ifo] = [] for k, l in zip(ra, dec): _, _, _, f_q = antenna.response(args.start_time, float(k), float(l), 0.0, 0.0, , ifo) antenna_ifo[ifo].append(round(f_q,3)) dectKeys = antenna_ifo.keys() for elements in range(len(antenna_ifo.values()[0])): newDict={} for detectors in range(len(antenna_ifo.keys())): newDict[dectKeys[detectors]] = antenna_ifo[\ dectKeys[detectors]][elements] for key in newDict.keys(): th.append(key) td.append(newDict.values()) page = write_table(page, list(set(th)), td) for ifo in ifos: _, _, _, f_q = antenna.response(args.start_time, args.ra, args.dec, 0.0, 0.0, ,ifo) th.append(ifo) td.append(round(f_q, 3)) page = write_table(page, th, td) if seg_plot is not None: plot = markup.page() p = os.path.basename(seg_plot) plot.a(href=p, title="Science Segments") plot.img(src=p) plot.a.close() th2.append() td2.append(plot()) plot = markup.page() p = "ALL_TIMES/plots_clustered/GRB%s_sky_grid.png"\ % args.grb_name plot.a(href=p, title="Sky Grid") plot.img(src=p) plot.a.close() th2.append() td2.append(plot()) page = write_table(page, th2, td2) return page
Write antenna factors to markup.page object page and generate John's detector response plot.
389,057
def cleanPolyline(elem, options): pts = parseListOfPoints(elem.getAttribute('points')) elem.setAttribute('points', scourCoordinates(pts, options, True))
Scour the polyline points attribute
389,058
def backfill_previous_messages(self, reverse=False, limit=10): res = self.client.api.get_room_messages(self.room_id, self.prev_batch, direction="b", limit=limit) events = res["chunk"] if not reverse: events = reversed(events) for event in events: self._put_event(event)
Backfill handling of previous messages. Args: reverse (bool): When false messages will be backfilled in their original order (old to new), otherwise the order will be reversed (new to old). limit (int): Number of messages to go back.
389,059
def callable_name(callable_obj): try: if (isinstance(callable_obj, type) and issubclass(callable_obj, param.ParameterizedFunction)): return callable_obj.__name__ elif (isinstance(callable_obj, param.Parameterized) and 'operation' in callable_obj.params()): return callable_obj.operation.__name__ elif isinstance(callable_obj, partial): return str(callable_obj) elif inspect.isfunction(callable_obj): return callable_obj.__name__ elif inspect.ismethod(callable_obj): meth = callable_obj if sys.version_info < (3,0): owner = meth.im_class if meth.im_self is None else meth.im_self else: owner = meth.__self__ if meth.__name__ == '__call__': return type(owner).__name__ return '.'.join([owner.__name__, meth.__name__]) elif isinstance(callable_obj, types.GeneratorType): return callable_obj.__name__ else: return type(callable_obj).__name__ except: return str(callable_obj)
Attempt to return a meaningful name identifying a callable or generator
389,060
def add_hook(self, name, func): if name in self.__hook_reversed: self._hooks[name].insert(0, func) else: self._hooks[name].append(func)
Attach a callback to a hook. Three hooks are currently implemented: before_request Executed once before each request. The request context is available, but no routing has happened yet. after_request Executed once after each request regardless of its outcome. app_reset Called whenever :meth:`Bottle.reset` is called.
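A small usage sketch with the documented hook names, assuming a standard Bottle application object.

from bottle import Bottle

app = Bottle()
app.add_hook("before_request", lambda: print("routing about to happen"))
app.add_hook("after_request", lambda: print("request finished"))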
389,061
def _handle_pong(self, ts, *args, **kwargs): log.info("BitfinexWSS.ping(): Ping received! (%ss)", ts - self.ping_timer) self.ping_timer = None
Handles pong messages; resets the self.ping_timer variable and logs info message. :param ts: timestamp, declares when data was received by the client :return:
389,062
def filter_by(self, values, exclude=False): from .sframe import SFrame as _SFrame column_name = if not isinstance(values, SArray): if not _is_non_string_iterable(values): values = [values] values = SArray(values) value_sf = _SFrame() value_sf.add_column(values, column_name, inplace=True) given_type = value_sf.column_types()[0] existing_type = self.dtype sarray_sf = _SFrame() sarray_sf.add_column(self, column_name, inplace=True) if given_type != existing_type: raise TypeError("Type of given values does not match type of the SArray") value_sf = value_sf.groupby(column_name, {}) with cython_context(): if exclude: id_name = "id" value_sf = value_sf.add_row_number(id_name) tmp = _SFrame(_proxy=sarray_sf.__proxy__.join(value_sf.__proxy__, , {column_name:column_name})) ret_sf = tmp[tmp[id_name] == None] return ret_sf[column_name] else: ret_sf = _SFrame(_proxy=sarray_sf.__proxy__.join(value_sf.__proxy__, , {column_name:column_name})) return ret_sf[column_name]
Filter an SArray by values inside an iterable object. The result is an SArray that only includes (or excludes) the values in the given ``values`` :class:`~turicreate.SArray`. If ``values`` is not an SArray, we attempt to convert it to one before filtering. Parameters ---------- values : SArray | list | numpy.ndarray | pandas.Series | str The values to use to filter the SArray. The resulting SArray will only include rows that have one of these values in the given column. exclude : bool If True, the result SArray will contain all rows EXCEPT those that have one of the ``values``. Returns ------- out : SArray The filtered SArray. Examples -------- >>> sa = SArray(['dog', 'cat', 'cow', 'horse']) >>> sa.filter_by(['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']) dtype: str Rows: 2 ['dog', 'cat'] >>> sa.filter_by(['cat', 'hamster', 'dog', 'fish', 'bird', 'snake'], exclude=True) dtype: str Rows: 2 ['horse', 'cow']
389,063
def get_keywords_from_text(text_lines, taxonomy_name, output_mode="text", output_limit=None, spires=False, match_mode="full", no_cache=False, with_author_keywords=False, rebuild_cache=False, only_core_tags=False, extract_acronyms=False): if output_limit is None: output_limit = current_app.config[] cache = get_cache(taxonomy_name) if not cache: set_cache(taxonomy_name, get_regular_expressions(taxonomy_name, rebuild=rebuild_cache, no_cache=no_cache)) cache = get_cache(taxonomy_name) _skw = cache[0] _ckw = cache[1] text_lines = cut_references(text_lines) fulltext = normalize_fulltext("\n".join(text_lines)) if match_mode == "partial": fulltext = get_partial_text(fulltext) author_keywords = None if with_author_keywords: author_keywords = extract_author_keywords(_skw, _ckw, fulltext) acronyms = {} if extract_acronyms: acronyms = extract_abbreviations(fulltext) single_keywords = extract_single_keywords(_skw, fulltext) composite_keywords = extract_composite_keywords( _ckw, fulltext, single_keywords) if only_core_tags: single_keywords = clean_before_output( filter_core_keywords(single_keywords)) composite_keywords = filter_core_keywords(composite_keywords) else: single_keywords = clean_before_output(single_keywords) return get_keywords_output( single_keywords=single_keywords, composite_keywords=composite_keywords, taxonomy_name=taxonomy_name, author_keywords=author_keywords, acronyms=acronyms, output_mode=output_mode, output_limit=output_limit, spires=spires, only_core_tags=only_core_tags )
Extract keywords from the list of strings. :param text_lines: list of strings (will be normalized before being joined into one string) :param taxonomy_name: string, name of the taxonomy_name :param output_mode: string - text|html|marcxml|raw :param output_limit: int :param spires: boolean, if True marcxml output reflect spires codes. :param match_mode: str - partial|full; in partial mode only beginning of the fulltext is searched. :param no_cache: boolean, means loaded definitions will not be saved. :param with_author_keywords: boolean, extract keywords from the pdfs. :param rebuild_cache: boolean :param only_core_tags: boolean :return: if output_mode=raw, it will return (single_keywords, composite_keywords, author_keywords, acronyms) for other output modes it returns formatted string
389,064
def matchPatterns(patterns, keys): results = [] if patterns: for pattern in patterns: prog = re.compile(pattern) for key in keys: if prog.match(key): results.append(key) else: return None return results
Returns a subset of the keys that match any of the given patterns :param patterns: (list) regular expressions to match :param keys: (list) keys to search for matches
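Illustrative calls for the helper above; the key names are made up for the example.

keys = ["requests.count", "latency.p95", "errors.count"]
print(matchPatterns([r".*\.count$"], keys))  # ['requests.count', 'errors.count']
print(matchPatterns(None, keys))             # None when no patterns are supplied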
389,065
def removeChild(self, child): try: self.children.remove(child) self.blocks.remove(child) child.parentNode = None child.ownerDocument = None for subChild in child.getAllChildNodes(): subChild.ownerDocument = None return child except ValueError: return None
removeChild - Remove a child tag, if present. @param child <AdvancedTag> - The child to remove @return - The child [with parentNode cleared] if removed, otherwise None. NOTE: This removes a tag. If removing a text block, use #removeText function. If you need to remove an arbitrary block (text or AdvancedTag), @see removeBlock Removing multiple children? @see removeChildren
389,066
def subdivide(self): left_nodes, right_nodes = _curve_helpers.subdivide_nodes(self._nodes) left = Curve(left_nodes, self._degree, _copy=False) right = Curve(right_nodes, self._degree, _copy=False) return left, right
r"""Split the curve :math:`B(s)` into a left and right half. Takes the interval :math:`\left[0, 1\right]` and splits the curve into :math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and :math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In order to do this, also reparameterizes the curve, hence the resulting left and right halves have new nodes. .. image:: ../../images/curve_subdivide.png :align: center .. doctest:: curve-subdivide :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 1.25, 2.0], ... [0.0, 3.0 , 1.0], ... ]) >>> curve = bezier.Curve(nodes, degree=2) >>> left, right = curve.subdivide() >>> left.nodes array([[0. , 0.625, 1.125], [0. , 1.5 , 1.75 ]]) >>> right.nodes array([[1.125, 1.625, 2. ], [1.75 , 2. , 1. ]]) .. testcleanup:: curve-subdivide import make_images make_images.curve_subdivide(curve, left, right) Returns: Tuple[Curve, Curve]: The left and right sub-curves.
389,067
def status_set(workload_state, message): valid_states = ['maintenance', 'blocked', 'waiting', 'active'] if workload_state not in valid_states: raise ValueError('{!r} is not a valid workload state'.format(workload_state)) cmd = ['status-set', workload_state, message] try: ret = subprocess.call(cmd) if ret == 0: return except OSError as e: if e.errno != errno.ENOENT: raise log_message = 'status-set failed: {} {}'.format(workload_state, message) log(log_message, level='INFO')
Set the workload state with a message Use status-set to set the workload state with a message which is visible to the user via juju status. If the status-set command is not found then assume this is juju < 1.23 and juju-log the message instead. workload_state -- valid juju workload state. message -- status update message
389,068
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: return Double(context.jvm_view().ArcSinVertex, label, cast_to_double_vertex(input_vertex))
Takes the inverse sin of a vertex, Arcsin(vertex) :param input_vertex: the vertex
389,069
def simplify(self): node = self.node.simplify() if node is self.node: return self else: return _expr(node)
Return a simplified expression.
389,070
def do_gate_matrix(self, matrix: np.ndarray, qubits: Sequence[int]) -> : unitary = lifted_gate_matrix(matrix=matrix, qubit_inds=qubits, n_qubits=self.n_qubits) self.density = unitary.dot(self.density).dot(np.conj(unitary).T) return self
Apply an arbitrary unitary; not necessarily a named gate. :param matrix: The unitary matrix to apply. No checks are done :param qubits: A list of qubits to apply the unitary to. :return: ``self`` to support method chaining.
389,071
def strlify(a): if isinstance(a, str): return a return str(a).replace("b'", "").replace("'", "")  # original body garbled in the dump; reconstructed from the docstring (strip the b''-wrapper from bytes reprs so outputs are plain strings)
Used to turn hexlify() into hex string. Does nothing in Python 2, but is necessary for Python 3, so that all inputs and outputs are always the same encoding. Most of the time it doesn't matter, but some functions in Python 3 brick when they get bytes instead of a string, so it's safer to just strlify() everything. In Python 3 for example (examples commented out for doctest): # >>> hexlify(unhexlify("a1b2c3")) b'a1b2c3' # >>> b'a1b2c3' == 'a1b2c3' False # >>> strlify(hexlify(unhexlify("a1b2c3"))) 'a1b2c3' Whereas in Python 2, the results would be: # >>> hexlify(unhexlify("a1b2c3")) 'a1b2c3' # >>> b'a1b2c3' == 'a1b2c3' True # >>> strlify(hexlify(unhexlify("a1b2c3"))) 'a1b2c3' Safe to use redundantly on hex and base64 that may or may not be byte objects, as well as base58, since hex and base64 and base58 strings will never have "b'" in the middle of them. Obviously it's NOT safe to use on random strings which might have "b'" in the middle of the string. Use this for making sure base 16/58/64 objects are in string format. Use normalize_input() below to convert unicode objects back to ascii strings when possible.
389,072
def _lval_add_towards_polarity(x, polarity): if x < 0: if polarity < 0: return Lval('toinf', x) return Lval('pastzero', x) elif polarity > 0: return Lval('toinf', x) return Lval('pastzero', x)  # kind literals taken from the docstring
Compute the appropriate Lval "kind" for the limit of value `x` towards `polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and the infinity direction of polarity.
389,073
def yield_sorted_by_type(*typelist): def decorate(fun): @wraps(fun) def decorated(*args, **kwds): return iterate_by_type(fun(*args, **kwds), typelist) return decorated return decorate
a useful decorator for the collect_impl method of SuperChange subclasses. Caches the yielded changes, and re-emits them collected by their type. The order of the types can be specified by listing the types as arguments to this decorator. Unlisted types will be yielded last in no guaranteed order. Grouping happens by exact type match only. Inheritance is not taken into consideration for grouping.
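A hedged illustration of the decorator above; the change classes are hypothetical and the grouping relies on the module's iterate_by_type helper.

class Added(object): pass
class Removed(object): pass

@yield_sorted_by_type(Added, Removed)
def collect_impl(self):
    yield Removed()
    yield Added()
    yield Removed()

# Iterating collect_impl(obj) would now yield the Added instance first,
# then both Removed instances, following the declared type order.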
389,074
def _initiate_resumable_upload(self, stream, metadata, num_retries): chunk_size = _DEFAULT_CHUNKSIZE transport = self._http headers = _get_upload_headers(self._connection.USER_AGENT) upload_url = _RESUMABLE_URL_TEMPLATE.format(project=self.project) upload = ResumableUpload(upload_url, chunk_size, headers=headers) if num_retries is not None: upload._retry_strategy = resumable_media.RetryStrategy( max_retries=num_retries ) upload.initiate( transport, stream, metadata, _GENERIC_CONTENT_TYPE, stream_final=False ) return upload, transport
Initiate a resumable upload. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :type metadata: dict :param metadata: The metadata associated with the upload. :type num_retries: int :param num_retries: Number of upload retries. (Deprecated: This argument will be removed in a future release.) :rtype: tuple :returns: Pair of * The :class:`~google.resumable_media.requests.ResumableUpload` that was created * The ``transport`` used to initiate the upload.
389,075
def make_decoder(activation, latent_size, output_shape, base_depth): deconv = functools.partial( tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation) conv = functools.partial( tf.keras.layers.Conv2D, padding="SAME", activation=activation) decoder_net = tf.keras.Sequential([ deconv(2 * base_depth, 7, padding="VALID"), deconv(2 * base_depth, 5), deconv(2 * base_depth, 5, 2), deconv(base_depth, 5), deconv(base_depth, 5, 2), deconv(base_depth, 5), conv(output_shape[-1], 5, activation=None), ]) def decoder(codes): original_shape = tf.shape(input=codes) codes = tf.reshape(codes, (-1, 1, 1, latent_size)) logits = decoder_net(codes) logits = tf.reshape( logits, shape=tf.concat([original_shape[:-1], output_shape], axis=0)) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name="image") return decoder
Creates the decoder function. Args: activation: Activation function in hidden layers. latent_size: Dimensionality of the encoding. output_shape: The output image shape. base_depth: Smallest depth for a layer. Returns: decoder: A `callable` mapping a `Tensor` of encodings to a `tfd.Distribution` instance over images.
389,076
def options(self): response = self.repo.api.http_request('OPTIONS', self.uri) return response.headers
Small method to return headers of an OPTIONS request to self.uri Args: None Return: (dict) response headers from OPTIONS request
389,077
def decode_packet(data): if _debug: decode_packet._debug("decode_packet %r", data) if not data: return None d = decode_ethernet(data) pduSource = Address(d[]) pduDestination = Address(d[]) data = d[] if (d[] == 0x8100): if _debug: decode_packet._debug(" - vlan found") d = decode_vlan(data) data = d[] if (d[] == 0x0800): if _debug: decode_packet._debug(" - IP found") d = decode_ip(data) pduSource, pduDestination = d[], d[] data = d[] if (d[] == ): if _debug: decode_packet._debug(" - UDP found") d = decode_udp(data) data = d[] pduSource = Address((pduSource, d[])) pduDestination = Address((pduDestination, d[])) if _debug: decode_packet._debug(" - pduSource: %r", pduSource) decode_packet._debug(" - pduDestination: %r", pduDestination) else: if _debug: decode_packet._debug(" - not a UDP packet") else: if _debug: decode_packet._debug(" - not an IP packet") if not data: if _debug: decode_packet._debug(" - empty packet") return None pdu = PDU(data, source=pduSource, destination=pduDestination) if (pdu.pduData[0] == 0x81): if _debug: decode_packet._debug(" - BVLL header found") try: xpdu = BVLPDU() xpdu.decode(pdu) pdu = xpdu except Exception as err: if _debug: decode_packet._debug(" - BVLPDU decoding error: %r", err) return pdu atype = bvl_pdu_types.get(pdu.bvlciFunction) if not atype: if _debug: decode_packet._debug(" - unknown BVLL type: %r", pdu.bvlciFunction) return pdu try: xpdu = pdu bpdu = atype() bpdu.decode(pdu) if _debug: decode_packet._debug(" - bpdu: %r", bpdu) pdu = bpdu try: npdu = NPDU() npdu.decode(pdu) except Exception as err: if _debug: decode_packet._debug(" - decoding Error: %r", err) return None if npdu.npduNetMessage is None: if _debug: decode_packet._debug(" - not a network layer message, try as an APDU") try: xpdu = APDU() xpdu.decode(npdu) apdu = xpdu except Exception as err: if _debug: decode_packet._debug(" - decoding Error: %r", err) return npdu if npdu.npduSADR: apdu.pduSource = npdu.npduSADR else: apdu.pduSource = npdu.pduSource if npdu.npduDADR: apdu.pduDestination = npdu.npduDADR else: apdu.pduDestination = npdu.pduDestination atype = apdu_types.get(apdu.apduType) if not atype: if _debug: decode_packet._debug(" - unknown APDU type: %r", apdu.apduType) return apdu try: xpdu = apdu apdu = atype() apdu.decode(xpdu) except Exception as err: if _debug: decode_packet._debug(" - decoding Error: %r", err) return xpdu if isinstance(apdu, ConfirmedRequestPDU): atype = confirmed_request_types.get(apdu.apduService) if not atype: if _debug: decode_packet._debug(" - no confirmed request decoder: %r", apdu.apduService) return apdu elif isinstance(apdu, UnconfirmedRequestPDU): atype = unconfirmed_request_types.get(apdu.apduService) if not atype: if _debug: decode_packet._debug(" - no unconfirmed request decoder: %r", apdu.apduService) return apdu elif isinstance(apdu, SimpleAckPDU): atype = None elif isinstance(apdu, ComplexAckPDU): atype = complex_ack_types.get(apdu.apduService) if not atype: if _debug: decode_packet._debug(" - no complex ack decoder: %r", apdu.apduService) return apdu elif isinstance(apdu, SegmentAckPDU): atype = None elif isinstance(apdu, ErrorPDU): atype = error_types.get(apdu.apduService) if not atype: if _debug: decode_packet._debug(" - no error decoder: %r", apdu.apduService) return apdu elif isinstance(apdu, RejectPDU): atype = None elif isinstance(apdu, AbortPDU): atype = None if _debug: decode_packet._debug(" - atype: %r", atype) try: if atype: xpdu = apdu apdu = atype() apdu.decode(xpdu) except Exception as err: if _debug: decode_packet._debug(" - decoding error: %r", err) return xpdu return apdu else: ntype = npdu_types.get(npdu.npduNetMessage) if not ntype: if _debug: decode_packet._debug(" - no network layer decoder: %r", npdu.npduNetMessage) return npdu if _debug: decode_packet._debug(" - ntype: %r", ntype) try: xpdu = npdu npdu = ntype() npdu.decode(xpdu) except Exception as err: if _debug: decode_packet._debug(" - decoding error: %r", err) return xpdu return npdu
decode the data, return some kind of PDU.
389,078
import time
import datetime

def get_time_interval(time1, time2):
    try:
        # the time format and the output template are assumptions;
        # the original string literals were stripped from the source
        time_format = '%Y-%m-%d %H:%M:%S'
        time1 = time.mktime(time.strptime(time1, time_format))
        time2 = time.mktime(time.strptime(time2, time_format))
        # total_seconds() so intervals longer than a day are handled correctly
        seconds = int((datetime.datetime.fromtimestamp(time2) -
                       datetime.datetime.fromtimestamp(time1)).total_seconds())
        days = seconds // 86400
        seconds %= 86400
        hours = seconds // 3600
        seconds %= 3600
        minutes = seconds // 60
        seconds %= 60
        return '%dd %dh %dm %ds' % (days, hours, minutes, seconds)
    except Exception:
        return ''
get the interval of two times
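A usage sketch, assuming the '%Y-%m-%d %H:%M:%S' input format and the output template filled in above:

    # one day, one hour, thirty minutes and fifteen seconds apart
    print(get_time_interval('2024-01-01 08:00:00', '2024-01-02 09:30:15'))
    # '1d 1h 30m 15s' with the assumed output template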
389,079
def submatrix(dmat, indices_col, n_neighbors):
    n_samples_transform, n_samples_fit = dmat.shape
    submat = np.zeros((n_samples_transform, n_neighbors), dtype=dmat.dtype)
    for i in numba.prange(n_samples_transform):
        for j in numba.prange(n_neighbors):
            submat[i, j] = dmat[i, indices_col[i, j]]
    return submat
Return a submatrix given an original matrix and the indices to keep.

Parameters
----------
dmat: array, shape (n_samples, n_samples)
    Original matrix.
indices_col: array, shape (n_samples, n_neighbors)
    Indices to keep. Each row consists of the indices of the columns.
n_neighbors: int
    Number of neighbors.

Returns
-------
submat: array, shape (n_samples, n_neighbors)
    The corresponding submatrix.
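A minimal usage sketch with plain NumPy arrays (numba.prange behaves like range when the function is not JIT-compiled):

    import numpy as np

    # pairwise distances between 3 points, and for each point the 2 column
    # indices to keep
    dmat = np.array([[0.0, 1.0, 4.0],
                     [1.0, 0.0, 2.0],
                     [4.0, 2.0, 0.0]])
    indices_col = np.array([[0, 1], [1, 0], [2, 1]])

    # keeps only the selected columns per row -> shape (3, 2)
    print(submatrix(dmat, indices_col, n_neighbors=2))
    # [[0. 1.]
    #  [0. 1.]
    #  [0. 2.]]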
389,080
def _gte(field, value, document):
    try:
        return document.get(field, None) >= value
    except TypeError:
        return False
Returns True if the value of a document field is greater than or equal to a given value
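For example, on a plain dict document (a sketch; a missing field compares as None and the resulting TypeError is swallowed):

    doc = {"age": 30, "name": "alice"}

    print(_gte("age", 18, doc))     # True  (30 >= 18)
    print(_gte("age", 40, doc))     # False (30 < 40)
    print(_gte("height", 10, doc))  # False (missing field)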
389,081
def _init_args(self):
    # NOTE: several stripped string literals below (flag names, defaults,
    # help text) are reconstructed from context and may differ from the source
    p = argparse.ArgumentParser(__doc__,
                                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('filenames', type=str, nargs=3,
                   help='data/study data/population data/association')
    p.add_argument('--annofmt', default=None, type=str,
                   help=('Annotation file format. '
                         'Not needed if type can be determined using the file name suffix'),
                   choices=['gene2go', 'gaf', 'gpad', 'id2gos'])
    p.add_argument('--taxid', default=9606, type=int,
                   help="When using NCBI's gene2go annotation file, specify desired taxid")
    p.add_argument('--alpha', default=0.05, type=float,
                   help='Test-wise alpha for multiple testing')
    p.add_argument('--pval', default=.05, type=float,
                   help='Only print results with uncorrected p-value < PVAL.')
    p.add_argument('--pval_field', type=str,
                   help='Only print results when PVAL_FIELD < PVAL.')
    p.add_argument('--outfile', default=None, type=str,
                   help='Write enrichment results into xlsx or tsv file')
    p.add_argument('--id2sym', default=None, type=str,
                   help='ASCII file containing one geneid and its symbol per line')
    p.add_argument('--sections', default=None, type=str,
                   help=('Use sections file for printing grouped GOEA results. '
                         'Example SECTIONS values:\n'
                         'goatools.test_data.sections.gjoneska_pfenning\n'
                         'goatools/test_data/sections/gjoneska_pfenning.py\n'
                         'data/gjoneska_pfenning/sections_in.txt\n'))
    p.add_argument('--outfile_detail', type=str,
                   help=('Write enrichment results into a text file\n'
                         'containing the following information:\n'
                         '1) GOEA GO terms, grouped into sections\n\n'
                         '2) List of genes and ASCII art showing section membership\n'
                         '3) Detailed list of each gene and GO terms w/their P-values\n'))
    p.add_argument('--compare', dest='compare', action='store_true')
    p.add_argument('--ratio', dest='ratio')
    p.add_argument('--indent', dest='indent', action='store_true')
    p.add_argument('--obo')
    p.add_argument('--no_propagate_counts', action='store_true')
    p.add_argument('--method')
    p.add_argument('--pvalcalc')
    p.add_argument('--min_overlap')
    p.add_argument('--goslim', default='goslim_generic.obo')
    p.add_argument('--ev_inc')
    p.add_argument('--ev_exc')
    p.add_argument('--ev_help', dest='ev_help', action='store_false')

    if '--ev_help' in sys.argv:
        print('\nEVIDENCE CODE HELP: --ev_exc --ev_inc')
        print('Use any of these group names, like Experimental or Similarity or Experimental,Similarity,')
        print('or evidence codes, like IEA or ISS,ISO,ISA in --ev_exc or --ev_inc:')
        obj = EvidenceCodes()
        obj.prt_details()
        sys.exit(0)

    args = p.parse_args()
    self._check_input_files(args, p)
    return args
Get enrichment arg parser.
389,082
def run(self):
    ret = eventlet.spawn(self.context(self.func))
    eventlet.sleep(self.seconds)
    try:
        ret.wait()
    except Exception:
        traceback.print_exc()
    # schedule the next invocation
    self.thread = eventlet.spawn(self.run)
Invoke the function repeatedly on a timer.
389,083
def filter_with_schema(self, model=None, context=None):
    if model is None or self.schema is None:
        return
    self._schema.filter(
        model=model,
        context=context if self.use_context else None
    )
Perform model filtering with schema
389,084
def list_all_by_reqvip(self, id_vip, pagination):
    uri = "api/pools/pool_list_by_reqvip/"

    data = dict()
    data["start_record"] = pagination.start_record
    data["end_record"] = pagination.end_record
    data["asorting_cols"] = pagination.asorting_cols
    data["searchable_columns"] = pagination.searchable_columns
    data["custom_search"] = pagination.custom_search or None
    data["id_vip"] = id_vip or None

    return self.post(uri, data=data)
List All Pools To Populate Datatable

:param pagination: Object Pagination

:return: Following dictionary:
    {"total": <total>,
     "pools": [{
         "id": <id>,
         "default_port": <default_port>,
         "identifier": <identifier>,
         "healthcheck": <healthcheck>,
     }, ...]}

:raise NetworkAPIException: Failure while accessing the data source.
389,085
def setup_logging():
    # the log format and the option key are assumptions; the original
    # string literals were stripped from the source
    fmt = '%(asctime)s %(levelname)-8s %(name)s %(message)s'
    handler_stderr = logging.StreamHandler(sys.stderr)
    handler_stderr.setFormatter(logging.Formatter(fmt))
    if OPTIONS['--verbose'] == 1:
        handler_stderr.addFilter(logging.Filter(__name__))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(handler_stderr)
Called when __name__ == '__main__' below. Sets up logging library. All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages.
389,086
def _padleft(width, s, has_invisible=True):
    iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
    fmt = "{0:>%ds}" % iwidth
    return fmt.format(s)
Flush right. >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430' True
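A plain-ASCII sketch of the same behaviour (assuming `_strip_invisible` removes ANSI colour codes, as in tabulate):

    print(repr(_padleft(6, 'abc')))
    # '   abc' -- right-aligned in a visible field of width 6

    print(repr(_padleft(6, '\x1b[31mabc\x1b[0m')))
    # the ANSI colour codes count as zero-width, so the visible width is still 6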
389,087
def global_defaults():
    defaults = InvokeConfig.global_defaults()
    ours = {
        "connect_kwargs": {},
        "forward_agent": False,
        "gateway": None,
        "load_ssh_configs": True,
        "port": 22,
        "run": {"replace_env": True},
        "runners": {"remote": Remote},
        "ssh_config_path": None,
        "tasks": {"collection_name": "fabfile"},
        "timeouts": {"connect": None},
        "user": get_local_user(),
    }
    merge_dicts(defaults, ours)
    return defaults
Default configuration values and behavior toggles. Fabric only extends this method in order to make minor adjustments and additions to Invoke's `~invoke.config.Config.global_defaults`; see its documentation for the base values, such as the config subtrees controlling behavior of ``run`` or how ``tasks`` behave. For Fabric-specific modifications and additions to the Invoke-level defaults, see our own config docs at :ref:`default-values`. .. versionadded:: 2.0
389,088
def reward(self, action=None):
    reward = 0.

    # sparse completion reward
    if self._check_success():
        reward = 1.0

    # dense shaping rewards
    if self.reward_shaping:

        # reaching reward
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        dist = np.linalg.norm(gripper_site_pos - cube_pos)
        reaching_reward = 1 - np.tanh(10.0 * dist)
        reward += reaching_reward

        # grasping reward: both fingers must touch the cube
        touch_left_finger = False
        touch_right_finger = False
        for i in range(self.sim.data.ncon):
            c = self.sim.data.contact[i]
            if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id:
                touch_left_finger = True
            if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids:
                touch_left_finger = True
            if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id:
                touch_right_finger = True
            if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids:
                touch_right_finger = True
        if touch_left_finger and touch_right_finger:
            reward += 0.25

    return reward
Reward function for the task.

The dense reward has three components.

    Reaching: in [0, 1], to encourage the arm to reach the cube
    Grasping: in {0, 0.25}, non-zero if arm is grasping the cube
    Lifting: in {0, 1}, non-zero if arm has lifted the cube

The sparse reward only consists of the lifting component.

Args:
    action (np array): unused for this task

Returns:
    reward (float): the reward
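As a rough illustration of the reaching term, 1 - tanh(10 * dist) decays quickly with gripper-to-cube distance (a standalone numeric sketch):

    import numpy as np

    for dist in (0.0, 0.05, 0.1, 0.2, 0.3):
        print(dist, round(1 - np.tanh(10.0 * dist), 3))
    # 0.0  1.0
    # 0.05 0.538
    # 0.1  0.238
    # 0.2  0.036
    # 0.3  0.005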
389,089
def SendVoicemail(self, Username):
    # the command strings are assumptions for the stripped literals; they
    # follow the Skype API verbs for starting a voicemail call
    if self._Api.protocol >= 6:
        self._DoCommand('CALLVOICEMAIL %s' % Username)
    else:
        self._DoCommand('VOICEMAIL %s' % Username)
Sends a voicemail to a specified user. :Parameters: Username : str Skypename of the user. :note: Should return a `Voicemail` object. This is not implemented yet.
389,090
def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
               supported_tags=None, fake_daily_files_from_monthly=False,
               two_digit_year_break=None):
    # filled-in literals below ('Unknown tag', 'D'/'pad', the date suffix,
    # and the error string) are assumptions for the stripped originals
    if data_path is not None:
        if format_str is None:
            try:
                format_str = supported_tags[sat_id][tag]
            except KeyError:
                raise ValueError('Unknown tag')
        out = pysat.Files.from_os(data_path=data_path, format_str=format_str)

        if (not out.empty) and fake_daily_files_from_monthly:
            # pad the monthly entries forward to daily frequency and append
            # the date so each day resolves to its monthly file
            out.ix[out.index[-1] + pds.DateOffset(months=1)
                   - pds.DateOffset(days=1)] = out.iloc[-1]
            out = out.asfreq('D', 'pad')
            out = out + '_' + out.index.strftime('%Y-%m-%d')
            return out

        return out
    else:
        estr = 'A directory must be passed to the loading routine for <Instrument Code>'
        raise ValueError(estr)
Return a Pandas Series of every file for chosen satellite data.

This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset.

Parameters
----------
tag : (string or NoneType)
    Denotes type of file to load. Accepted types are <tag strings>. (default=None)
sat_id : (string or NoneType)
    Specifies the satellite ID for a constellation. Not used. (default=None)
data_path : (string or NoneType)
    Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
    User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
    keys are tags supported by list_files routine. Values are the default format_str values for key. (default=None)
fake_daily_files_from_monthly : bool
    Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, appends daily dates to monthly files internally. These dates are used by load routine in this module to provide data by day.

Returns
-------
pysat.Files.from_os : (pysat._files.Files)
    A class containing the verified available files

Examples
--------
::

    fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
    supported_tags = {'dc_b': fname}
    list_files = functools.partial(nasa_cdaweb_methods.list_files,
                                   supported_tags=supported_tags)

    ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
    supported_tags = {'': ivm_fname}
    list_files = functools.partial(cdw.list_files,
                                   supported_tags=supported_tags)
389,091
def read(self, to_read, timeout_ms):
    if not isinstance(to_read, baseinteger):
        raise TypeError("to_read can only be an instance of type baseinteger")
    if not isinstance(timeout_ms, baseinteger):
        raise TypeError("timeout_ms can only be an instance of type baseinteger")
    data = self._call("read", in_p=[to_read, timeout_ms])
    return data
Reads data from this file.

in to_read of type int
    Number of bytes to read.

in timeout_ms of type int
    Timeout (in ms) to wait for the operation to complete. Pass 0 for an infinite timeout.

return data of type str
    Array of data read.

raises :class:`OleErrorNotimpl`
    The method is not implemented yet.
389,092
def _split_symbol_mappings(df, exchanges):
    # the column and option names are filled in for the stripped literals
    mappings = df[list(mapping_columns)]
    with pd.option_context('mode.chained_assignment', None):
        mappings['sid'] = mappings.index
    mappings.reset_index(drop=True, inplace=True)

    # take the most recent sid->exchange mapping based on end date
    asset_exchange = df[
        ['exchange', 'end_date']
    ].sort_values('end_date').groupby(level=0)['exchange'].nth(-1)

    _check_symbol_mappings(mappings, exchanges, asset_exchange)
    return (
        df.groupby(level=0).apply(_check_asset_group),
        mappings,
    )
Split out the symbol: sid mappings from the raw data.

Parameters
----------
df : pd.DataFrame
    The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
    The exchanges table.

Returns
-------
asset_info : pd.DataFrame
    The asset info with one row per asset.
symbol_mappings : pd.DataFrame
    The dataframe of just symbol: sid mappings. The index will be the sid, then there will be three columns: symbol, start_date, and end_date.
389,093
def sleep_and_retry(func):
    @wraps(func)
    def wrapper(*args, **kargs):
        while True:
            try:
                return func(*args, **kargs)
            except RateLimitException as exception:
                # wait out the remainder of the rate-limit window, then retry
                time.sleep(exception.period_remaining)
    return wrapper
Return a wrapped function that rescues rate limit exceptions, sleeping the current thread until rate limit resets. :param function func: The function to decorate. :return: Decorated function. :rtype: function
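A usage sketch assuming the companion pieces from the ratelimit package, where the `limits` decorator raises RateLimitException once the quota is exceeded:

    from ratelimit import limits, sleep_and_retry

    @sleep_and_retry
    @limits(calls=15, period=60)   # at most 15 calls per minute
    def call_api():
        pass                       # the real API call goes here

    for _ in range(100):
        call_api()                 # sleeps instead of failing when throttled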
389,094
def filter_and_transform_data(df, settings):
    df["length_filter"] = True
    settings["filtered"] = False

    if settings.get("alength") and settings.get("bam"):
        settings["lengths_pointer"] = "aligned_lengths"
        logging.info("Using aligned read lengths for plotting.")
    else:
        settings["lengths_pointer"] = "lengths"
        logging.info("Using sequenced read lengths for plotting.")

    if settings.get("drop_outliers"):
        num_reads_prior = non_filtered_reads(df)
        df.loc[flag_length_outliers(df, settings["lengths_pointer"]), "length_filter"] = False
        num_reads_post = non_filtered_reads(df)
        logging.info("Hidding {} length outliers in length plots.".format(
            str(num_reads_prior - num_reads_post)))

    if settings.get("maxlength"):
        num_reads_prior = non_filtered_reads(df)
        df.loc[df[settings["lengths_pointer"]] > settings["maxlength"], "length_filter"] = False
        num_reads_post = non_filtered_reads(df)
        logging.info("Hidding {} reads longer than {}bp in length plots.".format(
            str(num_reads_prior - num_reads_post), str(settings["maxlength"])))

    if settings.get("minlength"):
        num_reads_prior = non_filtered_reads(df)
        df.loc[df[settings["lengths_pointer"]] < settings["minlength"], "length_filter"] = False
        num_reads_post = non_filtered_reads(df)
        logging.info("Hidding {} reads shorter than {}bp in length plots.".format(
            str(num_reads_prior - num_reads_post), str(settings["minlength"])))

    if settings.get("minqual"):
        num_reads_prior = non_filtered_reads(df)
        df = df.loc[df["quals"] > settings["minqual"]].copy()
        num_reads_post = non_filtered_reads(df)
        logging.info("Removing {} reads with quality below Q{}.".format(
            str(num_reads_prior - num_reads_post), str(settings["minqual"])))
        settings["filtered"] = True

    if settings.get("loglength"):
        df["log_" + settings["lengths_pointer"]] = np.log10(df[settings["lengths_pointer"]])
        settings["lengths_pointer"] = "log_" + settings["lengths_pointer"]
        logging.info("Using log10 scaled read lengths.")
        settings["logBool"] = True
    else:
        settings["logBool"] = False

    if settings.get("runtime_until"):
        num_reads_prior = non_filtered_reads(df)
        df = df[df.start_time < timedelta(hours=settings["runtime_until"])]
        num_reads_post = non_filtered_reads(df)
        logging.info("Removing {} reads generated after {} hours in the run.".format(
            str(num_reads_prior - num_reads_post), str(settings["runtime_until"])))
        settings["filtered"] = True

    # always drop basecaller artefacts: very short reads with very high quality
    if "quals" in df:
        num_reads_prior = len(df)
        df = df.loc[~((df["lengths"] < 20) & (df["quals"] > 30))].copy()
        num_reads_post = len(df)
        if num_reads_prior - num_reads_post > 0:
            logging.info(
                "Removed {} artefactual reads with very short length and very high quality."
                .format(num_reads_prior - num_reads_post))
            settings["filtered"] = True

    if settings.get("downsample"):
        new_size = min(settings["downsample"], len(df))
        logging.info("Downsampling the dataset from {} to {} reads".format(
            len(df), new_size))
        df = df.sample(new_size)
        settings["filtered"] = True

    if settings.get("percentqual"):
        df["quals"] = df["quals"].apply(phred_to_percent)
        logging.info("Converting quality scores to theoretical percent identities.")

    return df, settings
Perform filtering on the data based on arguments set on commandline

- use aligned length or sequenced length (bam mode only)
- hide outliers from length plots*
- hide reads longer than maxlength or shorter than minlength from length plots*
- filter reads with a quality below minqual
- use log10 scaled reads rather than normal
- use empirical percent accuracy rather than phred score quality
- downsample reads to args.downsample
- always: drop reads which are basecaller artefacts judged by length below 20 and quality above 30

* using a boolean column length_filter
389,095
def MatrixSolve(a, rhs, adj):
    # note the trailing comma: the op returns a one-element tuple of outputs
    return np.linalg.solve(a if not adj else _adjoint(a), rhs),
Matrix solve op.
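A small standalone sketch (assuming `_adjoint` returns the conjugate transpose, consistent with the NumPy-backed op emulation):

    import numpy as np

    a = np.array([[2.0, 0.0],
                  [1.0, 3.0]])
    rhs = np.array([[4.0],
                    [11.0]])

    (x,) = MatrixSolve(a, rhs, adj=False)   # solves a @ x = rhs
    print(x.ravel())                        # [2. 3.]

    (y,) = MatrixSolve(a, rhs, adj=True)    # solves a.conj().T @ y = rhs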
389,096
def validate(self, collection: BioCCollection):
    for document in collection.documents:
        self.validate_doc(document)
Validate a single collection.
389,097
def _timeout_handler(self, signum, frame):
    # the message template is an assumption; the original literal was stripped
    msgfmt = 'timeout after {} seconds'
    self.exit(code=self._timeout_code, message=msgfmt.format(self._timeout_delay))
internal timeout handler
389,098
def write(self, data):
    if self._ignore_write_operations:
        return
    assert self.is_connected()
    try:
        self._connection.send(data.encode())
    except socket.error:
        # the client went away: close and ignore any further writes
        self.close()
        self._ignore_write_operations = True
Sends some data to the client.
389,099
def _read_configuration(config_filename):
    config = ConfigParser()
    config.read(config_filename)

    # the section/option names and the path separator are assumptions;
    # the original string literals were stripped
    if 'supplement' in config['database']:
        path = os.path.dirname(config_filename) + '/' + config.get('database', 'supplement')
        config_supplement = ConfigParser()
        config_supplement.read(path)
    else:
        config_supplement = None

    return config, config_supplement
Checks the supplement file. :param str config_filename: The name of the configuration file. :rtype: (configparser.ConfigParser,configparser.ConfigParser)