code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def _cleanup(lst): clean = [] for ele in lst: if ele and isinstance(ele, dict): clean.append(ele) return clean
Return a list of non-empty dictionaries.
def render(self, name, color=True, just=None, **kwargs): res = self._render(name, color=color, **kwargs) invis_chars = self.invisible_chars[name] if color else 0 self.txtwidth = _lenlastline(res) - invis_chars just = self.justify if (just is None) else just res = res.rjust(self.width + invis_chars) self.width = _lenlastline(res) - invis_chars return res
Render the selected prompt. Parameters ---------- name : str Which prompt to render. One of 'in', 'in2', 'out', 'rewrite' color : bool If True (default), include ANSI escape sequences for a coloured prompt. just : bool If True, justify the prompt to the width of the last prompt. The default is stored in self.justify. **kwargs : Additional arguments will be passed to the string formatting operation, so they can override the values that would otherwise fill in the template. Returns ------- A string containing the rendered prompt.
def _set_data(self): if getattr(self, , False) and not getattr(self, , False) and not getattr(self, , False): _x = XVariable() _y = YVariable() _x.contribute_to_class(self, , self.data) _y.contribute_to_class(self, , self.data) self[] = zip(self._x.points, self._y.points) else: for axis in (, ): axis_obj = getattr(self, axis, False) if not axis_obj: raise exception.MissingAxisException("%s missing" % axis) if not getattr(axis_obj, , False): raise exception.MissingDataException() self[] = zip(self._x.points, self._y.points)
This method will be called to set Series data
def seed_aws_organization(ctx, owner): swag = create_swag_from_ctx(ctx) accounts = swag.get_all() _ids = [result.get() for result in accounts] client = boto3.client() paginator = client.get_paginator() response_iterator = paginator.paginate() count = 0 for response in response_iterator: for account in response[]: if account[] in _ids: click.echo(click.style( .format(account[]), fg=) ) continue if account[] == : status = else: status = data = { : account[], : account[], : , : account[], : owner, : , : [], : False, : [{: , : status}] } click.echo(click.style( .format(data[]), fg=) ) count += 1 swag.create(data, dry_run=ctx.dry_run) click.echo(.format(count))
Seeds SWAG from an AWS organziation.
def timezone(client, location, timestamp=None, language=None): params = { "location": convert.latlng(location), "timestamp": convert.time(timestamp or datetime.utcnow()) } if language: params["language"] = language return client._request( "/maps/api/timezone/json", params)
Get time zone for a location on the earth, as well as that location's time offset from UTC. :param location: The latitude/longitude value representing the location to look up. :type location: string, dict, list, or tuple :param timestamp: Timestamp specifies the desired time as seconds since midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to determine whether or not Daylight Savings should be applied. Times before 1970 can be expressed as negative values. Optional. Defaults to ``datetime.utcnow()``. :type timestamp: int or datetime.datetime :param language: The language in which to return results. :type language: string :rtype: dict
def analyse(self, traj, network, current_subrun, subrun_list, network_dict): if len(subrun_list)==0: traj.f_add_result(Brian2MonitorResult, , self.spike_monitor, comment = ) traj.f_add_result(Brian2MonitorResult, , self.V_monitor, comment = ) traj.f_add_result(Brian2MonitorResult, , self.I_syn_e_monitor, comment = ) traj.f_add_result(Brian2MonitorResult, , self.I_syn_i_monitor, comment = ) print() if traj.parameters.analysis.make_plots: self._print_graphs(traj)
Extracts monitor data and plots. Data extraction is done if all subruns have been completed, i.e. `len(subrun_list)==0` First, extracts results from the monitors and stores them into `traj`. Next, uses the extracted data for plots. :param traj: Trajectory container Adds: Data from monitors :param network: The BRIAN network :param current_subrun: BrianParameter :param subrun_list: List of coming subruns :param network_dict: Dictionary of items shared among all components
def _checkSetpointValue( setpointvalue, maxvalue ): if maxvalue is None: raise TypeError() minimalmodbus._checkNumerical(setpointvalue, minvalue=0, maxvalue=maxvalue, description=)
Check that the given setpointvalue is valid. Args: * setpointvalue (numerical): The setpoint value to be checked. Must be positive. * maxvalue (numerical): Upper limit for setpoint value. Must be positive. Raises: TypeError, ValueError
def init_config(self, app): _vars = [, , ] for k in dir(config): if k.startswith() or k in [ , ] + _vars: app.config.setdefault(k, getattr(config, k)) for varname in _vars: theme_varname = .format(varname) if app.config[theme_varname] is None: app.config[theme_varname] = app.config[varname] app.config.setdefault( , config.ADMIN_BASE_TEMPLATE)
Initialize configuration. :param app: An instance of :class:`~flask.Flask`.
def print_stats(self, stream=None): if not stream: stream = sys.stdout self.metadata.sort(key=lambda x: -x.size) stream.write( % (, , , )) for g in self.metadata: stream.write( % (g.id, g.size, trunc(g.type, 12), trunc(g.str, 46))) stream.write( % \ (self.count, self.num_in_cycles, pp(self.total_size)))
Log annotated garbage objects to console or file. :param stream: open file, uses sys.stdout if not given
def delete_archives(self, *archives): _archives = [] for archive in archives: _archives.append(os.path.basename(archive)) archives = _archives[:] ret = {: {}, : {}} for archive in self.archives(): arc_dir = os.path.dirname(archive) archive = os.path.basename(archive) if archives and archive in archives or not archives: archive = os.path.join(arc_dir, archive) try: os.unlink(archive) ret[][archive] = except Exception as err: ret[][archive] = str(err) ret[][archive] = return ret
Delete archives :return:
def multenterbox(msg="Fill in values for the fields.", title=" ", fields=(), values=()): r return bb.__multfillablebox(msg, title, fields, values, None)
r""" Show screen with multiple data entry fields. If there are fewer values than names, the list of values is padded with empty strings until the number of values is the same as the number of names. If there are more values than names, the list of values is truncated so that there are as many values as names. Returns a list of the values of the fields, or None if the user cancels the operation. Here is some example code, that shows how values returned from multenterbox can be checked for validity before they are accepted:: msg = "Enter your personal information" title = "Credit Card Application" fieldNames = ["Name","Street Address","City","State","ZipCode"] fieldValues = [] # we start with blanks for the values fieldValues = multenterbox(msg,title, fieldNames) # make sure that none of the fields was left blank while 1: if fieldValues is None: break errmsg = "" for i in range(len(fieldNames)): if fieldValues[i].strip() == "": errmsg += ('"%s" is a required field.\n\n' % fieldNames[i]) if errmsg == "": break # no problems found fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues) print("Reply was: %s" % str(fieldValues)) :param str msg: the msg to be displayed. :param str title: the window title :param list fields: a list of fieldnames. :param list values: a list of field values :return: String
def _forwardImplementation(self, inbuf, outbuf): assert self.module propensities = self.module.getActionValues(0) summedProps = sum(propensities) probabilities = propensities / summedProps action = eventGenerator(probabilities) outbuf[:] = scipy.array([action])
Proportional probability method.
def query(url, **kwargs): **key1=val1&key2=val2*<xml>somecontent</xml> opts = __opts__.copy() if in kwargs: opts.update(kwargs[]) del kwargs[] return salt.utils.http.query(url=url, opts=opts, **kwargs)
Query a resource, and decode the return data Passes through all the parameters described in the :py:func:`utils.http.query function <salt.utils.http.query>`: .. autofunction:: salt.utils.http.query CLI Example: .. code-block:: bash salt '*' http.query http://somelink.com/ salt '*' http.query http://somelink.com/ method=POST \ params='key1=val1&key2=val2' salt '*' http.query http://somelink.com/ method=POST \ data='<xml>somecontent</xml>' For more information about the ``http.query`` module, refer to the :ref:`HTTP Tutorial <tutorial-http>`.
def pipe_subelement(context=None, _INPUT=None, conf=None, **kwargs): path = DotDict(conf).get(, **kwargs) for item in _INPUT: element = DotDict(item).get(path, **kwargs) for i in utils.gen_items(element): yield {: i} if item.get(): break
An operator extracts select sub-elements from a feed. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) conf : {'path': {'value': <element path>}} Yields ------ _OUTPUT : items
def get_target_forums_for_moved_topics(self, user): return [f for f in self._get_forums_for_user(user, [, ]) if f.is_forum]
Returns a list of forums in which the considered user can add topics that have been moved from another forum.
def import_split(import_name): obj = None attr = None if in import_name: module, obj = import_name.split(, 1) if in obj: obj, attr = obj.rsplit(, 1) elif in import_name: module, obj = import_name.rsplit(, 1) else: module = import_name return module, obj, attr
takes a dotted string path and returns the components: import_split('path') == 'path', None, None import_split('path.part.object') == 'path.part', 'object', None import_split('path.part:object') == 'path.part', 'object', None import_split('path.part:object.attribute') == 'path.part', 'object', 'attribute'
def makescacoldesc(columnname, value, datamanagertype=, datamanagergroup=, options=0, maxlen=0, comment=, valuetype=, keywords={}): vtype = valuetype if vtype == : vtype = _value_type_name(value) rec2 = {: vtype, : datamanagertype, : datamanagergroup, : options, : maxlen, : comment, : keywords} return {: columnname, : rec2}
Create description of a scalar column. A description for a scalar column can be created from a name for the column and a data value, which is used only to determine the type of the column. Note that a dict value is also possible. It is possible to create the column description in more detail by giving the data manager name, group, option, and comment as well. The data manager type tells which data manager (storage manager) is used to store the columns. The data manager type and group are explained in more detail in the `casacore Tables <../../casacore/doc/html/group__Tables__module.html>`_ documentation. It returns a dict with fields `name` and `desc` which can thereafter be used to build a table description using function :func:`maketabdesc`. `columname` Name of column `value` Example data value used to determine the column's data type. It is only used if argument `valuetype` is not given. `datamanagertype` Type of data manager which can be one of StandardStMan (default) or IncrementalStMan. The latter one can save disk space if many subsequent cells in the column will have the same value. `datamanagergroup` Data manager group. Only for the expert user. `options` Options. Need not be filled in. `maxlen` Maximum length of string values in a column. Default 0 means unlimited. `comment` Comment: informational for user. `valuetype` A string giving the column's data type. Possible data types are bool (or boolean), uchar (or byte), short, int (or integer), uint, float, double, complex, dcomplex, and string. 'keywords' A dict defining initial keywords for the column. For example:: scd1 = makescacoldesc("col2", "")) scd2 = makescacoldesc("col1", 1, "IncrementalStMan") td = maketabdesc([scd1, scd2]) This creates a table description consisting of an integer column `col1`, and a string column `col2`. `col1` uses the IncrementalStMan storage manager, while `col2` uses the default storage manager StandardStMan.
def from_elements(cls, items=None): node = cls() if items is None: node.items = [] else: node.items = [ (const_factory(k), const_factory(v) if _is_const(v) else v) for k, v in items.items() if _is_const(k) ] return node
Create a :class:`Dict` of constants from a live dictionary. :param items: The items to store in the node. :type items: dict :returns: The created dictionary node. :rtype: Dict
def components(self): from pandas import DataFrame columns = [, , , , , , ] hasnans = self._hasnans if hasnans: def f(x): if isna(x): return [np.nan] * len(columns) return x.components else: def f(x): return x.components result = DataFrame([f(x) for x in self], columns=columns) if not hasnans: result = result.astype() return result
Return a dataframe of the components (days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. Returns ------- a DataFrame
def idxmin(self, axis=0, skipna=True, *args, **kwargs): skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmin. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan
def __field_to_subfields(self, field): if not isinstance(field, messages.MessageField): return [[field]] result = [] for subfield in sorted(field.message_type.all_fields(), key=lambda f: f.number): subfield_results = self.__field_to_subfields(subfield) for subfields_list in subfield_results: subfields_list.insert(0, field) result.append(subfields_list) return result
Fully describes data represented by field, including the nested case. In the case that the field is not a message field, we have no fields nested within a message definition, so we can simply return that field. However, in the nested case, we can't simply describe the data with one field or even with one chain of fields. For example, if we have a message field m_field = messages.MessageField(RefClass, 1) which references a class with two fields: class RefClass(messages.Message): one = messages.StringField(1) two = messages.IntegerField(2) then we would need to include both one and two to represent all the data contained. Calling __field_to_subfields(m_field) would return: [ [<MessageField "m_field">, <StringField "one">], [<MessageField "m_field">, <StringField "two">], ] If the second field was instead a message field class RefClass(messages.Message): one = messages.StringField(1) two = messages.MessageField(OtherRefClass, 2) referencing another class with two fields class OtherRefClass(messages.Message): three = messages.BooleanField(1) four = messages.FloatField(2) then we would need to recurse one level deeper for two. With this change, calling __field_to_subfields(m_field) would return: [ [<MessageField "m_field">, <StringField "one">], [<MessageField "m_field">, <StringField "two">, <StringField "three">], [<MessageField "m_field">, <StringField "two">, <StringField "four">], ] Args: field: An instance of a subclass of messages.Field. Returns: A list of lists, where each sublist is a list of fields.
def list(self, **params): _, _, text_messages = self.http_client.get("/text_messages", params=params) return text_messages
Retrieve text messages Returns Text Messages, according to the parameters provided :calls: ``get /text_messages`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of TextMessages. :rtype: list
def add_point_region(self, y: float, x: float) -> Graphic: graphic = Graphics.PointGraphic() graphic.position = Geometry.FloatPoint(y, x) self.__display_item.add_graphic(graphic) return Graphic(graphic)
Add a point graphic to the data item. :param x: The x coordinate, in relative units [0.0, 1.0] :param y: The y coordinate, in relative units [0.0, 1.0] :return: The :py:class:`nion.swift.Facade.Graphic` object that was added. .. versionadded:: 1.0 Scriptable: Yes
def getTJstr(text, glyphs, simple, ordering): if text.startswith("[<") and text.endswith(">]"): return text if not bool(text): return "[<>]" if simple: if glyphs is None: otxt = "".join([hex(ord(c))[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text]) else: otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text]) return "[<" + otxt + ">]" if ordering < 0: otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(4, "0") for c in text]) else: otxt = "".join([hex(ord(c))[2:].rjust(4, "0") for c in text]) return "[<" + otxt + ">]"
Return a PDF string enclosed in [] brackets, suitable for the PDF TJ operator. Notes: The input string is converted to either 2 or 4 hex digits per character. Args: simple: no glyphs: 2-chars, use char codes as the glyph glyphs: 2-chars, use glyphs instead of char codes (Symbol, ZapfDingbats) not simple: ordering < 0: 4-chars, use glyphs not char codes ordering >=0: a CJK font! 4 chars, use char codes as glyphs
def clone(self, screen, scene): if self._on_close is None or isfunction(self._on_close): scene.add_effect(PopUpDialog(screen, self._text, self._buttons, self._on_close))
Create a clone of this Dialog into a new Screen. :param screen: The new Screen object to clone into. :param scene: The new Scene object to clone into.
def set_base_headers(self, hdr): hdr[] = (__version__, ) hdr[] = (self.__class__.__name__, ) hdr[] = (self.__version__, ) return hdr
Set metadata in FITS headers.
def clear(self): with self._conn: self._conn.execute() self._conn.execute()
Clear all work items from the session. This removes any associated results as well.
def p_intermfluent_def(self, p): if len(p) == 16: p[0] = PVariable(name=p[1], fluent_type=, range_type=p[9], param_types=p[3], level=p[13]) else: p[0] = PVariable(name=p[1], fluent_type=, range_type=p[6], level=p[10])
intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI | IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
def authenticate_device(self, api_token, device_token, email=None, user_url=None, override=False, fetch=True): if (self.context.has_auth_params() and not override): raise OverrideError() if (not api_token or not device_token or (not email and not user_url) or not self.context.authorize(, api_token=api_token, user_email=email, user_url=user_url, device_token=device_token)): raise AuthUsageError(self.context, ) if fetch: user = self.user(email) if email else self.user() return user.refresh() else: return True
Set credentials for Device authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. device_token (str): Physical device identifier. You will receive this from a user.devices.create call or from users.create. email (str, optional): User's email address, required if user_url is not provided. user_url (str, optional): User's Gem url. override (boolean, optional): Replace existing Application credentials. fetch (boolean, optional): Return the authenticated User. Returns: An User object if `fetch` is True.
def circular_shift(X): N = X.shape[0] L = np.zeros(X.shape) for i in range(N): L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)]) return L
Shifts circularly the X squre matrix in order to get a time-lag matrix.
def after_serving(self, func: Callable) -> Callable: handler = ensure_coroutine(func) self.after_serving_funcs.append(handler) return func
Add a after serving function. This will allow the function provided to be called once after anything is served (after last byte is sent). This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_serving def func(): ... Arguments: func: The function itself.
def file_size(self): if self._file_size is None: if isinstance(self._path_or_fd, string_types()): self._file_size = os.stat(self._path_or_fd).st_size else: self._file_size = os.fstat(self._path_or_fd).st_size return self._file_size
:return: size of file we manager
def iterkeys(obj): "Get key iterator from dictionary for Python 2 and 3" return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()
Get key iterator from dictionary for Python 2 and 3
def dump(self): import tempfile with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj: json.dump(self.info, fobj) return fobj.name
Save analytics report to a temporary file. Returns: str: path to the temporary file that contains the analytics report.
def flush(self): i = 0 while self._frame_data.is_dirty and i < 10: i += 1 time.sleep(0.1)
Wait until history is read but no more than 10 cycles in case a browser session is closed.
def _clean_suffix(string, suffix): suffix_len = len(suffix) if len(string) < suffix_len: raise ValueError("A suffix can not be bigger than string argument.") if string.endswith(suffix): return string[0:-suffix_len] else: return string
If string endswith the suffix, remove it. Else leave it alone.
async def list_batches(self, request): paging_controls = self._get_paging_controls(request) validator_query = client_batch_pb2.ClientBatchListRequest( head_id=self._get_head_id(request), batch_ids=self._get_filter_ids(request), sorting=self._get_sorting_message(request, "default"), paging=self._make_paging_message(paging_controls)) response = await self._query_validator( Message.CLIENT_BATCH_LIST_REQUEST, client_batch_pb2.ClientBatchListResponse, validator_query) return self._wrap_paginated_response( request=request, response=response, controls=paging_controls, data=[self._expand_batch(b) for b in response[]])
Fetches list of batches from validator, optionally filtered by id. Request: query: - head: The id of the block to use as the head of the chain - id: Comma separated list of batch ids to include in results Response: data: JSON array of fully expanded Batch objects head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block paging: Paging info and nav, like total resources and a next link
def supported(aln): def col_consensus(columns): for col in columns: if ( (col.count() >= len(col)/2) or else: yield most_common[0][0] return list(col_consensus(zip(*aln)))
Get only the supported consensus residues in each column. Meaning: - Omit majority-gap columns - Omit columns where no residue type appears more than once - In case of a tie, return all the top-scoring residue types (no prioritization) Returns a *list* -- not a string! -- where elements are strings of the consensus character(s), potentially a gap ('-') or multiple chars ('KR').
def get_not_num(self, seq, num=0): ind = next((i for i, x in enumerate(seq) if x != num), None) if ind == None: return self.board_size else: return ind
Find the index of first non num element
def OSLibpath(self): ref = os.path.join(self.si.WindowsSdkDir, ) libpath = [] if self.vc_ver <= 9.0: libpath += self.OSLibraries if self.vc_ver >= 11.0: libpath += [os.path.join(ref, r)] if self.vc_ver >= 14.0: libpath += [ ref, os.path.join(self.si.WindowsSdkDir, ), os.path.join( ref, , , ), os.path.join( ref, , , ), os.path.join( ref, , , ), os.path.join( self.si.WindowsSdkDir, , , % self.vc_ver, , , , ), ] return libpath
Microsoft Windows SDK Libraries Paths
def start(self): self.connect() if not self.isAlive(): super(WAMPClient,self).start() self.hello() return self
Initialize websockets, say hello, and start listening for events
def set_global_provenance(wf: Workflow, registry: Registry): stack = [wf.root] while stack: i = stack.pop() n = wf.nodes[i] if n.prov: continue if is_node_ready(n): job_msg = registry.deep_encode(n) n.prov = prov_key(job_msg) continue deps = wf.inverse_links[i] todo = [j for j in deps if not wf.nodes[j].prov] if not todo: link_dict = dict(links(wf, i, deps)) link_prov = registry.deep_encode( [link_dict[arg] for arg in empty_args(n)]) job_msg = registry.deep_encode(n) n.prov = prov_key(job_msg, link_prov) continue stack.append(i) stack.extend(deps)
Compute a global provenance key for the entire workflow before evaluation. This key can be used to store and retrieve results in a database. The key computed in this stage is different from the (local) provenance key that can be computed for a node if all its arguments are known. In cases where a result derives from other results that were computed in child workflows, we can prevent the workflow system from reevaluating the results at each step to find that we already had the end-result somewhere. This is where the global prov-key comes in. Each node is assigned a `prov` attribute. If all arguments for this node are known, this key will be the same as the local prov-key. If some of the arguments are still empty, we add the global prov-keys of the dependent nodes to the hash. In this algorithm we traverse from the bottom of the DAG to the top and back using a stack. This allows us to compute the keys for each node without modifying the node other than setting the `prov` attribute with the resulting key.
def before(self, callback: Union[Callable, str]) -> "Control": if isinstance(callback, Control): callback = callback._before self._before = callback return self
Register a control method that reacts before the trigger method is called. Parameters: callback: The control method. If given as a callable, then that function will be used as the callback. If given as a string, then the control will look up a method with that name when reacting (useful when subclassing).
def persistent_popen_align3(data, samples, chunk): with open(chunk, ) as infile: clusts = infile.read().split("//\n//\n")[:-1] samples.sort(key=lambda x: x.name) snames = [sample.name for sample in samples] maxlen = data._hackersonly["max_fragment_length"] + 20 indels = np.zeros((len(samples), len(clusts), maxlen), dtype=np.bool_) duples = np.zeros(len(clusts), dtype=np.bool_) proc = sps.Popen(["bash"], stdin=sps.PIPE, stdout=sps.PIPE, universal_newlines=True) allstack = [] for ldx in xrange(len(clusts)): aligned = [] istack = [] lines = clusts[ldx].strip().split("\n") names = lines[::2] seqs = lines[1::2] align1 = "" align2 = "" names = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)] try: clust1, clust2 = zip(*[i.split("nnnn") for i in seqs]) cl1 = "\n".join(itertools.chain(*zip(names, clust1))) cl2 = "\n".join(itertools.chain(*zip(names, clust2))) shape = (len(seqs), max([len(i) for i in seqs])) arrseqs = np.zeros(shape, dtype="S1") for row in range(arrseqs.shape[0]): seqsrow = seqs[row] arrseqs[row, :len(seqsrow)] = list(seqsrow) amask = np.char.islower(arrseqs) save_alleles = np.any(amask) cmd1 = "echo -e | {} -quiet -in - ; echo {}"\ .format(cl1, ipyrad.bins.muscle, "//") print(cmd1, file=proc.stdin) for line in iter(proc.stdout.readline, "//\n"): align1 += line cmd2 = "echo -e | {} -quiet -in - ; echo {}"\ .format(cl2, ipyrad.bins.muscle, "//") print(cmd2, file=proc.stdin) for line in iter(proc.stdout.readline, "//\n"): align2 += line la1 = align1[1:].split("\n>") la2 = align2[1:].split("\n>") dalign1 = dict([i.split("\n", 1) for i in la1]) dalign2 = dict([i.split("\n", 1) for i in la2]) keys = sorted(dalign1.keys(), key=DEREP) keys2 = sorted(dalign2.keys(), key=DEREP) if not len(keys) == len(keys2): LOGGER.error("R1 and R2 results differ in length: "\ + "\nR1 - {}\nR2 - {}".format(keys, keys2)) continue for kidx, key in enumerate(keys): concatseq = dalign1[key].replace("\n", "")+\ "nnnn"+dalign2[key].replace("\n", "") if save_alleles: newmask = 
np.zeros(len(concatseq), dtype=np.bool_) indidx = np.where(np.array(list(concatseq)) == "-")[0] if indidx.size: allrows = np.arange(amask.shape[1]) mask = np.ones(allrows.shape[0], dtype=np.bool_) for idx in indidx: if idx < mask.shape[0]: mask[idx] = False not_idx = allrows[mask == 1] newmask[not_idx] = amask[kidx, :not_idx.shape[0]] else: newmask = amask[kidx] concatarr = np.array(list(concatseq)) concatarr[newmask] = np.char.lower(concatarr[newmask]) concatseq = concatarr.tostring() aligned.append("{}\n{}".format(key, concatseq)) except IndexError as inst: LOGGER.debug("Error in PE - ldx: {}".format()) LOGGER.debug("Vars: {}".format(dict(globals(), **locals()))) raise except ValueError: cl1 = "\n".join(["\n".join(i) for i in zip(names, seqs)]) shape = (len(seqs), max([len(i) for i in seqs])) arrseqs = np.zeros(shape, dtype="S1") for row in range(arrseqs.shape[0]): seqsrow = seqs[row] arrseqs[row, :len(seqsrow)] = list(seqsrow) amask = np.char.islower(arrseqs) save_alleles = np.any(amask) cmd1 = "echo -e | {} -quiet -in - ; echo {}"\ .format(cl1, ipyrad.bins.muscle, "//") print(cmd1, file=proc.stdin) for line in iter(proc.stdout.readline, "//\n"): align1 += line la1 = align1[1:].split("\n>") dalign1 = dict([i.split("\n", 1) for i in la1]) keys = sorted(dalign1.keys(), key=DEREP) for kidx, key in enumerate(keys): concatseq = dalign1[key].replace("\n", "") if save_alleles: newmask = np.zeros(len(concatseq), dtype=np.bool_) indidx = np.where(np.array(list(concatseq)) == "-")[0] if indidx.size: allrows = np.arange(amask.shape[1]) mask = np.ones(allrows.shape[0], dtype=np.bool_) for idx in indidx: if idx < mask.shape[0]: mask[idx] = False not_idx = allrows[mask == 1] newmask[not_idx] = amask[kidx, :not_idx.shape[0]] else: newmask = amask[kidx] concatarr = np.array(list(concatseq)) concatarr[newmask] = np.char.lower(concatarr[newmask]) concatseq = concatarr.tostring() aligned.append("{}\n{}".format(key, concatseq)) aseqs = np.vstack([list(i.split("\n")[1]) for i in 
aligned]) LOGGER.info("\naseqs here: %s", aseqs) sidxs = [snames.index(key.rsplit("_", 1)[0]) for key in keys] thislen = min(maxlen, aseqs.shape[1]) for idx in xrange(aseqs.shape[0]): newn = aligned[idx].split(";", 1)[0] istack.append("{}\n{}".format(newn, aseqs[idx, :thislen].tostring())) sidx = sidxs[idx] indels[sidx, ldx, :thislen] = aseqs[idx, :thislen] == "-" if istack: allstack.append("\n".join(istack)) proc.stdout.close() if proc.stderr: proc.stderr.close() proc.stdin.close() proc.wait() odx = chunk.rsplit("_")[-1] alignfile = os.path.join(data.tmpdir, "align_{}.fa".format(odx)) with open(alignfile, ) as outfile: outfile.write("\n//\n//\n".join(allstack)+"\n") os.remove(chunk) ifile = os.path.join(data.tmpdir, "indels_{}.tmp.npy".format(odx)) np.save(ifile, indels) dfile = os.path.join(data.tmpdir, "duples_{}.tmp.npy".format(odx)) np.save(dfile, duples)
notes
def _did_receive_response(self, connection):
    """Handle completion of a NURESTConnection.

    Ignores timed-out connections, lets the connection process its
    response, and invokes the caller's callback when one was registered.
    """
    if connection.has_timeouted:
        bambou_logger.info("NURESTConnection has timeout.")
        return

    has_callbacks = connection.has_callbacks()
    # Only broadcast a notification when no explicit callback will consume it.
    should_post = not has_callbacks

    if connection.handle_response_for_connection(should_post=should_post) and has_callbacks:
        # FIX: the subscript key was missing from the source; bambou stores
        # the caller-supplied callback under the 'local' key — TODO confirm
        # against upstream bambou.
        callback = connection.callbacks['local']
        callback(connection)
Receive a response from the connection
# Build a human-readable report for a ``requests.Response``: request line,
# optional request/response headers and bodies, and (optionally) the reports
# of any redirect responses prepended in chronological order.
# NOTE(review): every string literal in this function (the .format() url
# template, the header/body templates, the .join() separators and the
# .center(72, ...) fill character) has been lost from the source; the code is
# not runnable as-is and the original templates must be recovered upstream.
def report_response(response, request_headers=True, request_body=True, response_headers=False, response_body=False, redirection=False): url = .format( method=response.request.method, url=response.url, status=response.status_code, elapsed=response.elapsed.total_seconds() * 1000 ) pieces = [url] if request_headers: request_headers = .format(request_headers=response.request.headers) pieces.append(request_headers) if request_body: request_body = .format(request_body=response.request.body) pieces.append(request_body) if response_headers: response_headers = .format(response_headers=response.headers) pieces.append(response_headers) if response_body: response_body = .format(response_body=response.text) pieces.append(response_body) reporter = .join(pieces) if redirection and response.history: for h in response.history[::-1]: redirect_reporter = report_response( h, request_headers, request_body, response_headers, response_body, redirection=False ) reporter = .join([redirect_reporter, .center(72, ), reporter]) return reporter
生成响应报告 :param response: ``requests.models.Response`` 对象 :param request_headers: 是否加入请求头 :param request_body: 是否加入请求体 :param response_headers: 是否加入响应头 :param response_body: 是否加入响应体 :param redirection: 是否加入重定向响应 :return: str
# Repack every template against every repack config in a pool of 10 worker
# processes (each repack is spawned as a "grr_client_build repack" child).
# Windows .exe templates additionally get a --debug_build repack, and may be
# signed inline (with a passphrase prompted up front) or collected for bulk
# signing afterwards, as are .rpm templates. On KeyboardInterrupt or a
# repacking error the pool is terminated. Left byte-identical: the
# multiprocessing/signing control flow is too order-sensitive to restyle.
def RepackTemplates(self, repack_configs, templates, output_dir, config=None, sign=False, signed_template=False): pool = multiprocessing.Pool(processes=10) results = [] bulk_sign_installers = False for repack_config in repack_configs: for template in templates: repack_args = ["grr_client_build"] if config: repack_args.extend(["--config", config]) repack_args.extend([ "--secondary_configs", repack_config, "repack", "--template", template, "--output_dir", self.GetOutputDir(output_dir, repack_config) ]) passwd = None if sign: if template.endswith(".exe.zip"): if platform.system() != "Windows": passwd = self.GetWindowsPassphrase() repack_args.append("--sign") else: bulk_sign_installers = True if signed_template: repack_args.append("--signed_template") elif template.endswith(".rpm.zip"): bulk_sign_installers = True print("Calling %s" % " ".join(repack_args)) results.append( pool.apply_async(SpawnProcess, (repack_args,), dict(passwd=passwd))) if template.endswith(".exe.zip"): debug_args = [] debug_args.extend(repack_args) debug_args.append("--debug_build") print("Calling %s" % " ".join(debug_args)) results.append( pool.apply_async(SpawnProcess, (debug_args,), dict(passwd=passwd))) try: pool.close() for result_obj in results: result_obj.get(9999) pool.join() except KeyboardInterrupt: print("parent received control-c") pool.terminate() except ErrorDuringRepacking: pool.terminate() raise if bulk_sign_installers: to_sign = {} for root, _, files in os.walk(output_dir): for f in files: if f.endswith(".exe"): to_sign.setdefault("windows", []).append(os.path.join(root, f)) elif f.endswith(".rpm"): to_sign.setdefault("rpm", []).append(os.path.join(root, f)) if to_sign.get("windows"): signer = repacking.TemplateRepacker().GetSigner([ "ClientBuilder Context", "Platform:%s" % platform.system(), "Target:Windows" ]) signer.SignFiles(to_sign.get("windows")) if to_sign.get("rpm"): signer = repacking.TemplateRepacker().GetSigner([ "ClientBuilder Context", "Platform:%s" %
platform.system(), "Target:Linux", "Target:LinuxRpm" ]) signer.AddSignatureToRPMs(to_sign.get("rpm"))
Call repacker in a subprocess.
def on_batch_end(self, last_target, train, **kwargs):
    "Update the metrics if not `train`"
    if train:
        return
    batch_size = last_target.size(0)
    tracked = self.learn.loss_func.metrics
    for name in self.names:
        # Accumulate batch-size-weighted metric values for later averaging.
        self.metrics[name] += batch_size * tracked[name].detach().cpu()
    self.nums += batch_size
Update the metrics if not `train`
# Delete the given menu: a top-level menu (no parent) is also removed from
# this object's `menus` registry by its name, and the menu's own `_delete`
# hook is invoked.
# NOTE(review): the source line has lost its newlines/indentation, so it is
# ambiguous whether `menu._delete()` was inside the `if menu.parent is None`
# body or executed unconditionally after it — confirm against upstream
# before reformatting.
def delete_menu(self, menu): if menu.parent is None: del self.menus[menu.name()] menu._delete()
Delete the specified menu :param menu: :type menu: :returns: :rtype: :raises:
# Register the heron config-path argument on `parser`, defaulting to the
# conf dir under the heron install dir, and return the parser.
# NOTE(review): the option string (first positional of add_argument) and the
# surrounding pieces of the metavar text have been lost from the source
# (`parser.add_argument( , metavar= + ... + , ...)` is not valid Python);
# recover them from the upstream heron CLI before running this.
def add_config(parser): default_config_path = config.get_heron_conf_dir() parser.add_argument( , metavar= + default_config_path + , default=os.path.join(config.get_heron_dir(), default_config_path)) return parser
add config
def makeResetPacket(ID, param):
    """Build a factory-reset packet for an XL-320 servo.

    Resets a servo to one of 3 reset states:
        XL320_RESET_ALL                  = 0xFF
        XL320_RESET_ALL_BUT_ID           = 0x01
        XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02

    :param ID: servo id the packet is addressed to
    :param param: one of the 3 reset codes above
    :raises Exception: if `param` is not a valid reset code
    """
    if param not in [0x01, 0x02, 0xff]:
        # FIX: the exception message template was missing from the source.
        raise Exception('Invalid reset parameter: {}'.format(param))
    # NOTE(review): `param` is validated but the payload sent is always [1];
    # that looks suspicious — confirm against the upstream pyxl320 library.
    pkt = makePacket(ID, xl320.XL320_RESET, None, [1])
    return pkt
Resets a servo to one of 3 reset states: XL320_RESET_ALL = 0xFF XL320_RESET_ALL_BUT_ID = 0x01 XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
# Process kube events and emit one Datadog event per kube event whose
# involved object lives in a monitored namespace. Namespace filtering is
# done here (instead of KubeEventRetriever) to avoid interfering with
# service discovery; the namespace list comes from the instance config,
# a legacy single-namespace key, and an optional regexp matched against
# namespaces fetched from the Kubernetes API.
# NOTE(review): nearly every string literal (config keys, log/format
# templates, dict keys of the emitted event) has been stripped from this
# source line — e.g. `instance.get(, DEFAULT_NAMESPACES)` and the dict
# `{ : event_ts, ...}` — so the code is not runnable as-is; the keys must
# be recovered from the upstream datadog kubernetes check.
def _update_kube_events(self, instance, pods_list, event_items): node_ip, node_name = self.kubeutil.get_node_info() self.log.debug(.format(node_name, node_ip)) k8s_namespaces = instance.get(, DEFAULT_NAMESPACES) if not isinstance(k8s_namespaces, list): self.log.warning() k8s_namespaces = DEFAULT_NAMESPACES if in instance and instance.get() not in (None, ): self.log.warning(namespace namespacesnamespace_name_regexp) k8s_namespaces.append(instance.get()) if self.k8s_namespace_regexp: namespaces_endpoint = .format(self.kubeutil.kubernetes_api_url) self.log.debug( % namespaces_endpoint) namespaces = self.kubeutil.retrieve_json_auth(namespaces_endpoint).json() for namespace in namespaces.get(, []): name = namespace.get(, {}).get(, None) if name and self.k8s_namespace_regexp.match(name): k8s_namespaces.append(name) k8s_namespaces = set(k8s_namespaces) for event in event_items: event_ts = calendar.timegm(time.strptime(event.get(), )) involved_obj = event.get(, {}) if involved_obj.get(, ) not in k8s_namespaces: continue tags = self.kubeutil.extract_event_tags(event) tags.extend(instance.get(, [])) title = .format(involved_obj.get(), event.get(), node_name) message = event.get() source = event.get() k8s_event_type = event.get() alert_type = K8S_ALERT_MAP.get(k8s_event_type, ) if source: message += .format(source.get(, ), source.get(, )) msg_body = "%%%\n{}\n```\n{}\n```\n%%%".format(title, message) dd_event = { : event_ts, : node_ip, : EVENT_TYPE, : title, : msg_body, : EVENT_TYPE, : alert_type, : .format(involved_obj.get()), : tags, } self.event(dd_event)
Process kube events and send ddog events The namespace filtering is done here instead of KubeEventRetriever to avoid interfering with service discovery
def send(device_id, description, **kwargs):
    """Send a push notification via parse.com's REST push API.

    Site: http://parse.com
    API: https://www.parse.com/docs/push_guide#scheduled/REST

    :param device_id: value matched against the installation's user_id
    :param description: alert text shown in the notification
    :raises ParseComError: on a non-200 response or an API-reported error
    :return: True on success
    """
    headers = {
        "X-Parse-Application-Id": settings.PARSE_APP_ID,
        "X-Parse-REST-API-Key": settings.PARSE_API_KEY,
        "User-Agent": "DBMail/%s" % get_version(),
        "Content-type": "application/json",
    }
    data = {
        "where": {
            "user_id": device_id,
        },
        "data": {
            "alert": description,
            "title": kwargs.pop("event")
        }
    }
    # FIX: the kwarg key was missing from the source; restore the
    # conventional "data" key for the optional extra payload — TODO confirm
    # against upstream django-db-mailer.
    _data = kwargs.pop("data", None)
    if _data is not None:
        data.update(_data)

    http = HTTPSConnection(kwargs.pop("api_url", "api.parse.com"))
    http.request(
        "POST", "/1/push",
        headers=headers,
        body=dumps(data))
    response = http.getresponse()
    if response.status != 200:
        raise ParseComError(response.reason)
    body = loads(response.read())
    # FIX: the subscript keys were missing; parse.com reports failures in the
    # "error" field of the JSON body — TODO confirm against upstream.
    if body['error']:
        raise ParseComError(body['error'])
    return True
Site: http://parse.com API: https://www.parse.com/docs/push_guide#scheduled/REST Desc: Best app for system administrators
def add_group(self, groupname, statements):
    """Create a named host group with the given statements on the server.

    @type groupname: bytes
    @type statements: str
    @raises OmapiError: if the server does not acknowledge the update
    """
    message = OmapiMessage.open(b"group")
    message.message.append(("create", struct.pack("!I", 1)))
    for attr, value in (("name", groupname), ("statements", statements)):
        message.obj.append((attr, value))
    reply = self.query_server(message)
    if reply.opcode != OMAPI_OP_UPDATE:
        raise OmapiError("add group failed")
Adds a group @type groupname: bytes @type statements: str
def _register_plotter(cls, identifier, module, plotter_name, plotter_cls=None): if plotter_cls is not None: def get_x(self): return self(plotter_cls) else: def get_x(self): return self(getattr(import_module(module), plotter_name)) setattr(cls, identifier, property(get_x, doc=( "List of data arrays that are plotted by :class:`%s.%s`" " plotters") % (module, plotter_name))) cls._registered_plotters[identifier] = (module, plotter_name)
Register a plotter in the :class:`Project` class to easy access it Parameters ---------- identifier: str Name of the attribute that is used to filter for the instances belonging to this plotter module: str The module from where to import the `plotter_name` plotter_name: str The name of the plotter class in `module` plotter_cls: type The imported class of `plotter_name`. If None, it will be imported when it is needed
# Replace the currently selected occurrence with `text` (falling back to the
# replace line-edit's text when `text` is None or a bool, as happens when a
# Qt signal passes a checked-state). Temporarily disconnects the editor's
# textChanged signal so the replacement does not retrigger a search, moves
# a cursor over the occurrence span, inserts the replacement, shifts the
# remaining occurrence offsets by the length delta, advances selection and
# refreshes counters/labels/buttons. Returns True on success, False when no
# occurrence is left (IndexError). Left byte-identical: the cursor/signal
# sequencing is order-critical and unsafe to restyle blind.
def replace(self, text=None): if text is None or isinstance(text, bool): text = self.lineEditReplace.text() current_occurences = self._current_occurrence() occurrences = self.get_occurences() if current_occurences == -1: self.select_next() current_occurences = self._current_occurrence() try: try: self.editor.textChanged.disconnect(self.request_search) except (RuntimeError, TypeError): pass occ = occurrences[current_occurences] cursor = self.editor.textCursor() cursor.setPosition(occ[0]) cursor.setPosition(occ[1], cursor.KeepAnchor) len_to_replace = len(cursor.selectedText()) len_replacement = len(text) offset = len_replacement - len_to_replace cursor.insertText(text) self.editor.setTextCursor(cursor) self._remove_occurrence(current_occurences, offset) current_occurences -= 1 self._set_current_occurrence(current_occurences) self.select_next() self.cpt_occurences = len(self.get_occurences()) self._update_label_matches() self._update_buttons() return True except IndexError: return False finally: self.editor.textChanged.connect(self.request_search)
Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replace properly, False if there is no more occurrences to replace.
def get_value(self, series, key):
    """Look up the element of `series` located at `key` in this index.

    we always want to get an index value, never a value
    """
    if not is_scalar(key):
        raise InvalidIndexError
    scalar_key = com.values_from_object(key)
    position = self.get_loc(scalar_key)
    return com.values_from_object(series)[position]
we always want to get an index value, never a value
# Read /etc/nago/nago.ini and return a dict mapping each section token to a
# Node populated with that section's key/value pairs.
# NOTE(review): the section skip-list has been emptied in this source
# (`if section in []: continue` never skips anything); the original names of
# the excluded sections (likely the main/config section) were lost — confirm
# against upstream nago before relying on this.
def get_nodes(): cfg_file = "/etc/nago/nago.ini" config = ConfigParser.ConfigParser() config.read(cfg_file) result = {} for section in config.sections(): if section in []: continue token = section node = Node(token) for key, value in config.items(token): node[key] = value result[token] = node return result
Returns all nodes in a list of dicts format
def get_imports(self, module, return_fqn=False):
    """Return the set of imported modules that belong to this project.

    :param module: PyModule whose imports are scanned
    :param return_fqn: if True return dotted fully-qualified names,
        otherwise module file paths
    :return: set of str
    """
    imports = set()
    raw_imports = ast_imports(module.path)
    for import_entry in raw_imports:
        # Entry layout: (from-part, name-part, ..., relative-import level).
        full = ".".join(s for s in import_entry[:2] if s)
        import_level = import_entry[3]
        if import_level:
            # FIX: the join separator was missing from the source; fqn
            # components are joined with '.' to form the dotted module name.
            intra = '.'.join(module.fqn[:-import_level] + [full])
            imported = self._get_imported_module(intra)
        else:
            imported = self._get_imported_module(full)
        if imported:
            if return_fqn:
                imports.add('.'.join(imported.fqn))
            else:
                imports.add(imported.path)
    return imports
return set of imported modules that are in self :param module: PyModule :return: (set - str) of path names
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator (backport of functools.lru_cache).

    If *maxsize* is None the LRU features are disabled and the cache can
    grow without bound. If *typed* is True, arguments of different types are
    cached separately. Arguments must be hashable. The wrapper exposes
    ``cache_info()`` and ``cache_clear()``.

    FIX(review): the wrapper-function bodies were missing from this source
    (it jumped from ``lock = RLock()`` straight into the eviction code);
    reconstructed here following the standard CPython implementation, using
    one-element lists (``hits[0]`` etc.) in place of ``nonlocal`` as the
    surviving fragments did.
    """
    if maxsize is not None and not isinstance(maxsize, int):
        raise TypeError('Expected maxsize to be an integer or None')

    sentinel = object()          # unique marker for "not in cache"
    make_key = _make_key
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # link fields of the circular list

    def decorating_function(user_function):
        cache = {}
        hits = [0]
        misses = [0]
        full = [False]
        cache_get = cache.get
        lock = RLock()
        # `r[0]` is the root of the circular doubly-linked LRU list.
        root = [None, None, None, None]
        root[PREV] = root[NEXT] = root
        r = [root]

        if maxsize == 0:
            def wrapper(*args, **kwds):
                # No caching at all: just count misses.
                misses[0] += 1
                return user_function(*args, **kwds)
        elif maxsize is None:
            def wrapper(*args, **kwds):
                # Unbounded cache: plain dict lookup, no LRU bookkeeping.
                key = make_key(args, kwds, typed)
                result = cache_get(key, sentinel)
                if result is not sentinel:
                    hits[0] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                misses[0] += 1
                return result
        else:
            def wrapper(*args, **kwds):
                key = make_key(args, kwds, typed)
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # Move the hit link to the front (most recent).
                        link_prev, link_next, _key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = r[0][PREV]
                        last[NEXT] = r[0][PREV] = link
                        link[PREV] = last
                        link[NEXT] = r[0]
                        hits[0] += 1
                        return result
                result = user_function(*args, **kwds)
                with lock:
                    if key in cache:
                        # A racing call already stored it; nothing to do.
                        pass
                    elif full[0]:
                        # Reuse the oldest link: root becomes the new entry,
                        # the old head becomes the new root.
                        oldroot = r[0]
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        r[0] = oldroot[NEXT]
                        oldkey = r[0][KEY]
                        r[0][KEY] = r[0][RESULT] = None
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        last = r[0][PREV]
                        link = [last, r[0], key, result]
                        last[NEXT] = r[0][PREV] = cache[key] = link
                        full[0] = (len(cache) >= maxsize)
                    misses[0] += 1
                return result

        def cache_info():
            """Report cache statistics."""
            with lock:
                return _CacheInfo(hits[0], misses[0], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics."""
            with lock:
                cache.clear()
                rt = r[0]
                rt[:] = [rt, rt, None, None]
                hits[0] = 0
                misses[0] = 0
                full[0] = False

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
Least-recently-used cache decorator, which is a backport of the same function in Python >= 3.2. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
def fix_flags(self, flags):
    """Fixes standard TensorBoard CLI flags to parser.

    Validates mutually-exclusive/required flag combinations and strips a
    trailing '/' from ``flags.path_prefix``.

    :raises base_plugin.FlagsError: on an invalid flag combination
    """
    FlagsError = base_plugin.FlagsError
    if flags.version_tb:
        pass
    elif flags.inspect:
        # FIX: the error messages were missing from the source; restored
        # with descriptive text (wording may differ from upstream — TODO
        # confirm against TensorBoard).
        if flags.logdir and flags.event_file:
            raise FlagsError(
                'Must specify either --logdir or --event_file, but not both')
        if not (flags.logdir or flags.event_file):
            raise FlagsError('Must specify either --logdir or --event_file')
    elif not flags.db and not flags.logdir:
        raise FlagsError(
            'A logdir or db must be specified, e.g. `tensorboard --logdir '
            'mylogdir` or `tensorboard --db sqlite:~/.tensorboard.db`')
    # FIX: endswith() was called with no argument (a TypeError at runtime);
    # the [:-1] slice shows a single trailing character being stripped, i.e.
    # a trailing '/'.
    if flags.path_prefix.endswith('/'):
        flags.path_prefix = flags.path_prefix[:-1]
Fixes standard TensorBoard CLI flags to parser.
# Generate a parser from a yapps grammar file: derive the output filename
# from X.g -> X.py, split the input at a divider to keep any hand-written
# postparser section, build and validate the ParserDescription, apply
# option flags, then either dump diagnostics or write the generated parser.
# NOTE(review): this is Python 2 code (`print x, y` statements) and every
# string literal has been stripped (`outputfilename=`, `inputfilename[-2:] ==`,
# `DIVIDER =`, `open(inputfilename, )`, the print labels) — the code is not
# runnable as-is; recover the literals from upstream yapps2.
def generate(inputfilename, outputfilename=, dump=0, **flags): if not outputfilename: if inputfilename[-2:] == : outputfilename = inputfilename[:-2] + else: raise Exception("Missing output filename") print , inputfilename print , outputfilename DIVIDER = preparser, postparser = None, None s = open(inputfilename, ).read() f = find(s, DIVIDER) if f >= 0: s, postparser = s[:f], + s[f + len(DIVIDER):] p = ParserDescription(ParserDescriptionScanner(s)) if not p: return t = wrap_error_reporter(p, ) if not t: return if preparser is not None: t.preparser = preparser if postparser is not None: t.postparser = postparser for f in t.options.keys(): for opt, _, _ in yapps_options: if f == opt: break else: print , f for f in flags.keys(): t.options[f] = flags[f] if dump: t.dump_information() else: t.output = open(outputfilename, ) t.generate_output()
Generate a grammar, given an input filename (X.g) and an output filename (defaulting to X.py).
def bond_initialize_canonical_averages(
    canonical_statistics, **kwargs
):
    """Initialize the canonical averages from a single-run cluster statistics.

    FIX(review): the structured-array field names had been stripped from this
    source (``ret[] = 1`` etc.); they are restored verbatim from this
    function's own docstring (number_of_runs, *_mean, *_m2 fields).

    Parameters
    ----------
    canonical_statistics : 1-D structured ndarray
        Canonical statistics for a range of occupation probabilities ``p``;
        dtype as produced by `canonical_statistics_dtype`.

    Returns
    -------
    ret : structured ndarray
        dtype from `canonical_averages_dtype`; means initialized from the
        single run, all second moments (m2) zero, number_of_runs == 1.
    """
    # The percolation_probability field is only present when the statistics
    # were computed with spanning-cluster detection.
    spanning_cluster = (
        'percolation_probability' in canonical_statistics.dtype.names
    )
    ret = np.empty_like(
        canonical_statistics,
        dtype=canonical_averages_dtype(spanning_cluster=spanning_cluster),
    )
    ret['number_of_runs'] = 1
    if spanning_cluster:
        ret['percolation_probability_mean'] = (
            canonical_statistics['percolation_probability']
        )
        ret['percolation_probability_m2'] = 0.0
    ret['max_cluster_size_mean'] = (
        canonical_statistics['max_cluster_size']
    )
    ret['max_cluster_size_m2'] = 0.0
    ret['moments_mean'] = canonical_statistics['moments']
    ret['moments_m2'] = 0.0
    return ret
Initialize the canonical averages from a single-run cluster statistics Parameters ---------- canonical_statistics : 1-D structured ndarray Typically contains the canonical statistics for a range of values of the occupation probability ``p``. The dtype is the result of `canonical_statistics_dtype`. Returns ------- ret : structured ndarray The dype is the result of `canonical_averages_dtype`. ret['number_of_runs'] : 1-D ndarray of int Equals ``1`` (initial run). ret['percolation_probability_mean'] : 1-D array of float Equals ``canonical_statistics['percolation_probability']`` (if ``percolation_probability`` is present) ret['percolation_probability_m2'] : 1-D array of float Each entry is ``0.0`` ret['max_cluster_size_mean'] : 1-D array of float Equals ``canonical_statistics['max_cluster_size']`` ret['max_cluster_size_m2'] : 1-D array of float Each entry is ``0.0`` ret['moments_mean'] : 2-D array of float Equals ``canonical_statistics['moments']`` ret['moments_m2'] : 2-D array of float Each entry is ``0.0`` See Also -------- canonical_averages_dtype bond_canonical_statistics
def get_grid(self):
    """Standardize the layout of the table into grids."""
    mention_list, line_list = _split_text_n_lines(self.elems)
    # Order mentions top-to-bottom, then left-to-right.
    mention_list.sort(key=lambda mention: (mention.yc_grid, mention.xc))
    return Grid(mention_list, line_list, self)
Standardize the layout of the table into grids
def _identify_dict(core):
    """Specification for a dictionary.

    Returns ``(core_copy, dim, shape, dtype)`` where dim/shape/dtype are
    derived from the first key (in chaospy sort order) and its value; an
    empty dict maps to ``({}, 1, (), int)``.
    """
    if not core:
        return {}, 1, (), int
    core = core.copy()
    first_key = sorted(core, key=chaospy.poly.base.sort_key)[0]
    sample = numpy.array(core[first_key])
    return core, len(first_key), sample.shape, sample.dtype
Specification for a dictionary.
# Retrieve the byte data of an appdata record from Dropbox: validate the
# inputs, download the file at the record path, map a Dropbox LookupError to
# a record-not-found exception, and optionally decrypt the payload with
# `secret_key` via labpack cryptolab.
# NOTE(review): the string literals have been stripped from this source —
# the `secret_key=` default, the `title =` / `file_path =` / error-message
# % templates and the `input_fields` dict keys are all missing, so the code
# is not runnable as-is; recover them from upstream labpack.
def load(self, record_key, secret_key=): title = % self.__class__.__name__ input_fields = { : record_key, : secret_key } for key, value in input_fields.items(): if value: object_title = % (title, key, str(value)) self.fields.validate(value, % key, object_title) file_path = % record_key try: metadata, response = self.dropbox.files_download(file_path) except Exception as err: if str(err).find("LookupError(") > -1: raise Exception( % (title, record_key)) else: raise DropboxConnectionError(title) record_data = response.content if secret_key: from labpack.encryption import cryptolab record_data = cryptolab.decrypt(record_data, secret_key) return record_data
a method to retrieve byte data of appdata record :param record_key: string with name of record :param secret_key: [optional] string used to decrypt data :return: byte data for record body
def broken_seqs(ol, break_points):
    """Split sequence `ol` into consecutive sections at `break_points`.

    ol = initRange(0,20,1)
    break_points = [1,6,14,9]
    secs = broken_seqs(ol,break_points)
    forEach(secs,print)
    """
    # rangize turns the break points into (start, end) slice pairs.
    ranges = rangize(list(break_points), len(ol))
    return [ol[start:end] for start, end in ranges]
ol = initRange(0,20,1) ol break_points = [1,6,14,9] secs = broken_seqs(ol,break_points) forEach(secs,print)
def MakeType(name, base_classes, namespace):
    """A compatibility wrapper for the `type` built-in function.

    Python 2's `type` wants a byte-string class name while Python 3 wants
    unicode; accept human-readable text and encode only where required.
    Once Python 2 support is dropped this can be replaced by `type` itself.

    Args:
      name: A name of the type to create.
      base_classes: A tuple of base classes to derive from.
      namespace: A dictionary of methods and fields for the type.

    Returns:
      A new type with the specified parameters.
    """
    precondition.AssertType(name, str)
    class_name = name.encode("ascii") if PY2 else name
    return type(class_name, base_classes, namespace)
A compatibility wrapper for the `type` built-in function. In Python 2 `type` (used as a type constructor) requires the name argument to be a `bytes` object whereas in Python 3 it is required to be an `unicode` object. Since class name is human readable text rather than arbitrary stream of bytes, the Python 3 behaviour is considered to be the sane one. Once support for Python 2 is dropped all invocations of this call can be replaced with the `type` built-in. Args: name: A name of the type to create. base_classes: A tuple of base classes that the returned type is supposed to derive from. namespace: A dictionary of methods and fields that the returned type is supposed to contain. Returns: A new type with specified parameters.
def OS_filter(x, h, N, mode=0):
    """Overlap-and-save transform-domain FIR filtering.

    Classical overlap-and-save filtering of `x` with the length-P FIR
    filter `h`, using N-point FFT blocks (N > P, typically a power of two).

    Parameters
    ----------
    x : input signal ndarray
    h : FIR filter coefficients ndarray of length P
    N : FFT size > P
    mode : 0 or 1; when 1 also return the per-frame diagnostic matrix

    Returns
    -------
    y : filtered output ndarray
    y_mat : (mode == 1 only) rows are the individual overlap outputs
    """
    P = len(h)
    # Prepend P-1 zeros so the first frame has a full filter history.
    x = np.hstack((np.zeros(P - 1), x))
    L = N - P + 1                      # new samples consumed per frame
    Nx = len(x)
    Nframe = int(np.ceil(Nx / float(L)))
    # Zero-pad the tail to an integer number of frames.
    x = np.hstack((x, np.zeros(Nframe * L - Nx)))

    y = np.zeros(int(Nframe * N))
    y_mat = np.zeros((Nframe, int(Nframe * N)))
    H = fft.fft(h, N)
    for frame in range(Nframe):
        segment = x[frame * L:frame * L + N]
        Yk = H * fft.fft(segment, N)
        yk = np.real(fft.ifft(Yk))
        # Only the last L samples of each frame are alias-free.
        y[frame * L + P - 1:frame * L + N] = yk[P - 1:]
        y_mat[frame, frame * L:frame * L + N] = yk
    if mode == 1:
        return y[P - 1:Nx], y_mat[:, P - 1:Nx]
    return y[P - 1:Nx]
Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1)
def feeling_lucky(cls, obj):
    """Tries to convert given object to an UTC timestamp in ms, based on its type."""
    if isinstance(obj, six.string_types):
        return cls.from_str(obj)
    elif isinstance(obj, six.integer_types) and obj <= MAX_POSIX_TIMESTAMP:
        return cls.from_posix_timestamp(obj)
    elif isinstance(obj, datetime):
        return cls.from_datetime(obj)
    else:
        # FIX: the message template was garbled in the source
        # (u"Don{}'".format(obj)); restored with sensible wording — TODO
        # confirm exact text against upstream.
        raise ValueError(
            u"Don't know how to convert '{}' to an UTC timestamp".format(obj)
        )
Tries to convert given object to an UTC timestamp in ms, based on its type.
def make_argument_subquery(arg):
    """Decide when a Join argument needs to be wrapped in a subquery."""
    # Grouped/projected expressions, or anything carrying a restriction,
    # must be isolated in a subquery before joining.
    needs_wrapping = isinstance(arg, (GroupBy, Projection)) or arg.restriction
    if needs_wrapping:
        return Subquery.create(arg)
    return arg
Decide when a Join argument needs to be wrapped in a subquery
def highlight_cell_surroundings(self, target_y, target_x):
    """Highlight the 8 cells around a target to make it easier to see.

    Assumes the target is at least 1 cell inside the grid boundary on all
    sides; prints a warning for each edge it is too close to.
    """
    grid = self.world.grd
    if target_y < 1:
        print("target too close to top")
    if target_y > grid.grid_height - 1:
        print("target too close to bottom")
    if target_x < 1:
        print("target too close to left")
    # FIX: the original tested `target_x < grid_width`, which fired for
    # every in-bounds column; mirror the bottom-edge test instead.
    if target_x > grid.grid_width - 1:
        print("target too close to right")
    # TODO(review): the highlight tile character was lost from the source;
    # '*' is a placeholder — confirm against upstream.
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dy == 0 and dx == 0:
                continue
            grid.set_tile(target_y + dy, target_x + dx, '*')
highlights the cells around a target to make it simpler to see on a grid. Currently assumes the target is within the boundary by 1 on all sides
def get_objective_hierarchy_design_session(self, proxy):
    """Gets the session for designing objective hierarchies.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ObjectiveHierarchyDesignSession)
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_objective_hierarchy_design()``
            is ``false``
    """
    if self.supports_objective_hierarchy_design():
        return sessions.ObjectiveHierarchyDesignSession(
            proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
Gets the session for designing objective hierarchies. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveHierarchyDesignSession) - an ``ObjectiveHierarchyDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_hierarchy_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_hierarchy_design()`` is ``true``.*
def get_error(exc):
    """Return the appropriate HTTP status code according to the Exception/Error."""
    if isinstance(exc, HTTPError):
        # Propagate the remote status and decoded body text.
        response = exc.response
        return response.status_code, text(response.content)
    status = 400 if isinstance(exc, Timeout) else 500
    return status, exc
Return the appropriate HTTP status code according to the Exception/Error.
def get_top_segmentations(table, n):
    """Yield the `n` best-scoring stroke segmentations.

    Parameters
    ----------
    table : matrix of probabilities
        Cell (i, j) gives the probability that strokes i and j belong to
        the same symbol.
    n : int
        Number of best segmentations to yield, as [segmentation, score].
    """
    strokes = list(range(len(table)))
    best = TopFinder(n)
    for segmentation in all_segmentations(strokes):
        best.push(segmentation, score_segmentation(segmentation, table))
    for segmentation, score in best:
        yield [normalize_segmentation(segmentation), score]
Parameters ---------- table : matrix of probabilities Each cell (i, j) of `table` gives the probability that i and j are in the same symbol. n : int Number of best segmentations which get returned
def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search):
    """Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search"""
    # FIX: the permission name was missing from the source
    # (`self._can()`); search pass-throughs authorize with 'search' —
    # TODO confirm against upstream dlkit.
    if not self._can('search'):
        raise PermissionDenied()
    return self._provider_session.get_sequence_rule_enablers_by_search(
        sequence_rule_enabler_query, sequence_rule_enabler_search)
Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search
def cookies(self):
    """Retrieve the cookies header from all the users who visited."""
    # FIX: the JSON-field key was missing from the source; per the
    # docstring this reads each visitor's 'Cookie' request header, and
    # rows without one are filtered out.
    return (self.get_query()
            .select(PageView.ip, PageView.headers['Cookie'])
            .where(PageView.headers['Cookie'].is_null(False))
            .tuples())
Retrieve the cookies header from all the users who visited.
def m2i(self, pkt, m):
    """Try to parse one of the TLS subprotocols (ccs, alert, handshake or
    application_data). This is used inside a loop managed by .getfield().
    """
    cls = Raw
    if pkt.type == 22:
        # Handshake: the first byte selects the handshake message class.
        if len(m) >= 1:
            cls = _tls_handshake_cls.get(orb(m[0]), Raw)
    else:
        cls = {20: TLSChangeCipherSpec,
               21: TLSAlert,
               23: TLSApplicationData}.get(pkt.type, Raw)
    if cls is Raw:
        return Raw(m)
    try:
        return cls(m, tls_session=pkt.tls_session)
    except Exception:
        # Fall back to a raw layer unless we are debugging the dissector.
        if conf.debug_dissector:
            raise
        return Raw(m)
Try to parse one of the TLS subprotocols (ccs, alert, handshake or application_data). This is used inside a loop managed by .getfield().
def as_dictlist(self):
    """Return the values as a list of row/col/value dicts.

    [
        {"row": "row_a", "col": "col_a", "value": 1},
        ...
    ]
    """
    return [
        {"row": row, "col": col, "value": self.values_by_row[ri][ci]}
        for ri, row in enumerate(self.row_index)
        for ci, col in enumerate(self.col_index)
    ]
Returns a dictlist with values [ { "row": "row_a", "col": "col_a", "value": 1, } ]
def sapm_effective_irradiance(self, poa_direct, poa_diffuse,
                              airmass_absolute, aoi,
                              reference_irradiance=1000):
    """Compute SAPM effective irradiance for this system's module.

    Delegates to the module-level :py:func:`sapm_effective_irradiance`
    with ``self.module_parameters`` supplied as the module description.

    Parameters
    ----------
    poa_direct : numeric
        Direct irradiance incident upon the module.
    poa_diffuse : numeric
        Diffuse irradiance incident on the module.
    airmass_absolute : numeric
        Absolute airmass.
    aoi : numeric
        Angle of incidence in degrees.
    reference_irradiance : numeric, default 1000
        Reference irradiance by which to divide the input irradiance.

    Returns
    -------
    effective_irradiance : numeric
    """
    return sapm_effective_irradiance(
        poa_direct,
        poa_diffuse,
        airmass_absolute,
        aoi,
        self.module_parameters,
        reference_irradiance=reference_irradiance,
    )
Use the :py:func:`sapm_effective_irradiance` function, the input parameters, and ``self.module_parameters`` to calculate effective irradiance. Parameters ---------- poa_direct : numeric The direct irradiance incident upon the module. poa_diffuse : numeric The diffuse irradiance incident on module. airmass_absolute : numeric Absolute airmass. aoi : numeric Angle of incidence in degrees. reference_irradiance : numeric, default 1000 Reference irradiance by which to divide the input irradiance. Returns ------- effective_irradiance : numeric The SAPM effective irradiance.
def print_portfolio_info(returns, avg_rets, weights):
    """Print information on expected portfolio performance."""
    expected_return = (weights * avg_rets).sum()
    # Std-dev of the weighted per-period portfolio returns.
    expected_std = (weights * returns).sum(1).std()
    print("Optimal weights:\n{}\n".format(weights))
    print("Expected return: {}".format(expected_return))
    print("Expected variance: {}".format(expected_std ** 2))
    print("Expected Sharpe: {}".format(expected_return / expected_std))
Print information on expected portfolio performance.
def fcm_send_message(
        registration_id, title=None, body=None, icon=None, data=None,
        sound=None, badge=None, low_priority=False, condition=None,
        time_to_live=None, click_action=None, collapse_key=None,
        delay_while_idle=False, restricted_package_name=None, dry_run=False,
        color=None, tag=None, body_loc_key=None, body_loc_args=None,
        title_loc_key=None, title_loc_args=None, content_available=None,
        extra_kwargs=None, api_key=None, json_encoder=None, **kwargs):
    """Send a push notification to a single device via FCM.

    Thin wrapper over
    ``pyfcm.FCMNotification.notify_single_device``; the API key defaults to
    ``SETTINGS["FCM_SERVER_KEY"]``.

    Args:
        registration_id (str): FCM device registration ID.
        title/body/icon/sound/badge: notification presentation fields.
        data (dict): data message payload.
        extra_kwargs (dict): extra args forwarded to pyfcm.

    Returns:
        The pyfcm response dict from the FCM server.
    """
    # FIX: `extra_kwargs={}` was a mutable default argument (shared across
    # calls); use a None sentinel and substitute a fresh dict so the
    # downstream call still receives {} exactly as before.
    if extra_kwargs is None:
        extra_kwargs = {}
    if api_key is None:
        api_key = SETTINGS.get("FCM_SERVER_KEY")

    push_service = FCMNotification(api_key=api_key, json_encoder=json_encoder)
    return push_service.notify_single_device(
        registration_id=registration_id,
        message_title=title,
        message_body=body,
        message_icon=icon,
        data_message=data,
        sound=sound,
        badge=badge,
        collapse_key=collapse_key,
        low_priority=low_priority,
        condition=condition,
        time_to_live=time_to_live,
        click_action=click_action,
        delay_while_idle=delay_while_idle,
        restricted_package_name=restricted_package_name,
        dry_run=dry_run,
        color=color,
        tag=tag,
        body_loc_key=body_loc_key,
        body_loc_args=body_loc_args,
        title_loc_key=title_loc_key,
        title_loc_args=title_loc_args,
        content_available=content_available,
        extra_kwargs=extra_kwargs,
        **kwargs
    )
Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py: Send push notification to a single device Args: registration_id (str): FCM device registration IDs. body (str): Message string to display in the notification tray data (dict): Data message payload to send alone or with the notification message sound (str): The sound file name to play. Specify "Default" for device default sound. Keyword Args: collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to ``None``. delay_while_idle (bool, optional): If ``True`` indicates that the message should not be sent until the device becomes active. time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to ``None`` which uses the FCM default of 4 weeks. low_priority (boolean, optional): Whether to send notification with the low priority flag. Defaults to ``False``. restricted_package_name (str, optional): Package name of the application where the registration IDs must match in order to receive the message. Defaults to ``None``. dry_run (bool, optional): If ``True`` no message will be sent but request will be tested. Returns: :tuple:`multicast_id(long), success(int), failure(int), canonical_ids(int), results(list)`: Response from FCM server. Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it.
def parameter(self, parameter_id):
    """Return the specified global parameter (the entire object, not just the value)"""
    for _group, params in self.parameters:
        match = next((p for p in params if p.id == parameter_id), None)
        if match is not None:
            return match
    raise KeyError("No such parameter exists: " + parameter_id)
Return the specified global parameter (the entire object, not just the value)
# Close the serial-port connection if one is open: stop the reader, log,
# close the port and mark it closed by setting `self.port` to False.
# NOTE(review): the log call is corrupted — the format string lost its %s
# placeholder ("Close port " % self.comport raises TypeError at runtime)
# and the `extra={: }` dict keys were stripped; recover both from upstream.
def close_connection(self): if self.port: self.stop() self.logger.debug("Close port " % self.comport, extra={: }) self.port.close() self.port = False
Closes serial port connection. :return: Nothing
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
    """Analogous to :meth:`plot_series` but will plot residuals."""
    n_resid = len(self.f_cb(*self.pre_process(xres[0], params)))
    residuals = np.empty((xres.shape[0], n_resid))
    new_params = np.array(params)
    for row_idx, row in enumerate(xres):
        new_params[varied_idx] = varied_data[row_idx]
        # NOTE(review): `new_params` is updated but `params` is what gets
        # passed to pre_process — looks suspicious; confirm upstream before
        # changing (behavior preserved here).
        residuals[row_idx, :] = self.f_cb(*self.pre_process(row, params))
    return self.plot_series(residuals, varied_data, varied_idx, **kwargs)
Analogous to :meth:`plot_series` but will plot residuals.
# NOTE(review): this function's source is corrupted: string literals (the
# default ``threshold_mode``/``mode`` values, the five mode names in
# ``valid``, the keys of every returned Bunch, and all error-message
# text) have been stripped, so the body below is not valid Python as it
# stands.  Recover the original constants from upstream before editing.
# Surviving logic outline: derive std/mean thresholds from the
# grid-search statistics returned by ``calculate_optimisation_stats``
# using the requested threshold mode (median / mean / KDE maximum /
# first KDE maximum / bayes_mvs / callable / explicit pair), apply
# ``threshold_mult``, and fall through the remaining ``valid`` modes
# until some region passes; then select the longest contiguous passing
# region of at least ``min_points`` and return its limits, a boolean
# filter over ``d.Time``, and the statistics in a Bunch.
# NOTE(review): ``if all(np.isnan(msmeans).flat) or all(np.isnan(msmeans).flat)``
# tests msmeans twice; the second operand was presumably msstds — TODO confirm.
def signal_optimiser(d, analytes, min_points=5, threshold_mode=, threshold_mult=1., x_bias=0, weights=None, ind=None, mode=): errmsg = if isinstance(analytes, str): analytes = [analytes] if ind is None: ind = np.full(len(d.Time), True) if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points): errmsg = .format(min_points) return Bunch({: np.nan, : np.nan, : np.nan, : np.nan, : np.nan, : ind, : threshold_mode, : min_points, : analytes, : np.nan, : np.nan, : weights, : False, : errmsg}), errmsg msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias) if all(np.isnan(msmeans).flat) or all(np.isnan(msmeans).flat): errmsg = .format(min_points) return Bunch({: np.nan, : np.nan, : np.nan, : np.nan, : np.nan, : ind, : threshold_mode, : min_points, : analytes, : np.nan, : np.nan, : weights, : False, : errmsg}), errmsg valid = [, , , , ] n_under = 0 i = np.argwhere(np.array(valid) == threshold_mode)[0, 0] o_threshold_mode = threshold_mode while (n_under <= 0) & (i < len(valid)): if threshold_mode == : std_threshold = np.nanmedian(msstds) mean_threshold = np.nanmedian(msmeans) elif threshold_mode == : std_threshold = np.nanmean(msstds) mean_threshold = np.nanmean(msmeans) elif threshold_mode == : mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat) xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100) mdf = mkd.pdf(xm) mean_threshold = xm[np.argmax(mdf)] rkd = gaussian_kde(msstds[~np.isnan(msstds)]) xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100) rdf = rkd.pdf(xr) std_threshold = xr[np.argmax(rdf)] elif threshold_mode == : mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat) xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100) mdf = mkd.pdf(xm) inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] & np.r_[mdf[:-1] > mdf[1:], False] & (mdf > 0.25 * mdf.max())) mean_threshold = xm[np.min(inds)] rkd = 
gaussian_kde(msstds[~np.isnan(msstds)]) xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100) rdf = rkd.pdf(xr) inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] & np.r_[rdf[:-1] > rdf[1:], False] & (rdf > 0.25 * rdf.max())) std_threshold = xr[np.min(inds)] elif threshold_mode == : bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)]) std_threshold = bm.statistic bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)]) mean_threshold = bm.statistic elif callable(threshold_mode): std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten()) mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten()) else: try: mean_threshold, std_threshold = threshold_mode except: raise ValueError( + .join(valid) + ) if isinstance(threshold_mult, (int, float)): std_threshold *= threshold_mult mean_threshold *= threshold_mult elif len(threshold_mult) == 2: mean_threshold *= threshold_mult[0] std_threshold *= threshold_mult[1] else: raise ValueError() rind = (msstds < std_threshold) if mode == : mind = (msmeans < mean_threshold) else: mind = (msmeans > mean_threshold) ind = rind & mind n_under = ind.sum() if n_under == 0: i += 1 if i <= len(valid) - 1: threshold_mode = valid[i] else: errmsg = return Bunch({: np.nan, : np.nan, : np.nan, : np.nan, : np.nan, : ind, : threshold_mode, : min_points, : analytes, : np.nan, : np.nan, : weights, : False, : errmsg}), errmsg if i > 0: errmsg = "optimisation failed using threshold_mode=, falling back to ".format(o_threshold_mode, threshold_mode) passing = np.argwhere(ind) opt_n_points = passing[:, 0].max() opt_centre = passing[passing[:, 0] == opt_n_points, 1].min() opt_n_points += min_points if opt_n_points % 2 == 0: lims = (opt_centre - opt_n_points // 2, opt_centre + opt_n_points // 2) else: lims = (opt_centre - opt_n_points // 2, opt_centre + opt_n_points // 2 + 1) filt = np.zeros(d.Time.shape, dtype=bool) filt[lims[0]:lims[1]] = True return Bunch({: msmeans, : msstds, : mean_threshold, : 
std_threshold, : lims, : filt, : threshold_mode, : min_points, : analytes, : opt_centre, : opt_n_points, : weights, : True, : errmsg}), errmsg
Optimise data selection based on specified analytes. Identifies the longest possible contiguous data region in the signal where the relative standard deviation (std) and concentration of all analytes is minimised. Optimisation is performed via a grid search of all possible contiguous data regions. For each region, the mean std and mean scaled analyte concentration ('amplitude') are calculated. The size and position of the optimal data region are identified using threshold std and amplitude values. Thresholds are derived from all calculated stds and amplitudes using the method specified by `threshold_mode`. For example, using the 'kde_max' method, a probability density function (PDF) is calculated for std and amplitude values, and the threshold is set as the maximum of the PDF. These thresholds are then used to identify the size and position of the longest contiguous region where the std is below the threshold, and the amplitude is either below the threshold. All possible regions of the data that have at least `min_points` are considered. For a graphical demonstration of the action of signal_optimiser, use `optimisation_plot`. Parameters ---------- d : latools.D object An latools data object. analytes : str or array-like Which analytes to consider. min_points : int The minimum number of contiguous points to consider. threshold_mode : str The method used to calculate the optimisation thresholds. Can be 'mean', 'median', 'kde_max' or 'bayes_mvs', or a custom function. If a function, must take a 1D array, and return a single, real number. threshood_mult : float or tuple A multiplier applied to the calculated threshold before use. If a tuple, the first value is applied to the mean threshold, and the second is applied to the standard deviation threshold. Reduce this to make data selection more stringent. x_bias : float If non-zero, a bias is applied to the calculated statistics to prefer the beginning (if > 0) or end (if < 0) of the signal. Should be between zero and 1. 
weights : array-like of length len(analytes) An array of numbers specifying the importance of each analyte considered. Larger number makes the analyte have a greater effect on the optimisation. Default is None. ind : boolean array A boolean array the same length as the data. Where false, data will not be included. mode : str Whether to 'minimise' or 'maximise' the concentration of the elements. Returns ------- dict, str : optimisation result, error message
def pad_batch(features, batch_multiple):
    """Pad the batch dimension of every feature to the nearest multiple
    of ``batch_multiple``.

    Zero padding is appended along axis 0 only; when the batch size is
    already a multiple, no padding is added.  The amount is computed
    with tensor ops so dynamic batch sizes work.
    """
    any_feature = next(iter(features.values()))
    batch_size = tf.shape(any_feature)[0]
    mod = batch_size % batch_multiple
    # 1 when padding is needed, 0 otherwise.
    needs_pad = tf.cast(tf.cast(mod, tf.bool), tf.int32)
    batch_padding = batch_multiple * needs_pad - mod
    padded = {}
    for key, value in features.items():
        rank = len(value.shape)
        pad_spec = [[0, 0] for _ in range(rank)]
        pad_spec[0] = [0, batch_padding]
        padded[key] = tf.pad(value, pad_spec)
    return padded
Pad batch dim of features to nearest multiple of batch_multiple.
def deep_reload_hook(m):
    """Replacement for reload().

    Reloads module *m* (and, via the replaced import hook, its
    dependencies), using ``modules_reloading`` to guard against
    recursive reloads.

    Raises
    ------
    TypeError
        If *m* is not a module.
    ImportError
        If *m* (or its parent package) is not in ``sys.modules``.
    """
    if not isinstance(m, ModuleType):
        raise TypeError("reload() argument must be module")
    name = m.__name__
    if name not in sys.modules:
        raise ImportError("reload(): module %.200s not in sys.modules" % name)

    global modules_reloading
    try:
        # Already being reloaded (recursive call): return the in-progress module.
        return modules_reloading[name]
    except KeyError:
        # FIX: was a bare except; only a missing key means "not reloading".
        modules_reloading[name] = m

    # FIX: the separator argument to rfind was missing; dotted module
    # names split on '.'.
    dot = name.rfind('.')
    if dot < 0:
        subname = name
        path = None
    else:
        try:
            parent = sys.modules[name[:dot]]
        except KeyError:
            modules_reloading.clear()
            raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
        subname = name[dot+1:]
        path = getattr(parent, "__path__", None)

    try:
        # Temporarily restore the builtin import while locating the module.
        with replace_import_hook(original_import):
            fp, filename, stuff = imp.find_module(subname, path)
    finally:
        modules_reloading.clear()

    try:
        newm = imp.load_module(name, fp, filename, stuff)
    except:
        # Restore the old module on failure so callers keep a usable
        # object, then re-raise (bare except is intentional here).
        sys.modules[name] = m
        raise
    finally:
        if fp:
            fp.close()

    modules_reloading.clear()
    return newm
Replacement for reload().
def check_password(raw_password, enc_password):
    """Return True if *raw_password* matches the encoded password.

    ``enc_password`` is expected in ``algorithm$salt$hash`` form; the
    raw password is re-encrypted with the same algorithm and salt and
    the full encoded strings compared.  Handles encryption formats
    behind the scenes.
    """
    # FIX: the separator argument to split() was missing; encoded
    # passwords use '$' between algorithm, salt and hash.
    algo, salt, hsh = enc_password.split('$')
    return enc_password == encrypt_password(raw_password, algorithm=algo, salt=salt)
Returns a boolean of whether the raw_password was correct. Handles encryption formats behind the scenes.
def readinto(self, data):
    """Read data from the ring buffer into a user-provided buffer.

    This advances the read index after reading; calling
    :meth:`advance_read_index` is *not* necessary.

    :param data: The memory where the data should be stored.
    :type data: CData pointer or buffer
    :returns: The number of elements read, which may be less than the
        size of *data*.
    :rtype: int
    :raises ValueError: If the buffer size is not a multiple of the
        ring buffer's element size.
    """
    try:
        data = self._ffi.from_buffer(data)
    except TypeError:
        # Already a CData pointer; use it as-is.
        pass
    size, rest = divmod(self._ffi.sizeof(data), self.elementsize)
    if rest:
        # FIX: the original error message text had been stripped.
        raise ValueError('data size must be multiple of elementsize')
    return self._lib.PaUtil_ReadRingBuffer(self._ptr, data, size)
Read data from the ring buffer into a user-provided buffer. This advances the read index after reading; calling :meth:`advance_read_index` is *not* necessary. :param data: The memory where the data should be stored. :type data: CData pointer or buffer :returns: The number of elements read, which may be less than the size of *data*. :rtype: int
# NOTE(review): corrupted/truncated source -- ``parsed_uri`` is
# referenced but never assigned here (the original presumably split the
# URI first), and the ``os.name == `` comparison has lost its
# right-hand string literal (presumably 'nt' for Windows paths) --
# TODO recover from upstream before use.  Surviving intent: treat the
# string as a plain path on Windows, dispatch ssh/scp/sftp schemes to
# ``_parse_uri_ssh``, and reject unknown schemes.
def _parse_uri(uri_as_string): if os.name == : return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string) elif parsed_uri.scheme in smart_open_ssh.SCHEMES: return _parse_uri_ssh(parsed_uri) else: raise NotImplementedError( "unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string) )
Parse the given URI from a string. Supported URI schemes are: * file * hdfs * http * https * s3 * s3a * s3n * s3u * webhdfs .s3, s3a and s3n are treated the same way. s3u is s3 but without SSL. Valid URI examples:: * s3://my_bucket/my_key * s3://my_key:my_secret@my_bucket/my_key * s3://my_key:my_secret@my_server:my_port@my_bucket/my_key * hdfs:///path/file * hdfs://path/file * webhdfs://host:port/path/file * ./local/path/file * ~/local/path/file * local/path/file * ./local/path/file.gz * file:///home/user/file * file:///home/user/file.bz2 * [ssh|scp|sftp]://username@host//path/file * [ssh|scp|sftp]://username@host/path/file
def open(self):
    """Obtain a handle to the device information set describing all
    device interfaces currently present in the system.

    Calls ``SetupDiGetClassDevs`` for ``self.guid`` with
    ``DIGCF.PRESENT | DIGCF.DEVICEINTERFACE``; the opaque handle is
    stored on ``self.h_info`` and also returned.
    """
    flags = DIGCF.PRESENT | DIGCF.DEVICEINTERFACE
    self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None, flags)
    return self.h_info
Calls SetupDiGetClassDevs to obtain a handle to an opaque device information set that describes the device interfaces supported by all the USB collections currently installed in the system. The application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE in the Flags parameter passed to SetupDiGetClassDevs.
# NOTE(review): corrupted source -- the second argument to
# ``self._get_struct(origin, )`` has lost its string literal; recover
# the original tag before use.  Surviving intent: record the start
# time, clear any pause, merge a fresh data struct for *origin* into
# ``self.data``, push it on ``self.data_stack``, install
# ``self._trace`` via sys.settrace, and return it for use as a trace
# function.
def start(self, origin): self.start_time = time.time() self.pause_until = None self.data.update(self._get_struct(origin, )) self.data_stack.append(self.data) sys.settrace(self._trace) return self._trace
Start this Tracer. Return a Python function suitable for use with sys.settrace().
# NOTE(review): corrupted source -- the fill string in
# ``BenchmarkResult(*[ * w for w in COLUMN_WIDTHS])`` has been
# stripped (originally a one-character string repeated to each column
# width); recover it before use.  Surviving intent: generator that
# re-yields *results*, inserting a separator BenchmarkResult row
# whenever consecutive results belong to different benchmarks.
def insert_seperator_results(results): sepbench = BenchmarkResult(*[ * w for w in COLUMN_WIDTHS]) last_bm = None for r in results: if last_bm is None: last_bm = r.benchmark elif last_bm != r.benchmark: yield sepbench last_bm = r.benchmark yield r
Given a sequence of BenchmarkResults, return a new sequence where a "separator" BenchmarkResult has been placed between differing benchmarks to provide a visual separation.
# NOTE(review): corrupted source -- the format string in
# ``print( % (light, k))`` has been stripped (it evidently reported
# falling back from the requested light source *light* to *k*);
# recover it before use.  Surviving intent: look up the calibration
# for *name*, preferring the requested *light* source, falling back to
# the first available one (or using the entry directly when it is not
# a dict), then select by *date* via ``_getFromDate``.  Returns None
# when no calibration exists at all.
def getCoeff(self, name, light=None, date=None): d = self.coeffs[name] try: c = d[light] except KeyError: try: k, i = next(iter(d.items())) if light is not None: print( % (light, k)) except StopIteration: return None c = i except TypeError: c = d return _getFromDate(c, date)
Try to get the calibration for the requested light source, but fall back to another one if it is non-existent.
def read(self):
    """Read the whole buffer out as a single stream.

    .. warning::
        Convenience only -- this duplicates all buffered data in
        memory, at best doubling the memory needed for a model.
        Prefer :meth:`buffer_iter` wherever possible.
    """
    buffer = BytesIO()
    for chunk in self.buffer_iter():
        # FIX: the original debug call had lost its (stripped) format
        # string, which is a syntax error; message text reconstructed.
        log.debug('read: buffering chunk %r', chunk)
        buffer.write(chunk)
    buffer.seek(0)
    return buffer.read()
Read buffer out as a single stream. .. warning:: Avoid using this function! **Why?** This is a *convenience* function; it doesn't encourage good memory management. All memory required for a mesh is duplicated, and returned as a single :class:`str`. So at best, using this function will double the memory required for a single model. **Instead:** Wherever possible, please use :meth:`buffer_iter`.
def _update_pwm(self): if self._is_on: values = self._get_pwm_values() else: values = [0] * len(self._driver.pins) self._driver.set_pwm(values)
Update the PWM values of the driver according to the current state.
def eotvos(target, k, temperature='pore.temperature',
           critical_temperature='pore.critical_temperature',
           molar_density='pore.molar_density'):
    r"""Calculate surface tension with the Eotvos correlation:
    :math:`\sigma = k (T_c - T) / V_m^{2/3}`, where the molar volume
    :math:`V_m` is the reciprocal of the molar density.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    k : float
        Constant parameter specific to the fluid.
    temperature : string
        The dictionary key containing the temperature values (K).
    critical_temperature : string
        The dictionary key containing the critical temperature values (K).
    molar_density : string
        The dictionary key containing the molar density values.

    Notes
    -----
    FIX(review): the default dictionary keys had been stripped from the
    signature (a syntax error); restored to OpenPNM's conventional
    'pore.*' property names -- confirm against the rest of the package.
    """
    Tc = target[critical_temperature]
    T = target[temperature]
    Vm = 1/target[molar_density]
    value = k*(Tc-T)/(Vm**(2/3))
    return value
r""" Missing description Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. k : float Constant parameter specific to fluid temperature : string The dictionary key containing the temperature values (K) critical_temperature : string The dictionary key containing the critical temperature values (K) molar_density : string The dictionary key containing the molar density values (K) TODO: Needs description, and improve definition of k
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):
    """Implementation of Dataset.to_arrow_table: convert the dataset's
    columns to arrow arrays and assemble them into a pyarrow Table."""
    # Materialize once: to_items may only be iterable a single time.
    columns = list(ds.to_items(column_names=column_names,
                               selection=selection,
                               strings=strings,
                               virtual=virtual))
    names = [name for name, _ in columns]
    arrays = [arrow_array_from_numpy_array(values) for _, values in columns]
    return pyarrow.Table.from_arrays(arrays, names)
Implementation of Dataset.to_arrow_table
def image_predict_proba(self, X):
    """Predict class probabilities for the entire image.

    Pixel-level probabilities come from the underlying pixel
    classifier; each patch's two probability channels are then replaced
    by their patch average, written in place through the patch view
    returned by ``_to_patches``.

    Parameters
    ----------
    X : array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands]
        Array of images.
    """
    self._check_image(X)
    probabilities = self.pixel_classifier.image_predict_proba(X)
    patches, _ = self._to_patches(probabilities)
    n_patch_rows = self._image_size[0] // self.patch_size[0]
    n_patch_cols = self._image_size[1] // self.patch_size[1]
    patch_area = self.patch_size[0] * self.patch_size[1]
    for row in range(n_patch_rows):
        for col in range(n_patch_cols):
            for sample in range(self._samples):
                patches[sample, row, col, 0] = np.sum(patches[sample, row, col, 0]) / patch_area
                patches[sample, row, col, 1] = np.sum(patches[sample, row, col, 1]) / patch_area
    return probabilities
Predicts class probabilities for the entire image. Parameters: ----------- X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands] Array of images to classify Returns: -------- probabilities: array Per-pixel class probabilities, averaged over each patch