Dataset columns: code (string, 26 to 79.6k characters) and docstring (string, 1 to 46.9k characters).
def message(self, message, item, extra):
    if item is not None:
        message += ": " + ascii(item)
    if extra is not None:
        message += " (" + str(extra) + ")"
    return message
Uses arguments to create the message.
def _construct_punctuation_token(self, d: Dict, nlp) -> List[Dict]: result = [] if not d["token"]: this_token = {attrs.IS_PUNCT: True} elif len(d["token"]) == 1: this_token = {attrs.ORTH: d["token"][0]} else: global FLAG_ID punct_set = set(d["token"]) def is_selected_punct(x): return x in punct_set FLAG_DICT[FLAG_ID] = nlp.vocab.add_flag(is_selected_punct) this_token = {FLAG_DICT[FLAG_ID]: True} FLAG_ID += 1 result.append(this_token) result = self._add_common_constrain(result, d) return result
Construct a punctuation token. Args: d: Dict nlp Returns: List[Dict]
def genCompleteTypes(compoundSig):
    i = 0
    start = 0
    end = len(compoundSig)

    def find_end(idx, b, e):
        depth = 1
        while idx < end:
            subc = compoundSig[idx]
            if subc == b:
                depth += 1
            elif subc == e:
                depth -= 1
                if depth == 0:
                    return idx
            idx += 1

    while i < end:
        c = compoundSig[i]
        # Struct, dict-entry and array characters assumed from the D-Bus
        # signature grammar; the original single-quoted literals were stripped.
        if c == '(':
            x = find_end(i + 1, '(', ')')
            yield compoundSig[i:x + 1]
            i = x
        elif c == '{':
            x = find_end(i + 1, '{', '}')
            yield compoundSig[i:x + 1]
            i = x
        elif c == 'a':
            start = i
            g = genCompleteTypes(compoundSig[i + 1:])
            ct = six.next(g)
            i += len(ct)
            yield 'a' + ct
        else:
            yield c
        i += 1
Generator function used to iterate over each complete, top-level type contained in a signature. Ex:: "iii" => [ 'i', 'i', 'i' ] "i(ii)i" => [ 'i', '(ii)', 'i' ] "i(i(ii))i" => [ 'i', '(i(ii))', 'i' ]
def from_json(f: TextIO) -> : o = json.load(f) return PrecalculatedTextMeasurer(o[], o[], o[])
Return a PrecalculatedTextMeasurer given a JSON stream. See precalculate_text.py for details on the required format.
def edit_message_media( self, chat_id: Union[int, str], message_id: int, media: InputMedia, reply_markup: "pyrogram.InlineKeyboardMarkup" = None ) -> "pyrogram.Message": style = self.html if media.parse_mode.lower() == "html" else self.markdown caption = media.caption if isinstance(media, InputMediaPhoto): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedPhoto( file=self.save_file(media.media) ) ) ) media = types.InputMediaPhoto( id=types.InputPhoto( id=media.photo.id, access_hash=media.photo.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaPhotoExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 2: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaPhoto( id=types.InputPhoto( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaVideo): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "video/mp4", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeVideo( supports_streaming=media.supports_streaming or None, duration=media.duration, w=media.width, h=media.height ), types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ) ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 4: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaAudio): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "audio/mpeg", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeAudio( duration=media.duration, performer=media.performer, title=media.title ), types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ) ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = 
utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 9: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaAnimation): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "video/mp4", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeVideo( supports_streaming=True, duration=media.duration, w=media.width, h=media.height ), types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ), types.DocumentAttributeAnimated() ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 10: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaDocument): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "application/zip", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ) ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] not in (5, 10): media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) r = self.send( functions.messages.EditMessage( peer=self.resolve_peer(chat_id), id=message_id, reply_markup=reply_markup.write() if reply_markup else None, media=media, **style.parse(caption) ) ) for i in r.updates: if isinstance(i, 
(types.UpdateEditMessage, types.UpdateEditChannelMessage)): return pyrogram.Message._parse( self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats} )
Use this method to edit audio, document, photo, or video messages. If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise, message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded. Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). message_id (``int``): Message identifier in the chat specified in chat_id. media (:obj:`InputMedia`) One of the InputMedia objects describing an animation, audio, document, photo or video. reply_markup (:obj:`InlineKeyboardMarkup`, *optional*): An InlineKeyboardMarkup object. Returns: On success, the edited :obj:`Message <pyrogram.Message>` is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
def on(self, state):
    self._on = state
    cmd = self.command_set.off()
    if state:
        cmd = self.command_set.on()
    self.send(cmd)
Turn on or off. :param state: True (on) or False (off).
def replace_pyof_version(module_fullname, version):
    module_version = MetaStruct.get_pyof_version(module_fullname)
    if not module_version or module_version == version:
        return None
    return module_fullname.replace(module_version, version)
Replace the OF Version of a module fullname. Gets a module name (e.g. 'pyof.v0x01.common.header') and returns it with a new 'version' (e.g. 'pyof.v0x02.common.header'). Args: module_fullname (str): The fullname of the module (e.g.: pyof.v0x01.common.header) version (str): The version to be 'inserted' on the module fullname. Returns: str: module fullname The new module fullname, with the replaced version, on the format "pyof.v0x01.common.header". If the requested version is the same as the one of the module_fullname or if the module_fullname is not an 'OF version'-specific module, returns None.
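A minimal standalone sketch of the documented behaviour. The real method delegates to MetaStruct.get_pyof_version and str.replace; the split-on-dots helper below is only an illustration under the assumption that module names look like 'pyof.v0x01.common.header'.

def replace_pyof_version_sketch(module_fullname, version):
    parts = module_fullname.split(".")
    # The version segment is expected right after the 'pyof' package name.
    if len(parts) < 2 or parts[0] != "pyof" or not parts[1].startswith("v"):
        return None
    if parts[1] == version:
        return None
    parts[1] = version
    return ".".join(parts)

print(replace_pyof_version_sketch("pyof.v0x01.common.header", "v0x04"))
# -> pyof.v0x04.common.header
print(replace_pyof_version_sketch("pyof.v0x01.common.header", "v0x01"))
# -> None (same version requested)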
def list_features(self, **kwargs): params = { : util.language_code(kwargs.get()), : True } result = self.make_request(, {}, **params) if not util.check_result(result): return False, result.get(, ) values = util.response_list(result, ) return True, [emtype.ParkingFeature(**a) for a in values]
Obtain a list of parkings. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Parking]), or message string in case of error.
def clean_data(self, data, rename_col=None, drop_col=None, resample=True, freq=, resampler=, interpolate=True, limit=1, method=, remove_na=True, remove_na_how=, remove_outliers=True, sd_val=3, remove_out_of_bounds=True, low_bound=0, high_bound=float(), save_file=True): if not isinstance(data, pd.DataFrame): raise TypeError() clean_data_obj = Clean_Data(data) clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler, interpolate=interpolate, limit=limit, method=method, remove_na=remove_na, remove_na_how=remove_na_how, remove_outliers=remove_outliers, sd_val=sd_val, remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound) if rename_col: clean_data_obj.rename_columns(rename_col) if drop_col: clean_data_obj.drop_columns(drop_col) self.cleaned_data = clean_data_obj.cleaned_data self.result[] = { : rename_col, : drop_col, : resample, : freq, : resampler, : interpolate, : limit, : method, : remove_na, : remove_na_how, : remove_outliers, : sd_val, : remove_out_of_bounds, : low_bound, : str(high_bound) if high_bound == float() else high_bound, : save_file } if save_file: f = self.results_folder_name + + str(self.get_global_count()) + self.cleaned_data.to_csv(f) self.result[][] = f else: self.result[][] = return self.cleaned_data
Cleans dataframe according to user specifications and stores result in self.cleaned_data. Parameters ---------- data : pd.DataFrame() Dataframe to be cleaned. rename_col : list(str) List of new column names. drop_col : list(str) Columns to be dropped. resample : bool Indicates whether to resample data or not. freq : str Resampling frequency i.e. d, h, 15T... resampler : str Resampling type i.e. mean, max. interpolate : bool Indicates whether to interpolate data or not. limit : int Interpolation limit. method : str Interpolation method. remove_na : bool Indicates whether to remove NAs or not. remove_na_how : str Specifies how to remove NA i.e. all, any... remove_outliers : bool Indicates whether to remove outliers or not. sd_val : int Standard Deviation Value (specifies how many SDs away a point is considered an outlier) remove_out_of_bounds : bool Indicates whether to remove out of bounds datapoints or not. low_bound : int Low bound of the data. high_bound : int High bound of the data. save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing cleaned data.
def shutdown(self, exitcode=0, exitmsg=None): self.action_log_info() if hasattr(self, ) and hasattr(self.minion, ): self.minion.destroy() super(Minion, self).shutdown( exitcode, (.format( self.__class__.__name__, (exitmsg or )).strip()))
If sub-classed, run any shutdown operations in this method. :param exitcode: The exit code. :param exitmsg: The optional exit message.
def run(self): update_mininum_mongodb_version(None) self.main_conn = self.create_authed_client() LOG.always( "Source MongoDB version: %s", self.main_conn.admin.command("buildInfo")["version"], ) for dm in self.doc_managers: name = dm.__class__.__module__ module = sys.modules[name] version = "unknown" if hasattr(module, "__version__"): version = module.__version__ elif hasattr(module, "version"): version = module.version LOG.always("Target DocManager: %s version: %s", name, version) self.read_oplog_progress() conn_type = None try: self.main_conn.admin.command("isdbgrid") except pymongo.errors.OperationFailure: conn_type = "REPLSET" if conn_type == "REPLSET": is_master = self.main_conn.admin.command("isMaster") if "setName" not in is_master: LOG.error( "to run mongo-connector. Shutting down..." % self.address ) return self.main_conn.close() self.main_conn = self.create_authed_client(replicaSet=is_master["setName"]) self.update_version_from_client(self.main_conn) oplog = OplogThread( self.main_conn, self.doc_managers, self.oplog_progress, self.namespace_config, **self.kwargs ) self.shard_set[0] = oplog LOG.info("MongoConnector: Starting connection thread %s" % self.main_conn) oplog.start() while self.can_run: shard_thread = self.shard_set[0] if not (shard_thread.running and shard_thread.is_alive()): LOG.error( "MongoConnector: OplogThread" " %s unexpectedly stopped! Shutting down" % (str(self.shard_set[0])) ) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return self.write_oplog_progress() time.sleep(1) else: while self.can_run: for shard_doc in retry_until_ok( lambda: list(self.main_conn.config.shards.find()) ): shard_id = shard_doc["_id"] if shard_id in self.shard_set: shard_thread = self.shard_set[shard_id] if not (shard_thread.running and shard_thread.is_alive()): LOG.error( "MongoConnector: OplogThread " "%s unexpectedly stopped! Shutting " "down" % (str(self.shard_set[shard_id])) ) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return self.write_oplog_progress() time.sleep(1) continue try: repl_set, hosts = shard_doc["host"].split("/") except ValueError: cause = "The system only uses replica sets!" LOG.exception("MongoConnector: %s", cause) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return shard_conn = self.create_authed_client(hosts, replicaSet=repl_set) self.update_version_from_client(shard_conn) oplog = OplogThread( shard_conn, self.doc_managers, self.oplog_progress, self.namespace_config, mongos_client=self.main_conn, **self.kwargs ) self.shard_set[shard_id] = oplog msg = "Starting connection thread" LOG.info("MongoConnector: %s %s" % (msg, shard_conn)) oplog.start() if self.signal is not None: LOG.info("recieved signal %s: shutting down...", self.signal) self.oplog_thread_join() self.write_oplog_progress()
Discovers the mongo cluster and creates a thread for each primary.
def list_semod(): * helptext = __salt__[]().splitlines() semodule_version = for line in helptext: if line.strip().startswith(): semodule_version = if semodule_version == : mdata = __salt__[]().splitlines() ret = {} for line in mdata: if not line.strip(): continue comps = line.split() if len(comps) == 4: ret[comps[1]] = {: False, : None} else: ret[comps[1]] = {: True, : None} else: mdata = __salt__[]().splitlines() ret = {} for line in mdata: if not line.strip(): continue comps = line.split() if len(comps) == 3: ret[comps[0]] = {: False, : comps[1]} else: ret[comps[0]] = {: True, : comps[1]} return ret
Return a structure listing all of the selinux modules on the system and what state they are in CLI Example: .. code-block:: bash salt '*' selinux.list_semod .. versionadded:: 2016.3.0
def distance_to_point(self, p): if self.start <= p <= self.end: return 0 else: return min(abs(self.start - p), abs(self.end - p))
Returns the distance from the point to the interval. Zero if the point lies inside the interval.
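A small self-contained illustration of the interval-to-point distance; the Interval class here is a hypothetical stand-in for the object that owns the method above.

class Interval:
    def __init__(self, start, end):
        self.start, self.end = start, end

    def distance_to_point(self, p):
        # Zero inside the interval, otherwise distance to the nearer endpoint.
        if self.start <= p <= self.end:
            return 0
        return min(abs(self.start - p), abs(self.end - p))

iv = Interval(2, 5)
print(iv.distance_to_point(3))   # 0  (inside the interval)
print(iv.distance_to_point(8))   # 3  (distance to the endpoint at 5)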
def y_ticks(self, *ticks):
    if ticks:
        for tick in ticks:
            if not is_numeric(tick):
                # '%s' placeholder restored; the original literal was stripped
                raise TypeError("%s is not a numeric tick" % str(tick))
        self._y_ticks = tuple(sorted(ticks))
    else:
        if self._y_ticks:
            return self._y_ticks
        else:
            return determine_ticks(self.y_lower_limit(), self.y_upper_limit())
The points on the y-axis for which there are markers and grid lines. There are default ticks, but you can pass values to this method to override the defaults. Otherwise the method will return the ticks. :param \*ticks: if given, these will be the chart's y-ticks. :rtype: ``tuple``
def _create_kube_dns_instance(self, instance): kube_dns_instance = deepcopy(instance) kube_dns_instance[] = instance.get(, None) kube_dns_instance.update( { : , : [ { : , : , : , : , } ], : instance.get(, False), : instance.get(, False), } ) return kube_dns_instance
Set up kube_dns instance so it can be used in OpenMetricsBaseCheck
def import_class(klass):
    # klass is a dotted path such as 'package.module.ClassName'
    mod = __import__(klass.rpartition('.')[0])
    for segment in klass.split('.')[1:-1]:
        mod = getattr(mod, segment)
    return getattr(mod, klass.rpartition('.')[2])
Import the named class and return that class
def get_input_message(message): try: if isinstance(message, int): return types.InputMessageID(message) elif message.SUBCLASS_OF_ID == 0x54b6bcc5: return message elif message.SUBCLASS_OF_ID == 0x790009e3: return types.InputMessageID(message.id) except AttributeError: pass _raise_cast_fail(message, )
Similar to :meth:`get_input_peer`, but for input messages.
def output_files(self): for dep in self.subgraph.successors(self.address): dep_rule = self.subgraph.node[dep][] for dep_file in dep_rule.output_files: yield self.translate_path(dep_file, dep_rule).lstrip()
Returns the list of output files from this rule. Paths are generated from the outputs of this rule's dependencies, with their paths translated based on prefix and strip_prefix. Returned paths are relative to buildroot.
def sg_mean(tensor, opt):
    return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r"""Computes the mean of elements across axis of a tensor. See `tf.reduce_mean()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
def clear_history(pymux, variables): " Clear scrollback buffer. " pane = pymux.arrangement.get_active_pane() if pane.display_scroll_buffer: raise CommandException() else: pane.process.screen.clear_history()
Clear scrollback buffer.
def prefetch_users(persistent_course_grades): users = User.objects.filter( id__in=[grade.user_id for grade in persistent_course_grades] ) return { user.id: user for user in users }
Prefetch Users from the list of user_ids present in the persistent_course_grades. Arguments: persistent_course_grades (list): A list of PersistentCourseGrade. Returns: (dict): A dictionary containing user_id to user mapping.
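The same one-bulk-query-then-dict-lookup pattern, sketched outside Django's ORM. The Grade/User classes and fetch_users_bulk stand in for PersistentCourseGrade, User and User.objects.filter(id__in=...) and are hypothetical.

from dataclasses import dataclass

@dataclass
class Grade:
    user_id: int

@dataclass
class User:
    id: int
    name: str

def fetch_users_bulk(ids):
    # Stand-in for a single bulk database query.
    db = {1: User(1, "ada"), 2: User(2, "grace")}
    return [db[i] for i in ids if i in db]

grades = [Grade(1), Grade(2), Grade(1)]
# One query up front, then O(1) lookups per grade instead of one query each.
users_by_id = {u.id: u for u in fetch_users_bulk({g.user_id for g in grades})}
print(users_by_id[grades[0].user_id].name)  # ada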
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode): consoleHandle = ct.c_int() if (sys.version_info[0] == 3) and (type(title) is str): title=title.encode() if position != None: c_position = (ct.c_int*2)(*position) else: c_position = None if size != None: c_size = (ct.c_int*2)(*size) else: c_size = None if textColor != None: c_textColor = (ct.c_float*3)(*textColor) else: c_textColor = None if backgroundColor != None: c_backgroundColor = (ct.c_float*3)(*backgroundColor) else: c_backgroundColor = None return c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, ct.byref(consoleHandle), operationMode), consoleHandle.value
Please have a look at the function description/documentation in the V-REP user manual
def from_networkx_graph(cls, G, vartype=None, node_attribute_name='bias', edge_attribute_name='bias'):
    # Default attribute names and the 'vartype'/'offset' lookups are restored
    # from the docstring; the original single-quoted literals were stripped.
    if vartype is None:
        if not hasattr(G, 'vartype'):
            msg = ("either argument must be provided or "
                   "the given graph should have a vartype attribute.")
            raise ValueError(msg)
        vartype = G.vartype
    linear = G.nodes(data=node_attribute_name, default=0)
    quadratic = G.edges(data=edge_attribute_name, default=0)
    offset = getattr(G, 'offset', 0)
    return cls(linear, quadratic, offset, vartype)
Create a binary quadratic model from a NetworkX graph. Args: G (:obj:`networkx.Graph`): A NetworkX graph with biases stored as node/edge attributes. vartype (:class:`.Vartype`/str/set, optional): Variable type for the binary quadratic model. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` If not provided, the `G` should have a vartype attribute. If `vartype` is provided and `G.vartype` exists then the argument overrides the property. node_attribute_name (hashable, optional, default='bias'): Attribute name for linear biases. If the node does not have a matching attribute then the bias defaults to 0. edge_attribute_name (hashable, optional, default='bias'): Attribute name for quadratic biases. If the edge does not have a matching attribute then the bias defaults to 0. Returns: :obj:`.BinaryQuadraticModel` Examples: >>> import networkx as nx ... >>> G = nx.Graph() >>> G.add_node('a', bias=.5) >>> G.add_edge('a', 'b', bias=-1) >>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN') >>> bqm.adj['a']['b'] -1
def insert_id(self, sname, skind, skinds, stype):
    index = self.lookup_symbol(sname, skinds)
    if index is None:
        index = self.insert_symbol(sname, skind, stype)
        return index
    else:
        # '%s' placeholder restored in the error message
        raise SemanticException("Redefinition of %s" % sname)
Inserts a new identifier at the end of the symbol table, if possible. Returns symbol index, or raises an exception if the symbol already exists. sname - symbol name skind - symbol kind skinds - symbol kinds to check for stype - symbol type
def trim(args): from jcvi.algorithms.maxsum import max_sum p = OptionParser(trim.__doc__) p.add_option("-c", dest="min_length", type="int", default=64, help="minimum sequence length after trimming") p.add_option("-s", dest="score", default=QUAL, help="quality trimming cutoff [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, newfastafile = args qualfile = get_qual(fastafile) newqualfile = get_qual(newfastafile, check=False) logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \ (fastafile, newfastafile)) fw = must_open(newfastafile, "w") fw_qual = open(newqualfile, "w") dropped = trimmed = 0 for rec in iter_fasta_qual(fastafile, qualfile, modify=True): qv = [x - opts.score for x in \ rec.letter_annotations["phred_quality"]] msum, trim_start, trim_end = max_sum(qv) score = trim_end - trim_start + 1 if score < opts.min_length: dropped += 1 continue if score < len(rec): trimmed += 1 rec = rec[trim_start:trim_end + 1] write_fasta_qual(rec, fw, fw_qual) print("A total of %d sequences modified." % trimmed, file=sys.stderr) print("A total of %d sequences dropped (length < %d)." % \ (dropped, opts.min_length), file=sys.stderr) fw.close() fw_qual.close()
%prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximizes the sum
def _try_passwordless_paramiko(server, keyfile):
    if paramiko is None:
        msg = "Paramiko unavailable, "
        # platform literal restored; the original single-quoted string was stripped
        if sys.platform == 'win32':
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(server, port, username=username, key_filename=keyfile,
                       look_for_keys=True)
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True
Try passwordless login with paramiko.
def set_context(self, new_context): self._context = new_context if hasattr(self, ): self._set_logging_context(self._context)
Assigns the new context to the member variable ``_context``.
def get_stp_mst_detail_output_last_instance_instance_id(self, **kwargs): config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") last_instance = ET.SubElement(output, "last-instance") instance_id = ET.SubElement(last_instance, "instance-id") instance_id.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
def EndEdit(self, row, col, grid, val=None): "Complete the editing of the current cell. Returns True if changed" changed = False val = self._tc.GetStringSelection() print "val", val, row, col, self.startValue if val != self.startValue: changed = True grid.GetTable().SetValue(row, col, val) self.startValue = self._tc.SetStringSelection() return changed
Complete the editing of the current cell. Returns True if changed
def get_boards(self): if in self.config: for boardid in self.config.get(, to_type=aslist): yield self.api_request( "/1/boards/{id}".format(id=boardid), fields=) else: boards = self.api_request("/1/members/me/boards", fields=) for board in boards: yield board
Get the list of boards to pull cards from. If the user gave a value to trello.include_boards use that, otherwise ask the Trello API for the user's boards.
def manage_host_check_result_brok(self, b): host_name = b.data.get(, None) if not host_name: return logger.debug("host check result: %s", host_name) if host_name not in self.hosts_cache and not self.ignore_unknown: logger.warning("received host check result for an unknown host: %s", host_name) return metrics = self.get_metrics_from_perfdata(, b.data[]) if not metrics: logger.debug("no metrics to send ...") return if self.ignore_latency_limit >= b.data[] > 0: check_time = int(b.data[]) - int(b.data[]) else: check_time = int(b.data[]) hname = sanitize_name(host_name) if host_name in self.hosts_cache: if self.hosts_cache[host_name].get(, None): hname = ".".join((self.hosts_cache[host_name].get(), hname)) if self.hosts_cache[host_name].get(, None): hname = ".".join((self.hosts_cache[host_name].get(), hname)) if self.graphite_data_source: path = .join((hname, self.graphite_data_source)) if self.hostcheck: path = .join((hname, self.graphite_data_source, self.hostcheck)) else: path = .join((hname, self.hostcheck)) if self.realms_prefix and self.hosts_cache[host_name].get(, None): path = .join((self.hosts_cache[host_name].get(), path)) realm_name = None if host_name in self.hosts_cache: realm_name = self.hosts_cache[host_name].get(, None) self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)
A host check result brok has just arrived...
def xml_to_json(root): j = {} if len(root) == 0: return _maybe_intify(root.text) if len(root) == 1 and root[0].tag.startswith( + NS_GML): return gml_to_geojson(root[0]) if root.tag == : j[] = {: root.get()} for elem in root: name = elem.tag if name == and elem.get(): name = elem.get() + if name == : name = if root.tag == : j[][name] = elem.get() continue elif name.startswith( + NS_PROTECTED): name = + name[name.index() + 1:] elif name[0] == : name = + name[name.index() + 1:] if name in j: continue elif elem.tag == and not elem.text: j[name] = elem.get() elif len(elem): if name == : j[name] = [xml_link_to_json(child, to_dict=False) for child in elem] elif name in (, ): j[name] = [xml_link_to_json(child, to_dict=True) for child in elem] elif all((name == pluralize(child.tag) for child in elem)): j[name] = [xml_to_json(child) for child in elem] else: j[name] = xml_to_json(elem) else: if root.tag == and name.endswith() and not elem.text: j[name] = [] else: j[name] = _maybe_intify(elem.text) return j
Convert an Open511 XML document or document fragment to JSON. Takes an lxml Element object. Returns a dict ready to be JSON-serialized.
def setY(self,Y,standardize=False): assert Y.shape[0]==self.N, assert Y.shape[1]==self.P, if standardize: Y=preprocess.standardize(Y) assert (~(SP.isnan(Y).any(axis=1))==self.Iok).all(), self.Y = Y self.vd.setPheno(Y) self.optimum = None self.cache[] = None self.cache[] = None self.cache[] = None self.cache[]= None
Set phenotype matrix Args: Y: phenotype matrix [N, P] standardize: if True, phenotype is standardized (zero mean, unit variance)
def _parse_options(opts, delim): options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") if key.lower() == : options.setdefault(key, []).append(value) else: if key in options: warnings.warn("Duplicate URI option ." % (key,)) options[key] = unquote_plus(value) if in options: for implicit_option in _IMPLICIT_TLSINSECURE_OPTS: if implicit_option in options: warn_msg = "URI option overrides value implied by ." warnings.warn(warn_msg % (options.cased_key(implicit_option), options.cased_key())) continue options[implicit_option] = options[] return options
Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ readpreferencetags portion and the use of the tlsInsecure option.
def decodeLength(length): bytes_length = len(length) if bytes_length < 2: offset = b XOR = 0 elif bytes_length < 3: offset = b XOR = 0x8000 elif bytes_length < 4: offset = b XOR = 0xC00000 elif bytes_length < 5: offset = b XOR = 0xE0000000 else: raise ConnectionError(.format(length)) decoded = unpack(, (offset + length))[0] decoded ^= XOR return decoded
Decode length based on given bytes. :param length: Bytes string to decode. :return: Decoded length.
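A standalone sketch of this style of length decoding (RouterOS API framing): pad the received bytes to four, read them as a big-endian unsigned int, then clear the length-marker bits. The padding bytes and the '>I' unpack format stand in for the literals stripped from the function above and are assumptions.

from struct import unpack

def decode_length_sketch(length: bytes) -> int:
    pads_and_masks = {1: (b"\x00\x00\x00", 0),
                      2: (b"\x00\x00", 0x8000),
                      3: (b"\x00", 0xC00000),
                      4: (b"", 0xE0000000)}
    try:
        pad, mask = pads_and_masks[len(length)]
    except KeyError:
        raise ValueError("unable to decode length of {!r}".format(length))
    return unpack(">I", pad + length)[0] ^ mask

print(decode_length_sketch(b"\x05"))      # 5
print(decode_length_sketch(b"\x80\xff"))  # 255 (0x80ff with the 0x8000 marker cleared)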
def create_correlation_matrix_plot(correlation_matrix, title, feature_list): chart = SimpleChart(title) ax1 = chart.get_ax() ax1.set_xticks(list(range(len(feature_list)))) ax1.set_xticklabels([feature_list[i] for i in range(len(feature_list))], rotation=90) ax1.set_yticks(list(range(len(feature_list)))) ax1.set_yticklabels([feature_list[i] for i in range(len(feature_list))]) cax = ax1.imshow(correlation_matrix, interpolation="nearest", cmap=cm.get_cmap("jet", 30)) chart.get_fig().colorbar(cax, ticks=np.linspace(-1, 1, 21)) plt.gcf().subplots_adjust(bottom=0.25)
Creates plot for correlation matrix :param correlation_matrix: Correlation matrix of features :param title: Title of plot :param feature_list: List of names of features :return: Shows the given correlation matrix as image
def loadOFF(filename, c="gold", alpha=1, wire=False, bc=None): if not os.path.exists(filename): colors.printc("~noentry Error in loadOFF: Cannot find", filename, c=1) return None f = open(filename, "r") lines = f.readlines() f.close() vertices = [] faces = [] NumberOfVertices = None i = -1 for text in lines: if len(text) == 0: continue if text == : continue if " continue if "OFF" in text: continue ts = text.split() n = len(ts) if not NumberOfVertices and n > 1: NumberOfVertices, NumberOfFaces = int(ts[0]), int(ts[1]) continue i += 1 if i < NumberOfVertices and n == 3: x, y, z = float(ts[0]), float(ts[1]), float(ts[2]) vertices.append([x, y, z]) ids = [] if NumberOfVertices <= i < (NumberOfVertices + NumberOfFaces + 1) and n > 2: ids += [int(x) for x in ts[1:]] faces.append(ids) return Actor(buildPolyData(vertices, faces), c, alpha, wire, bc)
Read OFF file format.
def validate_offset(reference_event, estimated_event, t_collar=0.200, percentage_of_length=0.5): if in reference_event and in estimated_event: annotated_length = reference_event[] - reference_event[] return math.fabs(reference_event[] - estimated_event[]) <= max(t_collar, percentage_of_length * annotated_length) elif in reference_event and in estimated_event: annotated_length = reference_event[] - reference_event[] return math.fabs(reference_event[] - estimated_event[]) <= max(t_collar, percentage_of_length * annotated_length)
Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be considered a valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be considered a valid estimation. Default value 0.5 Returns ------- bool
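A worked numeric check of the offset rule described above: the estimated offset is accepted if it falls within max(t_collar, percentage_of_length * annotated_length) of the reference offset. The 'onset'/'offset' keys are assumed stand-ins for the stripped dictionary keys.

import math

reference = {"onset": 1.0, "offset": 4.0}
estimated = {"onset": 1.1, "offset": 5.2}

t_collar = 0.200
percentage_of_length = 0.5
annotated_length = reference["offset"] - reference["onset"]        # 3.0 s
allowed = max(t_collar, percentage_of_length * annotated_length)   # 1.5 s
valid = math.fabs(reference["offset"] - estimated["offset"]) <= allowed
print(valid)  # True: |4.0 - 5.2| = 1.2 s is within the 1.5 s window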
def extract_detections(detections, templates, archive, arc_type, extract_len=90.0, outdir=None, extract_Z=True, additional_stations=[]): all_delays = [] all_stachans = [] for template in templates: templatestream = template[1].sort([]) stachans = [(tr.stats.station, tr.stats.channel) for tr in templatestream] mintime = templatestream[0].stats.starttime delays = [tr.stats.starttime - mintime for tr in templatestream] all_delays.append((template[0], delays)) all_stachans.append((template[0], stachans)) detections.sort(key=lambda d: d.detect_time) detection_days = [detection.detect_time.date for detection in detections] detection_days = list(set(detection_days)) detection_days.sort() detection_days = [UTCDateTime(d) for d in detection_days] detection_wavefiles = [] if extract_Z: new_all_stachans = [] new_all_delays = [] for t, template in enumerate(all_stachans): stachans = template[1] delays = all_delays[t][1] new_stachans = [] new_delays = [] j = 0 for i, stachan in enumerate(stachans): if j == 1: new_stachans.append((stachan[0], stachan[1][0] + )) new_delays.append(delays[i]) new_stachans.append(stachan) new_delays.append(delays[i]) j = 0 else: new_stachans.append(stachan) new_delays.append(delays[i]) j += 1 new_all_stachans.append((template[0], new_stachans)) new_all_delays.append((template[0], new_delays)) all_delays = new_all_delays all_stachans = new_all_stachans if not len(additional_stations) == 0: print() for t, template in enumerate(all_stachans): av_delay = np.mean(all_delays[t][1]) for sta in additional_stations: if sta not in template[1]: print( + .join(sta)) template[1].append(sta) all_delays[t][1].append(av_delay) del stachans for detection_day in detection_days: print( + str(detection_day)) stachans = list(set([stachans[1] for stachans in all_stachans][0])) st = read_data(archive=archive, arc_type=arc_type, day=detection_day, stachans=stachans) st.merge(fill_value=) day_detections = [detection for detection in detections if UTCDateTime(detection.detect_time.date) == detection_day] del stachans, delays for detection in day_detections: print( + detection.detect_time.strftime()) detect_wav = st.copy() for tr in detect_wav: t1 = UTCDateTime(detection.detect_time) - extract_len / 2 t2 = UTCDateTime(detection.detect_time) + extract_len / 2 tr.trim(starttime=t1, endtime=t2) if outdir: if not os.path.isdir(os.path.join(outdir, detection.template_name)): os.makedirs(os.path.join(outdir, detection.template_name)) detect_wav.write(os.path.join(outdir, detection.template_name, detection.detect_time. strftime() + ), format=) print( % .join([outdir, detection.template_name, detection.detect_time. strftime() + ])) if not outdir: detection_wavefiles.append(detect_wav) del detect_wav del st if outdir: detection_wavefiles = [] if not outdir: return detection_wavefiles else: return
Extract waveforms associated with detections Takes a list of detections for the template, template. Waveforms will be returned as a list of :class:`obspy.core.stream.Stream` containing segments of extract_len. They will also be saved if outdir is set. The default is unset. The default extract_len is 90 seconds per channel. :type detections: list :param detections: List of :class:`eqcorrscan.core.match_filter.Detection`. :type templates: list :param templates: A list of tuples of the template name and the template Stream used to detect detections. :type archive: str :param archive: Either name of archive or path to continuous data, see :func:`eqcorrscan.utils.archive_read` for details :type arc_type: str :param arc_type: Type of archive, either seishub, FDSN, day_vols :type extract_len: float :param extract_len: Length to extract around the detection (will be equally cut around the detection time) in seconds. Default is 90.0. :type outdir: str :param outdir: Default is None, with None set, no files will be saved, if set each detection will be saved into this directory with files named according to the detection time, NOT than the waveform start time. Detections will be saved into template subdirectories. Files written will be multiplexed miniseed files, the encoding will be chosen automatically and will likely be float. :type extract_Z: bool :param extract_Z: Set to True to also extract Z channels for detections delays will be the same as horizontal channels, only applies if only horizontal channels were used in the template. :type additional_stations: list :param additional_stations: List of tuples of (station, channel) to also extract data for using an average delay. :returns: list of :class:`obspy.core.streams.Stream` :rtype: list .. rubric: Example >>> from eqcorrscan.utils.clustering import extract_detections >>> from eqcorrscan.core.match_filter import Detection >>> from obspy import read, UTCDateTime >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> # Use some dummy detections, you would use real one >>> detections = [Detection( ... template_name='temp1', detect_time=UTCDateTime(2012, 3, 26, 9, 15), ... no_chans=2, chans=['WHYM', 'EORO'], detect_val=2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', threshold_input=8.0), ... Detection( ... template_name='temp2', detect_time=UTCDateTime(2012, 3, 26, 18, 5), ... no_chans=2, chans=['WHYM', 'EORO'], detect_val=2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', threshold_input=8.0)] >>> archive = os.path.join(TEST_PATH, 'day_vols') >>> template_files = [os.path.join(TEST_PATH, 'temp1.ms'), ... os.path.join(TEST_PATH, 'temp2.ms')] >>> templates = [('temp' + str(i), read(filename)) ... for i, filename in enumerate(template_files)] >>> extracted = extract_detections(detections, templates, ... 
archive=archive, arc_type='day_vols') Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Cutting for detections at: 2012/03/26 18:05:00 >>> print(extracted[0].sort()) 2 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples >>> print(extracted[1].sort()) 2 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T18:04:15.000000Z - 2012-03-26T18:05:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T18:04:15.000000Z - 2012-03-26T18:05:45.000000Z |\ 1.0 Hz, 91 samples >>> # Extract from stations not included in the detections >>> extracted = extract_detections( ... detections, templates, archive=archive, arc_type='day_vols', ... additional_stations=[('GOVA', 'SHZ')]) Adding additional stations Added station GOVA.SHZ Added station GOVA.SHZ Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Cutting for detections at: 2012/03/26 18:05:00 >>> print(extracted[0].sort()) 3 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.GOVA..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples >>> # The detections can be saved to a file: >>> extract_detections(detections, templates, archive=archive, ... arc_type='day_vols', ... additional_stations=[('GOVA', 'SHZ')], outdir='.') Adding additional stations Added station GOVA.SHZ Added station GOVA.SHZ Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Written file: ./temp1/2012-03-26_09-15-00.ms Cutting for detections at: 2012/03/26 18:05:00 Written file: ./temp2/2012-03-26_18-05-00.ms
def __sum(self, line): splitted_line = line.split() try: ret = sum(map(int, splitted_line[1:(self.cpu_number + 1)])) except ValueError: ret = 0 return ret
Return the IRQ sum number. IRQ line samples: 1: 44487 341 44 72 IO-APIC 1-edge i8042 LOC: 33549868 22394684 32474570 21855077 Local timer interrupts FIQ: usb_fiq
def watch_instances(self, flag): lib.EnvSetDefclassWatchInstances(self._env, int(flag), self._cls)
Whether or not the Class Instances are being watched.
def Value(self, p): if p < 0 or p > 1: raise ValueError() if p == 0: return self.xs[0] if p == 1: return self.xs[-1] index = bisect.bisect(self.ps, p) if p == self.ps[index - 1]: return self.xs[index - 1] else: return self.xs[index]
Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value
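A minimal standalone sketch of the same inverse-CDF lookup, using bisect on hypothetical xs/ps data (sorted values and their cumulative probabilities).

import bisect

xs = [1, 2, 3, 4]            # sorted values
ps = [0.25, 0.5, 0.75, 1.0]  # cumulative probability for each value

def value(p):
    if p < 0 or p > 1:
        raise ValueError("p must be in [0, 1]")
    if p == 0:
        return xs[0]
    if p == 1:
        return xs[-1]
    index = bisect.bisect(ps, p)
    # An exact hit on a stored probability returns the matching value;
    # otherwise take the next value up.
    return xs[index - 1] if p == ps[index - 1] else xs[index]

print(value(0.5))   # 2
print(value(0.6))   # 3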
def upload_file(self, filename, file_type=FILE_TYPE_FREESURFER_DIRECTORY): if file_type != FILE_TYPE_FREESURFER_DIRECTORY: raise ValueError( + file_type) return self.upload_freesurfer_archive(filename)
Create an anatomy object on local disk from the given file. Currently, only Freesurfer anatomy directories are supported. Expects a tar file. Parameters ---------- filename : string Name of the (uploaded) file file_type : string File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY) Returns ------- SubjectHandle Handle for created subject in database
def download(self, filename, representation, overwrite=False): download(self.input, filename, representation, overwrite, self.resolvers, self.get3d, **self.kwargs)
Download the resolved structure as a file. :param string filename: File path to save to :param string representation: Desired output representation :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
def open(self): request = urllib.request.Request(self.uri) if self.server_data.authorization_header() is not None: request.add_header(, self.server_data.authorization_header()) request.add_header(, ) return urllib.request.urlopen(request)
Opens the URL associated with the GPFile and returns a file-like object with three extra methods: * geturl() - return the ultimate URL (can be used to determine if a redirect was followed) * info() - return the meta-information of the page, such as headers * getcode() - return the HTTP status code of the response
def list(region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) buckets = conn.list_buckets() if not bool(buckets.get()): log.warning() if in buckets: del buckets[] return buckets except ClientError as e: return {: __utils__[](e)}
List all buckets owned by the authenticated sender of the request. Returns list of buckets CLI Example: .. code-block:: yaml Owner: {...} Buckets: - {...} - {...}
def _get_default_tempdir():
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.abspath(dir)
        # Try creating and writing a small file in each candidate directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        # mode and payload restored from the CPython tempfile source
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                pass
            except OSError:
                break
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" % dirlist)
Calculate the default directory to use for temporary files. This routine should be called exactly once. We determine whether or not a candidate temp dir is usable by trying to create and write to a file in that directory. If this is successful, the test file is deleted. To prevent denial of service, the name of the test file must be randomized.
def _get_line(self) -> str: line = self.in_lines[self.index] self.index += 1 return line
Returns the current line from the file while incrementing the index.
def to_dict(self): if not self.record_type == : raise Exception() author_list = [] authors = self.article.get(, []) for x in range(len(authors)): author = authors[x] first_name = None middle_names = None if author.get(): names = author[].split() first_name = names[0] middle_names = (.join(names[1:])) or None author_list.append( dict( AuthorOrder = x + 1, FirstName = first_name, MiddleNames = middle_names, Surname = author.get() ) ) return dict( Title = self.article.get(), PublicationName = self.issue.get(), Volume = self.issue.get(), Issue = self.issue.get(), StartPage = self.article.get(), EndPage = self.article.get(), PublicationYear = self.get_year(), PublicationDate = self.get_earliest_date(), RIS = None, DOI = self.doi, PubMedID = self.get_pubmed_id(), URL = % self.doi, ISSN = None, authors = author_list, RecordType = DOI.record_types.get(self.record_type) )
A representation of that publication data that matches the schema we use in our databases.
def diff(iterable): a, b = tee(iterable) next(b, None) return (i - j for i, j in izip(a, b))
Diff elements of a sequence: s -> s0 - s1, s1 - s2, s2 - s3, ...
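A Python 3 sketch of the same pairwise difference; the original relies on Python 2's izip, for which zip is the modern equivalent.

from itertools import tee

def diff(iterable):
    a, b = tee(iterable)
    next(b, None)
    return (i - j for i, j in zip(a, b))

print(list(diff([10, 7, 4, 4])))  # [3, 3, 0]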
def oem_name(self, value): if value == self._defaults[] and in self._values: del self._values[] else: self._values[] = value
The oem_name property. Args: value (string). the property value.
def _run1(self): if self.check_update_J(): self.update_J() else: if self.check_Broyden_J(): self.update_Broyden_J() if self.check_update_eig_J(): self.update_eig_J() delta_vals = self.find_LM_updates(self.calc_grad()) er1 = self.update_function(self.param_vals + delta_vals) good_step = (find_best_step([self.error, er1]) == 1) if not good_step: er0 = self.update_function(self.param_vals) if np.abs(er0 -self.error)/er0 > 1e-7: raise RuntimeError() CLOG.debug() CLOG.debug( % (self.error, er1)) grad = self.calc_grad() for _try in range(self._max_inner_loop): self.increase_damping() delta_vals = self.find_LM_updates(grad) er1 = self.update_function(self.param_vals + delta_vals) good_step = (find_best_step([self.error, er1]) == 1) if good_step: break else: er0 = self.update_function(self.param_vals) CLOG.warn() if np.abs(er0 -self.error)/er0 > 1e-7: raise RuntimeError() if good_step: self._last_error = self.error self.error = er1 CLOG.debug( % (self._last_error, self.error)) self.update_param_vals(delta_vals, incremental=True) self.decrease_damping()
workhorse for do_run_1
def set_proxy(self, host, port, user, password): 192.168.0.100 if user and password: proxy_string = .format(user, password, host, port) else: proxy_string = .format(host, port) self.proxies = {: .format(proxy_string), : .format(proxy_string)}
Sets the proxy server host and port for the HTTP CONNECT Tunnelling. Note that we set the proxies directly on the request later on rather than using the session object as requests has a bug where session proxy is ignored in favor of environment proxy. So, auth will not work unless it is passed directly when making the request as this overrides both. :param str host: Address of the proxy. Ex: '192.168.0.100' :param int port: Port of the proxy. Ex: 6000 :param str user: User for proxy authorization. :param str password: Password for proxy authorization.
def adjust_for_isolated(self): if self.user_params.isolated.value: remove_plugins = [ ("prebuild_plugins", "check_and_set_rebuild"), ("prebuild_plugins", "stop_autorebuild_if_disabled") ] for when, which in remove_plugins: self.pt.remove_plugin(when, which, )
Remove certain plugins in order to handle the "isolated build" scenario.
def moderate(self, comment, content_object, request): if self.akismet_check: akismet_result = akismet_check(comment, content_object, request) if akismet_result: if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \ self.akismet_check_action in (, , ): comment.is_removed = True return True if super(FluentCommentsModerator, self).moderate(comment, content_object, request): return True if self.moderate_bad_words: input_words = split_words(comment.comment) if self.moderate_bad_words.intersection(input_words): return True if self.akismet_check and self.akismet_check_action not in (, ): if akismet_check(comment, content_object, request): return True return False
Determine whether a given comment on a given object should be allowed to show up immediately, or should be marked non-public and await approval. Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise.
def uninstall_signal_trap(signums = None): if signums is None: signums = origactions.keys() for signum in signums: signal.signal(signum, origactions.pop(signum))
Undo the effects of install_signal_trap(). Restores the original signal handlers. If signums is a sequence of signal numbers only the signal handlers for those signals will be restored (KeyError will be raised if one of them is not one that install_signal_trap() installed a handler for, in which case some undefined number of handlers will have been restored). If signums is None (the default) then all signals that have been modified by previous calls to install_signal_trap() are restored. Note: this function is called by put_connection_filename() and discard_connection_filename() whenever they remove a scratch file and there are then no more scratch files in use.
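A self-contained sketch of the install/uninstall pattern the docstring describes: remember the original handlers when installing, restore them on uninstall. Here origactions is the module-level registry that the real function assumes; the install helper is shown only to make the example runnable.

import signal

origactions = {}

def install_signal_trap(signums, handler):
    for signum in signums:
        origactions[signum] = signal.getsignal(signum)
        signal.signal(signum, handler)

def uninstall_signal_trap(signums=None):
    if signums is None:
        signums = list(origactions.keys())
    for signum in signums:
        signal.signal(signum, origactions.pop(signum))

install_signal_trap([signal.SIGTERM], lambda signum, frame: None)
uninstall_signal_trap()  # SIGTERM handler is back to its original value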
def show_updates(self): dists = Distributions() if self.project_name: pkg_list = [self.project_name] else: pkg_list = get_pkglist() found = None for pkg in pkg_list: for (dist, active) in dists.get_distributions("all", pkg, dists.get_highest_installed(pkg)): (project_name, versions) = \ self.pypi.query_versions_pypi(dist.project_name) if versions: newest = get_highest_version(versions) if newest != dist.version: if pkg_resources.parse_version(dist.version) < \ pkg_resources.parse_version(newest): found = True print(" %s %s (%s)" % (project_name, dist.version, newest)) if not found and self.project_name: self.logger.info("You have the latest version installed.") elif not found: self.logger.info("No newer packages found at The Cheese Shop") return 0
Check installed packages for available updates on PyPI @param project_name: optional package name to check; checks every installed package if none specified @type project_name: string @returns: None
def update_frontend(self, info): headers = {: } if info.get(): info[] = info[].isoformat() requests.post(self.base_url + , data=json.dumps(info), headers=headers)
Updates frontend with info from the log :param info: dict - Information from a line in the log. i.e regular line, new step.
def all_table_names_in_database(self, cache=False, cache_timeout=None, force=False): if not self.allow_multi_schema_metadata_fetch: return [] return self.db_engine_spec.fetch_result_sets(self, )
Parameters need to be passed as keyword arguments.
def export_configuration(self): if self._export_configuration is None: self._export_configuration = ExportConfigurationList(self) return self._export_configuration
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationList
def _check_relation(self, relation): selection = [val[0] for val in self.selection] if relation not in selection: raise ValueError( ("The value supplied doesn{selection}{field_name}' field").format( value=relation, selection=selection, field_name=self.name, )) return relation
Raise a `ValueError` if `relation` is not allowed among the possible values.
def print_inplace(*args, **kwargs): kwargs.setdefault(, sys.stdout) kwargs.setdefault(, ) pos_save(file=kwargs[]) delay = None with suppress(KeyError): delay = kwargs.pop() if delay is None: print(*args, **kwargs) else: for c in kwargs.get(, ).join(str(a) for a in args): kwargs[].write(c) kwargs[].flush() sleep(delay) if kwargs[]: kwargs[].write(kwargs[]) pos_restore(file=kwargs[]) kwargs[].flush()
Save cursor position, write some text, and then restore the position. Arguments: Same as `print()`. Keyword Arguments: Same as `print()`, except `end` defaults to '' (empty str), and these: delay : Time in seconds between character writes.
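A standalone sketch of the save-cursor/write/restore-cursor idea using raw ANSI escapes (ESC 7 to save, ESC 8 to restore); the real function delegates to pos_save/pos_restore helpers rather than writing the escapes itself.

import sys
import time

def print_inplace_sketch(*args, delay=None, end="", file=sys.stdout, sep=" "):
    file.write("\x1b7")                      # save cursor position
    text = sep.join(str(a) for a in args)
    if delay is None:
        file.write(text)
    else:
        for ch in text:                      # per-character delay
            file.write(ch)
            file.flush()
            time.sleep(delay)
    file.write(end)
    file.write("\x1b8")                      # restore cursor position
    file.flush()

for n in range(3):
    print_inplace_sketch("count:", n)
    time.sleep(0.2)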
def build_model(self): with tf.variable_scope( "model", reuse=None, initializer=self.initializer): self._create_placeholders() self._create_rnn_cells() self._create_initstate_and_embeddings() self._create_rnn_architecture() self._create_optimizer_node()
Build the model's computational graph.
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails): instance = self._backend.find(instance_id) binding = self._backend.find(binding_id, instance) if not binding.isProvisioned(): raise ErrBindingDoesNotExist() self._backend.unbind(binding)
Unbinding the instance see openbrokerapi documentation Raises: ErrBindingDoesNotExist: Binding does not exist.
def to_csv(df, filepath, sep=, header=True, index=True): df.to_pandas().to_csv(filepath, sep=sep, header=header, index=index)
Save DataFrame as csv. Note data is expected to be evaluated. Currently delegates to Pandas. Parameters ---------- df : DataFrame filepath : str sep : str, optional Separator used between values. header : bool, optional Whether to save the header. index : bool, optional Whether to save the index columns. Returns ------- None See Also -------- pandas.DataFrame.to_csv : https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html
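For reference, the pandas call the wrapper above delegates to after df.to_pandas(); a small sketch with example data, where the output path is arbitrary.

import pandas as pd

pdf = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
# Same keyword arguments the wrapper forwards: sep, header, index.
pdf.to_csv("example.csv", sep=",", header=True, index=True)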
def find_serial_devices(serial_matcher="ED"): objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator") objSWbemServices = objWMIService.ConnectServer(".", "root\cimv2") items = objSWbemServices.ExecQuery("SELECT * FROM Win32_USBControllerDevice") ids = (item.Dependent.strip()[-8:] for item in items) return [e for e in ids if e.startswith(serial_matcher)]
Finds a list of USB devices where the serial number (partially) matches the given string. :param str serial_matcher (optional): only device IDs starting with this string are returned :rtype: List[str]
def serialize(self): segment = hangouts_pb2.Segment( type=self.type_, text=self.text, formatting=hangouts_pb2.Formatting( bold=self.is_bold, italic=self.is_italic, strikethrough=self.is_strikethrough, underline=self.is_underline, ), ) if self.link_target is not None: segment.link_data.link_target = self.link_target return segment
Serialize this segment to a ``Segment`` message. Returns: ``Segment`` message.
def closest_distance_to(self, position: Union[Unit, Point2, Point3]) -> Union[int, float]: assert self.exists if isinstance(position, Unit): position = position.position return position.distance_to_closest( [u.position for u in self] )
Returns the distance between the closest unit from this group to the target unit
def first(self, skipna=None, keep_attrs=None): return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
Return the first element of each group along the group dimension
def assignment_to_plan(assignment):
    return {
        'version': 1,
        'partitions': [
            {'topic': t_p[0],
             'partition': t_p[1],
             'replicas': replica,
             } for t_p, replica in six.iteritems(assignment)]
    }
Convert an assignment to the format used by Kafka to describe a reassignment plan.
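A small worked example of the conversion, assuming assignment maps (topic, partition) tuples to replica lists and using the key names reconstructed above:

assignment = {('logs', 0): [1, 2], ('logs', 1): [2, 3]}
plan = assignment_to_plan(assignment)
# plan == {
#     'version': 1,
#     'partitions': [
#         {'topic': 'logs', 'partition': 0, 'replicas': [1, 2]},
#         {'topic': 'logs', 'partition': 1, 'replicas': [2, 3]},
#     ],
# }
# (the order of the partition dicts depends on dict iteration order)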
def add_update_date(self, item):
    updated = unixtime_to_datetime(item['updated_on'])
    timestamp = unixtime_to_datetime(item['timestamp'])
    item['updated_on'] = updated.isoformat()
    item['timestamp'] = timestamp.isoformat()
All item['updated_on'] values from perceval are epoch timestamps; convert them to ISO format.
def execute_command_with_path_in_process(command, path, shell=False, cwd=None, logger=None):
    if logger is None:
        logger = _logger
    logger.debug("Opening path with command: {0} {1}".format(command, path))
    args = shlex.split('{0} "{1}"'.format(command, path))
    try:
        subprocess.Popen(args, shell=shell, cwd=cwd)
        return True
    except OSError as e:
        logger.error('Could not execute command with path in process: {0}'.format(e))
        return False
Executes a specific command in a separate process with a path as argument. :param command: the command to be executed :param path: the path passed as first argument to the command :param bool shell: Whether to use a shell :param str cwd: The working directory of the command :param logger: optional logger instance which can be handed from another module :return: True if the process was launched successfully, False otherwise
def command_line(self, input_path=None):
    if self.min_orf_length:
        orfm_arg_l = " -m %d" % self.min_orf_length
    else:
        orfm_arg_l = ""
    if self.restrict_read_length:
        orfm_arg_l += " -l %d" % self.restrict_read_length
    cmd = 'orfm %s ' % orfm_arg_l
    if input_path:
        cmd += input_path
    logging.debug("OrfM command chunk: %s" % cmd)
    return cmd
Return a string to run OrfM with, assuming sequences are incoming on stdin and printed to stdout Parameters ---------- input_path: str path to the input file, or None if STDIN is the input
def dbg_repr(self):
    output = []
    for obj in self.project.loader.all_objects:
        for section in obj.sections:
            if section.memsize == 0:
                continue
            min_addr, max_addr = section.min_addr, section.max_addr
            output.append("### Object %s" % repr(section))
            output.append("### Range %#x-%#x" % (min_addr, max_addr))
            pos = min_addr
            while pos < max_addr:
                try:
                    addr, thing = self.floor_item(pos)
                    output.append("%#x: %s" % (addr, repr(thing)))
                    if thing.size == 0:
                        pos += 1
                    else:
                        pos += thing.size
                except KeyError:
                    pos += 1
            output.append("")
    return "\n".join(output)
The debugging representation of this CFBlanket. :return: The debugging representation of this CFBlanket. :rtype: str
def manage_options(self):
    self.parser = self.create_parser()
    self.options, self.args = self.parser.parse_args(self.argv)
    self.do_imports()
    if self.options.callback and not callable(self.options.callback):
        self.parser.error('The callback is not callable')
    self.logger_level = None
    if self.options.logger_level:
        if self.options.logger_level.isdigit():
            self.options.logger_level = int(self.options.logger_level)
        else:
            try:
                self.options.logger_level = getattr(logging, self.options.logger_level.upper())
            except AttributeError:
                self.parser.error('Invalid logger-level: %s' % self.options.logger_level)
    if self.options.max_loops is not None and self.options.max_loops < 0:
        self.parser.error('The max-loops argument must be a non-negative integer, got %s' % self.options.max_loops)
    if self.options.max_duration is not None and self.options.max_duration < 0:
        self.parser.error('The max-duration argument must be a non-negative integer, got %s' % self.options.max_duration)
    if self.options.timeout is not None and self.options.timeout < 0:
        self.parser.error('The timeout argument must be a non-negative integer, got %s' % self.options.timeout)
    if self.options.fetch_priorities_delay is not None and self.options.fetch_priorities_delay <= 0:
        self.parser.error('The fetch-priorities-delay argument must be a positive number, got %s' % self.options.fetch_priorities_delay)
    if self.options.fetch_delayed_delay is not None and self.options.fetch_delayed_delay <= 0:
        self.parser.error('The fetch-delayed-delay argument must be a positive number, got %s' % self.options.fetch_delayed_delay)
    if self.options.requeue_times is not None and self.options.requeue_times < 0:
        self.parser.error('The requeue-times argument must be a non-negative integer, got %s' % self.options.requeue_times)
    if self.options.requeue_delay_delta is not None and self.options.requeue_delay_delta < 0:
        self.parser.error('The requeue-delay-delta argument must be a non-negative integer, got %s' % self.options.requeue_delay_delta)
    self.database_config = None
    if self.options.database:
        host, port, db = self.options.database.split(':')
        self.database_config = dict(host=host, port=int(port), db=int(db))
    self.update_title = self.options.update_title
Create a parser via create_parser, parse the command-line arguments with it, and validate the resulting options, calling parser.error (which exits the program) on any invalid value.
async def ltrim(self, name, start, end):
    return await self.execute_command('LTRIM', name, start, end)
Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation
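A brief usage sketch, assuming an initialised async Redis client exposing the coroutine above:

async def keep_last_100(client):
    # Keep only the 100 most recently pushed entries of the list.
    await client.ltrim('recent-events', -100, -1)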
def find_stringy_lines(tree: ast.AST, first_line_no: int) -> Set[int]:
    str_footprints = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Str):
            str_footprints.update(build_footprint(node, first_line_no))
    return str_footprints
Finds all lines that contain a string in a tree, usually a function. These lines will be ignored when searching for blank lines.
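A hedged usage sketch showing how the helper above could be fed a parsed function; build_footprint is assumed to be defined alongside it in the same module:

import ast

source = '''
def greet():
    """Docstring spanning
    two lines."""
    return "hi"
'''
tree = ast.parse(source)
func = tree.body[0]
# Line numbers occupied by string literals, relative to the function's first line.
stringy = find_stringy_lines(func, func.lineno)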
def stream(self): if self._stream is None: self._stream = tempfile.NamedTemporaryFile(delete=False) try: self._stream.write(self.client.open(self.filename, view=).read()) except: pass return self._stream
The stream to write the log content to. @return:
def index(self, weighted=True, prune=False):
    warnings.warn(
        "CrunchCube.index() is deprecated. Use CubeSlice.index_table().",
        DeprecationWarning,
    )
    return Index.data(self, weighted, prune)
Return cube index measurement. This function is deprecated. Use index_table from CubeSlice.
def next_line(self):
    self.line = next(self.lines)
    self.values = self.line.split()
Read the next line from the line generator and split it
def get_responsibles_data(self, reports): if not reports: return [] recipients = [] recipient_names = [] for num, report in enumerate(reports): ar = report.getAnalysisRequest() report_recipient_names = [] responsibles = ar.getResponsible() for manager_id in responsibles.get("ids", []): responsible = responsibles["dict"][manager_id] name = responsible.get("name") email = responsible.get("email") record = { "name": name, "email": email, "valid": True, } if record not in recipients: recipients.append(record) report_recipient_names.append(name) recipient_names.append(report_recipient_names) common_names = set(recipient_names[0]).intersection(*recipient_names) for recipient in recipients: if recipient.get("name") not in common_names: recipient["valid"] = False return recipients
Responsibles data to be used in the template
def get_field_from_args_or_session(config, args, field_name):
    rez = getattr(args, field_name, None)
    if rez is not None:
        return rez
    rez = config.get_session_field("default_%s" % field_name, exception_if_not_found=False)
    if rez:
        return rez
    raise Exception(
        "Failed to get default_%s from config; specify %s via the --%s parameter"
        % (field_name, field_name, field_name.replace("_", "-")))
We try to get field_name from different sources, in the following order of priority: - command line argument (--<field_name>) - current session configuration (default_<field_name>)
def ipv6_generate_random(total=100):
    count = 0
    yielded = set()
    while count < total:
        address = str(IPv6Address(random.randint(0, 2**128 - 1)))
        if not ipv6_is_defined(address)[0] and address not in yielded:
            count += 1
            yielded.add(address)
            yield address
The generator to produce random, unique IPv6 addresses that are not defined (can be looked up using ipwhois). Args: total (:obj:`int`): The total number of IPv6 addresses to generate. Yields: str: The next IPv6 address.
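A short usage sketch; the generator above is lazy, so iterate over it directly or wrap it in list():

# Draw five random IPv6 addresses that fall outside any reserved/defined block.
for address in ipv6_generate_random(total=5):
    print(address)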
def get_comment_book_assignment_session(self):
    if not self.supports_comment_book_assignment():
        raise errors.Unimplemented()
    return sessions.CommentBookAssignmentSession(runtime=self._runtime)
Gets the session for assigning comment to book mappings. return: (osid.commenting.CommentBookAssignmentSession) - a ``CommentBookAssignmentSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book_assignment()`` is ``true``.*
def font(self, container):
    typeface = self.get_style('typeface', container)
    weight = self.get_style('font_weight', container)
    slant = self.get_style('font_slant', container)
    width = self.get_style('font_width', container)
    return typeface.get_font(weight=weight, slant=slant, width=width)
The :class:`Font` described by this single-styled text's style. If the exact font style as described by the `font_weight`, `font_slant` and `font_width` style attributes is not present in the `typeface`, the closest font available is returned instead, and a warning is printed.
def clan_badge_url(self): if self.clan_tag is None: return None url = self.raw_data.get().get().get() if not url: return None return "http://api.cr-api.com" + url
Returns clan badge url
def str_to_v1_str(xml_str):
    if str_is_v1(xml_str):
        return xml_str
    etree_obj = str_to_etree(xml_str)
    strip_v2_elements(etree_obj)
    etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v1.Namespace)
    return etree_to_str(etree_obj)
Convert a API v2 XML doc to v1 XML doc. Removes elements that are only valid for v2 and changes namespace to v1. If doc is already v1, it is returned unchanged. Args: xml_str : str API v2 XML doc. E.g.: ``SystemMetadata v2``. Returns: str : API v1 XML doc. E.g.: ``SystemMetadata v1``.
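A hedged usage sketch, assuming a v2 SystemMetadata document is already available as a string and that the module's helper functions above are importable:

with open('sysmeta_v2.xml') as f:
    v2_xml = f.read()

# Strip v2-only elements and rewrite the namespace to the v1 schema.
v1_xml = str_to_v1_str(v2_xml)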
def close(self, **kw):
    if self._closing_deferred:
        d = defer.Deferred()

        def closed(arg):
            d.callback(arg)
            return arg
        self._closing_deferred.addBoth(closed)
        return d

    self._closing_deferred = defer.Deferred()

    def close_command_is_queued(*args):
        return self._closing_deferred
    d = self._torstate.close_circuit(self.id, **kw)
    d.addCallback(close_command_is_queued)
    return d
This asks Tor to close the underlying circuit object. See :meth:`txtorcon.torstate.TorState.close_circuit` for details. You may pass keyword arguments to take care of any Flags Tor accepts for the CLOSECIRCUIT command. Currently, this is only "IfUnused". So for example: circ.close(IfUnused=True) :return: Deferred which callbacks with this Circuit instance ONLY after Tor has confirmed it is gone (not simply that the CLOSECIRCUIT command has been queued). This could be a while if you included IfUnused.
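A small usage sketch in Twisted style, assuming circ is a txtorcon Circuit as documented above:

from twisted.internet import defer

@defer.inlineCallbacks
def retire_circuit(circ):
    # Ask Tor to close the circuit only once it is no longer in use,
    # and wait until Tor confirms it is actually gone.
    yield circ.close(IfUnused=True)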
def are_expparam_dtypes_consistent(self, expparams):
    if self.is_n_outcomes_constant:
        return True
    if expparams.size > 0:
        domains = self.domain(expparams)
        first_dtype = domains[0].dtype
        return all(domain.dtype == first_dtype for domain in domains[1:])
    else:
        return True
Returns ``True`` iff all of the given expparams correspond to outcome domains with the same dtype. For efficiency, concrete subclasses should override this method if the result is always ``True``. :param np.ndarray expparams: Array of expparamms of type ``expparams_dtype`` :rtype: ``bool``
def current(self): event = request.headers.get() data = dict([elem.split() for elem in event.split()]) return Current(**data)
A namedtuple contains `uuid`, `project`, `action`. Example:: @app.route('/webhook/broadcast-news') def broadcast_news(): if rio.current.action.startswith('news-'): broadcast(request.get_json())
def handle_error(program_name, cmd, log=None):
    print('\n', '%s returned an error.' % program_name, '\n')
    print('Command used: %s %s' % (program_name, cmd))
    if log is not None:
        print('Check the log file: %s' % log)
    print()
    print('q - quit the script\n'
          'd - delete all output files and quit\n'
          '<Enter> - re-run %s (probably after you\'ve fixed any problems with the input files)\n'
          'c - continue on with the script (probably after you\'ve fixed any problems manually)'
          % program_name)
    print()
    print()
    while True:
        choice = input('Your choice: ')
        if choice not in ('q', 'd', 'c', ''):
            choice = ''
        break
    if choice == 'q':
        print('Quitting.')
        sys.exit(1)
    elif choice == 'd':
        print('Deleting output files and quitting.')
        util.delete_all()
        if log is not None:
            os.remove(log)
        sys.exit(1)
    elif choice == 'c':
        print('Continuing with the script.')
        break_now = True
    elif choice == '':
        print('Re-running %s' % program_name)
        break_now = False
    return break_now
Subprocess program error handling Args: program_name (str): name of the subprocess program cmd (str): the command that was run log (str, optional): path to the program's log file Returns: break_now (bool): indicate whether the calling program should break out of its loop
def send_sync(self, body, exchange, key):
    callback_queue = self.declare_queue(exclusive=True, auto_delete=True)
    self._channel.basic_consume(self.on_response, no_ack=True, queue=callback_queue)
    corr_id = str(uuid.uuid4())
    self.data[corr_id] = {
        'is_response_received': False,
        'response': None,
        'callback_queue': callback_queue,
    }
    self._channel.basic_publish(
        exchange=exchange,
        routing_key=key,
        body=body,
        properties=pika.BasicProperties(
            reply_to=callback_queue,
            correlation_id=corr_id,
        )
    )
    while not self.data[corr_id]['is_response_received']:
        self._connection.process_data_events()
        time.sleep(0.3)
        continue
    logger.info("Got the RPC server response => {}".format(self.data[corr_id]['response']))
    return self.data[corr_id]['response']
Send a message and synchronously receive the reply. :return:
def _add_workflow(mcs, field_name, state_field, attrs): attrs[field_name] = StateProperty(state_field.workflow, field_name)
Attach a workflow to the attribute list (create a StateProperty).
def find_transfer_consensus_hash(name_rec, block_id, vtxindex, nameop_consensus_hash):
    for historic_block_number in reversed(sorted(name_rec['history'].keys())):
        for historic_state in reversed(name_rec['history'][historic_block_number]):
            if historic_state['block_number'] > block_id or (historic_state['block_number'] == block_id and historic_state['vtxindex'] > vtxindex):
                # from the future; skip
                continue
            if historic_state['op'] in [NAME_REGISTRATION, NAME_IMPORT]:
                # no prior consensus-hash-setting operation; use the NAME_TRANSFER's
                return nameop_consensus_hash
            if historic_state['op'] == NAME_UPDATE:
                # the last operation that set a consensus hash
                assert historic_state['consensus_hash'] is not None, 'BUG: NAME_UPDATE with no consensus hash: {}'.format(historic_state)
                return historic_state['consensus_hash']
    return nameop_consensus_hash
Given a name record, find the last consensus hash set by a non-NAME_TRANSFER operation. @name_rec is the current name record, before this NAME_TRANSFER. @block_id is the current block height. @vtxindex is the relative index of this transaction in this block. @nameop_consensus_hash is the consensus hash given in the NAME_TRANSFER. This preserves compatibility from a bug prior to 0.14.x where the consensus hash from a NAME_TRANSFER is ignored in favor of the last consensus hash (if any) supplied by an operation to the affected name. This method finds that consensus hash (if present). The behavior emulated comes from the fact that in the original release of this software, the fields from a name operation fed into the block's consensus hash included the consensus hashes given in each of the a name operations' transactions. However, a quirk in the behavior of the NAME_TRANSFER-handling code prevented this from happening consistently for NAME_TRANSFERs. Specifically, the only time a NAME_TRANSFER's consensus hash was used to calculate the block's new consensus hash was if the name it affected had never been affected by a prior state transition other than a NAME_TRANSFER. If the name was affected by a prior state transition that set a consensus hash, then that prior state transition's consensus hash (not the NAME_TRANSFER's) would be used in the block consensus hash calculation. If the name was NOT affected by a prior state transition that set a consensus hash (back to the point of its last NAME_REGISTRATION), then the consensus hash fed into the block would be that from the NAME_TRANSFER itself. In practice, the only name operation that consistently sets a consensus hash is NAME_UPDATE. As for the others: * NAME_REGISTRATION sets it to None * NAME_IMPORT sets it to None * NAME_RENEWAL doesn't set it at all; it just takes what was already there * NAME_TRANSFER only sets it if there were no prior NAME_UPDATEs between now and the last NAME_REGISTRATION or NAME_IMPORT. Here are some example name histories, and the consensus hash that should be used to calculate this block's consensus hash: NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_RENEWAL, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_UPDATE, NAME_TRANSFER whatever it was from the last NAME_UPDATE NAME_IMPORT, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_TRANSFER, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
def ask(self, question, default=False):
    choices = '%s/%s' % ('Y' if default else 'y', 'n' if default else 'N')
    while True:
        response = raw_input('%s [%s] ' % (question, choices)).strip()
        if not response:
            return default
        elif response in 'yY':
            return True
        elif response in 'nN':
            return False
Ask a y/n question to the user.
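A minimal usage sketch for the prompt above (Python 2 style, since it relies on raw_input); write_config is a hypothetical action taken on confirmation:

if ask('Overwrite the existing configuration?', default=False):
    write_config()  # hypothetical: only runs if the user answered yes
else:
    print('Leaving configuration untouched.')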
def _new_population_genalg(population, fitnesses, mutation_chance=0.02, crossover_chance=0.7,
                           selection_function=gaoperators.tournament_selection,
                           crossover_function=gaoperators.one_point_crossover):
    intermediate_population = selection_function(population, fitnesses)
    new_population = _crossover(intermediate_population, crossover_chance, crossover_function)
    gaoperators.random_flip_mutate(new_population, mutation_chance)
    return new_population
Perform all genetic algorithm operations on a population, and return a new population. population must have an even number of chromosomes. Args: population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]] fitness: A list of fitnesses that correspond with chromosomes in the population, ex. [1.2, 10.8] mutation_chance: the chance that a bit will be flipped during mutation crossover_chance: the chance that two parents will be crossed during crossover selection_function: A function that will select parents for crossover and mutation crossover_function: A function that will cross two parents Returns: list; A new population of chromosomes, that should be more fit.
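A hedged sketch of one generation step, assuming the gaoperators module referenced above provides the default selection and crossover functions; note the population size is even, as the docstring requires:

# Toy population of four 4-bit chromosomes and their fitnesses.
population = [[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 0, 1]]
fitnesses = [1.2, 10.8, 3.5, 0.4]

# One generation: selection, crossover, then bit-flip mutation.
next_population = _new_population_genalg(population, fitnesses,
                                          mutation_chance=0.02,
                                          crossover_chance=0.7)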
def set_meta_rdf(self, rdf, fmt='n3'):
    evt = self._client._request_entity_meta_set(self.__lid, rdf, fmt=fmt)
    self._client._wait_and_except_if_failed(evt)
Set the metadata for this Thing in RDF fmt Advanced users who want to manipulate the RDF for this Thing directly without the [ThingMeta](ThingMeta.m.html#IoticAgent.IOT.ThingMeta.ThingMeta) helper object Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `fmt` (optional) (string) The format of RDF you have sent. Valid formats are: "xml", "n3", "turtle"
def path(self, startLayer, endLayer): next = {startLayer.name : startLayer} visited = {} while next != {}: for item in list(next.items()): visited[item[0]] = item[1] del next[item[0]] for connection in self.connections: if connection.fromLayer.name == item[0]: if connection.toLayer.name == endLayer.name: return 1 elif connection.toLayer.name in next: pass elif connection.toLayer.name in visited: pass else: next[connection.toLayer.name] = connection.toLayer return 0
Used in error checking with verifyArchitecture() and in prop_from().
def create_review(self, review, pub_name, ext_name): route_values = {} if pub_name is not None: route_values[] = self._serialize.url(, pub_name, ) if ext_name is not None: route_values[] = self._serialize.url(, ext_name, ) content = self._serialize.body(review, ) response = self._send(http_method=, location_id=, version=, route_values=route_values, content=content) return self._deserialize(, response)
CreateReview. [Preview API] Creates a new review for an extension :param :class:`<Review> <azure.devops.v5_0.gallery.models.Review>` review: Review to be created for the extension :param str pub_name: Name of the publisher who published the extension :param str ext_name: Name of the extension :rtype: :class:`<Review> <azure.devops.v5_0.gallery.models.Review>`
def parse_list(cls, data):
    results = ResultSet()
    data = data or []
    for obj in data:
        if obj:
            results.append(cls.parse(obj))
    return results
Parse a list of JSON objects into a result set of model instances.