Dataset card (extraction artifact) — two string columns:
code: string, lengths 26 – 79.6k characters
docstring: string, lengths 1 – 46.9k characters
def compare_files(path1, path2):
    """Return the delta between two files using difflib's '-', '?', '+' format.

    Lines that are identical in both files are excluded.

    Args:
        path1 (str): Path to first file.
        path2 (str): Path to second file.

    Returns:
        List[str]: Delta lines between the two files.
    """
    # NOTE(review): the stripped prefix literals were reconstructed from the
    # docstring ("-, ?, + format"); confirm against the original source.
    # readlines() materializes both files so the handles can be closed
    # before the (lazy) ndiff generator is consumed.
    with open(path1) as file1, open(path2) as file2:
        lines1 = file1.readlines()
        lines2 = file2.readlines()
    diff = difflib.ndiff(lines1, lines2)
    return [line for line in diff if line[0] in ('-', '?', '+')]
Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files
def _docf(self, tag, val): if tag == : return .format(val) elif tag == : if self.args.route_method: return % self.args.route_method.format( ns=self.cur_namespace.name, route=fmt_func(val)) else: return val elif tag == : anchor, link = val.rsplit(, 1) return .format(anchor, link) elif tag == : if val == : return elif val == or val == : return .format(val.capitalize()) else: return val elif tag == : return .format(val) else: raise RuntimeError( % tag)
Callback used as the handler argument to process_docs(). This converts Stone doc references to Sphinx-friendly annotations.
def map_exception_codes():
    """Helper function to initialise CODES_TO_EXCEPTIONS.

    Collects every member of ``exceptions`` that carries a ``code``
    attribute and maps that status code to the exception class.

    Returns:
        dict: mapping of status code -> exception class.
    """
    # NOTE(review): the stripped attribute literal was reconstructed as
    # 'code' from the `e.code` usage below — confirm against the original.
    werkex = inspect.getmembers(
        exceptions, lambda x: getattr(x, 'code', None))
    return {e.code: e for _, e in werkex}
Helper function to intialise CODES_TO_EXCEPTIONS.
def session(self, sid, namespace=None):
    """Return the user session for a client with context manager syntax.

    :param sid: The session id of the client.

    This is a context manager that returns the user session dictionary for
    the client. Any changes that are made to this dictionary inside the
    context manager block are saved back to the session. Example usage::

        @sio.on('connect')
        def on_connect(sid, environ):
            username = authenticate_user(environ)
            if not username:
                return False
            with sio.session(sid) as session:
                session['username'] = username

        @sio.on('message')
        def on_message(sid, msg):
            with sio.session(sid) as session:
                print('received message from ', session['username'])
    """
    class _SessionContextManager(object):
        # Fetches the session on entry and writes it back on exit so the
        # caller's mutations are persisted.
        def __init__(self, server, sid, namespace):
            self.server = server
            self.sid = sid
            self.namespace = namespace
            self.session = None

        def __enter__(self):
            self.session = self.server.get_session(
                self.sid, namespace=self.namespace)
            return self.session

        def __exit__(self, *args):
            self.server.save_session(
                self.sid, self.session, namespace=self.namespace)

    return _SessionContextManager(self, sid, namespace)
Return the user session for a client with context manager syntax. :param sid: The session id of the client. This is a context manager that returns the user session dictionary for the client. Any changes that are made to this dictionary inside the context manager block are saved back to the session. Example usage:: @sio.on('connect') def on_connect(sid, environ): username = authenticate_user(environ) if not username: return False with sio.session(sid) as session: session['username'] = username @sio.on('message') def on_message(sid, msg): with sio.session(sid) as session: print('received message from ', session['username'])
def reset_index(self):
    """Reset the index of the Series to a simple integer list and the
    index name to 'index'.

    :return: nothing
    """
    # NOTE(review): the stripped literal was reconstructed as 'index'
    # from the docstring — confirm against the original source.
    self.index = list(range(self.__len__()))
    self.index_name = 'index'
Resets the index of the Series to simple integer list and the index name to 'index'. :return: nothing
def get_output_nodes(G: nx.DiGraph) -> List[str]:
    """Get all output nodes from a network.

    An output node is a sink: a node with out-degree zero.
    """
    sinks = []
    for node, degree in G.out_degree():
        if degree == 0:
            sinks.append(node)
    return sinks
Get all output nodes from a network.
def get_most_recent_versions(self, group, artifact, limit, remote=False, integration=False): if limit < 1: raise ValueError("Releases limit must be positive") url = self._base_url + params = {: group, : artifact, : self._repo, : int(remote)} self._logger.debug("Using all version API at %s - params %s", url, params) response = self._session.get(url, params=params) response.raise_for_status() json = response.json() versions = [ item[] for item in json[] if item[] is integration] versions.sort(key=distutils.version.LooseVersion, reverse=True) return versions[:limit]
Get a list of the version numbers of the most recent artifacts (integration or non-integration), ordered by the version number, for a particular group and artifact combination. :param str group: Group of the artifact to get versions of :param str artifact: Name of the artifact to get versions of :param int limit: Fetch only this many of the most recent releases :param bool remote: Should remote repositories be searched to find the latest versions? Note this can make the request much slower. Default is false. :param bool integration: If true, fetch only "integration versions", otherwise fetch only non-integration versions. :return: Version numbers of the most recent artifacts :rtype: list :raises requests.exceptions.HTTPError: For any non-success HTTP responses from the Artifactory API. :raises ValueError: If limit is 0 or negative.
def logodds(args):
    """%prog logodds cnt1 cnt2

    Compute log likelihood between two db.
    """
    from math import log

    from jcvi.formats.base import DictFile
    # The docstring above doubles as the usage string for OptionParser.
    p = OptionParser(logodds.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    cnt1, cnt2 = args
    d = DictFile(cnt2)
    # Context manager fixes the file-handle leak in the original.
    with open(cnt1) as fp:
        for row in fp:
            scf, c1 = row.split()
            # NOTE(review): assumes every id in cnt1 is present in cnt2
            # (KeyError otherwise) — confirm this invariant.
            c2 = d[scf]
            c1, c2 = float(c1), float(c2)
            # Add-one smoothing so zero counts don't blow up log().
            c1 += 1
            c2 += 1
            score = int(100 * (log(c1) - log(c2)))
            print("{0}\t{1}".format(scf, score))
%prog logodds cnt1 cnt2 Compute log likelihood between two db.
def create_rflink_connection(port=None, host=None, baud=57600,
                             protocol=RflinkProtocol,
                             packet_callback=None, event_callback=None,
                             disconnect_callback=None, ignore=None,
                             loop=None):
    """Create Rflink manager class, returns transport coroutine."""
    # Resolve the event loop once up front. The original only substituted
    # the default loop inside the protocol factory and then dereferenced a
    # possibly-None `loop` on the TCP path (loop.create_connection).
    if loop is None:
        loop = asyncio.get_event_loop()

    protocol_factory = partial(
        protocol,
        loop=loop,
        packet_callback=packet_callback,
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        ignore=ignore if ignore else [],
    )

    # TCP connection when a host is given, otherwise a serial connection.
    if host:
        conn = loop.create_connection(protocol_factory, host, port)
    else:
        conn = create_serial_connection(loop, protocol_factory, port, baud)
    return conn
Create Rflink manager class, returns transport coroutine.
def model_builds(self):
    """Access the model_builds

    :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
    """
    # NOTE(review): the subscript literal was stripped in this copy; the
    # upstream twilio helper reads self._solution['sid'] here — restore it.
    # Lazily construct and memoize the ModelBuildList on first access.
    if self._model_builds is None:
        self._model_builds = ModelBuildList(self._version, assistant_sid=self._solution[], )
    return self._model_builds
Access the model_builds :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
def _get_dep_to_dot_name_mapping(dependencies): dot_name_to_deps = {} for dep in dependencies: dot_name = dep.name if dot_name not in dot_name_to_deps: dot_name_to_deps[dot_name] = [dep] else: dot_name_to_deps[dot_name].append(dep) dep_to_dot_name = {} for dot_name, deps in dot_name_to_deps.items(): if len(deps) == 1: dep_to_dot_name[deps[0]] = dot_name continue for idx, dep in enumerate(deps): dep_to_dot_name[dep] = dot_name + str(idx) return dep_to_dot_name
Creates mapping between Dependency classes and names used in DOT graph
def nth(iterable, n, default=None):
    """Returns the nth item or a default value.

    Example::

        >>> nth([0, 1, 2], 1)
        1
        >>> nth([0, 1, 2], 100)
        None

    Equivalent to ``list(iterable)[n]`` but consumes the iterable lazily,
    so almost no memory is used; handy for iterables whose indexing has
    been overridden or that cannot be materialized.
    """
    skipped = itertools.islice(iterable, n, None)
    return next(skipped, default)
Returns the nth item or a default value. Example:: >>> nth([0, 1, 2], 1) 1 >>> nth([0, 1, 2], 100) None **中文文档** 取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。 因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于 那些取index操作被改写了的可循环对象。
def suggest_pairs(top_n=10, per_n=3, ignore_before=300):
    """Find the maximally interesting pairs of players to match up.

    Sort the ratings by uncertainty and take the `top_n` most uncertain
    models; for each such `p1`, sort all models by rating distance from p1,
    take the nearest candidates, and choose `per_n` opponents at random.
    `ignore_before` filters off the earliest models.

    Returns a list of *model numbers*, not model ids.
    """
    db = sqlite3.connect("ratings.db")
    data = db.execute("select model_winner, model_loser from wins").fetchall()
    # Restrict win/loss records to models in the current bucket.
    bucket_ids = [id[0] for id in db.execute(
        "select id from models where bucket = ?", (fsdb.models_dir(),)).fetchall()]
    bucket_ids.sort()
    data = [d for d in data if d[0] in bucket_ids and d[1] in bucket_ids]
    # (model_number, rating, uncertainty) triples.
    ratings = [(model_num_for(k), v[0], v[1]) for k, v in compute_ratings(data).items()]
    ratings.sort()
    # Drop the early models, then order by uncertainty, highest first.
    ratings = ratings[ignore_before:]
    ratings.sort(key=lambda r: r[2], reverse=True)
    res = []
    for p1 in ratings[:top_n]:
        # Nearest-rated candidates, excluding p1 itself at index 0.
        # NOTE(review): slice [1:20] yields 19 candidates though the
        # original docstring says 20 — possible off-by-one; confirm.
        candidate_p2s = sorted(ratings, key=lambda p2_tup: abs(p1[1] - p2_tup[1]))[1:20]
        choices = random.sample(candidate_p2s, per_n)
        print("Pairing {}, sigma {:.2f} (Rating {:.2f})".format(p1[0], p1[2], p1[1]))
        for p2 in choices:
            res.append([p1[0], p2[0]])
            print(" {}, ratings delta {:.2f}".format(p2[0], abs(p1[1] - p2[1])))
    return res
Find the maximally interesting pairs of players to match up First, sort the ratings by uncertainty. Then, take the ten highest players with the highest uncertainty For each of them, call them `p1` Sort all the models by their distance from p1's rating and take the 20 nearest rated models. ('candidate_p2s') Choose pairings, (p1, p2), randomly from this list. `top_n` will pair the top n models by uncertainty. `per_n` will give each of the top_n models this many opponents `ignore_before` is the model number to `filter` off, i.e., the early models. Returns a list of *model numbers*, not model ids.
def load(self, yr=None, doy=None, date=None, fname=None, fid=None, verifyPad=False): if date is not None: self._set_load_parameters(date=date, fid=None) inc = pds.DateOffset(days=1) curr = date elif (yr is not None) & (doy is not None): date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1)) self._set_load_parameters(date=date, fid=None) inc = pds.DateOffset(days=1) curr = self.date elif fname is not None: self._set_load_parameters(date=None, fid=self.files.get_index(fname)) inc = 1 curr = self._fid.copy() elif fid is not None: self._set_load_parameters(date=None, fid=fid) inc = 1 curr = fid else: estr = estr = .format(estr) raise TypeError(estr) self.orbits._reset() loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0) if (self.pad is not None) | self.multi_file_day: if self._next_data.empty & self._prev_data.empty: print() self._prev_data, self._prev_meta = self._load_prev() self._curr_data, self._curr_meta = \ self._load_data(date=self.date, fid=self._fid) self._next_data, self._next_meta = self._load_next() else: if self._next_data_track == curr: del self._prev_data self._prev_data = self._curr_data self._prev_meta = self._curr_meta self._curr_data = self._next_data self._curr_meta = self._next_meta self._next_data, self._next_meta = self._load_next() elif self._prev_data_track == curr: del self._next_data self._next_data = self._curr_data self._next_meta = self._curr_meta self._curr_data = self._prev_data self._curr_meta = self._prev_meta self._prev_data, self._prev_meta = self._load_prev() else: del self._prev_data del self._curr_data del self._next_data self._prev_data, self._prev_meta = self._load_prev() self._curr_data, self._curr_meta = \ self._load_data(date=self.date, fid=self._fid) self._next_data, self._next_meta = self._load_next() if not self._prev_data.index.is_monotonic_increasing: self._prev_data.sort_index(inplace=True) if not self._curr_data.index.is_monotonic_increasing: self._curr_data.sort_index(inplace=True) if not 
self._next_data.index.is_monotonic_increasing: self._next_data.sort_index(inplace=True) self._next_data_track = curr + inc self._prev_data_track = curr - inc if not self._curr_data.empty: self.data = self._curr_data.copy() self.meta = self._curr_meta.copy() else: self.data = DataFrame(None) if self._load_by_date: first_time = self.date first_pad = self.date - loop_pad last_time = self.date + pds.DateOffset(days=1) last_pad = self.date + pds.DateOffset(days=1) + loop_pad want_last_pad = False "multi_file_day and load by file.") if (not self._prev_data.empty) & (not self.data.empty): padLeft = self._prev_data.loc[first_pad : self.data.index[0]] if len(padLeft) > 0: if (padLeft.index[-1] == self.data.index[0]) : padLeft = padLeft.iloc[:-1, :] self.data = pds.concat([padLeft, self.data]) if (not self._next_data.empty) & (not self.data.empty): padRight = self._next_data.loc[self.data.index[-1] : last_pad] if len(padRight) > 0: if (padRight.index[0] == self.data.index[-1]) : padRight = padRight.iloc[1:, :] self.data = pds.concat([self.data, padRight]) self.data = self.data.ix[first_pad : last_pad] if not self.empty: if (self.data.index[-1] == last_pad) & (not want_last_pad): self.data = self.data.iloc[:-1, :] else: self.data, meta = self._load_data(date=self.date, fid=self._fid) if not self.data.empty: self.meta = meta if self.meta.data.empty: self.meta[self.data.columns] = {self.name_label: self.data.columns, self.units_label: [] * len(self.data.columns)} if not self._load_by_date: if self.pad is not None: temp = first_time else: temp = self.data.index[0] self.date = pds.datetime(temp.year, temp.month, temp.day) self.yr, self.doy = utils.getyrdoy(self.date) if not self.data.empty: self._default_rtn(self) if (not self.data.empty) & (self.clean_level != ): self._clean_rtn(self) if not self.data.empty: self.custom._apply_all(self) if (self.pad is not None) & (not self.data.empty) & (not verifyPad): self.data = self.data[first_time: last_time] if not self.empty: if 
(self.data.index[-1] == last_time) & (not want_last_pad): self.data = self.data.iloc[:-1, :] self.meta.transfer_attributes_to_instrument(self) sys.stdout.flush() return
Load instrument data into Instrument object .data. Parameters ---------- yr : integer year for desired data doy : integer day of year date : datetime object date to load fname : 'string' filename to be loaded verifyPad : boolean if True, padding data not removed (debug purposes) Returns -------- Void. Data is added to self.data Note ---- Loads data for a chosen instrument into .data. Any functions chosen by the user and added to the custom processing queue (.custom.add) are automatically applied to the data before it is available to user in .data.
def get(self, event): self.log("Schemarequest for", event.data, "from", event.user, lvl=debug) if event.data in schemastore: response = { : , : , : l10n_schemastore[event.client.language][event.data] } self.fireEvent(send(event.client.uuid, response)) else: self.log("Unavailable schema requested!", lvl=warn)
Return a single schema
def mangle_coverage(local_path, log): with open(local_path, mode=) as handle: if handle.read(13) != b: log.debug(, local_path) return handle.seek(0) handle.write(file_contents)
Edit .coverage file substituting Windows file paths to Linux paths. :param str local_path: Destination path to save file to. :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
def encode_basestring(s):
    """Return a JSON representation of a Python string."""
    # NOTE(review): the stripped literals were reconstructed from the
    # classic (simple)json encoder ('utf-8' decode and surrounding double
    # quotes) — confirm against the original source. The .decode() branch
    # implies this is Python-2-era code operating on byte strings.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')

    def replace(match):
        return ESCAPE_DCT[match.group(0)]

    return u'"' + ESCAPE.sub(replace, s) + u'"'
Return a JSON representation of a Python string
def _bucket_time(self, event_time):
    """The seconds since epoch that represent a computed bucket.

    An event bucket is the time of the earliest possible event for that
    `bucket_width`. Example: if `bucket_width = timedelta(minutes=10)`,
    bucket times will be the number of seconds since epoch at 12:00,
    12:10, ... on each day.
    """
    epoch_time = kronos_time_to_epoch_time(event_time)
    # Snap down to the start of the enclosing bucket.
    return epoch_time - epoch_time % self._bucket_width
The seconds since epoch that represent a computed bucket. An event bucket is the time of the earliest possible event for that `bucket_width`. Example: if `bucket_width = timedelta(minutes=10)`, bucket times will be the number of seconds since epoch at 12:00, 12:10, ... on each day.
def derive_iobject_type(self, embedding_ns, embedded_ns, elt_name): ns_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, self.namespace_dict.get(embedding_ns, "")) if not ns_info: ns_info = {} iobject_family_name = ns_info.get(,None) if not iobject_family_name: iobject_family_name = "" family_info = {} if ns_info.get(,None) in [, ]: family_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, self.namespace_dict.get(ns_info[], "")) if family_info: iobject_family_revision_name = family_info["revision"] else: iobject_family_revision_name = None else: iobject_family_revision_name = ns_info.get("revision",None) if not iobject_family_revision_name: iobject_family_revision_name = if embedded_ns: namespace_uri = self.namespace_dict.get(embedded_ns, "") type_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, namespace_uri) if not type_info: type_info = {} if type_info and type_info.get(,None) in [, , ]: iobject_type_name = elt_name iobject_type_namespace_uri = ns_info[] iobject_type_revision_name = ns_info[] else: iobject_type_namespace_uri = type_info.get(,"%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,embedded_ns)) iobject_type_name = type_info.get(,embedded_ns) iobject_type_revision_name = type_info.get(,) else: iobject_type_name = elt_name iobject_type_revision_name = iobject_family_revision_name iobject_type_namespace_uri = ns_info.get("iotype_ns", "%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,elt_name)) if not iobject_type_revision_name: iobject_type_revision_name = logger.debug("Results of datatype extraction for ns %s, embedded ns %s and element name %s" % ( embedding_ns, embedded_ns, elt_name)) logger.debug("Family Name: %s" % iobject_family_name) logger.debug("Family Revision %s" % iobject_family_revision_name) logger.debug("Type Name %s" % iobject_type_name) logger.debug("Type NS URI %s" % iobject_type_namespace_uri) logger.debug("Type Revision %s" % iobject_type_revision_name) return {: iobject_type_name, : 
iobject_type_revision_name, : iobject_type_namespace_uri, : iobject_family_name, : iobject_family_revision_name}
Derive type of information object stemming from an embedded element based on namespace information of embedding element, the embedded element itself, and the name of the element.
def _prm_write_shared_array(self, key, data, hdf5_group, full_name, flag, **kwargs): if flag == HDF5StorageService.ARRAY: self._prm_write_into_array(key, data, hdf5_group, full_name, **kwargs) elif flag in (HDF5StorageService.CARRAY, HDF5StorageService.EARRAY, HDF5StorageService.VLARRAY): self._prm_write_into_other_array(key, data, hdf5_group, full_name, flag=flag, **kwargs) else: raise RuntimeError( % (flag, key, full_name)) self._hdf5file.flush()
Creates and array that can be used with an HDF5 array object
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
    """Insert a new job into a specific queue. Wrapper around :func:`put_job`.

    :param tube_name: Tube name
    :type tube_name: str
    :param data: Job body (a str encoded as utf-8, or bytes that are
        already utf-8)
    :param pri: Priority for the job
    :type pri: int
    :param delay: Delay in seconds before the job should be placed on the
        ready queue
    :type delay: int
    :param ttr: Time to reserve (how long a worker may work on this job
        before we assume the worker is blocked and give the job to another
        worker)
    :type ttr: int

    .. seealso::

        :func:`put_job()` — put a job into whatever the current tube is
        :func:`using()` — insert a job using an external guard
    """
    # `using` temporarily switches the connection to the named tube.
    with self.using(tube_name) as inserter:
        job = inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
    return job
Insert a new job into a specific queue. Wrapper around :func:`put_job`. :param tube_name: Tube name :type tube_name: str :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int .. seealso:: :func:`put_job()` Put a job into whatever the current tube is :func:`using()` Insert a job using an external guard
def volume_to_distance_with_errors(vol, vol_err):
    """Return the distance and standard deviation upper and lower bounds.

    Parameters
    ----------
    vol: float
    vol_err: float

    Returns
    -------
    dist: float
    ehigh: float
    elow: float
    """
    # Invert V = (4/3) * pi * r**3 for the radius r.
    def _radius(volume):
        return (volume * 3.0 / 4.0 / numpy.pi) ** (1.0 / 3.0)

    dist = _radius(vol)
    ehigh = _radius(vol + vol_err) - dist
    # Clamp the lower volume at zero so the cube root stays real.
    delta = numpy.where(vol >= vol_err, vol - vol_err, 0)
    elow = dist - _radius(delta)
    return dist, ehigh, elow
Return the distance and standard deviation upper and lower bounds Parameters ---------- vol: float vol_err: float Returns ------- dist: float ehigh: float elow: float
def _tr_paren(line_info): "Translate lines escaped with: /" return % (line_info.pre, line_info.ifun, ", ".join(line_info.the_rest.split()))
Translate lines escaped with: /
def _CreateRoutePatternsFolder(self, parent, route, style_id=None, visible=True): pattern_id_to_trips = route.GetPatternIdTripDict() if not pattern_id_to_trips: return None pattern_trips = pattern_id_to_trips.values() pattern_trips.sort(lambda a, b: cmp(len(b), len(a))) folder = self._CreateFolder(parent, , visible) for n, trips in enumerate(pattern_trips): trip_ids = [trip.trip_id for trip in trips] name = % (n+1, len(trips)) description = % ( len(trips), .join(trip_ids)) placemark = self._CreatePlacemark(folder, name, style_id, visible, description) coordinates = [(stop.stop_lon, stop.stop_lat) for stop in trips[0].GetPattern()] self._CreateLineString(placemark, coordinates) return folder
Create a KML Folder containing placemarks for each pattern in the route. A pattern is a sequence of stops used by one of the trips in the route. If there are not patterns for the route then no folder is created and None is returned. Args: parent: The parent ElementTree.Element instance. route: The transitfeed.Route instance. style_id: The id of a style to use if not None. visible: Whether the folder is initially visible or not. Returns: The Folder ElementTree.Element instance or None if there are no patterns.
def get_metaData(self, dcmlist, series_number): ifile = 0 if len(dcmlist) == 0: return {} logger.debug("Filename: " + dcmlist[ifile]) data1 = self._read_file(dcmlist[ifile]) try: voxeldepth = self._get_slice_location_difference(dcmlist, ifile) voxeldepth_end = self._get_slice_location_difference(dcmlist, -2) if voxeldepth != voxeldepth_end: logger.warning("Depth of slices is not the same in beginning and end of the sequence") voxeldepth_1 = self._get_slice_location_difference(dcmlist, 1) voxeldepth = np.median([voxeldepth, voxeldepth_end, voxeldepth_1]) except Exception: logger.warning() logger.debug(traceback.format_exc()) try: voxeldepth = float(data1.SliceThickness) except Exception: logger.warning( + traceback.format_exc()) voxeldepth = 0 try: pixelsize_mm = data1.PixelSpacing except: logger.warning() pixelsize_mm = [1, 1] voxelsize_mm = [ voxeldepth, float(pixelsize_mm[0]), float(pixelsize_mm[1]), ] metadata = {: voxelsize_mm, : data1.Modality, : series_number } try: metadata[] = data1.SeriesDescription except: logger.info( + str(data1.SeriesNumber)) try: metadata[] = data1.ImageComments except: logger.info( ) metadata = attr_to_dict(data1, "AcquisitionDate", metadata) metadata = attr_to_dict(data1, "StudyDate", metadata) metadata = attr_to_dict(data1, "StudyID", metadata) metadata = attr_to_dict(data1, "StudyDescription", metadata) metadata = attr_to_dict(data1, "RequestedProcedureDescription", metadata) metadata = attr_to_dict(data1, "PatientSex", metadata) metadata = attr_to_dict(data1, "PatientAge", metadata) metadata = attr_to_dict(data1, "PatientID", metadata) metadata = attr_to_dict(data1, "PatientName", metadata) metadata[] = dcmlist return metadata
Get metadata. Voxel size is obtained from PixelSpacing and difference of SliceLocation of two neighboorhoding slices (first have index ifile). Files in are used.
def horizon_main_nav(context): if not in context: return {} current_dashboard = context[].horizon.get(, None) dashboards = [] for dash in Horizon.get_dashboards(): if dash.can_access(context): if callable(dash.nav) and dash.nav(context): dashboards.append(dash) elif dash.nav: dashboards.append(dash) return {: dashboards, : context[].user, : current_dashboard, : context[]}
Generates top-level dashboard navigation entries.
def mailto_to_envelope(mailto_str):
    """Interpret mailto-string into a :class:`alot.db.envelope.Envelope`."""
    # Function-local import — presumably to avoid a circular import with
    # alot.db.envelope; confirm before hoisting to module level.
    from alot.db.envelope import Envelope
    headers, body = parse_mailto(mailto_str)
    return Envelope(bodytext=body, headers=headers)
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
def quantile_gaussianize(x):
    """Normalize a sequence of values via rank and Normal c.d.f.

    Args:
        x (array_like): sequence of values.

    Returns:
        Gaussian-normalized values.

    Example:

    .. doctest::

        >>> from scipy_sugar.stats import quantile_gaussianize
        >>> print(quantile_gaussianize([-1, 0, 2]))
        [-0.67448975  0.          0.67448975]
    """
    from scipy.stats import norm, rankdata

    values = asarray(x, float).copy()
    finite = isfinite(values)
    # Negate so that ranks run high-to-low; norm.isf then maps the
    # highest input to the highest Gaussian quantile.
    values[finite] *= -1

    result = empty_like(values)
    result[finite] = norm.isf(rankdata(values[finite]) / (sum(finite) + 1))
    # Non-finite entries (nan/inf) pass through unchanged.
    result[~finite] = values[~finite]
    return result
Normalize a sequence of values via rank and Normal c.d.f. Args: x (array_like): sequence of values. Returns: Gaussian-normalized values. Example: .. doctest:: >>> from scipy_sugar.stats import quantile_gaussianize >>> print(quantile_gaussianize([-1, 0, 2])) [-0.67448975 0. 0.67448975]
def handle(self, *args, **options): notifications = self.retrieve_old_notifications() count = len(notifications) if count > 0: self.output( % count) notifications.delete() self.output( % count) else: self.output()
Purge notifications
def _get_token_possibilities(cls, token, mode):
    """Returns all possible component types of a token without regard to its
    context. For example "26" could be year, date or minute, but can't be a
    month or an hour.

    :param token: the token to classify
    :param mode: the parse mode
    :return: the dict of possible types and values if token was of that type
    """
    token = token.lower().strip()
    possibilities = {}
    try:
        as_int = int(token)
        # Numeric token: collect every date/time component it could be.
        if mode != Mode.TIME:
            # Only 2- or 4-digit values are plausible years.
            if 1 <= as_int <= 9999 and (len(token) == 2 or len(token) == 4):
                possibilities[Component.YEAR] = as_int
            if 1 <= as_int <= 12:
                possibilities[Component.MONTH] = as_int
            if 1 <= as_int <= 31:
                possibilities[Component.DAY] = as_int
        if mode != Mode.DATE:
            if 0 <= as_int <= 23:
                possibilities[Component.HOUR] = as_int
            if 0 <= as_int <= 59:
                possibilities[Component.MINUTE] = as_int
            if 0 <= as_int <= 59:
                possibilities[Component.SECOND] = as_int
            # 3/6/9-digit tokens are fractional seconds, normalized to
            # nanoseconds (milli / micro / nano respectively).
            if len(token) == 3 or len(token) == 6 or len(token) == 9:
                nano = 0
                if len(token) == 3:
                    nano = as_int * 1000000
                elif len(token) == 6:
                    nano = as_int * 1000
                elif len(token) == 9:
                    nano = as_int
                possibilities[Component.NANO] = nano
            # A 4-digit token may also be a combined HHMM value.
            if len(token) == 4:
                hour = as_int // 100
                minute = as_int - (hour * 100)
                # NOTE(review): minute 0 is excluded and hour 24 allowed
                # here — looks deliberate for this parser, but confirm.
                if 1 <= hour <= 24 and 1 <= minute <= 59:
                    possibilities[Component.HOUR_AND_MINUTE] = as_int
    except ValueError:
        # Non-numeric token: month aliases, am/pm markers, Zulu offset.
        if mode != Mode.TIME:
            month = MONTHS_BY_ALIAS.get(token, None)
            if month is not None:
                possibilities[Component.MONTH] = month
        if mode != Mode.DATE:
            is_am_marker = token == "am"
            is_pm_marker = token == "pm"
            if is_am_marker or is_pm_marker:
                possibilities[Component.AM_PM] = cls.AM if is_am_marker else cls.PM
            # "z" denotes UTC (offset zero).
            if token == "z":
                possibilities[Component.OFFSET] = 0
    return possibilities
Returns all possible component types of a token without regard to its context. For example "26" could be year, date or minute, but can't be a month or an hour. :param token: the token to classify :param mode: the parse mode :return: the dict of possible types and values if token was of that type
def _read_config_list(): with codecs.open(, , encoding=) as f1: conf_list = [conf for conf in f1.read().split() if conf != ] return conf_list
配置列表读取
def parse_answers(html_question): def parse_answer_container(update_info): container_info = {} created = update_info[0] answered_at = created.abbr.attrs["title"] container_info[] = str(str_to_datetime(answered_at).timestamp()) container_info[] = AskbotParser.parse_user_info(created) try: update_info[1] except IndexError: pass else: updated = update_info[1] updated_at = updated.abbr.attrs["title"] container_info[] = str(str_to_datetime(updated_at).timestamp()) if AskbotParser.parse_user_info(updated): container_info[] = AskbotParser.parse_user_info(updated) return container_info answer_list = [] bs_question = bs4.BeautifulSoup(html_question, "html.parser") bs_answers = bs_question.select("div.answer") for bs_answer in bs_answers: answer_id = bs_answer.attrs["data-post-id"] votes_element = bs_answer.select("div.vote-number")[0].text accepted_answer = bs_answer.select("div.answer-img-accept")[0].get().endswith("correct") body = bs_answer.select("div.post-body") update_info = body[0].select("div.post-update-info") answer_container = parse_answer_container(update_info) body[0].div.extract().select("div.post-update-info-container") body = body[0].get_text(strip=True) answer = {: answer_id, : votes_element, : body, : accepted_answer } answer.update(answer_container) answer_list.append(answer) return answer_list
Parse the answers of a given HTML question. The method parses the answers related with a given HTML question, as well as all the comments related to the answer. :param html_question: raw HTML question element :returns: a list with the answers
def callback_read_char(self): u if self.keyboard_poll(): line = self.get_line_buffer() + u self.add_history(self.mode.l_buffer) self.callback(line)
u'''Reads a character and informs the readline callback interface when a line is received
def pull_raw(url, name, verify=False):
    """Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk
    image, and add it to /var/lib/machines as a new container.

    .. note:: **Requires systemd >= 219**

    url
        URL from which to download the container

    name
        Name for the new container

    verify : False
        Perform signature or checksum verification on the container
        (``verify=signature`` or ``verify=checksum``). By default no
        verification is performed; see the ``machinectl(1)`` man page.

    CLI Example:

    .. code-block:: bash

        salt myminion nspawn.pull_raw http://example.com/Fedora-Cloud-Base.raw.xz fedora21
    """
    # NOTE(review): the stripped first argument was reconstructed as 'raw'
    # since this wrapper implements `machinectl pull-raw` — confirm.
    return _pull_image('raw', url, name, verify=verify)
Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image, and add it to /var/lib/machines as a new container. .. note:: **Requires systemd >= 219** url URL from which to download the container name Name for the new container verify : False Perform signature or checksum verification on the container. See the ``machinectl(1)`` man page (section titled "Image Transfer Commands") for more information on requirements for image verification. To perform signature verification, use ``verify=signature``. For checksum verification, use ``verify=checksum``. By default, no verification will be performed. CLI Examples: .. code-block:: bash salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21
def countdown_timer(seconds=10): tick = 0.1 n_ticks = int(seconds / tick) widgets = [, progressbar.ETA(), , progressbar.Bar()] pbar = progressbar.ProgressBar( widgets=widgets, max_value=n_ticks ).start() for i in range(n_ticks): pbar.update(i) sleep(tick) pbar.finish()
Show a simple countdown progress bar Parameters ---------- seconds Period of time the progress bar takes to reach zero.
def variance(self):
    """Compute the variance of the Beta distribution implied by the
    observed success/failure counts plus the prior pseudo-counts.

        Var = a*b / ((a + b)**2 * (a + b + 1))

    Returns:
        variance.
    """
    alpha = self.__success + self.__default_alpha
    beta = self.__failure + self.__default_beta
    try:
        # The original multiplied by (alpha + beta + 1) instead of
        # dividing by it, which is not the Beta-distribution variance.
        variance = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1))
    except ZeroDivisionError:
        # All counts and priors zero: define the variance as 0.
        variance = 0.0
    return variance
Compute variance. Returns: variance.
def _check_field_value(field_value, pattern): if isinstance(field_value, list): return any(re.search(pattern, str(value), re.I) for value in field_value) else: return re.search(pattern, str(field_value), re.I)
Check a song metadata field value for a pattern.
def es_query_template(path):
    """
    RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE
    :param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
    :return: (es_query, es_filters) TUPLE
    """
    if not is_text(path):
        Log.error("expecting path to be a string")

    if path != ".":
        # f0 filters the parent documents, f1 filters inside the nested
        # path; both start empty and are filled in by the caller through
        # the mutable dicts returned in the second tuple element.
        f0 = {}
        f1 = {}
        output = wrap({
            "query": es_and([
                f0,
                {"nested": {
                    "path": path,
                    "query": f1,
                    "inner_hits": {"size": 100000}
                }}
            ]),
            "from": 0,
            "size": 0,
            "sort": []
        })
        return output, wrap([f0, f1])
    else:
        # Top-level (non-nested) query: only one filter slot is needed.
        f0 = {}
        output = wrap({
            "query": es_and([f0]),
            "from": 0,
            "size": 0,
            "sort": []
        })
        return output, wrap([f0])
RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE :param path: THE NESTED PATH (NOT INCLUDING TABLE NAME) :return: (es_query, es_filters) TUPLE
def TaxonomicAmendmentStore(repos_dict=None, repos_par=None, with_caching=True, assumed_doc_version=None, git_ssh=None, pkey=None, git_action_class=TaxonomicAmendmentsGitAction, mirror_info=None, infrastructure_commit_author=): global _THE_TAXONOMIC_AMENDMENT_STORE if _THE_TAXONOMIC_AMENDMENT_STORE is None: _THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(repos_dict=repos_dict, repos_par=repos_par, with_caching=with_caching, assumed_doc_version=assumed_doc_version, git_ssh=git_ssh, pkey=pkey, git_action_class=git_action_class, mirror_info=mirror_info, infrastructure_commit_author=infrastructure_commit_author) return _THE_TAXONOMIC_AMENDMENT_STORE
Factory function for a _TaxonomicAmendmentStore object. A wrapper around the _TaxonomicAmendmentStore class instantiation for the most common use case: a singleton _TaxonomicAmendmentStore. If you need distinct _TaxonomicAmendmentStore objects, you'll need to call that class directly.
def cli(parser): parser.add_argument(, , action=, help=) parser.add_argument(, nargs=, help=) opts = parser.parse_args() for package in opts.packages: install(package, execute=not opts.dry_run)
Currently a cop-out -- just calls easy_install
def _get_item_cache(self, item):
    """Return the cached item, item represents a label indexer."""
    cache = self._item_cache
    res = cache.get(item)
    if res is None:
        # Cache miss: pull the raw values from the block manager and box
        # them into a user-facing object (e.g. a Series for a DataFrame
        # column), then memoize the boxed result.
        values = self._data.get(item)
        res = self._box_item_values(item, values)
        cache[item] = res
        # Link the boxed object back to this container so cache
        # invalidation can find and drop it on mutation.
        res._set_as_cached(item, self)
        # NOTE(review): propagating _is_copy presumably keeps
        # SettingWithCopy chain tracking intact — confirm against pandas.
        res._is_copy = self._is_copy
    return res
Return the cached item, item represents a label indexer.
def meta_set(self, key, metafield, value):
    """Set the meta field for a key to a new value.

    This triggers the on-change handler for existing keys.
    """
    fields = self._meta.setdefault(key, {})
    fields[metafield] = value
    if key in self:
        # Self-assignment re-fires the on-change handler for the key.
        self[key] = self[key]
Set the meta field for a key to a new value. This triggers the on-change handler for existing keys.
def bbox_transform(ex_rois, gt_rois, box_stds):
    """Compute bounding box regression targets from ex_rois to gt_rois.

    :param ex_rois: [N, 4] boxes as (x1, y1, x2, y2)
    :param gt_rois: [N, 4] ground-truth boxes as (x1, y1, x2, y2)
    :param box_stds: length-4 normalization stds for (dx, dy, dw, dh)
    :return: [N, 4] targets (dx, dy, dw, dh)
    """
    # The stripped assert message was restored; row counts must match.
    assert ex_rois.shape[0] == gt_rois.shape[0], \
        'inconsistent rois number: {} vs {}'.format(ex_rois.shape[0], gt_rois.shape[0])

    # Widths/heights/centers use the "+1 pixel" box convention.
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
    ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
    gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)

    # 1e-14 guards against division by degenerate zero-size boxes.
    targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0]
    targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1]
    targets_dw = np.log(gt_widths / ex_widths) / box_stds[2]
    targets_dh = np.log(gt_heights / ex_heights) / box_stds[3]

    return np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
compute bounding box regression targets from ex_rois to gt_rois :param ex_rois: [N, 4] :param gt_rois: [N, 4] :return: [N, 4]
def p_sysargs(self, p):
    """sysargs : sysargs COMMA sysarg"""
    # PLY yacc rule: the docstring above IS the grammar production (it was
    # separated from the code in this copy and is restored here).
    # Append the newly parsed sysarg to the tuple accumulated so far.
    p[0] = p[1] + (p[3],)
    # Propagate the line number of the first symbol to the result.
    p.set_lineno(0, p.lineno(1))
sysargs : sysargs COMMA sysarg
def get_winner_number(self):
    """!
    @brief Calculates number of winner at the last step of learning process.

    @return (uint) Number of winner.
    """
    if self.__ccore_som_pointer is not None:
        # Refresh awards from the C-core implementation when it is in use.
        self._award = wrapper.som_get_awards(self.__ccore_som_pointer)

    # A neuron is a winner if it captured at least one sample.
    return sum(1 for index in range(self._size) if self._award[index] > 0)
! @brief Calculates number of winner at the last step of learning process. @return (uint) Number of winner.
# Walk every path/method pair in the swagger spec, build an EndpointData for
# each, and invoke `callback(data)` on it.  Validates that each operation has
# a server binding, exactly one produced content type, and that parameters are
# not declared in both body and query at once.
# NOTE(review): string literals (spec keys such as the x-bind-* extensions and
# content types) have been stripped from this line, leaving empty subscripts
# and bare `in op_spec` tests -- restore them from upstream before use.
def call_on_each_endpoint(self, callback): if not in self.swagger_dict: return for path, d in list(self.swagger_dict[].items()): for method, op_spec in list(d.items()): data = EndpointData(path, method) if not in op_spec: if in op_spec: log.info("Skipping generation of %s %s" % (method, path)) continue else: raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path)) data.handler_server = op_spec[] if not in op_spec: raise Exception("Swagger api has no section for %s %s" % (method, path)) if len(op_spec[]) != 1: raise Exception("Expecting only one type under for %s %s" % (method, path)) if op_spec[][0] == : data.produces_json = True elif op_spec[][0] == : data.produces_html = True else: raise Exception("Only or are supported. See %s %s" % (method, path)) if in op_spec: data.handler_client = op_spec[] if in op_spec: data.decorate_server = op_spec[] if in op_spec: data.decorate_request = op_spec[] data.operation = Operation.from_spec(self.spec, path, method, op_spec) if in op_spec: params = op_spec[] for p in params: if p[] == : data.param_in_body = True if p[] == : data.param_in_query = True if p[] == : data.param_in_path = True if data.param_in_path: data.path = data.path.replace(, ).replace(, ) if data.param_in_body and data.param_in_query: raise Exception("Cannot support params in both body and param (%s %s)" % (method, path)) else: data.no_params = True callback(data)
Find all server endpoints defined in the swagger spec and calls 'callback' for each, with an instance of EndpointData as argument.
def solve(self, reaction_1, reaction_2):
    """Return the flux coupling between two reactions.

    The coupling is a (minimum, maximum) tuple for the v1/v2 flux ratio;
    either element is None when the solver fails in that direction,
    indicating the interval is unbounded there.
    """
    self._prob.set_objective(self._vbow(reaction_1))
    # Drop the constraint left over from any previous query before
    # pinning reaction_2's weighted flux to 1.
    if self._reaction_constr is not None:
        self._reaction_constr.delete()
    self._reaction_constr, = self._prob.add_linear_constraints(
        self._vbow(reaction_2) == 1)

    def _optimize(sense):
        try:
            solution = self._prob.solve(sense)
        except lp.SolverError:
            return None
        return solution.get_value(self._vbow(reaction_1))

    return (_optimize(lp.ObjectiveSense.Minimize),
            _optimize(lp.ObjectiveSense.Maximize))
Return the flux coupling between two reactions The flux coupling is returned as a tuple indicating the minimum and maximum value of the v1/v2 reaction flux ratio. A value of None as either the minimum or maximum indicates that the interval is unbounded in that direction.
# Clear (acknowledge) a Traffic Server alarm event via traffic_ctl or
# traffic_line, depending on which binary is available.
# NOTE(review): the subcommand string literals and the CLI docstring have been
# stripped from this line (empty arguments to _traffic_ctl/_traffic_line and a
# bare `*`); restore them from upstream before use.
def clear_alarms(alarm): * if _TRAFFICCTL: cmd = _traffic_ctl(, , alarm) else: cmd = _traffic_line(, alarm) return _subprocess(cmd)
Clear (acknowledge) an alarm event. The arguments are "all" for all current alarms, a specific alarm number (e.g. "1"), or an alarm string identifier (e.g. "MGMT_ALARM_PROXY_CONFIG_ERROR"). .. code-block:: bash salt '*' trafficserver.clear_alarms [all | #event | name]
def uinit(self, ushape):
    """Return initialiser for working variable U."""
    # NOTE(review): the option key has been stripped (empty subscript) and both
    # branches currently return the same zero array -- the else branch
    # presumably derived U from a supplied starting value; confirm against the
    # upstream source before relying on this.
    if self.opt[] is None:
        return np.zeros(ushape, dtype=self.dtype)
    else:
        return np.zeros(ushape, dtype=self.dtype)
Return initialiser for working variable U.
def _make_operator(method_name): def operator(self, other): float_operator = getattr(float, method_name) try: float_other = float(other) except (ValueError, TypeError): return False return float_operator(float(self), float_other) return operator
Return an operator method that takes parameters of type :class:`Dimension`, evaluates them, and delegates to the :class:`float` operator with name `method_name`
def get_out_ip_addr(cls, tenant_id):
    """Retrieve the 'out' service subnet attributes for *tenant_id*.

    Logs an error and returns None when the fabric has not been prepared
    for the tenant.
    """
    if tenant_id in cls.serv_obj_dict:
        return cls.serv_obj_dict.get(tenant_id).get_out_ip_addr()
    LOG.error("Fabric not prepared for tenant %s", tenant_id)
    return None
Retrieves the 'out' service subnet attributes.
# Destroy a session completely: delete every key, remove the record from the
# backing kvsession store, clear the session id, and reset modified/new flags.
# NOTE(review): the attribute-name literal in getattr(self, , None) has been
# stripped (presumably the session-id attribute guarding the store delete);
# restore it from upstream before use.
def destroy(self): for k in list(self.keys()): del self[k] if getattr(self, , None): current_app.kvsession_store.delete(self.sid_s) self.sid_s = None self.modified = False self.new = False
Destroys a session completely, by deleting all keys and removing it from the internal store immediately. This allows removing a session for security reasons, e.g. a login stored in a session will cease to exist if the session is destroyed.
def spread_value(value: Decimal, spread_p: Decimal) -> Tuple[Decimal, Decimal]:
    """Return (lower, upper) bounds separated from *value* by *spread_p*.

    The upper bound multiplies by (1 + spread_p) while the lower bound
    divides by the same factor, so upper/value == value/lower.
    """
    factor = 1 + spread_p
    return value / factor, value * factor
Returns a lower and upper value separated by a spread percentage
# Create a referenced model if it does not exist yet: scan the RAML root's
# resources for a POST on the model's singular or plural route and feed the
# matching resource to setup_data_model; raise ValueError when none is found.
# NOTE(review): several string literals have been stripped from this line (the
# route prefixes, the POST-method comparison string, and the error message);
# restore them from upstream before use.
def prepare_relationship(config, model_name, raml_resource): if get_existing_model(model_name) is None: plural_route = + pluralize(model_name.lower()) route = + model_name.lower() for res in raml_resource.root.resources: if res.method.upper() != : continue if res.path.endswith(plural_route) or res.path.endswith(route): break else: raise ValueError( .format(model_name)) setup_data_model(config, res, model_name)
Create referenced model if it doesn't exist. When preparing a relationship, we check to see if the model that will be referenced already exists. If not, it is created so that it will be possible to use it in a relationship. Thus the first usage of this model in RAML file must provide its schema in POST method resource body schema. :param model_name: Name of model which should be generated. :param raml_resource: Instance of ramlfications.raml.ResourceNode for which :model_name: will be defined.
# Create a new RSA key pair and wrap it in an RSAKey instance; when no kid is
# supplied, derive one via add_kid().
# NOTE(review): the default values for `kid` and `use` have been stripped
# (empty defaults make this line invalid); per the accompanying docstring,
# `use` chooses between 'sig'/'enc' -- restore the defaults from upstream.
def new_rsa_key(key_size=2048, kid=, use=, public_exponent=65537): _key = rsa.generate_private_key(public_exponent=public_exponent, key_size=key_size, backend=default_backend()) _rk = RSAKey(priv_key=_key, use=use, kid=kid) if not kid: _rk.add_kid() return _rk
Creates a new RSA key pair and wraps it in a :py:class:`cryptojwt.jwk.rsa.RSAKey` instance :param key_size: The size of the key :param kid: The key ID :param use: What the key is supposed to be used for. Two choices: 'sig'/'enc' :param public_exponent: The value of the public exponent. :return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance
def iter_predecessors(self, graph, dest, branch, turn, tick, *, forward=None):
    """Iterate over predecessors of *dest* in *graph* at the given time."""
    if self.db._no_kc:
        # Keycache disabled: replay adds/deletes directly instead.
        changes = self._adds_dels_sucpred(
            self.predecessors[graph, dest], branch, turn, tick)
        yield from changes[0]
    else:
        use_forward = self.db._forward if forward is None else forward
        yield from self._get_origcache(
            graph, dest, branch, turn, tick, forward=use_forward)
Iterate over predecessors to a given destination node at a given time.
# Context processor adding BiDi layout variables depending on whether the
# active language is right-to-left: direction, layout start/end sides, and a
# mark_safe()'d language-marker entity.
# NOTE(review): the dictionary keys and values have been stripped from this
# line (empty `{ :, :, ...}` literals); per the accompanying docstring they
# were LANGUAGE_DIRECTION / LANGUAGE_START / LANGUAGE_END / LANGUAGE_MARKER --
# restore them from upstream before use.
def bidi(request): from django.utils import translation from django.utils.safestring import mark_safe if translation.get_language_bidi(): extra_context = { :, :, :, : mark_safe(), } else: extra_context = { :, :, :, : mark_safe(), } return extra_context
Adds to the context BiDi related variables LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl') LANGUAGE_START -- Start of language layout ('right' for rtl, 'left' for 'ltr') LANGUAGE_END -- End of language layout ('left' for rtl, 'right' for 'ltr') LANGUAGE_MARKER -- Language marker entity ('&rlm;' for rtl, '&lrm' for ltr)
def _int_to_pos(self, flat_position): return flat_position % self.env.action_space.screen_shape[0],\ flat_position % self.env.action_space.screen_shape[1]
Returns x, y from flat_position integer. Args: flat_position: flattened position integer Returns: x, y
# Decrypt an encrypted Android-backup-style stream.
# Header lines are read in order: user salt (hex), checksum salt (hex),
# PBKDF2 round count, IV (hex), and the encrypted master-key blob (hex).
# The user password unwraps the master-key blob with AES-CBC; the unwrapped
# master key is then verified against its checksum via a second PBKDF2 pass
# before the payload is decrypted with it.
# When self.stream is set, a Proxy is returned that decrypts lazily and strips
# the trailing PKCS#7-style padding on the final block; otherwise the
# remainder is decrypted eagerly into a BytesIO.
# Raises ImportError when PyCrypto is unavailable and ValueError when no
# password was supplied either as an argument or on the object.
def _decrypt(self, fp, password=None): if AES is None: raise ImportError("PyCrypto required") if password is None: password = self.password if password is None: raise ValueError( "Password need to be provided to extract encrypted archives") user_salt = fp.readline().strip() user_salt = binascii.a2b_hex(user_salt) ck_salt = fp.readline().strip() ck_salt = binascii.a2b_hex(ck_salt) rounds = fp.readline().strip() rounds = int(rounds) iv = fp.readline().strip() iv = binascii.a2b_hex(iv) master_key = fp.readline().strip() master_key = binascii.a2b_hex(master_key) user_key = PBKDF2(password, user_salt, dkLen=256 // 8, count=rounds) cipher = AES.new(user_key, mode=AES.MODE_CBC, IV=iv) master_key = bytearray(cipher.decrypt(master_key)) l = master_key.pop(0) master_iv = bytes(master_key[:l]) master_key = master_key[l:] l = master_key.pop(0) mk = bytes(master_key[:l]) master_key = master_key[l:] l = master_key.pop(0) master_ck = bytes(master_key[:l]) utf8mk = self.encode_utf8(mk) calc_ck = PBKDF2(utf8mk, ck_salt, dkLen=256//8, count=rounds) assert calc_ck == master_ck cipher = AES.new(mk, mode=AES.MODE_CBC, IV=master_iv) off = fp.tell() fp.seek(0, 2) length = fp.tell() - off fp.seek(off) if self.stream: def decrypt(data): data = bytearray(cipher.decrypt(data)) if fp.tell() - off >= length: pad = data[-1] assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:]) data = data[:-pad] return data return Proxy(decrypt, fp, cipher.block_size) else: data = bytearray(cipher.decrypt(fp.read())) pad = data[-1] assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:]) data = data[:-pad] return io.BytesIO(data)
Internal decryption function Uses either the password argument for the decryption, or, if not supplied, the password field of the object :param fp: a file object or similar which supports the readline and read methods :rtype: Proxy
def array(
        item_processor, alias=None, nested=None, omit_empty=False,
        hooks=None
):
    """Create an array processor for parsing and serializing XML array data.

    :param item_processor: declxml processor for the items of the array.
    :param alias: optional name used for the array when read from XML;
        defaults to the item processor's name.
    :param nested: element name (XPath allowed) under which array items live;
        omit for an embedded array.
    :param omit_empty: when True, empty nested arrays are omitted on
        serialization (nested only).
    :param hooks: optional Hooks object wrapped around the processor.
    :return: a declxml processor object.
    """
    return _processor_wrap_if_hooks(
        _Array(item_processor, alias, nested, omit_empty), hooks)
Create an array processor that can be used to parse and serialize array data. XML arrays may be nested within an array element, or they may be embedded within their parent. A nested array would look like: .. sourcecode:: xml <root-element> <some-element>ABC</some-element> <nested-array> <array-item>0</array-item> <array-item>1</array-item> </nested-array> </root-element> The corresponding embedded array would look like: .. sourcecode:: xml <root-element> <some-element>ABC</some-element> <array-item>0</array-item> <array-item>1</array-item> </root-element> An array is considered required when its item processor is configured as being required. :param item_processor: A declxml processor object for the items of the array. :param alias: If specified, the name given to the array when read from XML. If not specified, then the name of the item processor is used instead. :param nested: If the array is a nested array, then this should be the name of the element under which all array items are located. If not specified, then the array is treated as an embedded array. Can also be specified using supported XPath syntax. :param omit_empty: If True, then nested arrays will be omitted when serializing if they are empty. Only valid when nested is specified. Note that an empty array may only be omitted if it is not itself contained within an array. That is, for an array of arrays, any empty arrays in the outer array will always be serialized to prevent information about the original array from being lost when serializing. :param hooks: A Hooks object. :return: A declxml processor object.
def rand_ssn():
    """Return a random SSN-formatted string (9 digits), e.g. '295-50-0178'."""
    area = rand_str(3, string.digits)
    group = rand_str(2, string.digits)
    serial = rand_str(4, string.digits)
    return "%s-%s-%s" % (area, group, serial)
Random SSN. (9 digits) Example:: >>> rand_ssn() 295-50-0178
def update(self):
    """Run one analysis/forecast cycle, store the ensembles, and advance the
    iteration counter.  May be called repeatedly to iterate."""
    updated_par = self.analysis_evensen()
    updated_obs = self.forecast(parensemble=updated_par)
    self.obsensemble = updated_obs
    self.parensemble = updated_par
    self.iter_num += 1
update performs the analysis, then runs the forecast using the updated self.parensemble. This can be called repeatedly to iterate...
# Build a widget object for the given place/type, falling back to a default
# widget class when the type is unknown.  TypeErrors mentioning unexpected
# keyword arguments are re-raised as WidgetParameterException; when a file is
# given and any widget field is callable, the callables are resolved against
# the file via _resolve_widget.
# NOTE(review): string literals have been stripped from this line (the default
# widget-type key, the fallback TypeError message, and the substrings matched
# inside `message`); restore them from upstream before use.
def create_widget(self, place, type, file=None, **kwargs): widget_class = self.widget_types.get(type, self.widget_types[]) kwargs.update(place=place, type=type) try: element = widget_class(**kwargs) except TypeError as e: message = e.args[0] if e.args else if ( in message or in message ): raise WidgetParameterException( % (type, message, widget_class._fields) ) raise e if file and any(map(callable, element)): return self._resolve_widget(file, element) return element
Create a widget object based on given arguments. If file object is provided, callable arguments will be resolved: its return value will be used after calling them with file as first parameter. All extra `kwargs` parameters will be passed to widget constructor. :param place: place hint where widget should be shown. :type place: str :param type: widget type name as taken from :attr:`widget_types` dict keys. :type type: str :param file: optional file object for widget attribute resolving :type type: browsepy.files.Node or None :returns: widget instance :rtype: object
# Export one BPMN node ("Element" classification: task, subprocess or gateway)
# into the export_elements list of CSV rows.  Join nodes short-circuit and
# return the node (or its successor) for the caller to continue from; split
# nodes recurse down each outgoing branch with a letter-suffixed order prefix,
# emitting "goto" rows where branches rejoin; a single outgoing flow recurses
# straight to the next node.
# NOTE(review): the recursive calls reference export_node while this def is
# named export_element -- presumably the same (static) method under its
# upstream name; confirm before use.  Some condition-literal text also appears
# stripped near the end of the first line.
def export_element(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="", who="", add_join=False): node_type = node[1][consts.Consts.type] node_classification = nodes_classification[node[0]] outgoing_flows = node[1].get(consts.Consts.outgoing_flow) if node_type != consts.Consts.parallel_gateway and consts.Consts.default in node[1] \ and node[1][consts.Consts.default] is not None: default_flow_id = node[1][consts.Consts.default] else: default_flow_id = None if BpmnDiagramGraphCsvExport.classification_join in node_classification and not add_join: if node_type == consts.Consts.task or node_type == consts.Consts.subprocess: return node else: outgoing_flow_id = outgoing_flows[0] outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id) outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref]) return outgoing_node else: if node_type == consts.Consts.task: export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name], "Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""}) elif node_type == consts.Consts.subprocess: export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name], "Condition": condition, "Who": who, "Subprocess": "yes", "Terminated": ""}) if BpmnDiagramGraphCsvExport.classification_split in node_classification: next_node = None alphabet_suffix_index = 0 for outgoing_flow_id in outgoing_flows: outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id) outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref]) suffix = string.ascii_lowercase[alphabet_suffix_index] next_prefix = prefix + str(order) + suffix alphabet_suffix_index += 1 if node_type != consts.Consts.parallel_gateway and consts.Consts.name in outgoing_flow[2] \ and outgoing_flow[2][consts.Consts.name] is not None: condition = outgoing_flow[2][consts.Consts.name] else: condition = "" if 
BpmnDiagramGraphCsvExport.classification_join in nodes_classification[outgoing_node[0]]: export_elements.append( {"Order": next_prefix + str(1), "Activity": "goto " + prefix + str(order + 1), "Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""}) elif outgoing_flow_id == default_flow_id: tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node, nodes_classification, 1, next_prefix, "else", who) if tmp_next_node is not None: next_node = tmp_next_node else: tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node, nodes_classification, 1, next_prefix, condition, who) if tmp_next_node is not None: next_node = tmp_next_node if next_node is not None: return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, next_node, nodes_classification, order=(order + 1), prefix=prefix, who=who, add_join=True) elif len(outgoing_flows) == 1: outgoing_flow_id = outgoing_flows[0] outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id) outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref]) return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node, nodes_classification, order=(order + 1), prefix=prefix, who=who) else: return None
Export a node with "Element" classification (task, subprocess or gateway) :param bpmn_graph: an instance of BpmnDiagramGraph class, :param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that will be used in exported CSV document, :param node: networkx.Node object, :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels, :param order: the order param of exported node, :param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify the branch :param condition: the condition param of exported node, :param who: the condition param of exported node, :param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV. :return: None or the next node object if the exported node was a gateway join.
def partial_autocorrelation(x, param):
    """Return the partial autocorrelation of *x* at each requested lag.

    :param x: the time series to calculate the feature of
    :param param: list of dicts {"lag": val} naming the lags to return
    :return: list of ("lag_<k>", value) tuples; NaN where the series is too
        short to estimate the coefficient
    """
    highest_lag = max(entry["lag"] for entry in param)
    n = len(x)
    if n <= 1:
        # Too short for any estimate at all.
        coeffs = [np.nan] * (highest_lag + 1)
    else:
        # pacf can estimate at most n - 1 lags; pad the remainder with NaN.
        usable_lag = min(highest_lag, n - 1)
        coeffs = list(pacf(x, method="ld", nlags=usable_lag))
        coeffs += [np.nan] * max(0, highest_lag - usable_lag)
    return [("lag_{}".format(entry["lag"]), coeffs[entry["lag"]])
            for entry in param]
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and :math:`x_{t-k}`, adjusted for the intermediate variables :math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]). Following [2], it can be defined as .. math:: \\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})} {\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}} with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})` being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`. It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p` and zero for `k>p`." With this property, it is used to determine the lag of an AR-Process. .. rubric:: References | [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015). | Time series analysis: forecasting and control. John Wiley & Sons. | [2] https://onlinecourses.science.psu.edu/stat510/node/62 :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned :type param: list :return: the value of this feature :return type: float
# Deep-copies the queryset, applies any stored start/rows pagination params,
# then fetches exactly one record: by key when given, via filter(**kwargs)
# when filters are given, or from the adapter directly.  Per the accompanying
# docstring, raises MultipleObjectsReturned when more than one record matches.
# NOTE(review): the config key in self._cfg[] has been stripped (empty
# subscript); restore it from upstream before use.
def get(self, key=None, **kwargs): clone = copy.deepcopy(self) if self._start: clone.adapter.set_params(start=self._start) if self._rows: clone.adapter.set_params(rows=self._rows) if key: data, key = clone.adapter.get(key) elif kwargs: data, key = clone.filter(**kwargs).adapter.get() else: data, key = clone.adapter.get() if clone._cfg[] == ReturnType.Object: return data, key return self._make_model(data, key)
Ensures that only one result is returned from DB and raises an exception otherwise. Can work in 3 different way. - If no argument is given, only does "ensuring about one and only object" job. - If key given as only argument, retrieves the object from DB. - if query filters given, implicitly calls filter() method. Raises: MultipleObjectsReturned: If there is more than one (1) record is returned.
# List all Scripts related to an Equipment by submitting a GET request for the
# given equipment id and unpacking the response into a list-keyed map.
# Raises InvalidParameterError for a null/invalid id (per the accompanying
# docstring).
# NOTE(review): string literals have been stripped from this line (the error
# message, URL fragments, the HTTP method, and the response key); restore them
# from upstream before use.
def listar_por_equipamento(self, id_equipment): if not is_valid_int_param(id_equipment): raise InvalidParameterError( u) url = + str(id_equipment) + code, map = self.submit(None, , url) key = return get_list_map(self.response(code, map, [key]), key)
List all Scripts related to an Equipment. :param id_equipment: Identifier of the Equipment. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'script': [{'id': < id >, 'nome': < nome >, 'descricao': < descricao >, 'id_tipo_roteiro': < id_tipo_roteiro >, 'nome_tipo_roteiro': < nome_tipo_roteiro >, 'descricao_tipo_roteiro': < descricao_tipo_roteiro >}, ...more Script...]} :raise InvalidParameterError: The identifier of Equipment is null and invalid. :raise EquipamentoNaoExisteError: Equipment not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
# Main loop of the IRCConnection: reads lines from the socket file and
# dispatches each to the first matching (pattern, callback) pair from
# dispatch_patterns(), passing the regex's named groups as kwargs.  An empty
# read or socket error closes the connection and returns True.
# NOTE(review): the log-message literals have been stripped (empty
# logger.debug()/logger.info() calls); restore them from upstream if needed.
def enter_event_loop(self): patterns = self.dispatch_patterns() self.logger.debug() while 1: try: data = self._sock_file.readline() except socket.error: data = None if not data: self.logger.info() self.close() return True data = data.rstrip() for pattern, callback in patterns: match = pattern.match(data) if match: callback(**match.groupdict())
\ Main loop of the IRCConnection - reads from the socket and dispatches based on regex matching
def create_page(cls, webdriver=None, **kwargs):
    """Instantiate this PageObject via PageFactory.

    Args:
        webdriver (Webdriver): Selenium WebDriver instance; when falsy or
            omitted, the default manager's driver is used.

    Returns:
        PageObject

    Raises:
        InvalidPageError
    """
    driver = webdriver or WTF_WEBDRIVER_MANAGER.get_driver()
    return PageFactory.create_page(cls, webdriver=driver, **kwargs)
Class method short cut to call PageFactory on itself. Use it to instantiate this PageObject using a webdriver. Args: webdriver (Webdriver): Instance of Selenium Webdriver. Returns: PageObject Raises: InvalidPageError
# Indicate fulfillment for this ECKD (FICON) storage volume by POSTing the
# control unit URI and the zero-padded, lower-case hex unit address to the
# volume's fulfillment endpoint on the HMC.
# NOTE(review): the request-body dictionary keys and the URI suffix have been
# stripped from this line (empty `{ : ...}` keys and a bare `self.uri +`);
# restore them from upstream before use.
def indicate_fulfillment_ficon(self, control_unit, unit_address): unit_address_2 = format(int(unit_address, 16), ) body = { : control_unit.uri, : unit_address_2, } self.manager.session.post( self.uri + , body=body)
TODO: Add ControlUnit objects etc for FICON support. Indicate completion of :term:`fulfillment` for this ECKD (=FICON) storage volume and provide identifying information (control unit and unit address) about the actual storage volume on the storage subsystem. Manually indicating fulfillment is required for all ECKD volumes, because they are not auto-discovered by the CPC. This method performs the "Fulfill FICON Storage Volume" HMC operation. Upon successful completion of this operation, the "fulfillment-state" property of this storage volume object will have been set to "complete". That is necessary for the CPC to be able to address and connect to the volume. If the "fulfillment-state" properties of all storage volumes in the owning storage group are "complete", the owning storage group's "fulfillment-state" property will also be set to "complete". Parameters: control_unit (:class:`~zhmcclient.ControlUnit`): Logical control unit (LCU) in which the backing ECKD volume is defined. unit_address (:term:`string`): Unit address of the backing ECKD volume within its logical control unit, as a hexadecimal number of up to 2 characters in any lexical case. Authorization requirements: * Object-access permission to the storage group owning this storage volume. * Task permission to the "Configure Storage - Storage Administrator" task. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def bread(stream):
    """Bdecode a file path or readable stream into an object."""
    if hasattr(stream, "read"):
        # Already a file-like object; consume it directly.
        return bdecode(stream.read())
    with open(stream, "rb") as handle:
        return bdecode(handle.read())
Decode a file or stream to an object.
def minowski(h1, h2, p=2):
    r"""Minowski distance between two histograms.

    With :math:`p=2` this equals the Euclidean distance and with
    :math:`p=1` the Manhattan distance.  ``p`` may not be zero.

    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.
    p : float
        The :math:`p` value in the Minowski distance formula.

    Returns
    -------
    minowski : float
        Minowski distance.

    Raises
    ------
    ValueError
        If ``p`` is zero.
    """
    # Fix: an orphaned `r` (the raw-docstring prefix left behind when the
    # docstring was stripped) would have raised NameError at runtime; the
    # docstring is restored above.
    h1, h2 = __prepare_histogram(h1, h2)
    if 0 == p:
        raise ValueError('p can not be zero')
    elif int == type(p):
        # Specialised fast paths for small integer p.
        if p > 0 and p < 25:
            return __minowski_low_positive_integer_p(h1, h2, p)
        elif p < 0 and p > -25:
            return __minowski_low_negative_integer_p(h1, h2, p)
    return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1. / p)
r""" Minowski distance. With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance, and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`. The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p \right)^{\frac{1}{p}} *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \sqrt[p]{2}]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. p : float The :math:`p` value in the Minowksi distance formula. Returns ------- minowski : float Minowski distance. Raises ------ ValueError If ``p`` is zero.
def missing_values(self):
    """Compute the values that must be missing from each player's hand,
    based on when they have passed.

    :return: a list of sets, each one containing the values that must be
        missing from the corresponding player's hand
    """
    # NOTE(review): reconstructed -- the original docstring had been fused
    # into the code text; the logic below matches the surviving statements.
    missing = [set() for _ in self.hands]
    board = dominoes.SkinnyBoard()
    player = self.starting_player
    for move in self.moves:
        if move is None:
            # A pass means this player holds neither end value.
            missing[player].update([board.left_end(), board.right_end()])
        else:
            board.add(*move)
        player = next_player(player)
    return missing
Computes the values that must be missing from each player's hand, based on when they have passed. :return: a list of sets, each one containing the values that must be missing from the corresponding player's hand
# Heuristically check whether *item* (a module, wrapped utool function, plain
# function, or staticmethod) is directly defined by *module*: modules are
# compared by real path prefix, utool-wrapped functions recurse on the
# original function, and plain functions are matched via their __globals__
# module name (with special handling and a name-length guard when that name is
# the stripped sentinel) or their __module__ attribute.  The accompanying
# docstring itself warns this check "may be prone to errors".
# NOTE(review): several string literals have been stripped from this line
# (path-suffix strings in replace()/endswith(), the _utinfo key, and the
# special module-name compared against func_module_name); restore from
# upstream before use.  Also mixes py2-style func_name access.
def is_defined_by_module(item, module, parent=None): flag = False if isinstance(item, types.ModuleType): if not hasattr(item, ): try: import utool as ut name = ut.get_modname_from_modpath(module.__file__) flag = name in str(item) except: flag = False else: item_modpath = os.path.realpath(dirname(item.__file__)) mod_fpath = module.__file__.replace(, ) if not mod_fpath.endswith(): flag = False else: modpath = os.path.realpath(dirname(mod_fpath)) modpath = modpath.replace(, ) flag = item_modpath.startswith(modpath) elif hasattr(item, ): orig_func = item._utinfo[] flag = is_defined_by_module(orig_func, module, parent) else: if isinstance(item, staticmethod): item = item.__func__ try: func_globals = meta_util_six.get_funcglobals(item) func_module_name = func_globals[] if func_module_name == : valid_names = dir(module) if parent is not None: valid_names += dir(parent) if item.func_name in valid_names: if len(item.func_name) > 6: flag = True elif func_module_name == module.__name__: flag = True except AttributeError: if hasattr(item, ): flag = item.__module__ == module.__name__ return flag
Check if item is directly defined by a module. This check may be prone to errors.
def describeSObject(self, sObjectsType):
    """Describe metadata (field list and object properties) for *sObjectsType*."""
    self._setHeaders()
    service = self._sforce.service
    return service.describeSObject(sObjectsType)
Describes metadata (field list and object properties) for the specified object.
def can_start_on_cluster(nodes_status, nodes, start, walltime):
    """Approximate check that *nodes* machines are free on [start, start+walltime).

    Best-effort reservations are ignored.  A node counts as available when
    none of its remaining reservations overlap the requested window.
    Intended as a cheap pre-filter of candidate dates before submitting
    reservations to OAR.
    """
    free_nodes = 0
    for node, status in nodes_status.items():
        overlaps = []
        for reservation in status.get("reservations", []):
            if reservation.get("queue") == "besteffort":
                continue
            begin = reservation.get("started_at",
                                    reservation.get("scheduled_at"))
            if begin is None:
                # No usable start time: stop inspecting this node's reservations.
                break
            begin = int(begin)
            end = begin + int(reservation["walltime"])
            # Positive intersection with [start, start + walltime) means overlap.
            if min(end, start + walltime) - max(begin, start) > 0:
                overlaps.append(reservation)
        if not overlaps:
            free_nodes += 1
            if free_nodes >= nodes:
                return True
    return False
Check if #nodes can be started on a given cluster. This is intended to give a good enough approximation. This can be use to prefiltered possible reservation dates before submitting them on oar.
# Build a name from space-split tokens: filters tokens by a regex, fixes
# two-part surnames, then peels honorific, suffix and nickname off the end
# before taking the last name.  Per the accompanying docstring, the remaining
# tokens are first/middle parts.
# NOTE(review): this line is both stripped and truncated -- the filtering
# regexes and several literals are gone, self.first/self.last are assigned
# stripped (empty) values, and the body ends abruptly at
# `if num_remaining_parts == 3: return self` without handling the 2/1/0-part
# cases the docstring describes.  Restore from upstream before use.
def new_from_tokens(self, *args, **kwargs): if kwargs.get(): args = [ x.strip() for x in args if not re.match(r, x) ] else: args = [ x.strip() for x in args if not re.match(r, x) ] if len(args) > 2: self.detect_and_fix_two_part_surname(args) self.first = self.last = if len(args): if self.is_an_honorific(args[-1]): self.honorific = args.pop() if not self.honorific[-1] == : self.honorific += if self.is_a_suffix(args[-1]): self.suffix = args.pop() if re.match(r, self.suffix, re.IGNORECASE): self.suffix += if self.is_a_nickname(args[-1]): self.nick = args.pop() self.last = args.pop() num_remaining_parts = len(args) if num_remaining_parts == 3: return self
Takes in a name that has been split by spaces. Names which are in [last, first] format need to be preprocessed. The nickname must be in double quotes to be recognized as such. This can take name parts in in these orders: first, middle, last, nick, suffix, honorific first, middle, last, nick, suffix first, middle, last, suffix, honorific first, middle, last, honorific first, middle, last, suffix first, middle, last, nick first, last, honorific first, last, suffix first, last, nick first, middle, last first, last last
# Augment the superclass write routine to keep track of the current line
# number by scanning every written string for newline characters.
# NOTE(review): the character literal in `if i == :` has been stripped --
# presumably the newline character, given current_line_number is incremented
# there; restore it from upstream before use.
def write(self, *data): for l in data: for i in str(l): if i == : self.current_line_number += 1 pass pass pass return super(LineMapWalker, self).write(*data)
Augment write routine to keep track of current line
def remove_not_valid_selections(self):
    """Update the value, dropping selections that are (no longer) valid."""
    keep = self.valid_selection
    self.set_value(list(filter(keep, self.get_value())))
update the value to remove any that are (no longer) valid
def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
    """Copy DICOM slices into *out_dir*, encoding binary overlays into each.

    :param dcmfilelist: list of sorted .dcm file paths
    :param out_dir: output directory
    :param overlays: dictionary of binary overlay volumes, e.g.
        {1: np.array([...]), 3: ...}
    """
    for index, source_path in enumerate(dcmfilelist):
        logger.info(source_path)
        data = dicom.read_file(source_path)
        for overlay_id in overlays.keys():
            # Slices are taken from the back of the overlay volume.
            data = self.encode_overlay_slice(
                data, overlays[overlay_id][-1 - index, :, :], overlay_id)
        _, tail = os.path.split(os.path.normpath(source_path))
        data.save_as(os.path.join(out_dir, tail))
Function make 3D data from dicom file slices :dcmfilelist list of sorted .dcm files :overlays dictionary of binary overlays. {1:np.array([...]), 3:...} :out_dir output directory
# Cluster this coincidence dict-array within *window*, assuming it has the
# relevant coinc columns (per-ifo end times, stat, timeslide_id); returns self
# unchanged when either ifo's time column is empty, otherwise selects the
# cluster_coincs ids.
# NOTE(review): the attrs/data key literals have been stripped from this line
# (empty self.attrs[] subscripts and bare `%`-format keys for the pivot/fixed
# ifo time columns and the slide interval); restore them from upstream.
def cluster(self, window): pivot_ifo = self.attrs[] fixed_ifo = self.attrs[] if len(self.data[ % pivot_ifo]) == 0 or len(self.data[ % fixed_ifo]) == 0: return self from pycbc.events import cluster_coincs interval = self.attrs[] cid = cluster_coincs(self.stat, self.data[ % pivot_ifo], self.data[ % fixed_ifo], self.timeslide_id, interval, window) return self.select(cid)
Cluster the dict array, assuming it has the relevant Coinc colums, time1, time2, stat, and timeslide_id
def p_namespace(self, p):
    'namespace : NAMESPACE namespace_scope IDENTIFIER'
    # NOTE(review): PLY parser rules carry their grammar production in the
    # docstring; restored here from the accompanying rule text.
    p[0] = ast.Namespace(scope=p[2], name=p[3], lineno=p.lineno(1))
namespace : NAMESPACE namespace_scope IDENTIFIER
# A "show all" search that does not require a query: collects
# [key, date, size] rows for every container object, exits with an info
# message when no collections are found, and prints a table unless quiet.
# NOTE(review): this line is badly mangled -- the docstring text and a
# size-format template (`sizemb%sMBsizemb']`) have been fused into the code,
# and the loop producing `obj`, `datestr` and `size` is missing.  Restore the
# full implementation from upstream before use.
def search_all(self, quiet=False): t require a query Parameters ========== quiet: if quiet is True, we only are using the function to return rows of results. sizemb%sMBsizemb'] results.append([obj.key, datestr, size ]) if len(results) == 0: bot.info("No container collections found.") sys.exit(1) if not quiet: bot.info("Containers") bot.table(results) return results
a "show all" search that doesn't require a query Parameters ========== quiet: if quiet is True, we only are using the function to return rows of results.
# Build a comma-separated string of the base classes of *cls* that live in
# the pycbc package (filtered via each MRO entry's __module__ prefix),
# formatted from each class's module and name.
# NOTE(review): the string literals have been stripped from this line (the
# module-prefix tested by startswith, the join separator, and the
# "{module}.{name}" format template); restore them from upstream before use.
def get_topclasses(cls): bases = [c for c in inspect.getmro(cls) if c.__module__.startswith() and c != cls] return .join([.format(c.__module__, c.__name__) for c in bases])
Gets the base classes that are in pycbc.
def linear_set_layer(layer_size,
                     inputs,
                     context=None,
                     activation_fn=tf.nn.relu,
                     dropout=0.0,
                     name=None):
    """Apply a per-element linear transformation to a set of vectors.

    Each element of the input set is transformed independently by a 1x1
    convolution; an optional per-set context vector is transformed the same
    way and added to every element, letting a global statistic (e.g. from
    global_pool_1d) condition the element-wise mapping.

    TODO: Add bias add (or control the biases used).

    Args:
      layer_size: Dimension to transform the input vectors to.
      inputs: A tensor of shape [batch_size, sequence_length, input_dims]
        containing the sequences of input vectors.
      context: A tensor of shape [batch_size, context_dims] containing a
        global statistic about the set.
      activation_fn: The activation function to use.
      dropout: Dropout probability.
      name: name.

    Returns:
      Tensor of shape [batch_size, sequence_length, layer_size] containing
      the sequences of transformed vectors.
    """
    with tf.variable_scope(
        name, default_name="linear_set_layer", values=[inputs]):
        transformed = conv1d(
            inputs, layer_size, 1, activation=None, name="set_conv")
        if context is not None:
            # Broadcast a rank-2 context across the sequence dimension.
            if len(context.get_shape().as_list()) == 2:
                context = tf.expand_dims(context, axis=1)
            transformed += conv1d(
                context, layer_size, 1, activation=None, name="cont_conv")
        if activation_fn is not None:
            transformed = activation_fn(transformed)
        if dropout != 0.0:
            transformed = tf.nn.dropout(transformed, 1.0 - dropout)
        return transformed
Basic layer type for doing funky things with sets. Applies a linear transformation to each element in the input set. If a context is supplied, it is concatenated with the inputs. e.g. One can use global_pool_1d to get a representation of the set which can then be used as the context for the next layer. TODO: Add bias add (or control the biases used). Args: layer_size: Dimension to transform the input vectors to. inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. context: A tensor of shape [batch_size, context_dims] containing a global statistic about the set. activation_fn: The activation function to use. dropout: Dropout probability. name: name. Returns: Tensor of shape [batch_size, sequence_length, output_dims] containing the sequences of transformed vectors.
def _read_para_puzzle(self, code, cbit, clen, *, desc, length, version):
    """Read HIP PUZZLE parameter.

    Parses the fixed 4-byte header fields (#K, lifetime exponent, opaque
    data) followed by a random number occupying the remaining content
    bytes, per RFC 5201 / RFC 7401.

    Arguments:
        code: parameter type number
        cbit: critical bit
        clen: length of contents
        desc: parameter type description
        length: total parameter length (including padding)
        version: HIP protocol version

    Returns:
        dict with the parsed PUZZLE fields.

    Raises:
        ProtocolError: if a HIPv1 PUZZLE parameter is not exactly 12
            content bytes long.
    """
    # NOTE(review): the f-string literal below was lost during extraction;
    # restored to match this module's error-message convention — confirm
    # against the sibling _read_para_* methods upstream.
    if version == 1 and clen != 12:
        raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')

    _numk = self._read_unpack(1)
    _time = self._read_unpack(1)
    _opak = self._read_fileng(2)
    _rand = self._read_unpack(clen-4)

    puzzle = dict(
        type=desc,
        critical=cbit,
        length=clen,
        number=_numk,
        # Lifetime is encoded as an exponent: 2 ** (value - 32) seconds.
        lifetime=2 ** (_time - 32),
        opaque=_opak,
        random=_rand,
    )

    # Skip the padding that rounds the parameter up to `length` bytes.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return puzzle
Read HIP PUZZLE parameter. Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | #K, 1 byte | Lifetime | Opaque, 2 bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Random #I, RHASH_len / 8 bytes | / / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 puzzle.type Parameter Type 1 15 puzzle.critical Critical Bit 2 16 puzzle.length Length of Contents 4 32 puzzle.number Number of Verified Bits 5 40 puzzle.lifetime Lifetime 6 48 puzzle.opaque Opaque 8 64 puzzle.random Random Number
def xline(self):
    """Interact with segy in crossline mode.

    Returns
    -------
    xline : Line
        Lazily-created addressing object for crossline access; cached on
        first use.

    Raises
    ------
    ValueError
        If the file is unstructured.
    """
    if self.unstructured:
        raise ValueError(self._unstructured_errmsg)

    # Return the cached Line if it was already built.
    if self._xline is not None:
        return self._xline

    # NOTE(review): the two trailing string arguments were lost during
    # extraction; 'xline'/'crossline' are restored from this property's
    # documented purpose — confirm the exact labels (and their order)
    # against the Line constructor upstream.
    self._xline = Line(self,
                       self.xlines,
                       self._xline_length,
                       self._xline_stride,
                       self.offsets,
                       'xline',
                       'crossline')
    return self._xline
Interact with segy in crossline mode Returns ------- xline : Line or None Raises ------ ValueError If the file is unstructured Notes ----- .. versionadded:: 1.1
def makeelement(E, tagname, **kwargs):
    """Internal function: build an element through the element maker `E`.

    On Python 2, lxml requires byte strings for tag and attribute names,
    so everything is encoded as UTF-8 first; on Python 3 the call is
    passed through unchanged.

    Returns whatever ``E._makeelement`` returns.
    """
    if sys.version < '3':
        # NOTE(review): the '3' / 'utf-8' literals were lost during
        # extraction and are restored here — confirm against upstream.
        try:
            kwargs2 = {}
            for k, v in kwargs.items():
                kwargs2[k.encode('utf-8')] = v.encode('utf-8')
            return E._makeelement(tagname.encode('utf-8'), **kwargs2)
        except ValueError as e:
            try:
                # Fallback: encoded tag name, raw attribute values.
                # BUGFIX: the original rebound `e` to the new element, so
                # the `raise e` below would have raised a non-exception;
                # use a separate name to preserve the original error.
                elem = E._makeelement(tagname.encode('utf-8'))
                for k, v in kwargs.items():
                    elem.attrib[k.encode('utf-8')] = v
                return elem
            except ValueError:
                print(e, file=stderr)
                print("tagname=", tagname, file=stderr)
                print("kwargs=", kwargs, file=stderr)
                raise e
    else:
        return E._makeelement(tagname, **kwargs)
Internal function
def zero_pad_features(features: List[np.ndarray],
                      target_shape: tuple) -> List[np.ndarray]:
    """Zero-pad each numpy array in `features` up to `target_shape`.

    Arrays with fewer dimensions than the target first get trailing
    singleton axes appended; padding (with zeros) is then applied at the
    end of every dimension.

    :param features: List of numpy arrays.
    :param target_shape: Target shape of each numpy array in the list.
        Must have at least as many dimensions as, and no smaller sizes
        than, every input array.
    :return: A list of padded numpy arrays.
    :raises ValueError: If `target_shape` has fewer dimensions than an
        input array, or any target dimension is smaller than the input's.
    """
    pad_features = []
    for feature in features:
        feature_shape = feature.shape
        if len(feature_shape) < len(target_shape):
            # Append trailing singleton axes until the ranks match.
            # BUGFIX: the original used axis=len(feature.shape) + 1, which
            # is out of range for np.expand_dims on modern numpy;
            # axis=len(feature.shape) appends a new last axis.
            for _ in range(len(target_shape) - len(feature_shape)):
                feature = np.expand_dims(feature, axis=len(feature.shape))
            feature_shape = feature.shape
        elif len(feature_shape) > len(target_shape):
            raise ValueError("Provided target shape must be bigger then the original "
                             "shape. (provided: {}, original {})".format(len(target_shape),
                                                                         len(feature_shape)))
        diff_shape = np.subtract(target_shape, feature_shape)
        if np.any(diff_shape < 0):
            raise ValueError("Provided target values must be bigger then the original "
                             "values for each dimension. (provided: {}, original {})".format(
                                 target_shape, feature_shape))
        # Pad only at the end of each dimension, filling with zeros.
        # ('constant' mode literal restored — it was lost in extraction.)
        diff_shape = [[0, d] for d in diff_shape]
        p = np.pad(feature, diff_shape, 'constant', constant_values=0)
        pad_features.append(p)
    return pad_features
Zero-pad a list of numpy arrays. :param features: List of numpy arrays. :param target_shape: Target shape of each numpy array in the list feat. Note: target_shape should be greater than the largest shapes in feat. :return: A list of padded numpy arrays.
def clickable(self):
    """Whether the user may currently click this widget.

    True only while the widget is enabled and its submenu is the active
    one. Container submenus delegate to their own ``clickable`` flag;
    plain submenus must be the active submenu of the active menu.
    """
    if isinstance(self.submenu, Container):
        return self.submenu.clickable and self.enabled
    submenu_active = self.submenu.name == self.submenu.menu.activeSubMenu
    menu_active = self.submenu.menu.name == self.window.activeMenu
    return submenu_active and menu_active and self.enabled
Property used for determining if the widget should be clickable by the user. This is only true if the submenu of this widget is active and this widget is enabled. The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute.
def gen_random_company_name():
    """随机生成一个公司名称 (randomly generate a Chinese company name).

    Returns:
        str: a name of the form
        ``<region><2-5 middle characters><service type><company type>``.
    """
    # Candidate city/region names, comma separated.
    region_info = ("北京,上海,广州,深圳,天津,成都,杭州,苏州,重庆,武汉,南京,大连,沈阳,长沙,郑州,西安,青岛,"
                   "无锡,济南,宁波,佛山,南通,哈尔滨,东莞,福州,长春,石家庄,烟台,合肥,唐山,常州,太原,昆明,"
                   "潍坊,南昌,泉州,温州,绍兴,嘉兴,厦门,贵阳,淄博,徐州,南宁,扬州,呼和浩特,鄂尔多斯,乌鲁木齐,"
                   "金华,台州,镇江,威海,珠海,东营,大庆,中山,盐城,包头,保定,济宁,泰州,廊坊,兰州,洛阳,宜昌,"
                   "沧州,临沂,泰安,鞍山,邯郸,惠州,江门,襄阳,湖州,吉林,芜湖,德州,聊城,漳州,株洲,淮安,榆林,"
                   "常德,咸阳,衡阳,滨州,柳州,遵义,菏泽,南阳,新乡,湛江,岳阳,郴州,许昌,连云港,枣庄,茂名,周口,"
                   "宿迁")
    # Pool of auspicious characters sampled for the middle of the name.
    middle_word = ("泰宏本晶辉昌昌本同永康洪皇贵久圣正裕如恒长佳协义晶合优荣汇洪千东祥复昌皇久丰兴昌国裕亚大"
                   "荣康通仁元裕厚瑞如弘升久隆旺吉德谦长贵百久汇百伟升隆复飞佳隆浩发丰亨公荣复光福美禄欣丰大"
                   "祥晶宏中仁宏华隆盈旺仁顺春满美中谦瑞和圣多信合盛千亚晶祥鑫隆飞鑫优合本旺发久国汇百恒佳东"
                   "洪通恒大公中优广宝盈泰如合丰捷本伟华春元亚广中晶如浩仁汇亚永凯富富裕茂华中飞浩台美佳圣仁"
                   "成全润金庆百贵康仁茂皇东广荣宏荣新元康公升亨洪福伟永义巨国升进合耀巨润巨元发洪源寿仁发光"
                   "顺升凯全全辉欣成公裕康合禄兴汇顺浩贵晶捷东飞益福宏国禄元昌弘和满发巨宝生耀隆大欣昌佳本兴"
                   "吉生宝凯润新高和元亨巨久光益旺春巨鑫进东晶中飞兴中美丰同晶复耀进洪全兴汇宝捷伟仁安宏多庆"
                   "益生和干干福亚新复吉亚恒亚春德飞伟利庆华丰宏合德瑞进顺祥信合康富益全巨茂台谦厚台成福捷浩"
                   "信长飞长金利美昌满丰干佳美金洪昌富千和美旺旺晶春仁华中凯浩鼎泰辉新干高进辉同欣广庆吉益德"
                   "浩中润和春元生高进皇茂利同盈复复晶多巨圣弘捷公宝汇鑫成高新正和和巨祥光宏大丰欣恒昌昌厚合"
                   "庆泰丰干益和金洪复元顺捷金万辉全吉庆德瑞优长鼎顺汇顺欣飞浩荣祥光泰多春凯信进公优飞昌协美"
                   "多发中盈协成祥益昌汇泰春满千鼎东光优谦仁中飞生恒伟福晶宝信辉金皇升飞亨鑫安伟华元旺益大寿"
                   "皇元康耀久荣满协信凯谦宝巨丰正光发康康捷中源国多多康公利顺光辉如茂晶永大高成生裕裕和万干"
                   "飞全洪伟同发禄升欣盈高欣谦亨裕康宝复庆光皇源凯凯圣发东本辉寿捷茂和庆丰多宏亚万益公福捷升"
                   "福茂宝捷同复合隆中汇禄鑫中新德昌新大皇安东信瑞元皇皇洪瑞弘捷本鑫中亨亚广昌永宏润同成高利"
                   "台中生如百康旺巨福德春元通国成浩永康泰盛泰利生茂巨久昌佳复富隆通盈同庆皇顺如辉全旺捷皇长"
                   "全富广源恒鼎顺汇本百洪鼎进欣吉凯汇欣义东长禄捷浩益旺复弘昌生发伟荣高亨元聚广新复多富千中"
                   "兴佳升康成同贵宝生捷晶全泰全永旺发富康仁兴谦利茂亨洪佳洪元鼎全国本丰亨鑫弘富干寿春贵国成"
                   "盛大发久弘国大金生久高久益浩晶盈益瑞正丰百浩泰台合德昌昌美皇合隆裕东广亚国升益福旺高贵信"
                   "生汇多泰元厚瑞飞千顺盛如大德润新新顺润飞瑞优源宏千盛吉高大耀进信欣信利瑞荣升亨盈盛千合复"
                   "隆贵丰义公优荣宏广福华洪洪捷吉进盛盛裕国洪浩祥晶弘吉欣鼎德佳成和满台光复汇佳通浩昌欣康瑞"
                   "万亚谦兴福利千元皇瑞润禄信合长润捷中旺成金益公隆宏康亚禄隆通光广国义中优多富复盛庆千长永"
                   "国源安永千中正康发复协利皇亚协鑫义巨源中润旺高进巨新高协兴生福恒富国协捷盛同复巨千益长洪"
                   "亚欣美复康洪全高安进千汇通益美耀美台耀万康合洪禄中宏百凯华优鑫协泰兴裕欣进安茂丰光飞全飞"
                   "高康进同大洪永祥飞美满兴丰谦和鑫贵百洪通裕升干永升亨光德盛永金东鼎永裕佳和德仁荣辉同瑞恒"
                   "聚谦长广鑫金久庆国吉禄弘顺汇恒汇瑞隆洪光鼎复公鼎泰盛佳恒鼎中飞聚亚宏盈光安谦成合巨洪飞庆"
                   "久瑞正茂信协百生盛合国圣盛同同盈信宏禄仁大中皇宝德金台优长成成亚盛公美荣成昌久禄泰亚进台"
                   "辉佳凯安久本荣飞晶隆晶弘同丰辉华高光兴庆贵如耀飞仁宏欣皇洪宏金满鼎耀巨义德昌源中洪裕祥晶"
                   "本国金洪昌金源恒福万义久多谦高佳欣和凯本泰春贵大浩永寿昌禄金弘仁美久升亨辉久茂皇弘泰德成"
                   "宏美辉辉禄仁华晶春干圣长同耀光庆华晶生新辉鑫金满中千谦瑞祥昌茂复长新祥祥福同优佳恒千如兴"
                   "裕华凯康全贵巨旺祥捷厚贵富宏义盛谦同盛同益谦润东广千进辉升复昌聚吉飞飞元公台本华升美久长"
                   "庆亚升东正高弘亚庆和寿宏满万优伟浩新合聚庆万广寿东恒光圣润同高谦昌兴义仁安本捷公进康益金"
                   "庆正进正千辉和升本益高广中百新庆金同如鼎寿茂鼎庆茂瑞全禄辉美贵优丰益同信兴聚浩新协宝耀圣"
                   "晶盈飞安荣富千祥成源裕合兴佳裕旺金长禄亨本大德成亨皇通全华贵弘成福聚信福光盛丰满宏福益国"
                   "弘生弘源新万泰成生伟兴兴辉和大元和协通千宝协伟荣长禄晶盛欣隆新本复正盛和皇升万益高盈义裕"
                   "成仁巨弘千亚耀吉庆厚国新高利和润中捷亚信百合亨佳佳多信鑫永复公千佳捷元东宝协大贵本满泰长"
                   "协耀圣仁旺生干盛恒义多宏益协润长皇伟晶茂大辉谦多台高恒巨兴辉台华升满公升成元利利厚隆裕厚"
                   "高公通浩凯金皇庆新发宏大本谦升欣升华益巨益百辉亨辉成欣庆同晶瑞义久成佳利优进满康信盈东盛"
                   "华义公贵美宝信丰正谦旺华皇吉如鑫泰协全优福寿中生厚成生亚公弘顺千信祥和圣金华康德台顺全厚"
                   "协亨美万瑞美东飞万飞如长仁高全汇升宏利吉泰益发谦亚汇亚恒耀恒飞浩益通捷亨新恒百佳中成公圣"
                   "宏满鑫成旺禄元福凯百永东源庆耀万鼎公春昌广润全聚德旺洪隆宝伟亨合满隆进升盛东正新多进浩康"
                   "长合大耀和美厚如寿鑫禄德仁发庆光通义荣盈昌升荣优华国成欣大宏丰光亚复万光春鼎汇旺和辉辉伟"
                   "捷汇通寿耀益皇盛晶隆义同合益春通万飞弘如安信本利安复协庆吉新永久公鑫广同富源公宏台长辉耀"
                   "光千佳宝康祥盛富升顺亚吉皇美润仁广仁台瑞干隆美信优伟安生如成耀盛润升正升新公荣宏恒洪圣泰"
                   "弘升美益顺隆大生新茂复丰亚华恒仁弘富公美昌干永满汇如洪昌荣飞新谦万百丰进宝禄贵千生进大润"
                   "禄祥公金祥聚兴和旺盈晶百义协巨顺裕中发千辉亨美本元丰金盈盛新全国源和协富谦发万耀福大发浩"
                   "隆正宏升弘旺长德百发鼎金满春新成新台正弘润晶大盈茂厚富泰通厚协百源复广恒欣合圣本巨复多正"
                   "伟润高满凯仁凯高禄万本复信满德升茂金如富谦旺佳美盈千发宝禄进兴鼎丰圣广公进昌东润进优祥生"
                   "辉茂安顺正伟圣宝优庆厚新益亚鑫皇浩兴顺多生寿金益千丰旺义东光庆泰全协吉兴千瑞丰兴茂泰庆捷"
                   "丰升弘茂鼎润复永发多成美聚福贵合光亚聚庆大大万顺贵进光国顺飞耀佳合巨洪源祥聚百汇兴本洪荣"
                   "利春庆协成昌瑞同厚春百光国如升同仁佳合成复凯佳汇升鼎宝宝进洪和信昌康润源圣巨康同欣浩辉正"
                   "永汇泰禄弘鼎多厚和佳进荣如茂全贵祥飞祥祥汇禄合源盈如和庆利寿旺汇春盈荣洪宏凯宝润如洪金鼎"
                   "聚安和吉宏捷亚伟美洪元吉厚谦吉凯汇晶中义升协吉大益祥中鑫成正盛福满辉成亨福富益洪厚禄佳益"
                   "亨巨圣辉厚皇")
    # Business categories, comma separated.
    service_type = ("咨询,中介,科技服务,文化交流服务,技术服务,信息服务,零售贸易,制造,批发贸易,集团,餐饮服务,"
                    "餐饮管理,旅游质询,人事服务")
    company_type = "股份有限公司,有限责任公司"
    # NOTE(review): the pattern string, the ',' split separators and the ''
    # join separator below were lost during extraction; they are restored
    # from the format() keywords and the comma-delimited data above.
    company_pattern = '{region_info}{middle_word}{service_type}{company_type}'
    return company_pattern.format(region_info=random.choice(region_info.split(',')),
                                  middle_word=''.join([random.choice(middle_word)
                                                       for _ in range(random.randint(2, 5))]),
                                  service_type=random.choice(service_type.split(',')),
                                  company_type=random.choice(company_type.split(',')))
随机生成一个公司名称 :returns: * company_name: (string) 公司名称 举例如下:: print('--- gen_random_company_name demo ---') print(gen_random_company_name()) print('---') 输出结果:: --- gen_random_company_name demo --- 上海大升旅游质询有限责任公司 ---
def data(self):
    """bytes: value data as a byte string.

    Raises:
        WinRegistryValueError: if the value data cannot be read.
    """
    try:
        return self._pyregf_value.data
    except IOError as exception:
        # NOTE(review): the message literal was lost during extraction;
        # restored to match dfwinreg's error-message convention — confirm
        # the exact wording against upstream.
        raise errors.WinRegistryValueError(
            'Unable to read data from value: {0:s} with error: {1!s}'.format(
                self._pyregf_value.name, exception))
bytes: value data as a byte string. Raises: WinRegistryValueError: if the value data cannot be read.
def get_config(self):
    """serialize to a dict all attributes except model weights

    Returns
    -------
    dict
    """
    self.update_network_description()
    result = dict(self.__dict__)
    # NOTE(review): the four key literals below were lost during extraction
    # (empty subscripts are not valid Python). Per the docstring they
    # presumably named the attributes holding model weights / live objects
    # that must not be serialized — restore from upstream before use.
    result[] = None
    result[] = None
    result[] = None
    result[] = None
    return result
serialize to a dict all attributes except model weights Returns ------- dict
def _initialize(self, runtime):
    """Common initializer for OsidManager and OsidProxyManager.

    Resolves CloudFront ("cf_") and S3 ("s3_") settings from the runtime
    configuration and caches the string values in ``self._config_map``.

    Raises:
        NullArgument: if ``runtime`` is None.
        IllegalState: if this manager has already been initialized.
    """
    if runtime is None:
        raise NullArgument()
    # Only one initialization is allowed per manager instance.
    if self._my_runtime is not None:
        raise IllegalState()
    self._my_runtime = runtime
    config = runtime.get_configuration()
    # NOTE(review): every Id(...) call below lost its parameter-identifier
    # string during extraction, and the _config_map subscripts at the
    # bottom lost their key strings (empty calls/subscripts are not valid
    # Python). Restore the literals from upstream before using this module.
    cf_public_key_param_id = Id()
    cf_private_key_param_id = Id()
    cf_keypair_id_param_id = Id()
    cf_private_key_file_param_id = Id()
    cf_distro_param_id = Id()
    cf_distro_id_param_id = Id()
    s3_public_key_param_id = Id()
    s3_private_key_param_id = Id()
    s3_bucket_param_id = Id()
    # Resolve each configured parameter to its plain string value.
    cf_public_key = config.get_value_by_parameter(cf_public_key_param_id).get_string_value()
    cf_private_key = config.get_value_by_parameter(cf_private_key_param_id).get_string_value()
    cf_keypair_id = config.get_value_by_parameter(cf_keypair_id_param_id).get_string_value()
    cf_private_key_file = config.get_value_by_parameter(
        cf_private_key_file_param_id).get_string_value()
    cf_distro = config.get_value_by_parameter(cf_distro_param_id).get_string_value()
    cf_distro_id = config.get_value_by_parameter(cf_distro_id_param_id).get_string_value()
    s3_public_key = config.get_value_by_parameter(s3_public_key_param_id).get_string_value()
    s3_private_key = config.get_value_by_parameter(s3_private_key_param_id).get_string_value()
    s3_bucket = config.get_value_by_parameter(s3_bucket_param_id).get_string_value()
    # Cache everything for later lookups (map keys lost in extraction).
    self._config_map[] = cf_public_key
    self._config_map[] = cf_private_key
    self._config_map[] = cf_keypair_id
    self._config_map[] = cf_private_key_file
    self._config_map[] = cf_distro
    self._config_map[] = cf_distro_id
    self._config_map[] = s3_public_key
    self._config_map[] = s3_private_key
    self._config_map[] = s3_bucket
Common initializer for OsidManager and OsidProxyManager
def new(self, rows, columns=None):
    """Shortcut for manually instantiating a
    :class:`~riak.ts_object.TsObject` bound to this table.

    :param rows: A list of lists with timeseries data
    :type rows: list
    :param columns: A list of Column names and types. Optional.
    :type columns: list
    :rtype: :class:`~riak.ts_object.TsObject`
    """
    # Imported inside the function, as in the original module
    # (presumably to avoid a circular import at load time).
    from riak.ts_object import TsObject
    ts_obj = TsObject(self._client, self, rows, columns)
    return ts_obj
A shortcut for manually instantiating a new :class:`~riak.ts_object.TsObject` :param rows: A list of lists with timeseries data :type rows: list :param columns: A list of Column names and types. Optional. :type columns: list :rtype: :class:`~riak.ts_object.TsObject`
def predict(self, u=0):
    """Predict next state (prior) using the Kalman filter state
    propagation equations, in square-root form.

    Parameters
    ----------
    u : np.array, optional
        Optional control vector. If non-zero, it is multiplied by B to
        create the control input into the system.
    """
    # Propagate the state estimate: x = F x + B u.
    self.x = dot(self.F, self.x) + dot(self.B, u)

    # Propagate the square-root covariance factor via QR decomposition of
    # [F * P^(1/2), Q^(1/2)]^T; the upper-triangular factor holds the new
    # square root of the prior covariance.
    stacked = np.hstack([dot(self.F, self._P1_2), self._Q1_2])
    _, upper = qr(stacked.T)
    self._P1_2 = upper[:self.dim_x, :self.dim_x].T

    # Snapshot the prior for later reference.
    self.x_prior = np.copy(self.x)
    self._P1_2_prior = np.copy(self._P1_2)
Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- u : np.array, optional Optional control vector. If non-zero, it is multiplied by B to create the control input into the system.
def start(name):
    '''
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    # NOTE(review): the command literals and __salt__ keys below were lost
    # during extraction; they are restored to match Salt's SMF (Solaris)
    # service module, whose start() has this exact retcode-3/clear/retry
    # shape — confirm against upstream before use.
    cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    if not retcode:
        return True
    if retcode == 3:
        # Return code 3 means there was a problem with the service.
        # A common cause is maintenance mode: clear it and retry once.
        clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name)
        __salt__['cmd.retcode'](clear_cmd, python_shell=False)
        return not __salt__['cmd.retcode'](cmd, python_shell=False)
    return False
Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name>
def settings(self, **kwargs):
    """Apply a batch of settings to the canvas.

    Each keyword argument is stored as an attribute of the same name on
    this object (via setattr, so properties/descriptors still fire).
    """
    for attr_name in kwargs:
        setattr(self, attr_name, kwargs[attr_name])
Pass a load of settings into the canvas
async def stream_as_text(stream):
    """Given a stream of bytes or text, yield every item as text.

    Bytes items are decoded as UTF-8 with undecodable sequences replaced
    (not raised), so partially-mangled byte streams still flow through.

    This function can be removed once we return text streams instead of
    byte streams.
    """
    async for data in stream:
        # ``async for`` requires Python 3, where six.text_type == str, so
        # the six dependency is dropped. The 'utf-8'/'replace' decode
        # arguments were lost during extraction and are restored here.
        if not isinstance(data, str):
            data = data.decode('utf-8', 'replace')
        yield data
Given a stream of bytes or text, if any of the items in the stream are bytes convert them to text. This function can be removed once we return text streams instead of byte streams.
def pcpool(name, cvals):
    """Programmatically insert character data into the kernel pool.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html

    :param name: The kernel pool name to associate with cvals.
    :type name: str
    :param cvals: An array of strings to insert into the kernel pool.
    :type cvals: Array of str
    """
    name_ptr = stypes.stringToCharP(name)
    # Width of each row: the longest string plus its null terminator.
    width = ctypes.c_int(len(max(cvals, key=len)) + 1)
    count = ctypes.c_int(len(cvals))
    char_array = stypes.listToCharArray(cvals, width, count)
    libspice.pcpool_c(name_ptr, count, width, char_array)
This entry point provides toolkit programmers a method for programmatically inserting character data into the kernel pool. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html :param name: The kernel pool name to associate with cvals. :type name: str :param cvals: An array of strings to insert into the kernel pool. :type cvals: Array of str