def depth(self, value):
    for command in self.subcommands.values():
        command.depth = value + 1
        del command.argparser._defaults[self.arg_label_fmt % self._depth]
        command.argparser._defaults[self.arg_label_fmt % value] = command
    self._depth = value
Update our own depth and that of any of our subcommands.
def load(self, filepath):
    try:
        self._config.read(filepath)
        import ast
        self.connection.timeout = \
            self._config.getint("Connection", "timeout")
        self.connection.verify = \
            self._config.getboolean("Connection", "verify")
        self.connection.cert = \
            self._config.get("Connection", "cert")
        self.proxies.proxies = \
            ast.literal_eval(self._config.get("Proxies", "proxies"))
        self.proxies.use_env_settings = \
            self._config.getboolean("Proxies", "env_settings")
        self.redirect_policy.allow = \
            self._config.getboolean("RedirectPolicy", "allow")
        self.redirect_policy.max_redirects = \
            self._config.getint("RedirectPolicy", "max_redirects")
    except (ValueError, EnvironmentError, NoOptionError):
        error = "Supplied config file incompatible."
        raise_with_traceback(ValueError, error)
    finally:
        self._clear_config()
Load configuration from existing file. :param str filepath: Path to existing config file. :raises: ValueError if supplied config file is invalid.
def queryModelIDs(self):
    jobID = self.getJobID()
    modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
    modelIDs = tuple(x[0] for x in modelCounterPairs)
    return modelIDs
Queries DB for model IDs of all currently instantiated models associated with this HyperSearch job. See also: _iterModels() Parameters: ---------------------------------------------------------------------- retval: A sequence of Nupic modelIDs
def checkppolicy(self, **params):
    self._check_auth(must_admin=False, redir_login=False)
    keys = list(params.keys())
    if len(keys) != 1:
        cherrypy.response.status = 400
        return "bad argument"
    password = params[keys[0]]
    is_admin = self._check_admin()
    ret = self._checkppolicy(password)
    # the result key and the json separators were lost in extraction;
    # 'match' and (',', ':') are assumptions
    if ret['match']:
        cherrypy.response.status = 200
    else:
        cherrypy.response.status = 200
    return json.dumps(ret, separators=(',', ':'))
Check a password against the password policy.
def metrics(self, *metrics):
    for m in metrics:
        self._cauldron.use(self._shelf.find(m, Metric))
    self.dirty = True
    return self
Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list
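A hedged usage sketch; the shelf contents, the recipe object, and the .all() call are assumptions based on the docstring, not confirmed API:
# Hypothetical usage: chainable, so metrics() returns the recipe itself.
recipe = recipe.metrics("revenue", cost_metric)  # key on shelf + Metric object
rows = recipe.all()  # each row is assumed to expose .revenue and .cost values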
def get_restore_path(self, status=None):
    status = self.get_status() if status is None else status
    return config.get_restore_path(status.name.lower())
get_restore_path: get path to restoration file Args: status (str): step to get restore file (optional) Returns: string path to restoration file
def patch_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs):
    # the kwarg keys were lost in extraction; these are the standard
    # swagger-codegen names used throughout the kubernetes client
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.patch_cluster_custom_object_status_with_http_info(
            group, version, plural, name, body, **kwargs)
    else:
        (data) = self.patch_cluster_custom_object_status_with_http_info(
            group, version, plural, name, body, **kwargs)
        return data
patch_cluster_custom_object_status # noqa: E501 partially update status of the specified cluster scoped custom object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param UNKNOWN_BASE_TYPE body: (required) :return: object If the method is called asynchronously, returns the request thread.
def persistent_attributes(self, persistent_attributes):
    if not self._persistence_adapter:
        raise AttributesManagerException(
            "Cannot set PersistentAttributes without persistence adapter!")
    # fixed attribute name: the original assigned `_persistence_attributes`,
    # which the rest of the class never reads
    self._persistent_attributes = persistent_attributes
    self._persistent_attributes_set = True
Overwrites and caches the persistent attributes value. Note that the persistent attributes will not be saved to persistence layer until the save_persistent_attributes method is called. :param persistent_attributes: attributes in persistence layer :type persistent_attributes: Dict[str, object] :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException` if trying to set persistent attributes without persistence adapter
def parsed_aggregate_reports_to_csv(reports):
    def to_str(obj):
        return str(obj).lower()

    fields = ["xml_schema", "org_name", "org_email",
              "org_extra_contact_info", "report_id", "begin_date",
              "end_date", "errors", "domain", "adkim", "aspf", "p", "sp",
              "pct", "fo", "source_ip_address", "source_country",
              "source_reverse_dns", "source_base_domain", "count",
              "disposition", "dkim_alignment", "spf_alignment",
              "policy_override_reasons", "policy_override_comments",
              "envelope_from", "header_from", "envelope_to", "dkim_domains",
              "dkim_selectors", "dkim_results", "spf_domains", "spf_scopes",
              "spf_results"]

    csv_file_object = StringIO(newline="\n")
    writer = DictWriter(csv_file_object, fields)
    writer.writeheader()

    if type(reports) == OrderedDict:
        reports = [reports]

    for report in reports:
        xml_schema = report["xml_schema"]
        org_name = report["report_metadata"]["org_name"]
        org_email = report["report_metadata"]["org_email"]
        org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
        report_id = report["report_metadata"]["report_id"]
        begin_date = report["report_metadata"]["begin_date"]
        end_date = report["report_metadata"]["end_date"]
        errors = "|".join(report["report_metadata"]["errors"])
        domain = report["policy_published"]["domain"]
        adkim = report["policy_published"]["adkim"]
        aspf = report["policy_published"]["aspf"]
        p = report["policy_published"]["p"]
        sp = report["policy_published"]["sp"]
        pct = report["policy_published"]["pct"]
        fo = report["policy_published"]["fo"]

        report_dict = dict(xml_schema=xml_schema, org_name=org_name,
                           org_email=org_email,
                           org_extra_contact_info=org_extra_contact,
                           report_id=report_id, begin_date=begin_date,
                           end_date=end_date, errors=errors, domain=domain,
                           adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct,
                           fo=fo)

        for record in report["records"]:
            row = report_dict
            row["source_ip_address"] = record["source"]["ip_address"]
            row["source_country"] = record["source"]["country"]
            row["source_reverse_dns"] = record["source"]["reverse_dns"]
            row["source_base_domain"] = record["source"]["base_domain"]
            row["count"] = record["count"]
            row["disposition"] = record["policy_evaluated"]["disposition"]
            row["spf_alignment"] = record["policy_evaluated"]["spf"]
            row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
            policy_override_reasons = list(map(
                lambda r: r["type"],
                record["policy_evaluated"]["policy_override_reasons"]))
            policy_override_comments = list(map(
                lambda r: r["comment"] or "none",
                record["policy_evaluated"]["policy_override_reasons"]))
            row["policy_override_reasons"] = ",".join(
                policy_override_reasons)
            row["policy_override_comments"] = "|".join(
                policy_override_comments)
            row["envelope_from"] = record["identifiers"]["envelope_from"]
            row["header_from"] = record["identifiers"]["header_from"]
            envelope_to = record["identifiers"]["envelope_to"]
            row["envelope_to"] = envelope_to
            dkim_domains = []
            dkim_selectors = []
            dkim_results = []
            for dkim_result in record["auth_results"]["dkim"]:
                dkim_domains.append(dkim_result["domain"])
                if "selector" in dkim_result:
                    dkim_selectors.append(dkim_result["selector"])
                dkim_results.append(dkim_result["result"])
            row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
            row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
            row["dkim_results"] = ",".join(map(to_str, dkim_results))
            spf_domains = []
            spf_scopes = []
            spf_results = []
            for spf_result in record["auth_results"]["spf"]:
                spf_domains.append(spf_result["domain"])
                spf_scopes.append(spf_result["scope"])
                spf_results.append(spf_result["result"])
            row["spf_domains"] = ",".join(map(to_str, spf_domains))
            row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
            # bug fix: the original joined dkim_results into this column
            row["spf_results"] = ",".join(map(to_str, spf_results))

            writer.writerow(row)
            csv_file_object.flush()

    return csv_file_object.getvalue()
Converts one or more parsed aggregate reports to flat CSV format, including headers Args: reports: A parsed aggregate report or list of parsed aggregate reports Returns: str: Parsed aggregate report data in flat CSV format, including headers
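A minimal usage sketch, assuming `reports` already holds one or more parsed aggregate reports from this library:
csv_text = parsed_aggregate_reports_to_csv(reports)
with open("aggregate.csv", "w", newline="\n") as f:
    f.write(csv_text)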
def get_vboxes(self):
    vbox_list = []
    vbox_max = None
    for node in self.nodes:
        # the key and value literals were lost in extraction; 'type',
        # 'VBoxDevice' and 'vbox_id' are assumptions based on the docstring
        if node['type'] == 'VBoxDevice':
            vbox_list.append(node['vbox_id'])
    if len(vbox_list) > 0:
        vbox_max = max(vbox_list)
    return vbox_max
Get the maximum ID of the VBoxes :return: Maximum VBox ID :rtype: int
def verify_light_chains(self, threshold=0.9):
    lseqs = [l.light for l in self.lights]
    clusters = cluster(lseqs, threshold=threshold)
    clusters.sort(key=lambda x: x.size, reverse=True)
    verified_ids = clusters[0].ids
    for p in self.lights:
        p.verified = True if p.name in verified_ids else False
Clusters the light chains to identify potentially spurious (non-lineage) pairings. Following clustering, all pairs in the largest light chain cluster are assumed to be correctly paired. For each of those pairs, the <verified> attribute is set to True. For pairs not in the largest light chain cluster, the <verified> attribute is set to False. Inputs (optional) ----------------- threshold: CD-HIT clustering threshold. Default is 0.9.
def AddDischargingBattery(self, device_name, model_name, percentage, seconds_to_empty):
    # the string literals were lost in extraction; the object path prefix
    # and property names are reconstructed from the UPower D-Bus API and
    # should be treated as assumptions
    path = '/org/freedesktop/UPower/devices/' + device_name
    self.AddObject(path, DEVICE_IFACE, {
        'PowerSupply': dbus.Boolean(True, variant_level=1),
        'IsPresent': dbus.Boolean(True, variant_level=1),
        'Model': dbus.String(model_name, variant_level=1),
        'Percentage': dbus.Double(percentage, variant_level=1),
        'TimeToEmpty': dbus.Int64(seconds_to_empty, variant_level=1),
        'EnergyFull': dbus.Double(100.0, variant_level=1),
        'Energy': dbus.Double(percentage, variant_level=1),
        # 2 == discharging, 2 == battery in UPower's State/Type enums
        'State': dbus.UInt32(2, variant_level=1),
        'Type': dbus.UInt32(2, variant_level=1),
    }, [])
    self.EmitSignal(MAIN_IFACE, 'DeviceAdded', self.device_sig_type, [path])
    return path
Convenience method to add a discharging battery object You have to specify a device name which must be a valid part of an object path, e. g. "mock_ac", an arbitrary model name, the charge percentage, and the seconds until the battery is empty. Please note that this does not set any global properties such as "on-battery". Returns the new object path.
def appendBPoint(self, type=None, anchor=None, bcpIn=None, bcpOut=None,
                 bPoint=None):
    if bPoint is not None:
        if type is None:
            type = bPoint.type
        if anchor is None:
            anchor = bPoint.anchor
        if bcpIn is None:
            bcpIn = bPoint.bcpIn
        if bcpOut is None:
            bcpOut = bPoint.bcpOut
    type = normalizers.normalizeBPointType(type)
    anchor = normalizers.normalizeCoordinateTuple(anchor)
    if bcpIn is None:
        bcpIn = (0, 0)
    bcpIn = normalizers.normalizeCoordinateTuple(bcpIn)
    if bcpOut is None:
        bcpOut = (0, 0)
    bcpOut = normalizers.normalizeCoordinateTuple(bcpOut)
    self._appendBPoint(type, anchor, bcpIn=bcpIn, bcpOut=bcpOut)
Append a bPoint to the contour.
def dump2json(self, obj, filepath, override=False, **kwargs):
    if isinstance(obj, dict):
        pass
    else:
        raise _NotADictionary(
            "This function only accepts dictionaries as input")
    # the 'json' suffix literal and file mode were lost in extraction
    if str(filepath[-4:]) == 'json':
        pass
    else:
        filepath = ".".join((str(filepath), "json"))
    if override is False:
        if os.path.isfile(filepath):
            raise _FileAlreadyExists(
                "The file {0} already exists. Use a different filepath, "
                "or set the 'override' kwarg to True.".format(filepath))
    with open(filepath, 'w') as json_file:
        json.dump(obj, json_file, **kwargs)
Dump a dictionary into a JSON file. Uses the json.dump() function. Parameters ---------- obj : :class:`dict` A dictionary to be dumped as a JSON file. filepath : :class:`str` The filepath for the dumped file. override : :class:`bool` If True, any file at the filepath will be overwritten. (default=False)
def create_pg_notify_event(notif):
    if notif.channel not in _CHANNEL_MAPPER:
        cls = _CHANNEL_MAPPER[None]
    else:
        cls = _CHANNEL_MAPPER[notif.channel]
    return cls(notif)
A factory for creating a Postgres Notification Event (an object inheriting from `cnxpublishing.events.PGNotifyEvent`) given `notif`, a `psycopg2.extensions.Notify` object.
def int_attribute(element, attribute, default=0):
    attribute_value = element.get(attribute)
    if attribute_value:
        try:
            return int(attribute_value)
        except (TypeError, ValueError):
            return default
    return default
Returns the int value of an attribute, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param attribute: The name of the attribute to evaluate :type attribute: basestring :param default: The default value to return if the attribute is not defined :type default: int :rtype: int
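A quick usage sketch; the XML snippet and attribute names are invented for illustration:
from lxml import etree

elem = etree.fromstring('<item count="7" label="x"/>')
assert int_attribute(elem, "count") == 7
assert int_attribute(elem, "missing", default=-1) == -1
assert int_attribute(elem, "label", default=0) == 0  # non-numeric falls back to default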
def extract_time(match):
    # the group names were lost in extraction; 'hour' and 'minute' are
    # assumed from the time_re pattern this match comes from
    hour = int(match.group('hour'))
    minute = int(match.group('minute'))
    return dt.time(hour, minute)
Extract a time from a time_re match.
def RecordEvent(self, metric_name, value, fields=None):
    self._event_metrics[metric_name].Record(value, fields)
See base class.
async def put(self, path=''):
    self.log.info("Attempt publishing to %s", path)
    if path == '' or path == '/':
        raise web.HTTPError(400, "Must provide a path for publishing")
    model = self.get_json_body()
    if model:
        await self._publish(model, path.lstrip('/'))
    else:
        raise web.HTTPError(400, "Cannot publish an empty model")
Publish a notebook on a given path. The payload directly matches the contents API for PUT.
def find_any_reports(self, usage_page=0, usage_id=0):
    items = [
        (HidP_Input, self.find_input_reports(usage_page, usage_id)),
        (HidP_Output, self.find_output_reports(usage_page, usage_id)),
        (HidP_Feature, self.find_feature_reports(usage_page, usage_id)),
    ]
    return dict([(t, r) for t, r in items if r])
Find any report type referencing HID usage control/data item. Results are returned in a dictionary mapping report_type to usage lists.
def project_usls_on_dictionary(usls, allowed_terms=None):
    cells_to_usls = defaultdict(set)
    tables = set()
    for u in usls:
        for t in u.objects(Term):
            for c in t.singular_sequences:
                if not cells_to_usls[c]:
                    tables.update(c.relations.contained)
                cells_to_usls[c].add(u)
    if allowed_terms:
        allowed_terms = set(allowed_terms)
        tables = tables & allowed_terms
        cells_to_usls = {c: l for c, l in cells_to_usls.items()
                         if c in allowed_terms}
    tables_to_usls = {
        table: list(set(u for c in table.singular_sequences
                        for u in cells_to_usls[c]))
        for table in tables if not isinstance(table, TableSet)
    }
    return tables_to_usls
`usls` is an iterable of USLs. Returns a mapping term -> usl list.
def count(self, flag_message, padding=None, force=False):
    if self.should_log(self.COUNT) or force:
        flag_message = flag_message \
            if isinstance(flag_message, (int, float)) else \
            str(len(flag_message))
        self._print_message(
            flag_message=flag_message, padding=padding, reverse=True,
            color=colors.timing_color)
Log Level: :attr:COUNT
@flag_message: time-like #float
@padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd
@force: #bool whether or not to force the message to log in spite of the assigned log level
..
    from vital.debug import Logg
    logg = Logg(loglevel="v")
    logg("Total apps").count(3)
    # Total apps (3)
    logg().count([0, 1, 2, 3])
    # (4)
..
def add_texts(self, reference_id, texts):
    self.add_words(reference_id,
                   chain(*(self._tokenize(t) for t in texts)))
Adds the words from the provided iterable `texts` to the corpus. The strings will be tokenized. `reference_id` The reference identifier of the cable. `texts` An iterable of strings.
def _eq(self, T, P):
    if T <= 273.16:
        ice = _Ice(T, P)
        gw = ice["g"]
    else:
        water = IAPWS95(T=T, P=P)
        gw = water.g

    def f(parr):
        rho, a = parr
        if a > 1:
            a = 1
        fa = self._fav(T, rho, a)
        muw = fa["fir"] + rho*fa["fird"] - a*fa["fira"]
        return gw - muw, rho**2*fa["fird"]/1000 - P

    rinput = fsolve(f, [1, 0.95], full_output=True)
    Asat = rinput[0][1]
    return Asat
Procedure for calculate the composition in saturation state Parameters ---------- T : float Temperature [K] P : float Pressure [MPa] Returns ------- Asat : float Saturation mass fraction of dry air in humid air [kg/kg]
def get(self, sid):
    # solution keys reconstructed; extraction dropped the literals
    return CredentialContext(
        self._version,
        account_sid=self._solution['account_sid'],
        credential_list_sid=self._solution['credential_list_sid'],
        sid=sid,
    )
Constructs a CredentialContext :param sid: The unique id that identifies the resource to fetch. :returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
def __last_commit(self):
    # the command and separator literals were lost in extraction; they are
    # reconstructed from the sample `svn info` output in the docstring
    cmd = ['svn', 'info']
    op = self.sh(cmd, shell=False)
    if not op:
        return None
    author, rev, datestr = op.split('\n')[7:10]
    author = author.split(':', 1)[1].strip()
    rev = rev.split(':', 1)[1].strip()
    datestr = datestr.split(':', 1)[1].split('(', 1)[0].strip()
    return datestr, (rev, author, None, None)
Retrieve the most recent commit message (with ``svn info``)

Returns:
    tuple: (datestr, (revno, user, None, desc))

$ svn info
Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Repository Root: http://python-dlp.googlecode.com/svn
Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
Revision: 378
Node Kind: directory
Schedule: normal
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
def translate_output_properties(res: 'Resource', output: Any) -> Any:
    if isinstance(output, dict):
        return {res.translate_output_property(k): translate_output_properties(res, v)
                for k, v in output.items()}
    if isinstance(output, list):
        return [translate_output_properties(res, v) for v in output]
    return output
Recursively rewrite keys of objects returned by the engine to conform with a naming convention specified by the resource's implementation of `translate_output_property`. If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed by recursing. If output is a `list`, every value is recursively transformed. If output is a primitive (i.e. not a dict or list), the value is returned without modification.
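A small self-contained demo; the stub class and its camelCase-to-snake_case mapping are invented, since the real mapping lives in each resource implementation:
class _Stub:
    def translate_output_property(self, k):
        # hypothetical one-entry mapping; unknown keys pass through
        return {"instanceId": "instance_id"}.get(k, k)

out = translate_output_properties(_Stub(), {"instanceId": "i-123",
                                            "tags": [{"keyName": "a"}]})
# -> {'instance_id': 'i-123', 'tags': [{'keyName': 'a'}]}
# nested dict keys are also passed through the translator by recursion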
def retrieve(self, *args, **kwargs):
    lookup, key = self._lookup(*args, **kwargs)
    return lookup[key]
Retrieve the permission function for the provided things.
def elog(exc, func, args=None, kwargs=None, str=str, pretty=True, name=''):
    # the format-string literals below were lost in extraction and are
    # plausible reconstructions, not the original text
    from .str import safe_str
    args = args if args else ()
    kwargs = kwargs if kwargs else {}
    name = '{0}.{1}'.format(get_mod(func), name) if name else full_funcname(func)
    if pretty:
        invocation = ', '.join([safe_str(arg) for arg in args])
        if kwargs:
            invocation += ', '
            invocation += ', '.join(['{0}={1}'.format(key, safe_str(value))
                                     for key, value in sorted(kwargs.items())])
    else:
        invocation = '*{0}, **{1}'.format(safe_str(args), safe_str(kwargs))
    msg = '{0}({1}) raised by {2}({3})'.format(get_typename(exc), message(exc),
                                               name, invocation)
    elogger.error(msg)
For logging exception-raising function invocations during randomized unit tests.
def process_api_config_response(self, config_json):
    # the dict-key literals were dropped by extraction; 'items', 'name',
    # 'version' etc. follow the Google Endpoints API config schema and
    # should be treated as assumptions
    with self._config_lock:
        self._add_discovery_config()
        for config in config_json.get('items', []):
            lookup_key = config.get('name', ''), config.get('version', '')
            self._configs[lookup_key] = config
        for config in self._configs.itervalues():
            name = config.get('name', '')
            api_version = config.get('api_version', '')
            path_version = config.get('path_version', '')
            sorted_methods = self._get_sorted_methods(config.get('methods', {}))
            for method_name, method in sorted_methods:
                self._save_rest_method(method_name, name, path_version, method)
Parses a JSON API config and registers methods for dispatch. Side effects: Parses method name, etc. for all methods and updates the indexing data structures with the information. Args: config_json: A dict, the JSON body of the getApiConfigs response.
def load_class_by_name(name: str):
    mod_path, _, cls_name = name.rpartition('.')
    mod = importlib.import_module(mod_path)
    cls = getattr(mod, cls_name)
    return cls
Given a dotted path, returns the class
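A minimal usage example against the standard library:
cls = load_class_by_name("collections.OrderedDict")
assert cls.__name__ == "OrderedDict"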
def export(self, filename=''):
    # the task name literal was dropped by extraction; 'export' is assumed
    from smc.administration.tasks import Task
    return Task.download(self, 'export', filename)
Export this element. Usage:: engine = Engine('myfirewall') extask = engine.export(filename='fooexport.zip') while not extask.done(): extask.wait(3) print("Finished download task: %s" % extask.message()) print("File downloaded to: %s" % extask.filename) :param str filename: filename to store exported element :raises TaskRunFailed: invalid permissions, invalid directory, or this element is a system element and cannot be exported. :return: DownloadTask .. note:: It is not possible to export system elements
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    params.update({
        'side': self.SIDE_BUY,
    })
    return self.order_limit(timeInForce=timeInForce, **params)
Send in a new limit buy order Any order with an icebergQty MUST have timeInForce set to GTC. :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param price: required :type price: str :param timeInForce: default Good till cancelled :type timeInForce: str :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param stopPrice: Used with stop orders :type stopPrice: decimal :param icebergQty: Used with iceberg orders :type icebergQty: decimal :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
def get_process(cmd):
    if sys.platform.startswith('win'):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        process = subprocess.Popen(
            cmd, startupinfo=startupinfo,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE, shell=False
        )
    else:
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE, shell=False
        )
    return process
Get a command process.
def bovy_print(fig_width=5, fig_height=5, axes_labelsize=16,
               text_fontsize=11, legend_fontsize=12,
               xtick_labelsize=10, ytick_labelsize=10,
               xtick_minor_size=2, ytick_minor_size=2,
               xtick_major_size=4, ytick_major_size=4):
    # the rcParams keys and the LaTeX preamble were dropped by extraction;
    # they are reconstructed as the standard matplotlib keys matching each
    # argument and should be treated as assumptions
    fig_size = [fig_width, fig_height]
    params = {'axes.labelsize': axes_labelsize,
              'font.size': text_fontsize,
              'legend.fontsize': legend_fontsize,
              'xtick.labelsize': xtick_labelsize,
              'ytick.labelsize': ytick_labelsize,
              'text.usetex': True,
              'figure.figsize': fig_size,
              'xtick.major.size': xtick_major_size,
              'ytick.major.size': ytick_major_size,
              'xtick.minor.size': xtick_minor_size,
              'ytick.minor.size': ytick_minor_size,
              'legend.numpoints': 1,
              'xtick.top': True,
              'xtick.direction': 'in',
              'ytick.right': True,
              'ytick.direction': 'in'}
    pyplot.rcParams.update(params)
    rc('text.latex', preamble=r'\usepackage{amsmath}' + '\n'
       + r'\usepackage{amssymb}')
NAME: bovy_print PURPOSE: setup a figure for plotting INPUT: fig_width - width in inches fig_height - height in inches axes_labelsize - size of the axis-labels text_fontsize - font-size of the text (if any) legend_fontsize - font-size of the legend (if any) xtick_labelsize - size of the x-axis labels ytick_labelsize - size of the y-axis labels xtick_minor_size - size of the minor x-ticks ytick_minor_size - size of the minor y-ticks OUTPUT: (none) HISTORY: 2009-12-23 - Written - Bovy (NYU)
def save_base_map(filename, grouped_by_text):
    # several literals ('UP', the empty fallback name, the csv dialect
    # options) were dropped by extraction and are reconstructions
    rows = []
    for group in grouped_by_text:
        text_string = group[0]
        for db, db_id, count in group[1]:
            if db == 'UP':
                name = uniprot_client.get_mnemonic(db_id)
            else:
                name = ''
            row = [text_string, db, db_id, count, name]
            rows.append(row)
    write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
                      quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
Dump a list of agents along with groundings and counts into a csv file Parameters ---------- filename : str Filepath for output file grouped_by_text : list of tuple List of tuples of the form output by agent_texts_with_grounding
def cudaMemcpy_dtoh(dst, src, count):
    status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count),
                                   cudaMemcpyDeviceToHost)
    cudaCheckStatus(status)
Copy memory from device to host. Copy data from device memory to host memory. Parameters ---------- dst : ctypes pointer Host memory pointer. src : ctypes pointer Device memory pointer. count : int Number of bytes to copy.
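A hedged usage sketch; the host buffer setup is standard ctypes/numpy, but `src` is assumed to be a device pointer obtained elsewhere (e.g. from a cudaMalloc wrapper in the same module):
import ctypes
import numpy as np

host = np.empty(1024, dtype=np.float32)
dst = host.ctypes.data_as(ctypes.c_void_p)
# `src` is an assumed device pointer holding at least host.nbytes bytes
cudaMemcpy_dtoh(dst, src, host.nbytes)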
def run(self, func=None):
    to_run = self.prepare_namespace(func)
    result = to_run(*self.args, **self.kwargs)
    return result
Evaluates the packaged function as func(*self.args,**self.kwargs) If func is a method of an object, it's accessed as getattr(self.obj,__name__). If it's a user-defined function, it needs to be passed in here because it can't be serialized. Returns: object: function's return value
def add_ref(self, name, ref):
    if self._name is None:
        self._name = name
    elif name != self._name:
        # message text reconstructed; the original literal was lost
        raise RuntimeError('Contexts can only share between backends of '
                           'the same type')
    self._refs.append(weakref.ref(ref))
Add a reference for the backend object that gives access to the low level context. Used in vispy.app.canvas.backends. The given name must match with that of previously added references.
def _updateTransitionMatrix(self):
    # the exception message text around str(C) was lost in extraction and
    # is reconstructed here
    C = self.model.count_matrix() + self.prior_C
    if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True):
        raise NotImplementedError('Encountered disconnected count matrix:\n'
                                  + str(C)
                                  + '\nwith reversible sampling enabled.')
    P0 = msmest.transition_matrix(C, reversible=self.reversible,
                                  maxiter=10000, warn_not_converged=False)
    zeros = np.where(P0 + P0.T == 0)
    C[zeros] = 0
    Tij = msmest.sample_tmatrix(C, nsample=1,
                                nsteps=self.transition_matrix_sampling_steps,
                                reversible=self.reversible)
    if self.stationary:
        p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C)
    else:
        n0 = self.model.count_init().astype(float)
        first_timestep_counts_with_prior = n0 + self.prior_n0
        positive = first_timestep_counts_with_prior > 0
        p0 = np.zeros_like(n0)
        p0[positive] = np.random.dirichlet(
            first_timestep_counts_with_prior[positive])
    self.model.update(p0, Tij)
Updates the hidden-state transition matrix and the initial distribution
def convertAsOpenMath(term, converter):
    if hasattr(term, "_ishelper") and term._ishelper or isinstance(term, om.OMAny):
        return interpretAsOpenMath(term)
    if converter is not None:
        try:
            _converted = converter.to_openmath(term)
        except Exception as e:
            _converted = None
        if isinstance(_converted, om.OMAny):
            return _converted
    return interpretAsOpenMath(term)
Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method
def fromdict(dict):
    # the key literals were dropped by extraction; 'seed' and 'index' are
    # inferred from the constructor arguments
    seed = hb_decode(dict['seed'])
    index = dict['index']
    return Challenge(seed, index)
Takes a dictionary as an argument and returns a new Challenge object from the dictionary. :param dict: the dictionary to convert
def get_client_data(self, client):
    # the HTTP verb and URL template were dropped by extraction; the values
    # below are plausible reconstructions, not confirmed API paths
    data = self._request('GET', '/clients/{0}'.format(client))
    return data.json()
Returns a client.
def add_entry(self, length):
    offset = -1
    for index, entry in enumerate(self._entries):
        if index == 0:
            if entry.offset != 0 and length <= entry.offset:
                offset = 0
                break
        else:
            lastentry = self._entries[index - 1]
            lastend = lastentry.offset + lastentry.length - 1
            gapsize = entry.offset - lastend - 1
            if gapsize >= length:
                offset = lastend + 1
                break
    else:
        # no gap between existing entries; try the tail of the block
        if self._entries:
            lastentry = self._entries[-1]
            lastend = lastentry.offset + lastentry.length - 1
            left = self._max_block_size - lastend - 1
            if left >= length:
                offset = lastend + 1
        else:
            if self._max_block_size >= length:
                offset = 0
    if offset >= 0:
        bisect.insort_left(self._entries,
                           RockRidgeContinuationEntry(offset, length))
        return offset
    return None
Add a new entry to this Rock Ridge Continuation Block. This method attempts to find a gap that fits the new length anywhere within this Continuation Block. If successful, it returns the offset at which it placed this entry. If unsuccessful, it returns None. Parameters: length - The length of the entry to find a gap for. Returns: The offset the entry was placed at, or None if no gap was found.
def db_putHex(self, db_name, key, value):
    warnings.warn('deprecated', DeprecationWarning)
    if not value.startswith('0x'):
        value = add_0x(value)
    return (yield from self.rpc_call('db_putHex', [db_name, key, value]))
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex DEPRECATED
def visit_comprehension(self, node, parent):
    newnode = nodes.Comprehension(parent)
    newnode.postinit(
        self.visit(node.target, newnode),
        self.visit(node.iter, newnode),
        [self.visit(child, newnode) for child in node.ifs],
        getattr(node, "is_async", None),
    )
    return newnode
visit a Comprehension node by returning a fresh instance of it
def check_undelivered(to=None):
    # the email subject and body literals were dropped by extraction; the
    # texts below are plausible reconstructions, not the originals
    failed_count = Dispatch.objects.filter(
        dispatch_status=Dispatch.DISPATCH_STATUS_FAILED).count()
    if failed_count:
        from sitemessage.shortcuts import schedule_email
        from sitemessage.messages.email import EmailTextMessage
        if to is None:
            admins = settings.ADMINS
            if admins:
                to = list(dict(admins).values())
        if to:
            priority = 999
            register_message_types(EmailTextMessage)
            schedule_email(
                'You have %s undelivered dispatch(es) at %s' % (
                    failed_count, get_site_url()),
                subject='[SITEMESSAGE] Undelivered dispatches',
                to=to, priority=priority)
            send_scheduled_messages(priority=priority)
    return failed_count
Sends a notification email if any undelivered dispatches. Returns undelivered (failed) dispatches count. :param str|unicode to: Recipient address. If not set Django ADMINS setting is used. :rtype: int
def runSearchReads(self, request):
    return self.runSearchRequest(
        request, protocol.SearchReadsRequest,
        protocol.SearchReadsResponse, self.readsGenerator)
Runs the specified SearchReadsRequest.
def g_voigt(self):
    return (2. * self.voigt[:3, :3].trace()
            - np.triu(self.voigt[:3, :3]).sum()
            + 3 * self.voigt[3:, 3:].trace()) / 15.
returns the G_v shear modulus
def cnst_A1(self, X, Xf=None):
    # the stray raw-docstring prefix that leaked into the code is removed
    if Xf is None:
        Xf = sl.rfftn(X, axes=self.cri.axisN)
    return sl.irfftn(sl.inner(self.GDf, Xf[..., np.newaxis],
                              axis=self.cri.axisM),
                     self.cri.Nv, self.cri.axisN)
r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
def _create_cipher(self, password, salt, IV):
    from Crypto.Protocol.KDF import PBKDF2
    from Crypto.Cipher import AES
    pw = PBKDF2(password, salt, dkLen=self.block_size)
    return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
Create the cipher object to encrypt or decrypt a payload.
def find_project_by_short_name(short_name, pbclient, all=None):
    # the message literals were dropped by extraction; reconstructions below
    try:
        response = pbclient.find_project(short_name=short_name, all=all)
        check_api_error(response)
        if (len(response) == 0):
            msg = 'Project not found'
            error = 'project not found'
            raise ProjectNotFound(msg, error)
        return response[0]
    except exceptions.ConnectionError:
        raise
    except ProjectNotFound:
        raise
Return project by short_name.
def download_attachments(self):
    # log/exception message texts, the endpoint key, and the JSON key were
    # dropped by extraction; 'value' follows the Microsoft Graph response
    # shape, the rest are reconstructions
    if not self._parent.has_attachments:
        log.debug('Parent {} has no attachments.'.format(
            self._parent.__class__.__name__))
        return False
    if not self._parent.object_id:
        raise RuntimeError('Attempted to download attachments of an '
                           'unsaved {}'.format(self._parent.__class__.__name__))
    url = self.build_url(self._endpoints.get('attachments').format(
        id=self._parent.object_id))
    response = self._parent.con.get(url)
    if not response:
        return False
    attachments = response.json().get('value', [])
    self.untrack = True
    self.add({self._cloud_data_key: attachments})
    self.untrack = False
    return True
Downloads this message attachments into memory. Need a call to 'attachment.save' to save them on disk. :return: Success / Failure :rtype: bool
def new_ele_description(**kwargs):
    # the original dict listed 19 keys, all of whose name literals were
    # lost in extraction; only the keys shown in the docstring example can
    # be reconstructed here, the rest are unrecoverable
    desc = {
        'leaf': None,
        'depth': None,
        'breadth_path': None,
        'path': None,
        'parent_path': None,
        'parent_breadth_path': None,
    }
    for key in kwargs:
        desc[key.lower()] = kwargs[key]
    return(desc)
from elist.elist import *
from elist.jprint import pobj
root_desc = new_ele_description(leaf=False, depth=0, breadth_path=[], path=[], parent_path=[], parent_breadth_path=[])
pobj(root_desc)
# None means not handled
def delete_marked_communities():
    # the body below is dead code behind the NotImplementedError stub; it
    # also needs `filter` (not `filter_by`) to accept an expression
    raise NotImplementedError()
    Community.query.filter(
        Community.delete_time > datetime.utcnow()).delete()
    db.session.commit()
Delete communities after holdout time.
def error_for(response):
    klass = error_classes.get(response.status)
    if klass is None:
        if 400 <= response.status < 500:
            klass = ClientError
        if 500 <= response.status < 600:
            klass = ServerError
    return klass(response)
Return the appropriate initialized exception class for a response.
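A minimal usage sketch; the response stub is invented, and we assume no more specific class is registered for the status in error_classes:
class _Resp:
    status = 503

err = error_for(_Resp())  # falls back to ServerError for a 5xx status
raise err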
def parse(table, query=None, date=None, fields=None, distinct=False,
          limit=None, alias=None):
    # several literals (the query join template, kwarg keys, log format)
    # were dropped by extraction; the reconstructions are marked inline
    date = date_range(date)
    limit = int(limit or -1)
    if query and date:
        query = '%s and %s' % (query, date)  # assumed join template
    elif date:
        query = date
    elif query:
        pass
    else:
        query = None
    fields = parse_fields(fields=fields) or None
    fields = fields if fields else table.columns
    msg = 'parse(query=%s, fields=%s)' % (query, fields)  # assumed format
    logger.debug(msg)
    kwargs = {}
    if query:
        interpreter = MQLInterpreter(table)
        query = interpreter.parse(query)
        kwargs['whereclause'] = query
    if distinct:
        kwargs['distinct'] = distinct
    query = select(fields, from_obj=table, **kwargs)
    if limit >= 1:
        query = query.limit(limit)
    if alias:
        query = query.alias(alias)
    return query
Given a SQLAlchemy Table() instance, generate a SQLAlchemy Query() instance with the given parameters. :param table: SQLAlchemy Table() instance :param query: MQL query :param date: metrique date range query :param fields: list of field names to return as columns :param distinct: apply DISTINCT to this query :param limit: apply LIMIT to this query :param alias: apply ALIAS AS to this query
def ipython(args):
    from sregistry.main import get_client
    client = get_client(args.endpoint)
    client.announce(args.command)
    from IPython import embed
    embed()
give the user an ipython shell, optionally with an endpoint of choice.
def to_json(self, filename=None, encoding="utf-8", errors="strict",
            multiline=False, **json_kwargs):
    if filename and multiline:
        lines = [_to_json(item, filename=False, encoding=encoding,
                          errors=errors, **json_kwargs) for item in self]
        with open(filename, 'w', encoding=encoding, errors=errors) as f:
            f.write("\n".join(lines).decode() if sys.version_info < (3, 0)
                    else "\n".join(lines))
    else:
        return _to_json(self.to_list(), filename=filename, encoding=encoding,
                        errors=errors, **json_kwargs)
Transform the BoxList object into a JSON string. :param filename: If provided will save to file :param encoding: File encoding :param errors: How to handle encoding errors :param multiline: Put each item in list onto its own line :param json_kwargs: additional arguments to pass to json.dump(s) :return: string of JSON or return of `json.dump`
def beeswarm(*args, **kwargs):
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    # NOTE: the code that built `bp` (the beeswarm artists) was lost in
    # extraction and cannot be faithfully reconstructed here
    return bp
Create a R-like beeswarm plot showing the mean and datapoints. The difference from matplotlib is only the left axis line is shown, and ticklabels labeling each category of data can be added. @param ax: @param x: @param kwargs: Besides xticklabels, which is a prettyplotlib-specific argument which will label each individual beeswarm, many arguments for matplotlib.pyplot.boxplot will be accepted: http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot Additional arguments include: *median_color* : (default gray) The color of median lines *median_width* : (default 2) Median line width *colors* : (default None) Colors to use when painting a dataseries, for example list1 = [1,2,3] list2 = [5,6,7] ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"]) @return:
def is_interesting(entry):
    # the attribute/tag literals were dropped by extraction; they are
    # reconstructed from the sample XML in the docstring and should be
    # treated as assumptions
    if entry.get('path') == '.':
        return False
    status = entry.find('wc-status')
    if status is None:
        warning('svn status --xml: no <wc-status> for %s' % entry.get('path'))
        return False
    if status.get('item') in ('external', 'unversioned'):
        return False
    return True
Is this entry interesting? ``entry`` is an XML node representing one entry of the svn status XML output. It looks like this:: <entry path="unchanged.txt"> <wc-status item="normal" revision="1" props="none"> <commit revision="1"> <author>mg</author> <date>2015-02-06T07:52:38.163516Z</date> </commit> </wc-status> </entry> <entry path="added-but-not-committed.txt"> <wc-status item="added" revision="-1" props="none"></wc-status> </entry> <entry path="ext"> <wc-status item="external" props="none"></wc-status> </entry> <entry path="unknown.txt"> <wc-status props="none" item="unversioned"></wc-status> </entry>
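A short usage sketch with the standard library; `status_xml` is assumed to hold `svn status --xml` output shaped like the sample above:
import xml.etree.ElementTree as ET

root = ET.fromstring(status_xml)
interesting = [e for e in root.iter("entry") if is_interesting(e)]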
def upload(self, file, name=None, prefix=None, extensions=None,
           overwrite=False, public=False, random_name=False, **kwargs):
    tmp_file = None
    try:
        if "acl" not in kwargs:
            kwargs["acl"] = "public-read" if public else "private"
        extra = kwargs
        if isinstance(file, FileStorage):
            obj = self.container.upload_object_via_stream(
                iterator=file.stream, object_name=name, extra=extra)
        else:
            obj = self.container.upload_object(
                file_path=file, object_name=name, extra=extra)
        return Object(obj=obj)
    except Exception as e:
        raise e
    finally:
        if tmp_file and os.path.isfile(tmp_file):
            os.remove(tmp_file)
To upload file :param file: FileStorage object or string location :param name: The name of the object. :param prefix: A prefix for the object. Can be in the form of directory tree :param extensions: list of extensions to allow. If empty, it will use all extension. :param overwrite: bool - To overwrite if file exists :param public: bool - To set acl to private or public-read. Having acl in kwargs will override it :param random_name - If True and Name is None it will create a random name. Otherwise it will use the file name. `name` will always take precedence :param kwargs: extra params: ie: acl, meta_data etc. :return: Object
def start_engine(self):
    # the log message literals were dropped by extraction; the texts below
    # are plausible reconstructions, not the originals
    if self.disable_security is True:
        log.warning('Security is disabled')
    else:
        log.debug('Generating the private key')
        self.__priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
        log.debug('Generating the signing key')
        self.__signing_key = nacl.signing.SigningKey.generate()
        self._processes.append(self._start_auth_proc())
    log.debug('Starting the internal proxy process')
    proc = self._start_pub_px_proc()
    self._processes.append(proc)
    pub_id = 0
    for pub in self.publisher:
        publisher_type, publisher_opts = list(pub.items())[0]
        proc = self._start_pub_proc(publisher_type, publisher_opts, pub_id)
        self._processes.append(proc)
        pub_id += 1
    log.info('Starting child processes for each device type')
    started_os_proc = []
    for device_os, device_config in self.config_dict.items():
        if not self._whitelist_blacklist(device_os):
            log.debug('Not starting processes for %s', device_os)
            continue
        log.debug('Starting %d worker processes for %s',
                  self.device_worker_processes, device_os)
        for proc_index in range(self.device_worker_processes):
            self._processes.append(self._start_dev_proc(device_os,
                                                        device_config))
        started_os_proc.append(device_os)
    self._processes.append(self._start_srv_proc(started_os_proc))
    for lst in self.listener:
        listener_type, listener_opts = list(lst.items())[0]
        proc = self._start_lst_proc(listener_type, listener_opts)
        self._processes.append(proc)
    thread = threading.Thread(target=self._check_children)
    thread.start()
Start the child processes (one per device OS)
async def nodes(self, text, opts=None, user=None):
    return [n async for n in self.eval(text, opts=opts, user=user)]
A simple non-streaming way to return a list of nodes.
def _define(self):
    definition = []
    q = QuantumRegister(2, "q")
    rule = [
        (HGate(), [q[1]], []),
        (SdgGate(), [q[1]], []),
        (CnotGate(), [q[0], q[1]], []),
        (HGate(), [q[1]], []),
        (TGate(), [q[1]], []),
        (CnotGate(), [q[0], q[1]], []),
        (TGate(), [q[1]], []),
        (HGate(), [q[1]], []),
        (SGate(), [q[1]], []),
        (XGate(), [q[1]], []),
        (SGate(), [q[0]], [])
    ]
    for inst in rule:
        definition.append(inst)
    self.definition = definition
gate ch a,b { h b; sdg b; cx a,b; h b; t b; cx a,b; t b; h b; s b; x b; s a;}
def _fill_parameters(self):
    # several literals (the config key, SSM bracket markers, sentinel
    # values, prompt templates) were dropped by extraction; the
    # reconstructions below are assumptions
    self._parameters = self._config.get('parameters', {})
    self._fill_defaults()
    for k in self._parameters.keys():
        try:
            if self._parameters[k].startswith(self.SSM) and \
                    self._parameters[k].endswith(']'):
                parts = self._parameters[k].split(':')
                tmp = parts[1].replace(']', '')
                val = self._get_ssm_parameter(tmp)
                if val:
                    self._parameters[k] = val
                else:
                    logging.error('SSM parameter {} not found'.format(tmp))
                    return False
            elif self._parameters[k] == self.ASK:
                val = None
                a1 = '1'  # sentinels assumed; they must differ to enter the loop
                a2 = '2'
                prompt1 = "Enter value for '{}': ".format(k)
                prompt2 = "Confirm value for '{}': ".format(k)
                while a1 != a2:
                    a1 = getpass.getpass(prompt=prompt1)
                    a2 = getpass.getpass(prompt=prompt2)
                    if a1 == a2:
                        val = a1
                    else:
                        print('values do not match, try again')
                self._parameters[k] = val
        except:
            pass
    return True
Fill in the _parameters dict from the properties file. Args: None Returns: True Todo: Figure out what could go wrong and at least acknowledge the fact that Murphy was an optimist.
def change_ref(self, r0=None, lmax=None):
    if lmax is None:
        lmax = self.lmax
    clm = self.pad(lmax)
    if r0 is not None and r0 != self.r0:
        for l in _np.arange(lmax + 1):
            clm.coeffs[:, l, :l + 1] *= (self.r0 / r0)**(l + 2)
            if self.errors is not None:
                clm.errors[:, l, :l + 1] *= (self.r0 / r0)**(l + 2)
        clm.r0 = r0
    return clm
Return a new SHMagCoeffs class instance with a different reference r0. Usage ----- clm = x.change_ref([r0, lmax]) Returns ------- clm : SHMagCoeffs class instance. Parameters ---------- r0 : float, optional, default = self.r0 The reference radius of the spherical harmonic coefficients. lmax : int, optional, default = self.lmax Maximum spherical harmonic degree to output. Description ----------- This method returns a new class instance of the magnetic potential, but using a different reference r0. When changing the reference radius r0, the spherical harmonic coefficients will be upward or downward continued under the assumption that the reference radius is exterior to the body.
def _parse_cli_filters(filters):
    # the regex and dict-key literals were dropped by extraction; the
    # pattern and the boto-style 'Name'/'Values' keys are assumptions
    parsed_filters = []
    for filter_entry in filters:
        filter_parts = re.match(r'(?P<name>[^=]+)=(?P<value>.+)', filter_entry)
        parsed_filters.append({
            'Name': filter_parts.group('name'),
            'Values': filter_parts.group('value').split(','),
        })
    return parsed_filters
Parse the filters from the CLI and turn them into a filter dict for boto. :param filters: :return:
def runCLI():
    # the version string and command-name literals were dropped by
    # extraction; the command list is inferred from Scrapple's documented
    # CLI and the version string is a placeholder
    args = docopt(__doc__, version='0.3.0')
    try:
        check_arguments(args)
        command_list = ['genconfig', 'run', 'generate']
        select = itemgetter('genconfig', 'run', 'generate')
        selectedCommand = command_list[select(args).index(True)]
        cmdClass = get_command_class(selectedCommand)
        obj = cmdClass(args)
        obj.execute_command()
    except POSSIBLE_EXCEPTIONS as e:
        print('\n', e, '\n')
The starting point for the execution of the Scrapple command line tool. runCLI uses the docstring as the usage description for the scrapple command. \ The class for the required command is selected by a dynamic dispatch, and the \ command is executed through the execute_command() method of the command class.
def get_page(pno, zoom=False, max_size=None, first=False):
    dlist = dlist_tab[pno]
    if not dlist:
        dlist_tab[pno] = doc[pno].getDisplayList()
        dlist = dlist_tab[pno]
    r = dlist.rect
    clip = r
    zoom_0 = 1
    if max_size:
        zoom_0 = min(1, max_size[0] / r.width, max_size[1] / r.height)
        if zoom_0 == 1:
            zoom_0 = min(max_size[0] / r.width, max_size[1] / r.height)
    mat_0 = fitz.Matrix(zoom_0, zoom_0)
    if not zoom:
        pix = dlist.getPixmap(matrix=mat_0, alpha=False)
    else:
        mp = r.tl + (r.br - r.tl) * 0.5
        w2 = r.width / 2
        h2 = r.height / 2
        clip = r * 0.5
        tl = zoom[0]
        tl.x += zoom[1] * (w2 / 2)
        tl.x = max(0, tl.x)
        tl.x = min(w2, tl.x)
        tl.y += zoom[2] * (h2 / 2)
        tl.y = max(0, tl.y)
        tl.y = min(h2, tl.y)
        clip = fitz.Rect(tl, tl.x + w2, tl.y + h2)
        mat = mat_0 * fitz.Matrix(2, 2)
        pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
    if first:
        img = pix.getPNGData()
    else:
        pilimg = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
        img = ImageTk.PhotoImage(pilimg)
    return img, clip.tl
Return a PNG image for a document page number.
def get_bibtex(isbn_identifier):
    # the isbnlib literals were dropped by extraction; 'bibtex' and
    # 'default' are isbnlib's documented formatter/service names
    bibtex = doi.get_bibtex(to_doi(isbn_identifier))
    if bibtex is None:
        bibtex = isbnlib.registry.bibformatters['bibtex'](
            isbnlib.meta(isbn_identifier, 'default'))
    return bibtex
Get a BibTeX string for the given ISBN. :param isbn_identifier: ISBN to fetch BibTeX entry for. :returns: A BibTeX string or ``None`` if could not fetch it. >>> get_bibtex('9783161484100') '@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
def put(self, name=None, user_ids=None):
    # the endpoint literal was dropped by extraction; 'account' is assumed
    return self.connection.put('account',
                               data=dict(name=name, user_ids=user_ids))
:param name: str of name for the account, defaults to the created timestamp :param user_ids: list of int of users to give access to this account defaults to current user :return: Account dict created
def filter_belief(stmts_in, belief_cutoff, **kwargs):
    # the log format strings were dropped by extraction and are
    # reconstructed; the 'save' kwarg name comes from the docstring
    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements to belief > %s...' %
                (len(stmts_in), belief_cutoff))
    stmts_out = []
    for stmt in stmts_in:
        if stmt.belief < belief_cutoff:
            continue
        stmts_out.append(stmt)
        supp_by = []
        supp = []
        for st in stmt.supports:
            if st.belief >= belief_cutoff:
                supp.append(st)
        for st in stmt.supported_by:
            if st.belief >= belief_cutoff:
                supp_by.append(st)
        stmt.supports = supp
        stmt.supported_by = supp_by
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
def update_changes_after_row_delete(self, row_num):
    if row_num in self.changes.copy():
        self.changes.remove(row_num)
    updated_rows = []
    for changed_row in self.changes:
        if changed_row == -1:
            updated_rows.append(-1)
        if changed_row > row_num:
            updated_rows.append(changed_row - 1)
        if changed_row < row_num:
            updated_rows.append(changed_row)
    self.changes = set(updated_rows)
Update self.changes so that row numbers for edited rows are still correct. I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3. This function updates self.changes to reflect that.
def get_vmss(access_token, subscription_id, resource_group, vmss_name):
    # the URL path segments were dropped by extraction; they are
    # reconstructed from the Azure Resource Manager REST layout
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Compute/virtualMachineScaleSets/',
                        vmss_name,
                        '?api-version=', COMP_API])
    return do_get(endpoint, access_token)
Get virtual machine scale set details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of scale set properties.
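A hedged usage sketch; the token helper and all identifiers are placeholders assumed from the surrounding library:
token = get_access_token()  # assumed helper from the same library
resp = get_vmss(token,
                subscription_id="00000000-0000-0000-0000-000000000000",
                resource_group="my-rg", vmss_name="my-vmss")
print(resp.json())  # scale set properties as returned by the REST API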
def _init_exception_logging(self, app):
    enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
    if not enabled:
        return
    exception_telemetry_client = TelemetryClient(
        self._key, telemetry_channel=self._channel)

    @app.errorhandler(Exception)
    def exception_handler(exception):
        if HTTPException and isinstance(exception, HTTPException):
            return exception
        try:
            raise exception
        except Exception:
            exception_telemetry_client.track_exception()
        finally:
            raise exception

    self._exception_telemetry_client = exception_telemetry_client
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING`` is set in the Flask config. Args: app (flask.Flask). the Flask application for which to initialize the extension.
def connect(self, ctrl):
    if self.prompt:
        self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
    else:
        self.prompt_re = self.driver.prompt_re
    self.ctrl = ctrl
    if self.protocol.connect(self.driver):
        if self.protocol.authenticate(self.driver):
            self.ctrl.try_read_prompt(1)
            if not self.prompt:
                self.prompt = self.ctrl.detect_prompt()
            if self.is_target:
                self.update_config_mode()
                # the mode literal was dropped by extraction; 'global' is
                # assumed from the error message
                if self.mode is not None and self.mode != 'global':
                    self.last_error_msg = ("Device is not in global mode. "
                                           "Disconnected.")
                    self.chain.disconnect()
                    return False
            self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
            self.connected = True
            if self.is_target is False:
                if self.os_version is None:
                    self.update_os_version()
                self.update_hostname()
            else:
                self._connected_to_target()
            return True
    else:
        self.connected = False
    return False
Connect to the device.
async def stations(self):
    # the namedtuple field names and JSON keys were dropped by extraction;
    # the names below are modeled on the IPMA districts API and should be
    # treated as assumptions
    data = await self.retrieve(API_DISTRITS)
    Station = namedtuple('Station', ['latitude', 'longitude', 'idAreaAviso',
                                     'idConcelho', 'idDistrito', 'idRegiao',
                                     'globalIdLocal', 'local'])
    _stations = []
    for station in data['data']:
        _station = Station(
            self._to_number(station['latitude']),
            self._to_number(station['longitude']),
            station['idAreaAviso'],
            station['idConcelho'],
            station['idDistrito'],
            station['idRegiao'],
            station['globalIdLocal'] // 100 * 100,
            station['local'],
        )
        _stations.append(_station)
    return _stations
Retrieve stations.
def dt_comp(self, sampled_topics):
    samples = sampled_topics.shape[0]
    dt = np.zeros((self.D, self.K, samples))
    for s in range(samples):
        dt[:, :, s] = samplers_lda.dt_comp(self.docid, sampled_topics[s, :],
                                           self.N, self.K, self.D, self.alpha)
    return dt
Compute document-topic matrix from sampled_topics.
def list_namespaced_resource_quota(self, namespace, **kwargs):
    # kwarg keys reconstructed to the standard swagger-codegen names
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_resource_quota_with_http_info(
            namespace, **kwargs)
    else:
        (data) = self.list_namespaced_resource_quota_with_http_info(
            namespace, **kwargs)
        return data
list or watch objects of kind ResourceQuota This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_resource_quota(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1ResourceQuotaList If the method is called asynchronously, returns the request thread.
def solve(self, max_worlds=10000, silent=False):
    # the print-format literals were dropped by extraction and are
    # reconstructed
    self.num_worlds = 0
    num_unhappy = 0
    for tax_rate in range(self.tax_range[0], self.tax_range[1]):
        for equity in range(self.equity_range[0], self.equity_range[1]):
            for tradition in range(self.tradition_range[0],
                                   self.tradition_range[1]):
                self.num_worlds += 1
                if self.num_worlds > max_worlds:
                    break
                w = World(str(self.num_worlds).zfill(6),
                          [5000, tax_rate / 10, tradition / 10, equity / 10])
                world_happiness = 0
                num_unhappy = 0
                for person in self.all_people:
                    wh = Happiness(person, w)
                    world_happiness += wh.rating
                    if wh.rating < 0:
                        num_unhappy += 1
                if world_happiness > self.net_happiness:
                    self.net_happiness = world_happiness
                    self.unhappy_people = num_unhappy
                    if not silent:
                        print('world ' + w.nme + ' happiness = '
                              + str(world_happiness) + ', unhappy = '
                              + str(self.unhappy_people))
find the best world to make people happy
def construct_meta(need_data, env):
    # many class-name and option-key literals were dropped by extraction;
    # the reconstructions below follow sphinx-needs conventions and should
    # be treated as assumptions
    hide_options = env.config.needs_hide_options
    if not isinstance(hide_options, list):
        raise SphinxError('Config parameter needs_hide_options must be a list')
    node_meta = nodes.line_block(classes=['needs_meta'])
    param_status = "status: "
    param_tags = "tags: "
    if need_data["status"] is not None and 'status' not in hide_options:
        status_line = nodes.line(classes=['status'])
        node_status = nodes.inline(param_status, param_status,
                                   classes=['needs_status'])
        status_line.append(node_status)
        status_line.append(nodes.inline(
            need_data["status"], need_data["status"],
            classes=["needs-status", str(need_data['status'])]))
        node_meta.append(status_line)
    if need_data["tags"] and 'tags' not in hide_options:
        tag_line = nodes.line(classes=['tags'])
        node_tags = nodes.inline(param_tags, param_tags,
                                 classes=['needs_tags'])
        tag_line.append(node_tags)
        for tag in need_data['tags']:
            tag_line.append(nodes.inline(tag, tag,
                                         classes=["needs-tag", str(tag)]))
            tag_line.append(nodes.inline(' ', ' '))
        node_meta.append(tag_line)
    if need_data['links_back'] and 'links_back' not in hide_options:
        node_incoming_line = nodes.line(classes=['links', 'incoming'])
        prefix = "links incoming: "
        node_incoming_prefix = nodes.inline(prefix, prefix)
        node_incoming_line.append(node_incoming_prefix)
        node_incoming_links = Need_incoming(reftarget=need_data['id'])
        node_incoming_links.append(nodes.inline(need_data['id'],
                                                need_data['id']))
        node_incoming_line.append(node_incoming_links)
        node_meta.append(node_incoming_line)
    if need_data['links'] and 'links' not in hide_options:
        node_outgoing_line = nodes.line(classes=['links', 'outgoing'])
        prefix = "links outgoing: "
        node_outgoing_prefix = nodes.inline(prefix, prefix)
        node_outgoing_line.append(node_outgoing_prefix)
        node_outgoing_links = Need_outgoing(reftarget=need_data['id'])
        node_outgoing_links.append(nodes.inline(need_data['id'],
                                                need_data['id']))
        node_outgoing_line.append(node_outgoing_links)
        node_meta.append(node_outgoing_line)
    extra_options = getattr(env.config, 'needs_extra_options', {})
    node_extra_options = []
    for key, value in extra_options.items():
        if key in hide_options:
            continue
        param_data = need_data[key]
        if param_data is None or not param_data:
            continue
        param_option = '{}: '.format(key)
        option_line = nodes.line(classes=['extra_option'])
        option_line.append(nodes.inline(param_option, param_option,
                                        classes=['needs_extra_option']))
        option_line.append(nodes.inline(
            param_data, param_data,
            classes=["needs-extra-option", str(key)]))
        node_extra_options.append(option_line)
    node_meta += node_extra_options
    global_options = getattr(env.config, 'needs_global_options', {})
    node_global_options = []
    for key, value in global_options.items():
        if key in extra_options or key in hide_options:
            continue
        param_data = need_data[key]
        if param_data is None or not param_data:
            continue
        param_option = '{}: '.format(key)
        global_option_line = nodes.line(classes=['global_option'])
        global_option_line.append(nodes.inline(
            param_option, param_option, classes=['needs_global_option']))
        global_option_line.append(nodes.inline(
            param_data, param_data,
            classes=["needs-global-option", str(key)]))
        node_global_options.append(global_option_line)
    node_meta += node_global_options
    return node_meta
Constructs the node-structure for the status container :param need_data: need_info container :return: node
def parse_int_list(string):
    integers = []
    for comma_part in string.split(","):
        for substring in comma_part.split(" "):
            if len(substring) == 0:
                continue
            if "-" in substring:
                left, right = substring.split("-")
                left_val = int(left.strip())
                right_val = int(right.strip())
                integers.extend(range(left_val, right_val + 1))
            else:
                integers.append(int(substring.strip()))
    return integers
Parses a string of numbers and ranges into a list of integers. Ranges are separated by dashes and inclusive of both the start and end number. Example: parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
import networkx as nx

# Cytoscape.js JSON keys (module-level constants in the original source)
DATA = 'data'
ELEMENTS = 'elements'
NODES = 'nodes'
EDGES = 'edges'
ID = 'id'
SOURCE = 'source'
TARGET = 'target'


def to_networkx(cyjs, directed=True):
    if directed:
        g = nx.MultiDiGraph()
    else:
        g = nx.MultiGraph()
    network_data = cyjs[DATA]
    if network_data is not None:
        for key in network_data.keys():
            g.graph[key] = network_data[key]
    nodes = cyjs[ELEMENTS][NODES]
    edges = cyjs[ELEMENTS][EDGES]
    for node in nodes:
        data = node[DATA]
        # attr_dict follows networkx 1.x semantics; networkx 2.x treats it
        # as a regular node attribute instead of expanding it.
        g.add_node(data[ID], attr_dict=data)
    for edge in edges:
        data = edge[DATA]
        source = data[SOURCE]
        target = data[TARGET]
        g.add_edge(source, target, attr_dict=data)
    return g
Convert Cytoscape.js-style JSON object into NetworkX object. By default, data will be handled as a directed graph.
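A minimal usage sketch; the dictionary shape is the standard Cytoscape.js
network format, and the node/edge values here are invented:

cyjs = {
    'data': {'name': 'toy network'},
    'elements': {
        'nodes': [
            {'data': {'id': 'a'}},
            {'data': {'id': 'b'}},
        ],
        'edges': [
            {'data': {'source': 'a', 'target': 'b', 'weight': 1.0}},
        ],
    },
}

g = to_networkx(cyjs)
print(g.graph['name'], g.number_of_nodes(), g.number_of_edges())  # toy network 2 1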
import glob
import os
import shutil


def copyglob(src: str, dest: str, allow_nothing: bool = False,
             allow_nonfiles: bool = False) -> None:
    something = False
    for filename in glob.glob(src):
        if allow_nonfiles or os.path.isfile(filename):
            shutil.copy(filename, dest)
            something = True
    if something or allow_nothing:
        return
    raise ValueError("No files found matching: {}".format(src))
Copies files whose filenames match the glob "src" into the directory
"dest". Raises an error if no files are copied, unless allow_nothing
is True.

Args:
    src: source glob (e.g. ``/somewhere/*.txt``)
    dest: destination directory
    allow_nothing: don't raise an exception if no files are found
    allow_nonfiles: copy things that are not files too (as judged by
        :func:`os.path.isfile`)

Raises:
    ValueError: if no files are found and ``allow_nothing`` is not set
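A quick usage sketch (the paths are invented for illustration):

# Copy every .txt file from one directory to another; tolerate an empty match.
copyglob('/tmp/reports/*.txt', '/tmp/archive', allow_nothing=True)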
def convert_idx_to_name(self, y, lens):
    y = [[self.id2label[idx] for idx in row[:l]]
         for row, l in zip(y, lens)]
    return y
Convert label index to name.

Args:
    y (list): label index list.
    lens (list): true length of y.

Returns:
    y: label name list.

Examples:
    >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
    >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
    >>> lens = [1, 2, 3]
    >>> self.convert_idx_to_name(y, lens)
    [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
def get_or_create_group(groupname, gid_preset, system=False, id_dependent=True):
    gid = get_group_id(groupname)
    if gid is None:
        create_group(groupname, gid_preset, system)
        return gid_preset
    elif id_dependent and gid != gid_preset:
        # Format placeholders were lost in extraction; message reconstructed.
        error("Present group id '{0}' does not match the required id of the "
              "environment '{1}'.".format(gid, gid_preset))
    return gid
Returns the id for the given group, and creates it first in case it does not exist.

:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
:rtype: int
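Hypothetical usage inside a provisioning task (`get_group_id`,
`create_group`, and `error` are module helpers assumed from the
surrounding fabric-style code):

# Ensure the 'deploy' group exists with gid 2000, creating it if needed.
gid = get_or_create_group('deploy', 2000, system=True)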
def save(filename=None, family='ipv4'):
    if _conf() and not filename:
        filename = _conf()

    # Family names and formatting literals were lost in extraction;
    # reconstructed following the nftables address families.
    nft_families = ['ip', 'ip6', 'arp', 'bridge']
    rules = "#! nft -f\n"
    for family in nft_families:
        out = get_rules(family)
        if out:
            rules += '\n'
        rules = rules + '\n'.join(out)
    rules = rules + '\n'

    try:
        with salt.utils.files.fopen(filename, 'wb') as _fh:
            _fh.writelines(salt.utils.data.encode(rules))
    except (IOError, OSError) as exc:
        raise CommandExecutionError(
            'Problem writing to configuration file: {0}'.format(exc)
        )
    return rules
Save the current in-memory rules to disk

CLI Example:

.. code-block:: bash

    salt '*' nftables.save /etc/nftables
def speech(self) -> str:
    if not self.data:
        self.update()
    return speech.metar(self.data, self.units)
Report summary designed to be read by a text-to-speech program
import random

import numpy
import torch

# Params and log_pytorch_version_info are module-local to the surrounding library.


def prepare_environment(params: Params):
    seed = params.pop_int("random_seed", 13370)
    numpy_seed = params.pop_int("numpy_seed", 1337)
    torch_seed = params.pop_int("pytorch_seed", 133)
    if seed is not None:
        random.seed(seed)
    if numpy_seed is not None:
        numpy.random.seed(numpy_seed)
    if torch_seed is not None:
        torch.manual_seed(torch_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(torch_seed)
    log_pytorch_version_info()
Sets random seeds for reproducible experiments. This may not work as
expected if you use this from within a python project in which you have
already imported Pytorch. If you use the scripts/run_model.py entry point
to training models with this library, your experiments should be
reasonably reproducible. If you are using this from your own project,
you will want to call this function before importing Pytorch.

Complete determinism is very difficult to achieve with libraries doing
optimized linear algebra due to massively parallel execution, which is
exacerbated by using GPUs.

Parameters
----------
params: Params object or dict, required.
    A ``Params`` object or dict holding the json parameters.
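A minimal usage sketch, assuming a ``Params`` class that accepts a dict
and exposes the ``pop_int`` method used above:

params = Params({"random_seed": 42, "numpy_seed": 42, "pytorch_seed": 42})
prepare_environment(params)  # call before constructing models for reproducibility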
async def get_timezone(self) -> Optional[tzinfo]:
    u = await self._get_user()
    # Facebook reports the user's UTC offset in hours; convert to seconds.
    # The key name and tz label were lost in extraction: 'timezone' is the
    # field Facebook's Graph API uses; the label string is arbitrary.
    diff = float(u.get('timezone', 0)) * 3600.0
    return tz.tzoffset('UTC_OFFSET', diff)
We can't exactly know the user's time zone from what Facebook gives us
(fucking morons), but we can still return something that'll work until
the next DST change.
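For reference, ``dateutil.tz.tzoffset`` builds a fixed-offset ``tzinfo``
from a display name and an offset in seconds:

from datetime import datetime
from dateutil import tz

user_tz = tz.tzoffset('UTC_OFFSET', 2 * 3600)  # user reported UTC+2
print(datetime(2019, 7, 1, 12, 0, tzinfo=user_tz).isoformat())
# 2019-07-01T12:00:00+02:00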
def get_default_user_groups(self, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_default_user_groups_with_http_info(**kwargs)
    else:
        (data) = self.get_default_user_groups_with_http_info(**kwargs)
        return data
Get default user groups customer preferences  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.get_default_user_groups(async_req=True)
>>> result = thread.get()

:param async_req bool
:param User body:
:return: ResponseContainerListUserGroup
         If the method is called asynchronously,
         returns the request thread.
import re
import string


def word_tokenize(sentence):
    # NOTE: the original pattern literals were lost in extraction. The
    # regexes below are plausible reconstructions chosen to reproduce the
    # documented behaviour (tokenize without deleting any character);
    # arr_pattern in particular is a guess.
    date_pattern = r'\d+[/\-.]\d+[/\-.]\d+'          # e.g. 2019-07-01
    number_pattern = r'\d+(?:\.\d+)?'                # integers and decimals
    arr_pattern = r'[-=]+>|<[-=]+'                   # arrows such as -> or <=
    word_pattern = r'\w+'                            # runs of word characters
    non_space_pattern = r'[{}]'.format(re.escape(string.punctuation))
    space_pattern = r'\s+'                           # runs of whitespace
    anything_pattern = r'.'                          # fallback: any single char
    patterns = [date_pattern, number_pattern, arr_pattern, word_pattern,
                non_space_pattern, space_pattern, anything_pattern]
    big_pattern = r'|'.join(['(' + pattern + ')' for pattern in patterns])
    for match in re.finditer(big_pattern, sentence):
        yield match.group(0)
A generator which yields tokens based on the given sentence without
deleting anything.

>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
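Because nothing is deleted, the tokens always reassemble into the
original sentence, which is a cheap property to check:

context = "Meet me at 12.30, room 4-B."
tokens = list(word_tokenize(context))
assert ''.join(tokens) == context  # lossless tokenization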
def update_state(self):
    response = self.api_interface.get_device_state(self, type_override="button")
    return self._update_state_from_response(response)
Update state with latest info from Wink API.
def get_score(self, terms):
    assert isinstance(terms, list) or isinstance(terms, tuple)
    score_li = np.asarray([self._get_score(t) for t in terms])
    s_pos = np.sum(score_li[score_li > 0])
    s_neg = -np.sum(score_li[score_li < 0])
    s_pol = (s_pos - s_neg) * 1.0 / ((s_pos + s_neg) + self.EPSILON)
    s_sub = (s_pos + s_neg) * 1.0 / (len(score_li) + self.EPSILON)
    return {self.TAG_POS: s_pos,
            self.TAG_NEG: s_neg,
            self.TAG_POL: s_pol,
            self.TAG_SUB: s_sub}
Get score for a list of terms.

:type terms: list
:param terms: A list of terms to be analyzed.

:returns: dict
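A worked sketch of the arithmetic with hypothetical per-term scores
(EPSILON omitted as negligible):

# Per-term scores: one positive, one negative, one neutral
scores = [0.5, -0.25, 0.0]
s_pos = sum(s for s in scores if s > 0)        # 0.5
s_neg = -sum(s for s in scores if s < 0)       # 0.25
polarity = (s_pos - s_neg) / (s_pos + s_neg)   # 0.333...
subjectivity = (s_pos + s_neg) / len(scores)   # 0.25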
def find(self, item_id=None):
    "Recursively find a menu item by its id (useful for event handlers)"
    for it in self:
        if it.id == item_id:
            return it
        elif isinstance(it, Menu):
            found = it.find(item_id)
            if found:
                return found
Recursively find a menu item by its id (useful for event handlers)
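Typical use in an event handler; the menu object and event attribute
names here are assumptions about the surrounding GUI toolkit:

def on_menu(event):
    item = main_menu.find(event.id)  # walks submenus recursively
    if item is not None:
        print("clicked:", item.id)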
def _repr_latex_(self):
    lines = []  # missing in the original; the appends would raise NameError
    lines.append(r"\begin{align*}")
    for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
        lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol),
                                                     sympy.latex(rhs)))
    lines.append(r"\end{align*}")
    return "\n".join(lines)
This is used in the IPython notebook; it allows us to render the ODEProblem object in LaTeX. How cool is this?
def get_many(self, type: Type[T], query: Mapping[str, Any],
             context: PipelineContext = None) -> Iterable[T]:
    pass
Gets a query from the data source, which contains a request for multiple objects.

Args:
    type: The type of the requested objects.
    query: The query being requested (contains a request for multiple objects).
    context: The context for the extraction (mutable).

Returns:
    The requested objects.
def get_template_uuid(self):
    # The endpoint path and JSON keys were lost in extraction; reconstructed
    # from the Nessus/Tenable scan-template API.
    response = requests.get(self.url + '/editor/scan/templates',
                            headers=self.headers, verify=False)
    templates = json.loads(response.text)
    for template in templates['templates']:
        if template['title'] == self.template_name:
            return template['uuid']
Retrieves the uuid of the given template name.
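A hypothetical call site; ``scanner`` stands in for an instance of the
surrounding client class, whose ``url``, ``headers``, and
``template_name`` attributes the method relies on:

scanner.template_name = 'basic'
template_uuid = scanner.get_template_uuid()
# uuid string of the matching template, or None if no title matches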
def get_type_description(self, _type, suffix='', *args, **kwargs):
    if not SchemaObjects.contains(_type):
        return _type
    schema = SchemaObjects.get(_type)
    # Separator and format literals were lost in extraction; reconstructed
    # to produce readable descriptions.
    if schema.all_of:
        models = ','.join(
            (self.get_type_description(_type, *args, **kwargs)
             for _type in schema.all_of)
        )
        result = '{}'.format(models.split(',')[0])
        for r in models.split(',')[1:]:
            result += ' extended {}'.format(r)
    elif schema.is_array:
        result = 'array of {}'.format(
            self.get_type_description(schema.item['type'], *args, **kwargs))
    else:
        result = ':ref:`{} <{}{}>`'.format(schema.name, schema.schema_id, suffix)
    return result
Get description of type

:param suffix:
:param str _type:
:rtype: str