code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def resolve_and_call(self, func, extra_env=None): kwargs = self.resolve_parameters(func, extra_env=extra_env) return func(**kwargs)
Resolve function arguments and call them, possibily filling from the environment
def gen_FS_DF(df_output): df_day = pd.pivot_table( df_output, values=[, , , ], index=[, , ], aggfunc=[min, max, np.mean, ]) df_day_all_year = pd.pivot_table( df_output, values=[, , , ], index=[, ], aggfunc=[min, max, np.mean, ]) array_yr_mon = df_day.index.droplevel( ).to_frame().drop_duplicates().values df_fs = pd.DataFrame( {(yr, mon): (df_day.loc[(yr, mon)].apply(gen_score_ser) - df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean() for yr, mon in array_yr_mon}) return df_fs
generate DataFrame of scores. Parameters ---------- df_WS_data : type Description of parameter `df_WS_data`. Returns ------- type Description of returned object.
def vcf2cytosure(institute_id, case_name, individual_id): (display_name, vcf2cytosure) = controllers.vcf2cytosure(store, institute_id, case_name, individual_id) outdir = os.path.abspath(os.path.dirname(vcf2cytosure)) filename = os.path.basename(vcf2cytosure) log.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir)) attachment_filename = display_name + ".vcf2cytosure.cgh" return send_from_directory(outdir, filename, attachment_filename=attachment_filename, as_attachment=True)
Download vcf2cytosure file for individual.
def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs): resource_group = getattr(namespace, , None) name = getattr(namespace, , None) return get_k8s_upgrades(cmd.cli_ctx, resource_group, name) if resource_group and name else None
Return Kubernetes versions available for upgrading an existing cluster.
def process_request(self, request): super(SubdomainURLRoutingMiddleware, self).process_request(request) subdomain = getattr(request, , UNSET) if subdomain is not UNSET: urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain) if urlconf is not None: logger.debug("Using urlconf %s for subdomain: %s", repr(urlconf), repr(subdomain)) request.urlconf = urlconf
Sets the current request's ``urlconf`` attribute to the urlconf associated with the subdomain, if it is listed in ``settings.SUBDOMAIN_URLCONFS``.
def open(cls, grammar_filename, rel_to=None, **options): if rel_to: basepath = os.path.dirname(rel_to) grammar_filename = os.path.join(basepath, grammar_filename) with open(grammar_filename, encoding=) as f: return cls(f, **options)
Create an instance of Lark with the grammar given by its filename If rel_to is provided, the function will find the grammar filename in relation to it. Example: >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr") Lark(...)
def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"): return self.__do_menu("as_p", show_leaf, current_linkable, class_current)
It returns breadcrumb as p
def bind(self, fn: Callable[[Any], ]) -> : return List.concat(self.map(fn))
Flatten and map the List. Haskell: xs >>= f = concat (map f xs)
def get_vm_info(name): machinecwd try: vm_ = __utils__[](_build_sdb_uri(name), __opts__) except KeyError: raise SaltInvocationError( ) if vm_ is None or not in vm_: raise SaltInvocationError( .format(name)) return vm_
get the information for a VM. :param name: salt_id name :return: dictionary of {'machine': x, 'cwd': y, ...}.
def rtree_filter(self): return RtreeFilter(self.src_filter.sitecol, self.oqparam.maximum_distance, self.src_filter.filename)
:returns: an RtreeFilter
def get_repr(expr, multiline=False): signature = _get_object_signature(expr) if signature is None: return "{}()".format(type(expr).__name__) defaults = {} for name, parameter in signature.parameters.items(): if parameter.default is not inspect._empty: defaults[name] = parameter.default args, var_args, kwargs = get_vars(expr) args_parts = collections.OrderedDict() var_args_parts = [] kwargs_parts = {} has_lines = multiline parts = [] for i, (key, value) in enumerate(args.items()): arg_repr = _dispatch_formatting(value) if "\n" in arg_repr: has_lines = True args_parts[key] = arg_repr for arg in var_args: arg_repr = _dispatch_formatting(arg) if "\n" in arg_repr: has_lines = True var_args_parts.append(arg_repr) for key, value in sorted(kwargs.items()): if key in defaults and value == defaults[key]: continue value = _dispatch_formatting(value) arg_repr = "{}={}".format(key, value) has_lines = True kwargs_parts[key] = arg_repr for _, part in args_parts.items(): parts.append(part) parts.extend(var_args_parts) for _, part in sorted(kwargs_parts.items()): parts.append(part) if has_lines and parts: for i, part in enumerate(parts): parts[i] = "\n".join(" " + line for line in part.split("\n")) parts.append(" )") parts = ",\n".join(parts) return "{}(\n{}".format(type(expr).__name__, parts) parts = ", ".join(parts) return "{}({})".format(type(expr).__name__, parts)
Build a repr string for ``expr`` from its vars and signature. :: >>> class MyObject: ... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs): ... self.arg1 = arg1 ... self.arg2 = arg2 ... self.var_args = var_args ... self.foo = foo ... self.bar = bar ... self.kwargs = kwargs ... >>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z']) :: >>> import uqbar >>> print(uqbar.objects.get_repr(my_object)) MyObject( 'a', 'b', 'c', 'd', foo='x', quux=['y', 'z'], )
def copy_file_job(job, name, file_id, output_dir): work_dir = job.fileStore.getLocalTempDir() fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, name)) copy_files([fpath], output_dir)
Job version of move_files for one file :param JobFunctionWrappingJob job: passed automatically by Toil :param str name: Name of output file (including extension) :param str file_id: FileStoreID of file :param str output_dir: Location to place output file
def _parse_qemu_img_info(info): raw_infos = salt.utils.json.loads(info) disks = [] for disk_infos in raw_infos: disk = { : disk_infos[], : disk_infos[], : disk_infos[], : disk_infos[], : disk_infos[] if in disk_infos else None, } if in disk_infos.keys(): disk[] = format(disk_infos[]) if in disk_infos.keys(): disk[] = [ { : snapshot[], : snapshot[], : snapshot[], : datetime.datetime.fromtimestamp( float(.format(snapshot[], snapshot[]))).isoformat(), : datetime.datetime.utcfromtimestamp( float(.format(snapshot[], snapshot[]))).time().isoformat() } for snapshot in disk_infos[]] disks.append(disk) for disk in disks: if in disk.keys(): candidates = [info for info in disks if in info.keys() and info[] == disk[]] if candidates: disk[] = candidates[0] return disks[0]
Parse qemu-img info JSON output into disk infos dictionary
def state_delta(self, selector=, power=None, duration=1.0, infrared=None, hue=None, saturation=None, brightness=None, kelvin=None): argument_tuples = [ ("power", power), ("duration", duration), ("infrared", infrared), ("hue", hue), ("saturation", saturation), ("brightness", brightness), ("kelvin", kelvin) ] return self.client.perform_request( method=, endpoint=, endpoint_args=[selector], argument_tuples=argument_tuples)
Given a state delta, apply the modifications to lights' state over a given period of time. selector: required String The selector to limit which lights are controlled. power: String The power state you want to set on the selector. on or off duration: Double How long in seconds you want the power action to take. Range: 0.0 – 3155760000.0 (100 years) infrared: Double The maximum brightness of the infrared channel. hue: Double Rotate the hue by this angle in degrees. saturation: Double Change the saturation by this additive amount; the resulting saturation is clipped to [0, 1]. brightness: Double Change the brightness by this additive amount; the resulting brightness is clipped to [0, 1]. kelvin: Double Change the kelvin by this additive amount; the resulting kelvin is clipped to [2500, 9000].
def main(output_directory: int, data: str) -> None: json_files = glob.glob(os.path.join(data, "*.json")) for dataset in json_files: dataset_name = os.path.basename(dataset)[:-5] print(f"Processing dataset: {dataset} into query and question " f"splits at output path: {output_directory + + dataset_name}") full_dataset = json.load(open(dataset)) if not isinstance(full_dataset, list): full_dataset = [full_dataset] for split_type in ["query_split", "question_split"]: dataset_out = os.path.join(output_directory, dataset_name, split_type) for split, split_dataset in process_dataset(full_dataset, split_type): dataset_out = os.path.join(output_directory, dataset_name, split_type) os.makedirs(dataset_out, exist_ok=True) json.dump(split_dataset, open(os.path.join(dataset_out, split), "w"), indent=4)
Processes the text2sql data into the following directory structure: ``dataset/{query_split, question_split}/{train,dev,test}.json`` for datasets which have train, dev and test splits, or: ``dataset/{query_split, question_split}/{split_{split_id}}.json`` for datasets which use cross validation. The JSON format is identical to the original datasets, apart from they are split into separate files with respect to the split_type. This means that for the question split, all of the sql data is duplicated for each sentence which is bucketed together as having the same semantics. As an example, the following blob would be put "as-is" into the query split dataset, and split into two datasets with identical blobs for the question split, differing only in the "sentence" key, where blob1 would end up in the train split and blob2 would be in the dev split, with the rest of the json duplicated in each. { "comments": [], "old-name": "", "query-split": "train", "sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}], "sql": [], "variables": [] }, Parameters ---------- output_directory : str, required. The output directory. data: str, default = None The path to the data director of https://github.com/jkkummerfeld/text2sql-data.
def result(self, line=): args = magic_arguments.parse_argstring(self.result, line) if self.last_result is None: raise UsageError(NO_LAST_RESULT) self.last_result.get() self.last_result.display_outputs(groupby=args.groupby)
Print the result of the last asynchronous %px command. This lets you recall the results of %px computations after asynchronous submission (block=False). Examples -------- :: In [23]: %px os.getpid() Async parallel execution on engine(s): all In [24]: %pxresult Out[8:10]: 60920 Out[9:10]: 60921 Out[10:10]: 60922 Out[11:10]: 60923
def prep_jid(nocache=False, passed_jid=None): t collide (unless its passed a jid). So do what you have to do to make sure that stays the case ' conn = _get_conn() if conn is None: return None cur = conn.cursor() if passed_jid is None: jid = _gen_jid(cur) else: jid = passed_jid while not jid: log.info("jid clash, generating a new one") jid = _gen_jid(cur) cur.close() conn.close() return jid
Return a job id and prepare the job id directory This is the function responsible for making sure jids don't collide (unless its passed a jid). So do what you have to do to make sure that stays the case
def logged_in(self): try: self._proxy.User.get({: []}) return True except Fault as e: if e.faultCode == 505 or e.faultCode == 32000: return False raise e
This is True if this instance is logged in else False. We test if this session is authenticated by calling the User.get() XMLRPC method with ids set. Logged-out users cannot pass the 'ids' parameter and will result in a 505 error. If we tried to login with a token, but the token was incorrect or expired, the server returns a 32000 error. For Bugzilla 5 and later, a new method, User.valid_login is available to test the validity of the token. However, this will require that the username be cached along with the token in order to work effectively in all scenarios and is not currently used. For more information, refer to the following url. http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login
def p_catch(self, p): p[0] = ast.Catch(identifier=p[3], elements=p[5])
catch : CATCH LPAREN identifier RPAREN block
def yield_event(self, act): if act in self.tokens: coro = act.coro op = self.try_run_act(act, self.tokens[act]) if op: del self.tokens[act] return op, coro
Hande completion for a request and return an (op, coro) to be passed to the scheduler on the last completion loop of a proactor.
def get_attribute_data(doc): attributes = dict() for attribute_node in doc.xpath("//div[contains(@class, )]"): node = attribute_node.xpath(".//*[contains(@class, )]")[0] label = " ".join(node.itertext()).strip() node = attribute_node.xpath(".//*[contains(@class, )]")[0] value = " ".join(node.itertext()).strip() link_node = node.find() if link_node is not None: link = link_node.get() link_text = link_node.text else: link = None link_text = None attributes[label] = dict(value=value, link=link, link_text=link_text) return attributes
Helper function: parse attribute data from a wiki html doc Args: doc (document parsed with lxml.html): parsed wiki page Returns: dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``; only the first hyperlink listed in each attribute value is included
def get_bank_name(clabe: str) -> str: code = clabe[:3] try: bank_name = BANK_NAMES[BANKS[code]] except KeyError: raise ValueError(f"Ningún banco tiene código ") else: return bank_name
Regresa el nombre del banco basado en los primeros 3 digitos https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
def _set_offset_cpu(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "uint32", : , }) self.__offset_cpu = t if hasattr(self, ): self._set()
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_offset_cpu is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_offset_cpu() directly.
def _get_exception_log_path(): app = sys.argv[0].split()[-1] for exception_log in [ % app, % app, % app]: if os.access(path.dirname(exception_log), os.W_OK): return exception_log return None
Return the normalized path for the connection log, raising an exception if it can not written to. :return: str
def drug_names_match_criteria(drug_names: List[str], names_are_generic: bool = False, include_categories: bool = False, **criteria: Dict[str, bool]) -> List[bool]: return [ drug_name_matches_criteria( dn, name_is_generic=names_are_generic, include_categories=include_categories, **criteria) for dn in drug_names ]
Establish whether multiple drugs, passed as a list of drug names, each matches the specified criteria. See :func:`drug_matches_criteria`.
def dumpJSON(self): g = get_root(self).globals return dict( RA=self.ra[], DEC=self.dec[], tel=g.cpars[], alt=self._getVal(self.alt), az=self._getVal(self.az), secz=self._getVal(self.airmass), pa=self._getVal(self.pa), foc=self._getVal(self.focus), mdist=self._getVal(self.mdist) )
Return dictionary of data for FITS headers.
def workflow( graph: BELGraph, node: BaseEntity, key: Optional[str] = None, tag: Optional[str] = None, default_score: Optional[float] = None, runs: Optional[int] = None, minimum_nodes: int = 1, ) -> List[]: subgraph = generate_mechanism(graph, node, key=key) if subgraph.number_of_nodes() <= minimum_nodes: return [] runners = multirun(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs) return list(runners)
Generate candidate mechanisms and run the heat diffusion workflow. :param graph: A BEL graph :param node: The BEL node that is the focus of this analysis :param key: The key in the node data dictionary representing the experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`. :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score' :param default_score: The initial score for all nodes. This number can go up or down. :param runs: The number of times to run the heat diffusion workflow. Defaults to 100. :param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion :return: A list of runners
def AND(*args, **kwargs): for arg in args: try: arg() except CertifierError as e: exc = kwargs.get(, None) if exc is not None: raise exc(e) raise
ALL args must not raise an exception when called incrementally. If an exception is specified, raise it, otherwise raise the callable's exception. :params iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that excepts the unexpectedly raised exception as argument and return an exception to raise. :raises CertifierError: The first certifier error if at least one raises a certifier error.
def _assert_input_is_valid(input_value, validators, validated_func, input_name ): for validator in validators: validator.assert_valid(input_name, input_value)
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before executing the function. It simply delegates to the validator. The signature of this function is hardcoded to correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed. :param input_value: the value to validate :param validator: the Validator object that will be applied on input_value_to_validate :param validated_func: the function for which this validation is performed. This is not used since the Validator knows it already, but we should not change the signature here. :param input_name: the name of the function input that is being validated :return: Nothing
def expired(self): self._data["_killed"] = True self.save() raise SessionExpired(self._config.expired_message)
Called when an expired session is atime
def dict_diff(d1, d2, no_key=): d1keys = set(d1.keys()) d2keys = set(d2.keys()) both = d1keys & d2keys diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]} diff.update({k: (d1[k], no_key) for k in d1keys - both}) diff.update({k: (no_key, d2[k]) for k in d2keys - both}) return diff
Compares two dictionaries Args: d1 (DictUpperBound): First dictionary to compare d2 (DictUpperBound): Second dictionary to compare no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'. Returns: Dict: Comparison dictionary
def register(self, event, fn): self._handler_dict.setdefault(event, []) if fn not in self._handler_dict[event]: self._handler_dict[event].append(fn)
Registers the given function as a handler to be applied in response to the the given event.
def fit_effective_mass(distances, energies, parabolic=True): if parabolic: fit = np.polyfit(distances, energies, 2) c = 2 * fit[0] else: def f(x, alpha, d): top = np.sqrt(4 * alpha * d * x**2 + 1) - 1 bot = 2 * alpha return top / bot bounds = ((1e-8, -np.inf), (np.inf, np.inf)) popt, _ = curve_fit(f, distances, energies, p0=[1., 1.], bounds=bounds) c = 2 * popt[1] eff_mass = (angstrom_to_bohr**2 / eV_to_hartree) / c return eff_mass
Fit the effective masses using either a parabolic or nonparabolic fit. Args: distances (:obj:`numpy.ndarray`): The x-distances between k-points in reciprocal Angstroms, normalised to the band extrema. energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the eigenvalue of the band extrema. parabolic (:obj:`bool`, optional): Use a parabolic fit of the band edges. If ``False`` then nonparabolic fitting will be attempted. Defaults to ``True``. Returns: float: The effective mass in units of electron rest mass, :math:`m_0`.
def availability(self, dcid, params=None): params = update_params(params, {: dcid}) return self.request(, params, )
/v1/regions/availability GET - public Retrieve a list of the VPSPLANIDs currently available in this location. If your account has special plans available, you will need to pass your api_key in in order to see them. For all other accounts, the API key is not optional. Link: https://www.vultr.com/api/#regions_region_available
def set_residual(self, pores=[], overwrite=False): r Ps = self._parse_indices(pores) if overwrite: self[] = False self[][Ps] = True residual = self[] net = self.project.network conns = net[] rclusters = site_percolation(conns, residual).sites rcluster_ids = np.unique(rclusters[rclusters > -1]) initial_num = len(self.queue)-1 for rcluster_id in rcluster_ids: rPs = rclusters == rcluster_id existing = np.unique(self[][rPs]) existing = existing[existing > -1] if len(existing) > 0: cluster_num = existing[0] else: cluster_num = len(self.queue) self.queue.append([]) queue = self.queue[cluster_num] self[][rPs] = cluster_num Ts = net.find_neighbor_throats(pores=rPs, flatten=True, mode=) self[][Ts] = cluster_num self[][rPs] = 0 self[][Ts] = 0 self[][rPs] = -np.inf self[][Ts] = -np.inf Ts = net.find_neighbor_throats(pores=rPs, flatten=True, mode=) for T in Ts: data = [] data.append(self[][T]) data.append(T) data.append() hq.heappush(queue, data) self.invasion_running = [True]*len(self.queue) for c_num in range(len(self.queue)): if c_num > initial_num: self.invasion_running[c_num] = False
r""" Method to start invasion in a network w. residual saturation. Called after inlets are set. Parameters ---------- pores : array_like The pores locations that are to be filled with invader at the beginning of the simulation. overwrite : boolean If ``True`` then all existing inlet locations will be removed and then the supplied locations will be added. If ``False``, then supplied locations are added to any already existing locations. Notes ----- Currently works for pores only and treats inner throats, i.e. those that connect two pores in the cluster as invaded and outer ones as uninvaded. Uninvaded throats are added to a new residual cluster queue but do not start invading independently if not connected to an inlet. Step 1. Identify clusters in the phase occupancy. Step 2. Look for clusters that are connected or contain an inlet Step 3. For those that are merge into inlet cluster. May be connected to more than one - run should sort this out Step 4. For those that are isolated set the queue to not invading. Step 5. (in run) When isolated cluster is met my invading cluster it merges in and starts invading
def batch_load_docs(db, doc_iterator, on_duplicate="replace"): batch_size = 100 counter = 0 collections = {} docs = {} if on_duplicate not in ["error", "update", "replace", "ignore"]: log.error(f"Bad parameter for on_duplicate: {on_duplicate}") return for (collection_name, doc) in doc_iterator: if collection_name not in collections: collections[collection_name] = db.collection(collection_name) docs[collection_name] = [] counter += 1 docs[collection_name].append(doc) if counter % batch_size == 0: log.info(f"Bulk import arangodb: {counter}") for cname in docs: collections[cname].import_bulk( docs[cname], on_duplicate=on_duplicate, halt_on_error=False ) docs[cname] = [] log.info(f"Bulk import arangodb: {counter}") for cname in docs: collections[cname].import_bulk( docs[cname], on_duplicate=on_duplicate, halt_on_error=False ) docs[cname] = []
Batch load documents Args: db: ArangoDB client database handle doc_iterator: function that yields (collection_name, doc_key, doc) on_duplicate: defaults to replace, but can be error, update, replace or ignore https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
def find_vc_pdir_vswhere(msvc_version): vswhere_path = os.path.join( , , , , ) vswhere_cmd = [vswhere_path, , msvc_version, , ] if os.path.exists(vswhere_path): sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) vsdir, err = sp.communicate() vsdir = vsdir.decode("mbcs") vsdir = vsdir.rstrip() vc_pdir = os.path.join(vsdir, ) return vc_pdir else: return None
Find the MSVC product directory using vswhere.exe . Run it asking for specified version and get MSVS install location :param msvc_version: :return: MSVC install dir
def assign_site_properties(self, slab, height=0.9): if in slab.site_properties.keys(): return slab else: surf_sites = self.find_surface_sites_by_height(slab, height) surf_props = [ if site in surf_sites else for site in slab.sites] return slab.copy( site_properties={: surf_props})
Assigns site properties.
def df(self, src): return self._getStdOutCmd([self._hadoop_cmd, , , self._full_hdfs_path(src)], True)
Perform ``df`` on a path
def get_identifier(self): ident = self.get_name() if self.is_method: args = ",".join(self.get_arg_type_descriptors()) if self.is_bridge(): ident = "%s(%s):%s" % (ident, args, self.get_descriptor()) else: ident = "%s(%s)" % (ident, args) return ident
For methods this is the return type, the name and the (non-pretty) argument descriptor. For fields it is simply the name. The return-type of methods is attached to the identifier when it is a bridge method, which can technically allow two methods with the same name and argument type list, but with different return type.
def MultiWritePathHistory(self, client_path_histories): for client_path, client_path_history in iteritems(client_path_histories): if client_path.client_id not in self.metadatas: raise db.UnknownClientError(client_path.client_id) path_info = rdf_objects.PathInfo( path_type=client_path.path_type, components=client_path.components) for timestamp, stat_entry in iteritems(client_path_history.stat_entries): path_record = self._GetPathRecord( client_path.client_id, path_info, set_default=False) if path_record is None: raise db.AtLeastOneUnknownPathError([]) path_record.AddStatEntry(stat_entry, timestamp) for timestamp, hash_entry in iteritems(client_path_history.hash_entries): path_record = self._GetPathRecord( client_path.client_id, path_info, set_default=False) if path_record is None: raise db.AtLeastOneUnknownPathError([]) path_record.AddHashEntry(hash_entry, timestamp)
Writes a collection of hash and stat entries observed for given paths.
def clear(self): self.erase() output = self.output output.erase_screen() output.cursor_goto(0, 0) output.flush() self.request_absolute_cursor_position()
Clear screen and go to 0,0
def add_virtualip(self, lb, vip): resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id, body=vip.to_dict()) return resp, body
Adds the VirtualIP to the specified load balancer.
def get_argument_starttime(self): try: starttime = self.get_argument(constants.PARAM_STARTTIME) return starttime except tornado.web.MissingArgumentError as e: raise Exception(e.log_message)
Helper function to get starttime argument. Raises exception if argument is missing. Returns the starttime argument.
def set_attributes_all(target, attributes, discard_others=True): attrs = target.attrs existing = dict(attrs.items()) if sys.hexversion >= 0x03000000: str_arr_dtype = h5py.special_dtype(vlen=str) else: str_arr_dtype = dtype=h5py.special_dtype(vlen=unicode) for k, (kind, value) in attributes.items(): if kind == : attrs.create(k, [convert_to_str(s) for s in value], dtype=str_arr_dtype) else: if kind == : value = np.bytes_(value) if k not in existing: attrs.create(k, value) else: try: if value.dtype == existing[k].dtype \ and value.shape == existing[k].shape: attrs.modify(k, value) except: attrs.create(k, value) if discard_others: for k in set(existing) - set(attributes): del attrs[k]
Set Attributes in bulk and optionally discard others. Sets each Attribute in turn (modifying it in place if possible if it is already present) and optionally discarding all other Attributes not explicitly set. This function yields much greater performance than the required individual calls to ``set_attribute``, ``set_attribute_string``, ``set_attribute_string_array`` and ``del_attribute`` put together. .. versionadded:: 0.2 Parameters ---------- target : Dataset or Group Dataset or Group to set the Attributes of. attributes : dict The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. discard_others : bool, optional Whether to discard all other Attributes not explicitly set (default) or not. See Also -------- set_attribute set_attribute_string set_attribute_string_array
def on_linkType_changed(self, evt): if self.current_idx < 0: evt.Skip() return n = self.linkType.GetSelection() lt_str = self.linkType.GetString(n) lt = self.link_code[lt_str] self.prep_link_details(lt) lnk = self.page_links[self.current_idx] lnk["update"] = True lnk["kind"] = lt self.enable_update() if lt == fitz.LINK_GOTO: if not self.toPage.Value.isdecimal(): self.toPage.ChangeValue("1") self.toPage.Enable() if not self.toLeft.Value.isdecimal(): self.toLeft.ChangeValue("0") self.toLeft.Enable() if not self.toHeight.Value.isdecimal(): self.toHeight.ChangeValue("0") self.toHeight.Enable() lnk["page"] = int(self.toPage.Value) - 1 lnk["to"] = fitz.Point(int(self.toLeft.Value), int(self.toHeight.Value)) elif lt == fitz.LINK_GOTOR: if not self.toFile.Value: self.toFile.SetValue(self.text_in_rect()) self.toFile.MarkDirty() if not self.toPage.Value.isdecimal(): self.toPage.ChangeValue("1") if not self.toLeft.Value.isdecimal(): self.toLeft.ChangeValue("0") if not self.toHeight.Value.isdecimal(): self.toHeight.ChangeValue("0") self.toLeft.Enable() self.toPage.Enable() self.toFile.Enable() self.toHeight.Enable() lnk["file"] = self.toFile.Value lnk["page"] = int(self.toPage.Value) - 1 lnk["to"] = fitz.Point(int(self.toLeft.Value), int(self.toHeight.Value)) elif lt == fitz.LINK_URI: if not self.toURI.Value: self.toURI.SetValue(self.text_in_rect()) self.toURI.MarkDirty() lnk["uri"] = self.toURI.Value self.toURI.Enable() elif lt == fitz.LINK_LAUNCH: if not self.toFile.Value: self.toFile.SetValue(self.text_in_rect()) self.toFile.MarkDirty() lnk["file"] = self.toFile.Value self.toFile.Enable() elif lt == fitz.LINK_NAMED: self.toName.SetSelection(0) self.toName.Enable() self.page_links[self.current_idx] = lnk evt.Skip() return
User changed link kind, so prepare available fields.
def _parsed_callback_wrapper(self, callback_parsed, callback_plain, foc, data): if foc == R_FEED: point_ref = data[] else: point_ref = Control(self, data[P_ENTITY_LID], data[P_LID], * 32) try: data[] = self._get_point_data_handler_for(point_ref).get_template(data=data[P_DATA]) except RefreshException: if callback_plain: callback_plain(data) except: logger.warning(, foc_to_str(foc), point_ref, if callback_plain else , exc_info=DEBUG_ENABLED) if callback_plain: callback_plain(data) else: callback_parsed(data)
Used to by register_catchall_*data() and Thing class (follow, create_point) to present point data as an object.
def run_scan_command( self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand ) -> PluginScanResult: plugin_class = self._plugins_repository.get_plugin_class_for_command(scan_command) plugin = plugin_class() return plugin.process_task(server_info, scan_command)
Run a single scan command against a server; will block until the scan command has been completed. Args: server_info: The server's connectivity information. The test_connectivity_to_server() method must have been called first to ensure that the server is online and accessible. scan_command: The scan command to run against this server. Returns: The result of the scan command, which will be an instance of the scan command's corresponding PluginScanResult subclass.
def get_gmm_pdf(self, x):
    """Evaluate the Gaussian-mixture likelihood at a single point.

    Sums the weighted normal densities centred at ``self.points`` with
    widths ``self.sigma``, normalised by ``self.W_sum``. Points outside
    ``[self.min_limit, self.max_limit]`` have likelihood 0.

    :param x: point at which the likelihood is computed
    :return: likelihood value at ``x`` (float)
    """
    # Hard support limits: the mixture is defined only on this interval.
    if x < self.min_limit or x > self.max_limit:
        return 0
    total = 0
    for idx in range(self.points.size):
        sigma = self.sigma[idx]
        z = (x - self.points[idx]) / sigma
        density = math.exp(-0.5 * z * z) / (math.sqrt(2. * np.pi) * sigma)
        total += density * self.weights[idx] / self.W_sum
    return total
Calculate the GMM likelihood for a single point. .. math:: y = \\sum_{i=1}^{N} w_i \\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i :label: gmm-likelihood Arguments --------- x : float Point at which likelihood needs to be computed Returns ------- float Likelihood value at x
def module_remove(name):
    """Remove the named SELinux module via the ``__salt__`` execution modules.

    Returns a salt state-style result dict.

    NOTE(review): the dict keys and message strings in this block were lost
    during extraction (empty ``{: ...}`` keys, bare ``.format`` calls);
    restore them from the upstream source before this can run.
    """
    ret = {: name, : True, : , : {}}
    # Look up currently installed modules first.
    modules = __salt__[]()
    if name not in modules:
        # Module is not installed: nothing to remove.
        ret[] = .format(name)
        ret[] = False
        return ret
    if __salt__[](name):
        # Removal succeeded.
        ret[] = .format(name)
        return ret
    # Removal failed.
    ret[] = False
    ret[] = .format(name)
    return ret
Removes SELinux module name The name of the module to remove .. versionadded:: 2016.11.6
def open(self, fp, mode=):
    """Open the NMEAFile and remember the handle on the instance.

    NOTE(review): the default for ``mode`` was lost in extraction
    (presumably ``'r'`` — confirm against upstream). The inner ``open``
    call resolves to the builtin, not this method, since this is a method
    and the builtin is looked up at module scope.
    """
    self._file = open(fp, mode=mode)
    return self._file
Open the NMEAFile.
def _load_data(
    self,
    resource,
    detail_resource=None,
    resource_id=None,
    querystring=None,
    traverse_pagination=False,
    default=DEFAULT_VALUE_SAFEGUARD,
):
    """Load (and cache) a response from one of the Enterprise API endpoints.

    :param resource: endpoint resource name.
    :param detail_resource: sub-resource appended to the path, if any.
    :param resource_id: id of a specific detail record, if any.
    :param querystring: optional query string parameters.
    :param traverse_pagination: collect all pages instead of one page.
    :param default: value returned when the endpoint yields no content.

    NOTE(review): the key strings in the synthesised paginated ``response``
    dict were lost during extraction (``{ : len(results), ... }``) —
    presumably ``count``/``next``/``previous``/``results``; restore from
    upstream before this runs.
    """
    # The sentinel lets callers pass any default, including falsy ones.
    default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}
    querystring = querystring if querystring else {}
    cache_key = utils.get_cache_key(
        resource=resource,
        querystring=querystring,
        traverse_pagination=traverse_pagination,
        resource_id=resource_id
    )
    response = cache.get(cache_key)
    if not response:
        # Cache miss: build the endpoint and fetch.
        endpoint = getattr(self.client, resource)(resource_id)
        endpoint = getattr(endpoint, detail_resource) if detail_resource else endpoint
        response = endpoint.get(**querystring)
        if traverse_pagination:
            # Flatten all pages into one synthetic single-page response.
            results = utils.traverse_pagination(response, endpoint)
            response = {
                : len(results),
                : ,
                : ,
                : results,
            }
        if response:
            # Only cache non-empty responses.
            cache.set(cache_key, response, settings.ENTERPRISE_API_CACHE_TIMEOUT)
    return response or default_val
Loads a response from a call to one of the Enterprise endpoints. :param resource: The endpoint resource name. :param detail_resource: The sub-resource to append to the path. :param resource_id: The resource ID for the specific detail to get from the endpoint. :param querystring: Optional query string parameters. :param traverse_pagination: Whether to traverse pagination or return paginated response. :param default: The default value to return in case of no response content. :return: Data returned by the API.
def format_search(q, **kwargs):
    """Format the results of a metadata search for display.

    NOTE(review): this block was mangled during extraction — the subscript
    keys are empty (``m[]``, ``mdap[]``) and a bare ``return`` precedes the
    loop, making the loop unreachable as written. The original presumably
    yielded or accumulated a formatted line per result; restore from
    upstream before this can run.
    """
    m = search(q, **kwargs)
    count = m[]
    if not count:
        # No hits at all: signal the caller.
        raise DapiCommError()
    return
    for mdap in m[]:
        mdap = mdap[]
        return _format_dap_with_description(mdap)
Formats the results of a search
def get_asset_lookup_session(self, proxy, *args, **kwargs):
    """Return an ``OsidSession`` for the asset lookup service.

    :param proxy: an ``osid.proxy.Proxy``
    :return: a new ``AssetLookupSession``
    :raise Unimplemented: when ``supports_asset_lookup()`` is false
    """
    # Guard: this optional service may not be supported by the provider.
    if not self.supports_asset_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise
    normalized_proxy = self._convert_proxy(proxy)
    try:
        lookup_session = sessions.AssetLookupSession(
            proxy=normalized_proxy, runtime=self._runtime, **kwargs)
    except AttributeError:
        raise
    return lookup_session
Gets the OsidSession associated with the asset lookup service. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetLookupSession) - the new AssetLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_lookup() is false compliance: optional - This method must be implemented if supports_asset_lookup() is true.
def list_events_view(request):
    """Django view: list upcoming events and handle event-form submission.

    NOTE(review): this block was mangled during extraction — the POST key
    test is empty (``if in request.POST``), ``reverse()`` lost its view
    name, and the trailing ``}, context_instance=...`` is residue of a
    stripped ``render_to_response(...)`` return. Restore from upstream
    before this can run.
    """
    page_name = "Upcoming Events"
    profile = UserProfile.objects.get(user=request.user)
    # Bind the form only when the expected POST key is present.
    event_form = EventForm(
        request.POST if in request.POST else None,
        profile=profile,
    )
    if event_form.is_valid():
        event_form.save()
        # Redirect after a successful POST.
        return HttpResponseRedirect(reverse())
    }, context_instance=RequestContext(request))
A list view of upcoming events.
def get_cache_key(bucket, name, args, kwargs):
    """Build a unique SHA1-based cache key for a native-tag call.

    Args and kwargs take part in the hash so identical calls share a key.

    NOTE(review): the join separator and the key format string were lost
    in extraction (``.join`` with nothing before it, bare ``return %``);
    restore from upstream before this can run.
    """
    u = .join(map(str, (bucket, name, args, kwargs)))
    return % sha_constructor(u).hexdigest()
Gets a unique SHA1 cache key for any call to a native tag. Use args and kwargs in hash so that the same arguments use the same key
def mul(value, arg):
    """Multiply ``arg`` with ``value``.

    Tries numeric coercion first; falls back to the raw ``*`` operator
    (e.g. sequence repetition), and returns None if both fail.
    """
    try:
        product = valid_numeric(value) * valid_numeric(arg)
    except (ValueError, TypeError):
        pass
    else:
        return product
    # Coercion failed: let the objects' own __mul__ have a go.
    try:
        return value * arg
    except Exception:
        return
Multiply the arg with the value.
def list_build_configurations_for_product(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """List all BuildConfigurations associated with the given Product.

    Returns the formatted JSON list, or None when the raw call yields
    nothing.
    """
    raw = list_build_configurations_for_product_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(raw) if raw else None
List all BuildConfigurations associated with the given Product.
def smudge(newtype, target):
    """Smudge *target*'s magic bytes with those of a known type.

    Backs up the original bytes first so they can be restored.

    NOTE(review): the database subscript keys were lost in extraction
    (``db[newtype][]``) — presumably the magic-bytes and offset keys;
    restore from upstream before this can run.
    """
    db = smudge_db.get()
    magic_bytes = db[newtype][]
    magic_offset = db[newtype][]
    # Preserve the original bytes before overwriting them.
    _backup_bytes(target, magic_offset, len(magic_bytes))
    _smudge_bytes(target, magic_offset, magic_bytes)
Smudge magic bytes with a known type
def tipbod(ref, body, et):
    """Return the 3x3 matrix transforming positions in inertial
    coordinates to body-equator-and-prime-meridian coordinates.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html

    :param ref: ID of the inertial reference frame to transform from.
    :param body: ID code of the body.
    :param et: Epoch of transformation.
    :return: 3x3 numpy array (inertial -> prime meridian).
    """
    # Marshal Python values into the ctypes forms CSPICE expects.
    ref_p = stypes.stringToCharP(ref)
    body_c = ctypes.c_int(body)
    et_c = ctypes.c_double(et)
    out_matrix = stypes.emptyDoubleMatrix()
    libspice.tipbod_c(ref_p, body_c, et_c, out_matrix)
    return stypes.cMatrixToNumpy(out_matrix)
Return a 3x3 matrix that transforms positions in inertial coordinates to positions in body-equator-and-prime-meridian coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html :param ref: ID of inertial reference frame to transform from. :type ref: str :param body: ID code of body. :type body: int :param et: Epoch of transformation. :type et: float :return: Transformation (position), inertial to prime meridian. :rtype: 3x3-Element Array of floats
def extraction(event_collection, timeframe=None, timezone=None, filters=None,
               latest=None, email=None, property_names=None):
    """Perform a data extraction via the module-level client.

    Returns either a JSON object of events or a response indicating an
    email will be sent with the data.

    :param event_collection: name of the collection to query
    :param timeframe: timeframe in which the events happened
    :param timezone: timezone offset (seconds) for timeframe/interval
    :param filters: list of filter dicts to apply
    :param latest: number of most recent records to return
    :param email: optional address to email results to
    :param property_names: property name(s) to limit the returned fields
    """
    _initialize_client_from_environment()
    params = dict(
        event_collection=event_collection,
        timeframe=timeframe,
        timezone=timezone,
        filters=filters,
        latest=latest,
        email=email,
        property_names=property_names,
    )
    return _client.extraction(**params)
Performs a data extraction Returns either a JSON object of events or a response indicating an email will be sent to you with data. :param event_collection: string, the name of the collection to query :param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days" :param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds :param filters: array of dict, contains the filters you'd like to apply to the data example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}] :param latest: int, the number of most recent records you'd like to return :param email: string, optional string containing an email address to email results to :param property_names: string or list of strings, used to limit the properties returned
def invert(self, copy=False):
    """Invert the striplog, reversing its order and its contents' order.

    Operates in place by default.

    :param copy: make and return an inverted copy instead of mutating.
    :return: None when in-place, or the inverted Striplog copy.

    NOTE(review): the order-flip mapping literal lost its keys/values in
    extraction (``{: , : }[o]``) — presumably mapping
    'elevation' <-> 'depth'; restore from upstream before this can run.
    """
    if copy:
        # Build a new Striplog of per-item inverted copies.
        return Striplog([i.invert(copy=True) for i in self])
    else:
        for i in self:
            i.invert()
        self.__sort()
        # Flip the recorded ordering direction.
        o = self.order
        self.order = {: , : }[o]
        return
Inverts the striplog, changing its order and the order of its contents. Operates in place by default. Args: copy (bool): Whether to operate in place or make a copy. Returns: None if operating in-place, or an inverted copy of the striplog if not.
def _check_point(self, lat, lng): if abs(lat) > 90 or abs(lng) > 180: msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng) raise IllegalPointException(msg)
Checks if latitude and longitude correct
def send(self, message):
    """Send a message object through the resolved provider.

    Resolution order: an explicit ``message.provider``; otherwise the
    router applied to ``message.routing_values``; otherwise the default
    provider. The sent message is passed to the ``onSend`` hook.

    :param message: the OutgoingMessage to send
    :return: the sent message with populated fields
    :raises AssertionError: unknown provider name

    NOTE(review): both assert messages lost their format strings in
    extraction (bare ``.format(provider_name)``); restore from upstream.
    """
    provider_name = self._default_provider
    if message.provider is not None:
        # Caller pinned a provider explicitly; it must be registered.
        assert message.provider in self._providers, \
            .format(provider_name)
        provider = self.get_provider(message.provider)
    else:
        if message.routing_values is not None:
            # Let the router choose; fall back to the default provider.
            provider_name = self.router(message, *message.routing_values) or self._default_provider
            assert provider_name in self._providers, \
                .format(provider_name)
        provider = self.get_provider(provider_name)
    # Record which provider actually handled the message.
    message.provider = provider.name
    message = provider.send(message)
    self.onSend(message)
    return message
Send a message object :type message: data.OutgoingMessage :param message: The message to send :rtype: data.OutgoingMessage :returns: The sent message with populated fields :raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage) :raises MessageSendError: generic errors :raises AuthError: provider authentication failed :raises LimitsError: sending limits exceeded :raises CreditError: not enough money on the account
def _update_mean_in_window(self): self._mean_x_in_window = numpy.mean(self._x_in_window) self._mean_y_in_window = numpy.mean(self._y_in_window)
Compute mean in window the slow way. useful for first step. Considers all values in window See Also -------- _add_observation_to_means : fast update of mean for single observation addition _remove_observation_from_means : fast update of mean for single observation removal
def add_listener(self, listener_type, callback):
    """Register a callback on the widget for the given listener type.

    :param listener_type: event name, e.g. 'objectHovered' or 'objClicked'
    :param callback: python function to invoke

    NOTE(review): the comparison literals were lost in extraction
    (``if listener_type == :``); the branches presumably match the four
    event-name strings. Restore from upstream before this can run.
    """
    self.listener_type= listener_type
    if listener_type == :
        self.listener_callback_source_hover= callback
    elif listener_type == :
        self.listener_callback_source_click= callback
    elif listener_type == :
        self.listener_callback_click= callback
    elif listener_type == :
        self.listener_callback_select= callback
    # Toggle the flag so the frontend notices the listener change.
    self.listener_flag= not self.listener_flag
add a listener to the widget Args: listener_type: string that can either be 'objectHovered' or 'objClicked' callback: python function
def get_mopheader(expnum, ccd, version=, prefix=None):
    """Retrieve the mopheader for an exposure/ccd, from cache, local disk
    or VOSpace, and normalise a few header keywords.

    :param expnum: exposure number
    :param ccd: ccd number
    :param version: image version tag
    :param prefix: optional filename prefix
    :return: the (cached) FITS header

    NOTE(review): many literals were lost in extraction (the ``version``
    default, ``ext=``, ``open(filename, )`` mode, ``warnings.simplefilter``
    action, and every ``header[]`` keyword); restore from upstream before
    this can run.
    """
    prefix = prefix is None and "" or prefix
    mopheader_uri = dbimages_uri(expnum=expnum, ccd=ccd, version=version, prefix=prefix, ext=)
    # Serve from the in-memory cache when possible.
    if mopheader_uri in mopheaders:
        return mopheaders[mopheader_uri]
    filename = os.path.basename(mopheader_uri)
    if os.access(filename, os.F_OK):
        logger.debug("File already on disk: {}".format(filename))
        mopheader_fpt = StringIO(open(filename, ).read())
    else:
        # Not local: pull the file from VOSpace (or equivalent).
        mopheader_fpt = StringIO(open_vos_or_local(mopheader_uri).read())
    with warnings.catch_warnings():
        warnings.simplefilter(, AstropyUserWarning)
        mopheader = fits.open(mopheader_fpt)
        header = mopheader[0].header
        try:
            header[] = get_fwhm(expnum, ccd)
        except IOError:
            # No FWHM measurement available: use a generous default.
            header[] = 10
        header[] = mopheader[0].header[]
        header[] = header[]
        header[] = header[]
        header[] = header[]
        header[] = str(Time(header[], format=, scale=, precision=5).replicate(format=))
        header[] = MAXCOUNT
        mopheaders[mopheader_uri] = header
        mopheader.close()
    return mopheaders[mopheader_uri]
Retrieve the mopheader, either from cache or from vospace @param expnum: @param ccd: @param version: @param prefix: @return: Header
def register_lazy_provider_method(self, cls, method):
    """Register a class method lazily as a provider.

    The wrapper resolves ``cls`` from the container only at call time, so
    registering does not instantiate anything.

    NOTE(review): the attribute name and exception-message literals were
    lost in extraction (``if not in getattr(method, , {})``,
    ``DiayException( % method)``, ``method.__di__[]``); restore from
    upstream before this can run.
    """
    # Only methods decorated with provider metadata are accepted.
    if not in getattr(method, , {}):
        raise DiayException( % method)
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        # Resolve the owning instance lazily, then invoke the real method.
        return getattr(self.get(cls), method.__name__)(*args, **kwargs)
    self.factories[method.__di__[]] = wrapper
Register a class method lazily as a provider.
def roll_alpha_beta(returns, factor_returns, window=10, **kwargs):
    """Compute alpha and beta over a rolling window.

    Aligns the two return series on their common index first, then
    delegates to the aligned implementation.

    :param returns: series of asset returns
    :param factor_returns: series of factor returns
    :param window: rolling window size (in periods)
    :param kwargs: forwarded to the aligned rolling alpha/beta
    """
    aligned_returns, aligned_factor = _aligned_series(returns, factor_returns)
    return roll_alpha_beta_aligned(
        aligned_returns,
        aligned_factor,
        window=window,
        **kwargs
    )
Computes alpha and beta over a rolling window. Parameters ---------- lhs : array-like The first array to pass to the rolling alpha-beta. rhs : array-like The second array to pass to the rolling alpha-beta. window : int Size of the rolling window in terms of the periodicity of the data. out : array-like, optional Array to use as output buffer. If not passed, a new array will be created. **kwargs Forwarded to :func:`~empyrical.alpha_beta`.
def getApplicationKeyByProcessId(self, unProcessId, pchAppKeyBuffer, unAppKeyBufferLen):
    """Return the application key for the given process id.

    The buffer should be at least k_unMaxApplicationKeyLength long.
    Thin pass-through to the underlying OpenVR function table entry.
    """
    return self.function_table.getApplicationKeyByProcessId(
        unProcessId, pchAppKeyBuffer, unAppKeyBufferLen)
Returns the key of the application for the specified Process Id. The buffer should be at least k_unMaxApplicationKeyLength in order to fit the key.
def _Descriptor_from_json(self, obj):
    """Create a Descriptor instance from a json dict.

    Lazily builds (and caches on ``self``) the name -> class registry of
    all known descriptors the first time it is needed.

    :param obj: descriptor dict
    :return: the reconstructed Descriptor
    """
    registry = getattr(self, "_all_descriptors", None)
    if registry is None:
        from mordred import descriptors
        registry = {
            descriptor_cls.__name__: descriptor_cls
            for descriptor_cls in get_descriptors_in_module(descriptors)
        }
        registry[ConstDescriptor.__name__] = ConstDescriptor
        self._all_descriptors = registry
    return _from_json(obj, registry)
Create Descriptor instance from json dict. Parameters: obj(dict): descriptor dict Returns: Descriptor: descriptor
def altersingle(self, alpha, i, b, g, r):
    """Move neuron *i* towards the biased colour (b, g, r) by factor alpha."""
    neuron = self.network[i]
    # Nudge each channel towards its target, in b/g/r order.
    for channel, target in enumerate((b, g, r)):
        neuron[channel] -= alpha * (neuron[channel] - target)
Move neuron i towards biased (b,g,r) by factor alpha
def _launch_editor(starting_text=):
    "Launch editor, let user write text, then return that text."
    # NOTE(review): several literals were lost in extraction — the
    # starting_text default, the EDITOR env-var name and fallback editor,
    # the temp file name after `/`, and both open modes. Restore from
    # upstream before this can run.
    editor = os.environ.get(, )
    with tempfile.TemporaryDirectory() as dirname:
        filename = pathlib.Path(dirname) /
        # Seed the file with the starting text for the user to edit.
        with filename.open(mode=) as handle:
            handle.write(starting_text)
        # Block until the user closes the editor.
        subprocess.call([editor, filename])
        with filename.open(mode=) as handle:
            text = handle.read()
    return text
Launch editor, let user write text, then return that text.
def get_provider(self, provider_name=):
    """Fetch the provider registered under *provider_name*.

    Initializes the provider registry lazily on first access.

    NOTE(review): the default provider name and the AssertionError
    f-string were lost in extraction (``provider_name=``, bare ``f``);
    restore from upstream before this can run.
    """
    try:
        if self._providers is None:
            # First access: build the registry from configuration.
            self._providers = self._initialize_providers()
        return self._providers[provider_name]
    except KeyError:
        raise AssertionError(f)
Fetch provider with the name specified in Configuration file
def _GetWinevtRcDatabaseReader(self): if not self._winevt_database_reader and self._data_location: database_path = os.path.join( self._data_location, self._WINEVT_RC_DATABASE) if not os.path.isfile(database_path): return None self._winevt_database_reader = ( winevt_rc.WinevtResourcesSqlite3DatabaseReader()) if not self._winevt_database_reader.Open(database_path): self._winevt_database_reader = None return self._winevt_database_reader
Opens the Windows Event Log resource database reader. Returns: WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource database reader or None.
def MDL(N, rho, k):
    r"""Minimum Description Length criterion.

    .. math:: MDL(k) = N \log \rho_k + k \log N

    :param N: number of samples
    :param rho: residual error :math:`\rho_k` (scalar or array)
    :param k: number of model parameters
    :return: the MDL value
    """
    # Fixed: the original had a stray bare `r` statement (residue of a raw
    # docstring prefix) that raised NameError at runtime. numpy.log is
    # kept so array-valued `rho` keeps working.
    return N * np.log(rho) + k * np.log(N)
r"""Minimum Description Length

.. math:: MDL(k) = N \log \rho_k + k \log N

where :math:`N` is the number of samples, :math:`\rho_k` the residual error, and :math:`k` the number of model parameters.

:validation: results
def data_format_value(self):
    """Return the data type of the data component as an integer value.

    Prefers the part's data format, then the buffer's pixel format, and
    falls back to the node map's PixelFormat when the query is invalid.
    """
    try:
        if self._part:
            return self._part.data_format
        return self._buffer.pixel_format
    except InvalidParameterException:
        # Neither source is queryable: ask the node map instead.
        return self._node_map.PixelFormat.value
:return: The data type of the data component as integer value.
def summary(raster, geometry=None, all_touched=False, mean_only=False,
            bounds=None, exclude_nodata_value=True):
    """Return ST_SummaryStats-style stats for *raster*, optionally masked
    by *geometry* and filtered by *bounds*.

    :param raster: open rasterio dataset
    :param geometry: GeoJSON-like dict or shapely geometry to mask with
    :param all_touched: include every pixel touched by the geometry
    :param mean_only: return only the mean (float or None)
    :param bounds: (min, max) pixel-value filter; default excludes nodata
    :param exclude_nodata_value: count only scored pixels in ``count``
    :return: Summary namedtuple, float, or the empty result

    NOTE(review): the log.warning format string was lost in extraction
    (bare ``.format(geometry,)``); restore from upstream.
    """
    def no_result(mean_only):
        # The "empty" result shape depends on the requested output form.
        if mean_only:
            return None
        else:
            return Summary(None, None, None, None, None, None)
    try:
        if geometry:
            # Accept GeoJSON dicts or shapely geometries.
            if not isinstance(geometry, dict):
                geojson = mapping(geometry)
            else:
                geojson = geometry
            geometry = shape(geometry)
            result, _ = mask(
                raster,
                [geojson],
                crop=True,
                all_touched=all_touched,
            )
            pixels = result.data.flatten()
        else:
            pixels = raster.read(1).flatten()
    except ValueError:
        # Geometry does not intersect the raster at all.
        return no_result(mean_only)
    # NOTE(review): contains() is called even when geometry is None —
    # presumably only reachable with a geometry; confirm upstream.
    raster_shape = raster_to_shape(raster)
    if not raster_shape.contains(geometry):
        log.warning(
            .format(
                geometry,
            )
        )
    if bounds:
        # Keep only pixels inside [bounds[0], bounds[1]].
        score_mask = numpy.logical_and(
            numpy.greater_equal(pixels, bounds[0]),
            numpy.less_equal(pixels, bounds[1]),
        )
    else:
        # NOTE(review): trailing comma makes this a 1-tuple; numpy.extract
        # ravels it so it still works, but the comma looks accidental.
        score_mask = numpy.not_equal(pixels, raster.nodata),
    scored_pixels = numpy.extract(score_mask, pixels)
    if len(scored_pixels):
        if mean_only:
            return scored_pixels.mean()
        else:
            if exclude_nodata_value:
                count = len(scored_pixels)
            else:
                count = len(pixels)
            return Summary(
                count,
                scored_pixels.sum(),
                scored_pixels.mean(),
                scored_pixels.min(),
                scored_pixels.max(),
                scored_pixels.std(),
            )
    else:
        return no_result(mean_only)
Return ``ST_SummaryStats`` style stats for the given raster. If ``geometry`` is provided, we mask the raster with the given geometry and return the stats for the intersection. The parameter can be a GeoJSON-like object, a WKT string, or a Shapely geometry. If ``all_touched`` is set, we include every pixel that is touched by the given geometry. If set to ``False``, we only include pixels that are "mostly" inside the given geometry (the calculation is done by Rasterio). If ``mean_only`` is ``True`` we only return the mean value of the pixels, not the full set of stats. If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for filtering raster pixels. If not provided, we exclude anything equal to the raster no data value. If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the stats. All other attributes should be obvious and are consistent with PostGIS (``min``, ``max``, ``std``, etc). If ``mean_only`` is ``True``, we simply return a ``float`` or ``None`` representing the mean value of the matching pixels. The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that if it's ``True`` (default) we only count non-nodata pixels (or those pixels within ``bounds`` if defined). If it's ``False`` we return the count of all pixels.
def on_equalarea_specimen_select(self, event):
    """Select the interpretation nearest a double-click on the specimen
    equal-area plot, then redraw and propagate the selection.

    Parameters
    ----------
    event : wx mouse event for the click

    Alters
    ------
    current_fit
    """
    # Nothing plotted yet: nothing to select.
    if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
        return
    pos = event.GetPosition()
    width, height = self.canvas2.get_width_height()
    # Flip y: wx origin is top-left, matplotlib's is bottom-left.
    pos[1] = height - pos[1]
    xpick_data, ypick_data = pos
    xdata_org = self.specimen_EA_xdata
    ydata_org = self.specimen_EA_ydata
    # Map data coordinates into display (pixel) coordinates.
    data_corrected = self.specimen_eqarea.transData.transform(
        vstack([xdata_org, ydata_org]).T)
    xdata, ydata = data_corrected.T
    xdata = list(map(float, xdata))
    ydata = list(map(float, ydata))
    e = 4e0  # pick radius in display (pixel) units
    index = None
    for i, (x, y) in enumerate(zip(xdata, ydata)):
        # NOTE(review): the strict lower bound excludes an exact hit
        # (distance 0) — looks intentional but confirm upstream.
        if 0 < sqrt((x - xpick_data)**2. + (y - ypick_data)**2.) < e:
            index = i
            break
    if index is not None:  # fixed: was the non-idiomatic `index != None`
        self.fit_box.SetSelection(index)
        self.draw_figure(self.s, True)
        self.on_select_fit(event)
Get mouse position on double click find the nearest interpretation to the mouse position then select that interpretation Parameters ---------- event : the wx Mouseevent for that click Alters ------ current_fit
def ordc(item, inset):
    """Return the ordinal position of *item* in a character set, or -1
    when the item is not present.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html

    :param item: item to locate within the set (str)
    :param inset: SpiceCharCell to search
    :return: ordinal position of item within the set (int)
    """
    # Validate inputs before handing them to the C layer.
    assert isinstance(inset, stypes.SpiceCell)
    assert inset.is_char()
    assert isinstance(item, str)
    c_item = stypes.stringToCharP(item)
    return libspice.ordc_c(c_item, ctypes.byref(inset))
The function returns the ordinal position of any given item in a character set. If the item does not appear in the set, the function returns -1. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html :param item: An item to locate within a set. :type item: str :param inset: A set to search for a given item. :type inset: SpiceCharCell :return: the ordinal position of item within the set :rtype: int
def separate_comma_imports(partitions):
    """Turn `import a, b` partitions into separate `import a` / `import b`
    partitions; every other partition passes through untouched."""
    result = []
    for partition in partitions:
        if partition.code_type is not CodeType.IMPORT:
            result.append(partition)
            continue
        import_obj = import_obj_from_str(partition.src)
        if import_obj.has_multiple_imports:
            # One new partition per individual import.
            result.extend(
                CodePartition(CodeType.IMPORT, split_obj.to_text())
                for split_obj in import_obj.split_imports()
            )
        else:
            result.append(partition)
    return result
Turns `import a, b` into `import a` and `import b`
def crossing(b, component, time, dynamics_method=, ltte=True, tol=1e-4, maxiter=1000):
    """Find the time of projected-separation minimum (eclipse crossing)
    between *component* and its sibling, via Newton iteration starting
    at *time*. ``tol`` is in days.

    NOTE(review): the dynamics_method default and the branch comparison
    strings were lost in extraction (``dynamics_method=``,
    ``in [, ]``, ``==:``); also the ``bs`` branch passes an undefined
    name ``compute`` — confirm against upstream.
    """
    def projected_separation_sq(time, b, dynamics_method, cind1, cind2, ltte=True):
        # Squared on-sky separation of the two stars at a single time.
        times = np.array([time])
        if dynamics_method in [, ]:
            ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle(b, times, compute=None, ltte=ltte)
        elif dynamics_method==:
            ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle_bs(b, times, compute, ltte=ltte)
        elif dynamics_method==:
            ts, xs, ys, zs, vxs, vys, vzs = dynamics.keplerian.dynamics_from_bundle(b, times, compute=None, ltte=ltte, return_euler=False)
        else:
            raise NotImplementedError
        return (xs[cind2][0]-xs[cind1][0])**2 + (ys[cind2][0]-ys[cind1][0])**2
    # Indices of the component and its sibling in the star list.
    starrefs = b.hierarchy.get_stars()
    cind1 = starrefs.index(component)
    cind2 = starrefs.index(b.hierarchy.get_sibling_of(component))
    return newton(projected_separation_sq, x0=time, args=(b, dynamics_method, cind1, cind2, ltte), tol=tol, maxiter=maxiter)
tol in days
def _size_from_header(cls, header): result = [] for data in header: result.append(header[data]) return result
Get the size of each columns from the header. :param header: The header template we have to get the size from. :type header: dict :return: The maximal size of the each data to print. :rtype: list
def generic_path_not_found(*args):
    """Create a Lambda Service generic PathNotFound response.

    Parameters
    ----------
    args : list
        Arguments Flask passes to the handler (unused).

    Returns
    -------
    Flask.Response
        Response object representing the GenericPathNotFound error.
    """
    error_code, status_code = LambdaErrorResponses.PathNotFoundException
    body = LambdaErrorResponses._construct_error_response_body(
        LambdaErrorResponses.LOCAL_SERVICE_ERROR, "PathNotFoundException")
    headers = LambdaErrorResponses._construct_headers(error_code)
    return BaseLocalService.service_response(body, headers, status_code)
Creates a Lambda Service Generic PathNotFound Response Parameters ---------- args list List of arguments Flask passes to the method Returns ------- Flask.Response A response object representing the GenericPathNotFound Error
def wait_for_build(self, interval=5, path=None):
    """Poll the project record every *interval* seconds until the running
    build finishes; return the build info on success, raise LuminosoError
    on failure. Logs progress roughly every two minutes.

    NOTE(review): literals were lost in extraction — the default ``path``
    expression is truncated (``path = path or``), and every
    ``response[]`` subscript is missing its key (presumably the build
    info, "success" and "running"-style fields, and the log message).
    Restore from upstream before this can run.
    """
    path = path or
    start = time.time()
    next_log = 0
    while True:
        response = self.get(path)[]
        if not response:
            # No build info at all: nothing to wait for.
            raise ValueError()
        if response[]:
            if response[]:
                return response
            else:
                raise LuminosoError(response)
        elapsed = time.time() - start
        if elapsed > next_log:
            logger.info(, next_log)
            next_log += 120
        time.sleep(interval)
A convenience method designed to inform you when a project build has completed. It polls the API every `interval` seconds until there is not a build running. At that point, it returns the "last_build_info" field of the project record if the build succeeded, and raises a LuminosoError with the field as its message if the build failed. If a `path` is not specified, this method will assume that its URL is the URL for the project. Otherwise, it will use the specified path (which should be "/projects/<project_id>/").
def delete(args):
    """Delete jobs from the job manager; if still running in the grid
    (and not local), stop them first.

    NOTE(review): the status comparison literal was lost in extraction
    (``and in args.status``); restore from upstream before this can run.
    """
    jm = setup(args)
    # Running grid jobs must be stopped before their records are removed.
    if not args.local and in args.status:
        stop(args)
    jm.delete(job_ids=get_ids(args.job_ids),
              array_ids=get_ids(args.array_ids),
              delete_logs=not args.keep_logs,
              delete_log_dir=not args.keep_log_dir,
              status=args.status)
Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped.
def update_state_machine_tab_label(self, state_machine_m):
    """Refresh the notebook tab label when the state machine's system
    path, root-state name or dirty flag changed.

    :param StateMachineModel state_machine_m: model that changed

    NOTE(review): the ``self.tabs[sm_id][]`` subscripts lost their keys
    in extraction (the cached dirty/path/name fields), and the warning
    message calls ``.format(sm_id)`` without a placeholder — restore both
    from upstream.
    """
    sm_id = state_machine_m.state_machine.state_machine_id
    if sm_id in self.tabs:
        sm = state_machine_m.state_machine
        # Only touch the widget when one of the cached values is stale.
        if not self.tabs[sm_id][] == sm.marked_dirty or \
                not self.tabs[sm_id][] == sm.file_system_path or \
                not self.tabs[sm_id][] == sm.root_state.name:
            label = self.view["notebook"].get_tab_label(self.tabs[sm_id]["page"]).get_child().get_children()[0]
            set_tab_label_texts(label, state_machine_m, unsaved_changes=sm.marked_dirty)
        # Refresh the cache regardless.
        self.tabs[sm_id][] = sm.file_system_path
        self.tabs[sm_id][] = sm.marked_dirty
        self.tabs[sm_id][] = sm.root_state.name
    else:
        logger.warning("State machine tab label can not be updated there is no tab.".format(sm_id))
Updates tab label if needed because system path, root state name or marked_dirty flag changed :param StateMachineModel state_machine_m: State machine model that has changed :return:
def get_paths_for_attribute_set(self, keys):
    """Return the parts whose target structure contains *all* given keys.

    *keys* may be a single key or a list/set of keys. Because the search
    runs with ``on_targets=True``, this only matches attributes of
    targets, not top-level properties; the returned paths are tuples of
    attribute names, not direct object pointers.
    """
    if not isinstance(keys, (list, set)):
        keys = [keys]

    def _has_all_keys(name, structure):
        # True only when every requested key is present in the structure.
        return all(key in structure for key in keys)

    return self.find_path(_has_all_keys, on_targets=True)
Given a list/set of keys (or one key), returns the parts that have all of the keys in the list. Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES, only those of targets. These paths are not pointers to the objects themselves, but tuples of attribute names that allow us to (attempt) to look up that object in any belief state.
def in_transaction(self):
    """Return True if there is an open transaction.

    Side effect: the cached flag is cleared when the connection has
    dropped, so a stale transaction is never reported as open.
    """
    still_open = self._in_transaction and self.is_connected
    self._in_transaction = still_open
    return still_open
:return: True if there is an open transaction.
def indexes_all(ol, value):
    """Return every index at which *value* occurs in list *ol*.

    >>> indexes_all([1, 'a', 3, 'a', 4, 'a', 5], 'a')
    [1, 3, 5]
    """
    # Fixed: the original contained a stray `aaaa` statement that raised
    # NameError at runtime; the manual index loop is also replaced by the
    # idiomatic enumerate comprehension.
    return [index for index, element in enumerate(ol) if element == value]
from elist.elist import * ol = [1,'a',3,'a',4,'a',5] indexes_all(ol,'a')
def app(environ, start_response):
    """WSGI entry point: dispatch the request through the Router."""
    return HttpRequestHandler(environ, start_response, Router).dispatch()
Function called by the WSGI server.
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=None):
    """Return the field's value prepared for a database lookup.

    Naive datetimes are localized to the default timezone; aware ones are
    converted to it, before delegating to the parent implementation.
    """
    localized = (default_tz.localize(value) if value.tzinfo is None
                 else value.astimezone(default_tz))
    parent_lookup = super(LocalizedDateTimeField, self).get_db_prep_lookup
    return parent_lookup(lookup_type, localized,
                         connection=connection, prepared=prepared)
Returns field's value prepared for database lookup.
def guggenheim_katayama(target, K2, n, temperature=, critical_temperature=, critical_pressure=):
    # NOTE(review): the stray `r` below is residue of a stripped raw
    # docstring prefix, and the three keyword defaults lost their
    # dictionary-key strings in extraction — restore both from upstream
    # before this can run.
    r
    # Guggenheim-Katayama surface-tension correlation:
    #   sigma = sigma_o * (1 - T/Tc)**n,  sigma_o = K2 * Tc**(1/3) * Pc**(2/3)
    T = target[temperature]
    Pc = target[critical_pressure]
    Tc = target[critical_temperature]
    sigma_o = K2*Tc**(1/3)*Pc**(2/3)
    value = sigma_o*(1-T/Tc)**n
    return value
r""" Missing description Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. K2 : scalar Fluid specific constant n : scalar Fluid specific constant temperature : string The dictionary key containing the temperature values (K) critical_temperature : string The dictionary key containing the critical temperature values (K) critical_pressure : string The dictionary key containing the critical pressure values (K)
def watch_docs(c):
    """Watch both doc trees and rebuild them when files change.

    Rebuilds the API docs when the source package changes and the WWW
    docs when the README changes. The package name comes from
    ``packaging.package`` or, failing that, ``tests.package``.
    """
    # WWW tree: README + sites/www trigger a www build.
    www_ctx = Context(config=c.config.clone())
    www_ctx.update(**www.configuration())
    www_handler = make_handler(
        ctx=www_ctx,
        task_=www["build"],
        regexes=[r"\./README.rst", r"\./sites/www"],
        ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
    )
    # API docs tree: sites/docs plus (optionally) the source package.
    docs_ctx = Context(config=c.config.clone())
    docs_ctx.update(**docs.configuration())
    watch_patterns = [r"\./sites/docs"]
    package = c.get("packaging", {}).get("package", None)
    if package is None:
        package = c.get("tests", {}).get("package", None)
    if package:
        watch_patterns.append(r"\./{}/".format(package))
    api_handler = make_handler(
        ctx=docs_ctx,
        task_=docs["build"],
        regexes=watch_patterns,
        ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
    )
    observe(www_handler, api_handler)
Watch both doc trees & rebuild them if files change. This includes e.g. rebuilding the API docs if the source code changes; rebuilding the WWW docs if the README changes; etc. Reuses the configuration values ``packaging.package`` or ``tests.package`` (the former winning over the latter if both defined) when determining which source directory to scan for API doc updates.
def get_language_settings(language_code, site_id=None):
    """Return the language settings dict for *language_code* on the
    current (or given) site, falling back to the global default entry.

    NOTE(review): three literals were lost in extraction — the settings
    attribute name in ``getattr(settings, , None)`` (presumably
    SITE_ID), the ``lang_dict[]`` key (presumably 'code'), and the
    fallback ``FLUENT_BLOGS_LANGUAGES[]`` key — restore from upstream.
    """
    if site_id is None:
        site_id = getattr(settings, , None)
    for lang_dict in FLUENT_BLOGS_LANGUAGES.get(site_id, ()):
        if lang_dict[] == language_code:
            return lang_dict
    # No per-site match: use the global default settings.
    return FLUENT_BLOGS_LANGUAGES[]
Return the language settings for the current site
def _GetFlagValues(self, flags):
    """Return a separator-joined string describing which events a set of
    fsevents flags indicates.

    :param flags: fsevents record flags (int)
    :return: joined flag descriptions (str)

    NOTE(review): the join separator string was lost in extraction
    (bare ``return .join(...)``) — presumably ', '; restore from
    upstream before this can run.
    """
    event_types = []
    # Collect the description of every flag bit that is set.
    for event_flag, description in self._FLAG_VALUES.items():
        if event_flag & flags:
            event_types.append(description)
    return .join(event_types)
Determines which events are indicated by a set of fsevents flags. Args: flags (int): fsevents record flags. Returns: str: a comma separated string containing descriptions of the flag values stored in an fsevents record.
def insertLayer(self, layer, name=None):
    """Insert *layer* into the font under *name*.

    The layer's data is copied into a new layer rather than inserted
    directly. When *name* is omitted the layer's own name is used; an
    existing layer under that name is removed first.
    """
    target_name = layer.name if name is None else name
    target_name = normalizers.normalizeLayerName(target_name)
    # Replace any existing layer of the same name.
    if target_name in self:
        self.removeLayer(target_name)
    return self._insertLayer(layer, name=target_name)
Insert **layer** into the font. :: >>> layer = font.insertLayer(otherLayer, name="layer 2") This will not insert the layer directly. Rather, a new layer will be created and the data from **layer** will be copied to to the new layer. **name** indicates the name that should be assigned to the layer after insertion. If **name** is not given, the layer's original name must be used. If the layer does not have a name, an error must be raised. The data that will be inserted from **layer** is the same data as documented in :meth:`BaseLayer.copy`.
def editcomponent(self, data):
    """Edit a component in Bugzilla.

    Takes a dict with mandatory product/component/initialowner elements;
    optional elements use the same names as ``addcomponent()``. The input
    dict is copied before conversion so the caller's dict is untouched.
    """
    payload = dict(data)
    self._component_data_convert(payload, update=True)
    return self._proxy.Component.update(payload)
A method to edit a component in Bugzilla. Takes a dict, with mandatory elements of product. component, and initialowner. All other elements are optional and use the same names as the addcomponent() method.
def add_group(self, name, devices):
    """Add a new device group containing *devices*.

    :return: a :class:`DeviceGroup` instance.
    """
    group = self.add_device(name, "group")
    group.add_to_group(devices)
    return group
Add a new device group. :return: a :class:`DeviceGroup` instance.
def packvalue(value, *properties):
    """Return a function that stores *value* at the given property path
    of a NamedStruct's target. Often used as nstruct's "init" parameter.

    :param value: the fixed value to store
    :param properties: attribute path, walked from ``_target``
    :return: a function taking a NamedStruct and performing the store
    """
    def _assign(namedstruct):
        # Walk down to the owner of the final attribute, then set it.
        obj = namedstruct._target
        for attr in properties[:-1]:
            obj = getattr(obj, attr)
        setattr(obj, properties[-1], value)
    return _assign
Store a specified value to specified property path. Often used in nstruct "init" parameter. :param value: a fixed value :param properties: specified field name, same as sizefromlen. :returns: a function which takes a NamedStruct as parameter, and store the value to property path.