def exists_using_casper(self, filename):
    casper_results = casper.Casper(self.connection["jss"])
    distribution_servers = casper_results.find("distributionservers")

    # Gather the package filenames available on each distribution server.
    all_packages = []
    for distribution_server in distribution_servers:
        packages = set()
        for package in distribution_server.findall("packages/package"):
            packages.add(os.path.basename(package.find("fileURL").text))
        all_packages.append(packages)

    # A file only "exists" if every server has it.
    base_set = all_packages.pop()
    for packages in all_packages:
        base_set = base_set.intersection(packages)

    return filename in base_set
Check for the existence of a package file. Unlike other DistributionPoint types, JDS and CDP types have no documented interface for checking whether the server and its children have a complete copy of a file. The best we can do is check for an object using the API /packages URL (JSS.Package()) or /scripts and look for matches on the filename. If this is not enough, this method uses the results of the casper.jxml page to determine whether a package exists. This is an undocumented feature and as such should probably not be relied upon. Please note: scripts are not listed per distribution server the way packages are. For scripts, the best you can do is use the regular exists method, which tests whether the file exists on ALL configured distribution servers. This may register False if the JDS is busy syncing them.
def bundle_changed(self, event):
    kind = event.get_kind()
    bundle = event.get_bundle()
    if kind == BundleEvent.STOPPING_PRECLEAN:
        self._unregister_bundle_factories(bundle)
    elif kind == BundleEvent.STARTED:
        self._register_bundle_factories(bundle)
    elif kind == BundleEvent.UPDATE_BEGIN:
        self._autorestart_store_components(bundle)
    elif kind == BundleEvent.UPDATED:
        self._autorestart_components(bundle)
        self._autorestart_clear_components(bundle)
    elif kind == BundleEvent.UPDATE_FAILED:
        self._autorestart_clear_components(bundle)
A bundle event has been triggered.

:param event: The bundle event
def timestr_to_seconds(
    x: Union[dt.date, str], *, inverse: bool = False, mod24: bool = False
) -> int:
    if not inverse:
        try:
            hours, mins, seconds = x.split(":")
            result = int(hours) * 3600 + int(mins) * 60 + int(seconds)
            if mod24:
                result %= 24 * 3600
        except Exception:
            result = np.nan
    else:
        try:
            seconds = int(x)
            if mod24:
                seconds %= 24 * 3600
            hours, remainder = divmod(seconds, 3600)
            mins, secs = divmod(remainder, 60)
            result = f"{hours:02d}:{mins:02d}:{secs:02d}"
        except Exception:
            result = np.nan
    return result
Given an HH:MM:SS time string ``x``, return the number of seconds past midnight that it represents. In keeping with GTFS standards, the hours entry may be greater than 23. If ``mod24``, then return the number of seconds modulo ``24*3600``. If ``inverse``, then do the inverse operation. In this case, if ``mod24`` also, then first take the number of seconds modulo ``24*3600``.
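A quick doctest-style sketch of both directions, assuming the function above plus numpy are importable as written:

>>> timestr_to_seconds("26:30:00")  # GTFS hours may exceed 23
95400
>>> timestr_to_seconds("26:30:00", mod24=True)
9000
>>> timestr_to_seconds(95400, inverse=True)
'26:30:00'
>>> timestr_to_seconds(95400, inverse=True, mod24=True)
'02:30:00'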
def build_job_configs(self, args):
    # NOTE: quoted string literals were stripped from this snippet in the
    # source; the dictionary keys and templates below are plausible
    # reconstructions following the fermipy job-pipeline conventions.
    job_configs = {}

    components = Component.build_from_yamlfile(args['comp'])
    NAME_FACTORY.update_base_dict(args['data'])

    if self._comp_dict is None or self._comp_dict_file != args['library']:
        self._comp_dict_file = args['library']
        self._comp_dict = make_catalog_comp_dict(sources=self._comp_dict_file,
                                                 basedir=NAME_FACTORY.base_dict['basedir'])
    else:
        print("Using cached catalog dict from %s" % args['library'])

    catalog_info_dict = self._comp_dict['catalog_info_dict']
    comp_info_dict = self._comp_dict['comp_info_dict']

    n_src_per_job = args['nsrc']
    if args['make_xml']:
        SrcmapsCatalog_SG._make_xml_files(catalog_info_dict, comp_info_dict)

    for catalog_name, catalog_info in catalog_info_dict.items():
        n_cat_src = len(catalog_info.catalog.table)
        n_job = int(math.ceil(float(n_cat_src) / n_src_per_job))
        for comp in components:
            zcut = "zmax%i" % comp.zmax
            key = comp.make_key('{ebin_name}_{evtype_name}')
            name_keys = dict(zcut=zcut,
                             sourcekey=catalog_name,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             coordsys=comp.coordsys,
                             irf_ver=NAME_FACTORY.irf_ver(),
                             mktime='none',
                             fullpath=True)
            for i_job in range(n_job):
                full_key = "%s_%02i" % (key, i_job)
                srcmin = i_job * n_src_per_job
                srcmax = min(srcmin + n_src_per_job, n_cat_src)
                outfile = NAME_FACTORY.srcmaps(**name_keys).replace('.fits', "_%02i.fits" % (i_job))
                logfile = make_nfs_path(outfile.replace('.fits', '.log'))
                job_configs[full_key] = dict(cmap=NAME_FACTORY.ccube(**name_keys),
                                             expcube=NAME_FACTORY.ltcube(**name_keys),
                                             irfs=NAME_FACTORY.irfs(**name_keys),
                                             bexpmap=NAME_FACTORY.bexpcube(**name_keys),
                                             outfile=outfile,
                                             logfile=logfile,
                                             srcmdl=catalog_info.srcmdl_name,
                                             evtype=comp.evtype,
                                             srcmin=srcmin,
                                             srcmax=srcmax)
    return job_configs
Hook to build job configurations
def take(list_, index_list):
    try:
        return [list_[index] for index in index_list]
    except TypeError:
        # index_list is not iterable; treat it as a single index or slice.
        return list_[index_list]
Selects a subset of a list based on a list of indices. This is similar to np.take, but pure python.

Args:
    list_ (list): some indexable object
    index_list (list, slice, int): some indexing object

Returns:
    list or scalar: subset of the list

CommandLine:
    python -m utool.util_list --test-take

SeeAlso:
    ut.dict_take
    ut.dict_subset
    ut.none_take
    ut.compress

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_list import *  # NOQA
    >>> list_ = [0, 1, 2, 3]
    >>> index_list = [2, 0]
    >>> result = take(list_, index_list)
    >>> print(result)
    [2, 0]

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_list import *  # NOQA
    >>> list_ = [0, 1, 2, 3]
    >>> index = 2
    >>> result = take(list_, index)
    >>> print(result)
    2

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_list import *  # NOQA
    >>> list_ = [0, 1, 2, 3]
    >>> index = slice(1, None, 2)
    >>> result = take(list_, index)
    >>> print(result)
    [1, 3]
def validate(cls, mapper_spec):
    super(RawDatastoreInputReader, cls).validate(mapper_spec)
    params = _get_params(mapper_spec)
    entity_kind = params[cls.ENTITY_KIND_PARAM]
    if "." in entity_kind:
        logging.warning(
            ". detected in entity kind %s specified for reader %s. "
            "Assuming entity kind contains the dot.",
            entity_kind, cls.__name__)
    if cls.FILTERS_PARAM in params:
        filters = params[cls.FILTERS_PARAM]
        for f in filters:
            if f[1] != "=":
                raise BadReaderParamsError(
                    "Only equality filters are supported: %s", f)
Inherit docs.
def get_input(input_func, input_str):
    val = input_func("Please enter your {0}: ".format(input_str))
    while not val or not len(val.strip()):
        val = input_func("You didn't enter a valid {0}, please try again: ".format(input_str))
    return val
Get input from the user given an input function and an input string
def attowiki_distro_path():
    # NOTE: the path-separator literals were stripped in the source; '/' is
    # the plausible reconstruction.
    attowiki_path = os.path.abspath(__file__)
    if attowiki_path[-1] != '/':
        attowiki_path = attowiki_path[:attowiki_path.rfind('/')]
    else:
        attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
    return attowiki_path
Return the absolute complete path where attowiki is located.

.. todo:: use pkg_resources?
def _is_utf_8(txt):
    assert isinstance(txt, six.binary_type)
    try:
        _ = six.text_type(txt, 'utf-8')
    except (TypeError, UnicodeEncodeError):
        return False
    else:
        return True
Check a string is utf-8 encoded.

:param bytes txt: utf-8 string
:return: Whether the string is utf-8 encoded or not
:rtype: bool
def reply_webapi(self, text, attachments=None, as_user=True, in_thread=None):
    # NOTE: the membership-test literal was stripped in the source;
    # 'thread_ts' is the plausible reconstruction.
    if in_thread is None:
        in_thread = 'thread_ts' in self.body
    if in_thread:
        self.send_webapi(text, attachments=attachments, as_user=as_user,
                         thread_ts=self.thread_ts)
    else:
        text = self.gen_reply(text)
        self.send_webapi(text, attachments=attachments, as_user=as_user)
Send a reply to the sender using the Web API. (This function supports formatted messages when using a bot integration.) If the message was sent in a thread, answer in the same thread by default.
def _dump_cml_molecule(f, molecule):
    # NOTE: the format-string literals were stripped in the source; the
    # CML attribute templates below are plausible reconstructions.
    extra = getattr(molecule, "extra", {})
    attr_str = " ".join("%s='%s'" % (key, value) for key, value in extra.items())
    f.write(" <molecule id='%s' %s>\n" % (molecule.title, attr_str))
    f.write("  <atomArray>\n")
    atoms_extra = getattr(molecule, "atoms_extra", {})
    for counter, number, coordinate in zip(range(molecule.size), molecule.numbers,
                                           molecule.coordinates/angstrom):
        atom_extra = atoms_extra.get(counter, {})
        attr_str = " ".join("%s='%s'" % (key, value) for key, value in atom_extra.items())
        f.write("   <atom id='a%i' elementType='%s' x3='%s' y3='%s' z3='%s' %s/>\n" % (
            counter, periodic[number].symbol,
            coordinate[0], coordinate[1], coordinate[2], attr_str,
        ))
    f.write("  </atomArray>\n")
    if molecule.graph is not None:
        bonds_extra = getattr(molecule, "bonds_extra", {})
        f.write("  <bondArray>\n")
        for edge in molecule.graph.edges:
            bond_extra = bonds_extra.get(edge, {})
            attr_str = " ".join("%s='%s'" % (key, value) for key, value in bond_extra.items())
            i1, i2 = edge
            f.write("   <bond atomRefs2='a%i a%i' %s/>\n" % (i1, i2, attr_str))
        f.write("  </bondArray>\n")
    f.write(" </molecule>\n")
Dump a single molecule to a CML file.

Arguments:
 | ``f``  --  a file-like object
 | ``molecule``  --  a Molecule instance
def appt_exists(self, complex: str, house: str, appt: str) -> bool:
    try:
        self.check_appt(complex, house, appt)
    except exceptions.RumetrApptNotFound:
        return False
    return True
Shortcut to check if appt exists in our database.
def _kill_worker_threads(self):
    # Place one death token on the queue for each worker thread.
    for x in range(self.number_of_threads):
        self.task_queue.put((None, None))
    self.logger.debug("waiting for standard worker threads to stop")
    for t in self.thread_list:
        t.join()
This function coerces the consumer/worker threads to kill themselves. When called by the queuing thread, one death token will be placed on the queue for each thread. Each worker thread is always looking for the death token. When it encounters it, it immediately runs to completion without drawing anything more off the queue. This is a blocking call. The thread using this function will wait for all the worker threads to die.
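A minimal, self-contained sketch of the same poison-pill protocol described above (names are hypothetical, not this module's actual classes):

import queue
import threading

POISON = (None, None)  # one "death token" per worker

def worker(task_queue):
    while True:
        task = task_queue.get()
        if task == POISON:
            break  # run to completion without drawing more work
        func, args = task
        func(*args)

task_queue = queue.Queue()
threads = [threading.Thread(target=worker, args=(task_queue,)) for _ in range(4)]
for t in threads:
    t.start()
task_queue.put((print, ("hello",)))
for _ in threads:
    task_queue.put(POISON)  # one death token per thread
for t in threads:
    t.join()  # blocking: wait for all workers to die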
def measure_power(self, hz, duration, tag, offset=30):
    num = duration * hz
    oset = offset * hz
    data = None
    self.usb("auto")
    time.sleep(1)
    with self.dut.handle_usb_disconnect():
        time.sleep(1)
        try:
            data = self.take_samples(hz, num, sample_offset=oset)
            if not data:
                raise MonsoonError(
                    "No data was collected in measurement %s." % tag)
            data.tag = tag
            self.dut.log.info("Measurement summary: %s", repr(data))
            return data
        finally:
            self.mon.StopDataCollection()
            self.log.info("Finished taking samples, reconnecting to dut.")
            self.usb("on")
            self.dut.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_USB_ON)
            time.sleep(10)
            self.dut.log.info("Dut reconnected.")
Measure power consumption of the attached device.

Because it takes some time for the device to calm down after the usb
connection is cut, an offset is set for each measurement. The default is
30s. The total time taken to measure will be (duration + offset).

Args:
    hz: Number of samples to take per second.
    duration: Number of seconds to take samples for in each step.
    offset: The number of seconds of initial data to discard.
    tag: A string that's the name of the collected data group.

Returns:
    A MonsoonData object with the measured power data.
def has_pkgs_signed_with(self, allowed_keys):
    # NOTE: the command and separator literals were stripped in the source;
    # the rpm query below is a plausible reconstruction.
    if not allowed_keys or not isinstance(allowed_keys, list):
        raise ConuException("allowed_keys must be a list")
    command = ['rpm', '-qa', '--qf', '%{name} %{SIGPGP:pgpsig}\n']
    cont = self.run_via_binary(command=command)
    try:
        out = cont.logs_unicode()[:-1].split('\n')
        check_signatures(out, allowed_keys)
    finally:
        cont.stop()
        cont.delete()
    return True
Check signature of packages installed in image.

Raises exception when:

* rpm binary is not installed in image
* parsing of rpm fails
* there are packages in image that are not signed with one of allowed keys

:param allowed_keys: list of allowed keys
:return: bool
def check_permissions(self, request):
    permissions = [permission() for permission in self.permission_classes]
    for permission in permissions:
        if not permission.has_permission(request):
            raise PermissionDenied()
Check if the request should be permitted. Raises an appropriate exception if the request is not permitted.
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
    # NOTE: the default key_type literal was stripped in the source;
    # 'des-192' is the plausible pysaml2 default.
    return self.crypto.encrypt_assertion(
        statement, enc_key, template, key_type, node_xpath)
Will encrypt an assertion.

:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
def state_in_ec(self, ec_index):
    with self._mutex:
        if ec_index >= len(self.owned_ecs):
            # Index points past the owned contexts: look it up among the
            # participating contexts instead.
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            return self.participating_ec_states[ec_index]
        else:
            return self.owned_ec_states[ec_index]
Get the state of the component in an execution context.

@param ec_index The index of the execution context to check the state in.
       This index is into the total array of contexts, that is both owned
       and participating contexts. If the value of ec_index is greater
       than the length of @ref owned_ecs, that length is subtracted from
       ec_index and the result used as an index into
       @ref participating_ecs.
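A small sketch of the index arithmetic, assuming a hypothetical component with three owned and two participating contexts:

# ec_index 0..2 -> owned_ec_states[0..2]
# ec_index 3..4 -> participating_ec_states[0..1]
state = comp.state_in_ec(4)  # == comp.participating_ec_states[1]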
def dynamics_from_bundle_bs(b, times, compute=None, return_roche_euler=False, **kwargs):
    # NOTE: the parameter-name and context literals were stripped in the
    # source; the quoted names below follow phoebe's conventions and should
    # be treated as reconstructions.
    stepsize = 0.01
    orbiterror = 1e-16
    computeps = b.get_compute(compute, check_visible=False, force_ps=True)
    ltte = computeps.get_value('ltte', check_visible=False, **kwargs)

    hier = b.hierarchy
    starrefs = hier.get_stars()
    orbitrefs = hier.get_orbits()

    def mean_anom(t0, t0_perpass, period):
        return 2 * np.pi * (t0 - t0_perpass) / period

    masses = [b.get_value('mass', u.solMass, component=component,
                          context='component') * c.G.to('solRad3 / (solMass d2)').value
              for component in starrefs]
    smas = [b.get_value('sma', u.AU, component=component, context='component')
            for component in orbitrefs]
    eccs = [b.get_value('ecc', component=component, context='component')
            for component in orbitrefs]
    incls = [b.get_value('incl', u.rad, component=component, context='component')
             for component in orbitrefs]
    per0s = [b.get_value('per0', u.rad, component=component, context='component')
             for component in orbitrefs]
    long_ans = [b.get_value('long_an', u.rad, component=component, context='component')
                for component in orbitrefs]
    t0_perpasses = [b.get_value('t0_perpass', u.d, component=component, context='component')
                    for component in orbitrefs]
    periods = [b.get_value('period', u.d, component=component, context='component')
               for component in orbitrefs]

    vgamma = b.get_value('vgamma', context='system', unit=u.solRad/u.d)
    t0 = b.get_value('t0', context='system', unit=u.d)

    mean_anoms = [b.get_value('mean_anom', u.rad, component=component, context='component')
                  for component in orbitrefs]

    return dynamics_bs(times, masses, smas, eccs, incls, per0s, long_ans,
                       mean_anoms, t0, vgamma, stepsize, orbiterror, ltte,
                       return_roche_euler=return_roche_euler)
Parse parameters in the bundle and call :func:`dynamics`.

See :func:`dynamics` for more detailed information.

NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)

Args:
    b: (Bundle) the bundle with a set hierarchy
    times: (list or array) times at which to run the dynamics
    stepsize: (float, optional) stepsize for the integration [default: 0.01]
    orbiterror: (float, optional) orbiterror for the integration [default: 1e-16]
    ltte: (bool, default False) whether to account for light travel time effects.

Returns:
    t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times, the
    remaining are a list of numpy arrays (a numpy array per star - in
    order given by b.hierarchy.get_stars()) for the cartesian positions
    and velocities of each star at those same times.
def parse_application_name(setup_filename):
    # NOTE: the node-type and attribute-name literals were stripped in the
    # source; the values below are plausible reconstructions. A `name`
    # initializer is also added so the final return cannot raise NameError.
    name = None
    with open(setup_filename, 'r') as setup_file:
        fst = RedBaron(setup_file.read())
        for node in fst:
            if (node.type == 'atomtrailers' and str(node.name) == 'setup'):
                for call in node.call:
                    if str(call.name) == 'name':
                        value = call.value
                        if hasattr(value, 'to_python'):
                            value = value.to_python()
                        name = str(value)
                        break
                if name:
                    break
    return name
Parse a setup.py file for the name.

Returns:
    name, or None
def open_ext_pack_file(self, path):
    if not isinstance(path, basestring):
        raise TypeError("path can only be an instance of type basestring")
    file_p = self._call("openExtPackFile", in_p=[path])
    file_p = IExtPackFile(file_p)
    return file_p
Attempts to open an extension pack file in preparation for installation.

in path of type str
    The path of the extension pack tarball. This can optionally be
    followed by a "::SHA-256=hex-digit" of the tarball.

return file_p of type :class:`IExtPackFile`
    The interface of the extension pack file object.
def getlist(self, key, default=[]):
    if key in self:
        return [node.value for node in self._map[key]]
    return default
Returns:
    The list of values for <key> if <key> is in the dictionary, else
    <default>. If <default> is not provided, an empty list is returned.
def sort(expr, field=None, keytype=None, ascending=True):
    # NOTE: the Weld template literal was stripped in the source; the
    # template below is a plausible reconstruction.
    weld_obj = WeldObject(encoder_, decoder_)
    expr_var = weld_obj.update(expr)
    if isinstance(expr, WeldObject):
        expr_var = expr.obj_id
        weld_obj.dependencies[expr_var] = expr

    if field is not None:
        key_str = "x.$%s" % field
    else:
        key_str = "x"

    if not ascending:
        # Negate the key to sort in descending order.
        key_str = key_str + "* %s(-1)" % keytype

    weld_template = "sort(%(expr)s, |x| %(key)s)"
    weld_obj.weld_code = weld_template % {"expr": expr_var, "key": key_str}
    return weld_obj
Sorts the vector. If the field parameter is provided then the sort
operators on a vector of structs where the sort key is the field of the
struct.

Args:
    expr (WeldObject)
    field (Int)
def list_subdomains(self, limit=None, offset=None):
    return self.manager.list_subdomains(self, limit=limit, offset=offset)
Returns a list of all subdomains for this domain.
def from_str(cls, coordinate):
    # NOTE: the error message and the relative-prefix literal were stripped
    # in the source; '++' follows TikZ's relative-coordinate syntax.
    m = cls._coordinate_str_regex.match(coordinate)
    if m is None:
        raise ValueError('invalid coordinate string')
    if m.group(1) == '++':
        relative = True
    else:
        relative = False
    return TikZCoordinate(
        float(m.group(2)), float(m.group(4)), relative=relative)
Build a TikZCoordinate object from a string.
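A hedged usage sketch, assuming PyLaTeX's TikZCoordinate class and the '++' relative-prefix reconstruction above:

from pylatex.tikz import TikZCoordinate

c1 = TikZCoordinate.from_str('(1.0,2.0)')    # absolute coordinate
c2 = TikZCoordinate.from_str('++(0.5,0.5)')  # relative coordinate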
def saveData(self, dataOutputFile, categoriesOutputFile):
    if self.records is None:
        return False

    if not dataOutputFile.endswith("csv"):
        raise TypeError("data output file must be csv.")
    if not categoriesOutputFile.endswith("json"):
        raise TypeError("category output file must be json")

    # Ensure the output directories exist.
    dataOutputDirectory = os.path.dirname(dataOutputFile)
    if not os.path.exists(dataOutputDirectory):
        os.makedirs(dataOutputDirectory)
    categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
    if not os.path.exists(categoriesOutputDirectory):
        os.makedirs(categoriesOutputDirectory)

    with open(dataOutputFile, "w") as f:
        writer = csv.DictWriter(f, fieldnames=self.fieldNames)
        writer.writeheader()
        writer.writerow(self.types)
        writer.writerow(self.specials)
        for data in self.records:
            for record in data:
                writer.writerow(record)

    with open(categoriesOutputFile, "w") as f:
        f.write(json.dumps(self.categoryToId,
                           sort_keys=True,
                           indent=4,
                           separators=(",", ": ")))

    return dataOutputFile
Save the processed data and the associated category mapping.

@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff saveData() is successful.
def _cram_to_fastq_region(cram_file, work_dir, base_name, region, data):
    # NOTE: the `ranges` argument literal was stripped in the source; the
    # quoted region template below is a plausible reconstruction.
    ref_file = tz.get_in(["reference", "fasta", "base"], data)
    resources = config_utils.get_resources("bamtofastq", data["config"])
    cores = tz.get_in(["config", "algorithm", "num_cores"], data, 1)
    max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
    rext = "-%s" % region.replace(":", "_").replace("-", "_") if region else "full"
    out_s, out_p1, out_p2, out_o1, out_o2 = [
        os.path.join(work_dir, "%s%s-%s.fq.gz" % (base_name, rext, fext))
        for fext in ["s1", "p1", "p2", "o1", "o2"]]
    if not utils.file_exists(out_p1):
        with file_transaction(data, out_s, out_p1, out_p2, out_o1, out_o2) as \
                (tx_out_s, tx_out_p1, tx_out_p2, tx_out_o1, tx_out_o2):
            cram_file = objectstore.cl_input(cram_file)
            sortprefix = "%s-sort" % utils.splitext_plus(tx_out_s)[0]
            cmd = ("bamtofastq filename={cram_file} inputformat=cram T={sortprefix} "
                   "gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY "
                   "F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} "
                   "reference={ref_file}")
            if region:
                cmd += " ranges='{region}'"
            do.run(cmd.format(**locals()), "CRAM to fastq %s" % region if region else "")
    return [[out_p1, out_p2, out_s]]
Convert CRAM to fastq in a specified region.
def ladders(session, game_id):
    # NOTE: the lobby dictionary key was stripped in the source;
    # 'ladder_ids' below is a hypothetical reconstruction.
    if isinstance(game_id, str):
        game_id = lookup_game_id(game_id)
    lobbies = get_lobbies(session, game_id)
    ladder_ids = set()
    for lobby in lobbies:
        ladder_ids |= set(lobby['ladder_ids'])
    return list(ladder_ids)
Get a list of ladder IDs.
def ProcessAllReadyRequests(self):
    request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(
        self.rdf_flow.client_id,
        self.rdf_flow.flow_id,
        next_needed_request=self.rdf_flow.next_request_to_process)
    if not request_dict:
        return 0

    processed = 0
    while self.rdf_flow.next_request_to_process in request_dict:
        request, responses = request_dict[self.rdf_flow.next_request_to_process]
        self.RunStateMethod(request.next_state, request, responses)
        self.rdf_flow.next_request_to_process += 1
        processed += 1
        self.completed_requests.append(request)

    if processed and self.IsRunning() and not self.outstanding_requests:
        self.RunStateMethod("End")
        if (self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING and
                not self.outstanding_requests):
            self.MarkDone()

    self.PersistState()
    if not self.IsRunning():
        self._ClearAllRequestsAndResponses()
    return processed
Processes all requests that are due to run.

Returns:
    The number of processed requests.
def register_column(self, column, expr, deltas=None, checkpoints=None, odo_kwargs=None):
    self._table_expressions[column] = ExprData(
        expr,
        deltas,
        checkpoints,
        odo_kwargs,
    )
Explicitly map a single bound column to a collection of blaze expressions. The expressions need to have ``timestamp`` and ``as_of`` columns.

Parameters
----------
column : BoundColumn
    The pipeline dataset to map to the given expressions.
expr : Expr
    The baseline values.
deltas : Expr, optional
    The deltas for the data.
checkpoints : Expr, optional
    The forward fill checkpoints for the data.
odo_kwargs : dict, optional
    The keyword arguments to forward to the odo calls internally.

See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
def validator(
    *fields: str, pre: bool = False, whole: bool = False, always: bool = False, check_fields: bool = True
) -> Callable[[AnyCallable], classmethod]:
    # NOTE: the error-message and separator literals were stripped in the
    # source; the texts below are plausible reconstructions following the
    # pydantic sources.
    if not fields:
        raise ConfigError('validator with no fields specified')
    elif isinstance(fields[0], FunctionType):
        raise ConfigError(
            "validators should be used with fields and keyword arguments, not bare. "
            "E.g. usage should be `@validator('something')`"
        )

    def dec(f: AnyCallable) -> classmethod:
        if not in_ipython():
            ref = f.__module__ + '.' + f.__qualname__
            if ref in _FUNCS:
                raise ConfigError(f'duplicate validator function "{ref}"')
            _FUNCS.add(ref)
        f_cls = classmethod(f)
        f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields)
        return f_cls

    return dec
Decorate methods on the class indicating that they should be used to validate fields.

:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
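A usage sketch in the style of pydantic's decorator (model and field names are illustrative):

from pydantic import BaseModel, validator

class User(BaseModel):
    name: str

    @validator('name')
    def name_must_not_be_blank(cls, v):
        if not v.strip():
            raise ValueError('name must not be blank')
        return v.strip()

User(name='  Ada ')  # -> User(name='Ada')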
def write_yara(self, output_file):
    # NOTE: the mode and separator literals were stripped in the source;
    # 'w' and '\n' are plausible reconstructions.
    fout = open(output_file, 'w')
    fout.write('\n')
    for iocid in self.yara_signatures:
        signature = self.yara_signatures[iocid]
        fout.write(signature)
        fout.write('\n')
    fout.close()
    return True
Write out yara signatures to a file.
def ip_rtm_config_route_static_route_oif_vrf_static_route_oif_name(self, **kwargs):
    # NOTE: the kwargs.pop() key literals were stripped in the source; they
    # are reconstructed from the element names, following the pattern of
    # this auto-generated module.
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf")
    static_route_next_vrf_dest_key = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest")
    static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest')
    next_hop_vrf_key = ET.SubElement(static_route_oif_vrf, "next-hop-vrf")
    next_hop_vrf_key.text = kwargs.pop('next_hop_vrf')
    static_route_oif_type_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-type")
    static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
    static_route_oif_name = ET.SubElement(static_route_oif_vrf, "static-route-oif-name")
    static_route_oif_name.text = kwargs.pop('static_route_oif_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def create_archaius(self):
    utils.banner("Creating S3")
    s3.init_properties(env=self.env, app=self.app)
Create S3 bucket for Archaius.
def copy(self):
    rv = object.__new__(self.__class__)
    rv.__dict__.update(self.__dict__)
    rv.identifiers = object.__new__(self.identifiers.__class__)
    rv.identifiers.__dict__.update(self.identifiers.__dict__)
    return rv
Create a copy of the current one.
def split_action_id(id):
    assert isinstance(id, basestring)
    split = id.split('.', 1)
    toolset = split[0]
    name = ''
    if len(split) > 1:
        name = split[1]
    return (toolset, name)
Splits an id in the toolset and specific rule parts. E.g. 'gcc.compile.c++' returns ('gcc', 'compile.c++')
def routeevent(self, path, routinemethod, container=None, host=None, vhost=None,
               method=[b'GET', b'HEAD']):
    # NOTE: several byte-string and attribute-name literals were stripped
    # in the source; the values below follow the upstream vlcp sources and
    # should be treated as reconstructions.
    regm = re.compile(path + b'$')
    if vhost is None:
        vhost = self.vhost
    if container is None:
        container = getattr(routinemethod, '__self__', None)

    def ismatch(event):
        if vhost is not None and getattr(event.createby, 'vhost', '') != vhost:
            return False
        psplit = urlsplit(event.path)
        if psplit.path[:1] != b'/':
            return False
        if psplit.netloc and host is not None and host != psplit.netloc:
            return False
        if getattr(event.createby, 'unquoteplus', True):
            realpath = unquote_plus_to_bytes(psplit.path)
        else:
            realpath = unquote_to_bytes(psplit.path)
        m = regm.match(realpath)
        if m is None:
            return False
        event.realpath = realpath
        event.querystring = psplit.query
        event.path_match = m
        return True

    def func(event, scheduler):
        try:
            if event.canignore:
                return
            event.canignore = True
            c = event.connection if container is None else container
            c.subroutine(routinemethod(event), False)
        except Exception:
            pass

    for m in method:
        self.registerHandler(
            HttpRequestEvent.createMatcher(host, None, m, _ismatch=ismatch), func)
Route specified path to a routine factory.

:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(event), event is the HttpRequestEvent
:param container: routine container. If None, default to self for bound method, or event.connection if not
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost. If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
    # NOTE: several string literals were stripped in the source; the '.mat'
    # extension and the quoting in the matlab commands are plausible
    # reconstructions.
    from mlabwrap import mlab
    filename, varnames, outOf = __saveVarsHelper(
        filename, varNamesStr, outOf, '.mat', **opts)
    try:
        for varname in varnames:
            mlab._set(varname, outOf[varname])
        mlab._do("save('%s','%s')" % (filename, "','".join(varnames)), nout=0)
    finally:
        assert varnames
        mlab._do("clear('%s')" % "','".join(varnames), nout=0)
Hacky convenience function to dump a couple of python variables in a .mat file. See `awmstools.saveVars`.
def LoadImage(filename):
    app_dir = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(app_dir, 'images', filename)
    return Tkinter.PhotoImage(file=path)
return an image from the images/ directory
def from_xml(cls, xml):
    s = parse_string(xml)
    return Sentence(s.split("\n")[0], token=s.tags, language=s.language)
Returns a new Sentence from the given XML string.
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
    # NOTE: the kwargs.pop() key literals were stripped in the source; they
    # are reconstructed from the element names, following the pattern of
    # this auto-generated module.
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    index_key.text = kwargs.pop('index')
    command = ET.SubElement(rule, "command")
    cmdlist = ET.SubElement(command, "cmdlist")
    interface_h = ET.SubElement(cmdlist, "interface-h")
    interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
    interface = ET.SubElement(interface_ge_leaf, "interface")
    gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
    gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def transformed(self, t):
    assert t.shape[0] == t.shape[1]
    extra_dimensions = t.shape[0] - self.dimensions - 1

    def transform(a):
        # Pad the point to homogeneous coordinates, transform, then drop
        # the extra components.
        return t.dot(np.concatenate(
            (a, [0] * extra_dimensions, [1]), axis=0
        ))[:self.dimensions]

    return Rect(transform(self.mins), transform(self.maxes))
Transforms an m-dimensional Rect using t, an nxn matrix that can transform vectors in the form: [x, y, z, …, 1]. The Rect is padded to n dimensions.
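A small sketch, assuming the Rect class above plus numpy, with a hypothetical Rect(mins, maxes) constructor and dimensions == 2: translating a 2-D rect by (1, 2) using a 3x3 homogeneous matrix.

import numpy as np

r = Rect(np.array([0.0, 0.0]), np.array([2.0, 3.0]))
t = np.array([[1.0, 0.0, 1.0],
              [0.0, 1.0, 2.0],
              [0.0, 0.0, 1.0]])  # translate x by 1, y by 2
r2 = r.transformed(t)  # mins -> [1.0, 2.0], maxes -> [3.0, 5.0]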
def _expand(self, normalization, csphase, **kwargs):
    # NOTE: the normalization-name literals were stripped in the source;
    # the pyshtools conventions ('4pi', 'schmidt', 'unnorm', 'ortho') are
    # plausible reconstructions.
    if normalization.lower() == '4pi':
        norm = 1
    elif normalization.lower() == 'schmidt':
        norm = 2
    elif normalization.lower() == 'unnorm':
        norm = 3
    elif normalization.lower() == 'ortho':
        norm = 4
    else:
        raise ValueError(
            "The normalization must be '4pi', 'ortho', 'schmidt', "
            "or 'unnorm'. Input value was {:s}."
            .format(repr(normalization))
        )
    cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
                               sampling=self.sampling, **kwargs)
    coeffs = SHCoeffs.from_array(cilm,
                                 normalization=normalization.lower(),
                                 csphase=csphase, copy=False)
    return coeffs
Expand the grid into real spherical harmonics.
def set_members(self, name, members, mode=None):
    # NOTE: the regex and CLI command literals were stripped in the source;
    # the pyeapi-style templates below are plausible reconstructions.
    commands = list()
    grpid = re.search(r'(\d+)', name).group()
    current_members = self.get_members(name)
    lacp_mode = self.get_lacp_mode(name)
    if mode and mode != lacp_mode:
        lacp_mode = mode
        self.set_lacp_mode(grpid, lacp_mode)
    # Remove members that are no longer in the member list.
    for member in set(current_members).difference(members):
        commands.append('interface %s' % member)
        commands.append('no channel-group %s' % grpid)
    # Add new member interfaces to the port-channel.
    for member in set(members).difference(current_members):
        commands.append('interface %s' % member)
        commands.append('channel-group %s mode %s' % (grpid, lacp_mode))
    return self.configure(commands) if commands else True
Configures the array of member interfaces for the Port-Channel.

Args:
    name(str): The Port-Channel interface name to configure the member interfaces
    members(list): The list of Ethernet interfaces that should be member interfaces
    mode(str): The LACP mode to configure the member interfaces to. Valid values are 'on', 'passive', 'active'. When there are existing channel-group members and their lacp mode differs from this attribute, all of those members will be removed and then re-added using the specified lacp mode. If this attribute is omitted, the existing lacp mode will be used for new member additions.

Returns:
    True if the operation succeeds otherwise False
def list(self, args, unknown):
    pm = plugins.PluginManager.get()
    plugs = pm.get_all_plugins()
    if not plugs:
        print "No standalone addons found!"
        return
    print "Addons:"
    for p in plugs:
        if isinstance(p, plugins.JB_StandalonePlugin):
            print "\t%s" % p.__class__.__name__
List all addons that can be launched.

:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: None
def getElementsByClassName(self, className, root='root', useIndex=True):
    # NOTE: the default value for `root` was stripped in the source; the
    # string 'root' is reconstructed from the docstring. A stray duplicated
    # token has also been removed.
    (root, isFromRoot) = self._handleRootArg(root)
    if useIndex is True and self.indexClassNames is True:
        elements = self._classNameMap.get(className, [])
        if isFromRoot is False:
            _hasTagInParentLine = self._hasTagInParentLine
            elements = [x for x in elements if _hasTagInParentLine(x, root)]
        return TagCollection(elements)
    return AdvancedHTMLParser.getElementsByClassName(self, className, root)
getElementsByClassName - Searches and returns all elements containing a given class name.

@param className <str> - A one-word class name
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. If string 'root', the root of the parsed tree will be used.
@param useIndex <bool> - If useIndex is True and class names are indexed [see constructor], only the index will be used. Otherwise a full search is performed.
def copy_from(self, other):
    self.__attributes__ = {k: v.copy() for k, v in other.__attributes__.items()}
    self.__fields__ = {k: v.copy() for k, v in other.__fields__.items()}
    self.__relations__ = {k: v.copy() for k, v in other.__relations__.items()}
    self.maps = {k: m.copy() for k, m in other.maps.items()}
    self.dimensions = other.dimensions.copy()
Copy properties from another ChemicalEntity
def max_normal_germline_depth(in_file, params, somatic_info):
    bcf_in = pysam.VariantFile(in_file)
    depths = []
    for rec in bcf_in:
        stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
        if tz.get_in(["normal", "depth"], stats):
            depths.append(tz.get_in(["normal", "depth"], stats))
    if depths:
        return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"]
Calculate threshold for excluding potential heterozygotes based on normal depth.
def import_image_from_image(self, image, repository=None, tag=None, changes=None):
    return self.import_image(
        image=image, repository=repository, tag=tag, changes=changes
    )
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only supports importing from another image, like the ``FROM`` Dockerfile parameter.

Args:
    image (str): Image name to import from
    repository (str): The repository to create
    tag (str): The tag to apply
async def parseResults(self, api_data):
    results = []
    xml_text = api_data.decode("utf-8")
    xml_root = xml.etree.ElementTree.fromstring(xml_text)
    status = xml_root.get("status")
    if status != "ok":
        raise Exception("Unexpected Last.fm response status: %s" % (status))
    img_elements = xml_root.findall("album/image")
    thumbnail_url = None
    thumbnail_size = None
    for img_element in img_elements:
        img_url = img_element.text
        if not img_url:
            continue
        lfm_size = img_element.get("size")
        if lfm_size == "mega":
            check_metadata = CoverImageMetadata.SIZE
        else:
            check_metadata = CoverImageMetadata.NONE
        try:
            size = __class__.SIZES[lfm_size]
        except KeyError:
            continue
        if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)):
            thumbnail_url = img_url
            thumbnail_size = size[0]
        format = os.path.splitext(img_url)[1][1:].lower()
        format = SUPPORTED_IMG_FORMATS[format]
        results.append(LastFmCoverSourceResult(img_url,
                                               size,
                                               format,
                                               thumbnail_url=thumbnail_url,
                                               source=self,
                                               check_metadata=check_metadata))
    return results
See CoverSource.parseResults.
def iterfields(klass):
    is_field = lambda x: isinstance(x, TypedField)
    for name, field in inspect.getmembers(klass, predicate=is_field):
        yield name, field
Iterate over the input class members and yield its TypedFields.

Args:
    klass: A class (usually an Entity subclass).

Yields:
    (class attribute name, TypedField instance) tuples.
def subject(self) -> Optional[UnstructuredHeader]:
    # NOTE: the header-name literal was stripped in the source; b'subject'
    # is reconstructed from the docstring.
    try:
        return cast(UnstructuredHeader, self[b'subject'][0])
    except (KeyError, IndexError):
        return None
The ``Subject`` header.
def typestring(obj):
    obj_type = type(obj)
    return '.'.join((obj_type.__module__, obj_type.__name__))
Make a string for the object's type.

Parameters
----------
obj : obj
    Python object.

Returns
-------
`str`
    String representation of the object's type. This is the type's importable namespace.

Examples
--------
>>> import docutils.nodes
>>> para = docutils.nodes.paragraph()
>>> typestring(para)
'docutils.nodes.paragraph'
def clone(self):
    result = copy.copy(self)
    result._compound_mfrs = copy.deepcopy(self._compound_mfrs)
    return result
Create a complete copy of the stream. :returns: A new MaterialStream object.
def nmtoken_from_string(text):
    # NOTE: the replacement and allowed-character literals were stripped in
    # the source; the values below are plausible reconstructions based on
    # the Nmtoken character set described in the docstring.
    text = text.replace('-', '--')
    return ''.join([(((not char.isalnum() and
                       char not in ['.', '-', '_', ':']) and
                      str(ord(char))) or char)
                    for char in text])
Returns a Nmtoken from a string. It is useful to produce XHTML valid values for the 'name' attribute of an anchor.

CAUTION: the function is surjective: 2 different texts might lead to the same result. This is improbable on a single page.

Nmtoken is the type that is a mixture of characters supported in attributes such as 'name' in HTML 'a' tag. For example, <a name="Articles%20%26%20Preprints"> should be transformed to <a name="Articles372037263720Preprints"> using this function. http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken

Also note that this function filters more characters than specified by the definition of Nmtoken ('CombiningChar' and 'Extender' charsets are filtered out).
def p_statement_break(p):
    if len(p) == 3:
        p[0] = ast.Break(None, lineno=p.lineno(1))
    else:
        p[0] = ast.Break(p[2], lineno=p.lineno(1))
statement : BREAK SEMI
          | BREAK expr SEMI
def path(self):
    return "/projects/%s/datasets/%s/models/%s" % (
        self._proto.project_id,
        self._proto.dataset_id,
        self._proto.model_id,
    )
str: URL path for the model's APIs.
def split_into(max_num_chunks, list_to_chunk):
    max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
    return chunks_of(max_chunk_size, list_to_chunk)
Yields chunks of the list, splitting it into at most ``max_num_chunks`` chunks of equal maximum size.
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
                          ascending, rs=np.random.RandomState()):
    # NOTE: the method-name literals were stripped in the source; they are
    # reconstructed from the docstring.
    G, S = exprs.shape
    expr_mat = exprs.values.T
    perm_cor_tensor = np.tile(expr_mat, (permutation_num + 1, 1, 1))
    # Shuffle class labels for all but the last (unpermuted) layer.
    for arr in perm_cor_tensor[:-1]:
        rs.shuffle(arr)
    classes = np.array(classes)
    pos = classes == pos
    neg = classes == neg
    pos_cor_mean = perm_cor_tensor[:, pos, :].mean(axis=1)
    neg_cor_mean = perm_cor_tensor[:, neg, :].mean(axis=1)
    pos_cor_std = perm_cor_tensor[:, pos, :].std(axis=1, ddof=1)
    neg_cor_std = perm_cor_tensor[:, neg, :].std(axis=1, ddof=1)
    if method == 'signal_to_noise':
        cor_mat = (pos_cor_mean - neg_cor_mean) / (pos_cor_std + neg_cor_std)
    elif method == 't_test':
        denom = 1.0 / G
        cor_mat = (pos_cor_mean - neg_cor_mean) / np.sqrt(
            denom * pos_cor_std ** 2 + denom * neg_cor_std ** 2)
    elif method == 'ratio_of_classes':
        cor_mat = pos_cor_mean / neg_cor_mean
    elif method == 'diff_of_classes':
        cor_mat = pos_cor_mean - neg_cor_mean
    elif method == 'log2_ratio_of_classes':
        cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
    else:
        logging.error("Please provide correct method name!!!")
        sys.exit(0)
    cor_mat_ind = cor_mat.argsort()
    cor_mat.sort()
    if ascending:
        return cor_mat_ind, cor_mat
    return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
Build shuffled ranking matrix when permutation_type eq to phenotype.

:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
    1. 'signal_to_noise'.
    2. 't_test'.
    3. 'ratio_of_classes' (also referred to as fold change).
    4. 'diff_of_classes'.
    5. 'log2_ratio_of_classes'.
:param int permutation_num: how many times of classes is being shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.

:return: returns two 2d ndarray with shape (nperm, gene_num).

    | cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
    | cor_mat: sorted and permutated (exclude last row) ranking matrix.
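A minimal numpy illustration of the signal-to-noise statistic used above (toy data, no permutation layers):

import numpy as np

expr = np.array([[1.0, 2.0, 9.0, 10.0],   # gene A, samples s1..s4
                 [5.0, 6.0, 5.5, 6.5]])   # gene B
classes = np.array(['pos', 'pos', 'neg', 'neg'])
pos, neg = classes == 'pos', classes == 'neg'
s2n = (expr[:, pos].mean(axis=1) - expr[:, neg].mean(axis=1)) / \
      (expr[:, pos].std(axis=1, ddof=1) + expr[:, neg].std(axis=1, ddof=1))
# gene A separates the classes strongly, gene B barely: s2n ~ [-5.66, -0.35]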
def ws_db004(self, value=None):
    # NOTE: the error-message literal was stripped in the source; the text
    # below is a plausible reconstruction.
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field `ws_db004`'.format(value))
    self._ws_db004 = value
Corresponds to IDD Field `ws_db004`. Mean wind speed coincident with 0.4% dry-bulb temperature.

Args:
    value (float): value for IDD Field `ws_db004`
        Unit: m/s
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
def alias(self, alias):
    assert isinstance(alias, basestring), "alias should be a string"
    return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
Returns a new :class:`DataFrame` with an alias set.

:param alias: string, an alias name to be set for the DataFrame.

>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
def reshape_text(buffer, from_row, to_row):
    # NOTE: the regex and whitespace literals were stripped in the source;
    # the values below follow the upstream pyvim sources and should be
    # treated as reconstructions.
    lines = buffer.text.splitlines(True)
    lines_before = lines[:from_row]
    lines_after = lines[to_row + 1:]
    lines_to_reformat = lines[from_row:to_row + 1]

    if lines_to_reformat:
        # Take indentation from the first line.
        length = re.search(r'^\s*', lines_to_reformat[0]).end()
        indent = lines_to_reformat[0][:length].replace('\n', '')

        # Take all the 'words' from the lines to be reshaped.
        words = ''.join(lines_to_reformat).split()

        # And reshape to the buffer width.
        width = (buffer.text_width or 80) - len(indent)
        reshaped_text = [indent]
        current_width = 0
        for w in words:
            if current_width:
                if len(w) + current_width + 1 > width:
                    reshaped_text.append('\n')
                    reshaped_text.append(indent)
                    current_width = 0
                else:
                    reshaped_text.append(' ')
                    current_width += 1
            reshaped_text.append(w)
            current_width += len(w)

        if reshaped_text[-1] != '\n':
            reshaped_text.append('\n')

        # Apply the result.
        buffer.document = Document(
            text=''.join(lines_before + reshaped_text + lines_after),
            cursor_position=len(''.join(lines_before + reshaped_text)))
Reformat text, taking the width into account. `to_row` is included. (Vi 'gq' operator.)
def get_recipe_instances_for_badges(self, badges):
    # NOTE: the logger.debug message literal was stripped in the source;
    # the text below is a plausible reconstruction.
    from .exceptions import BadgeNotFound
    valid, invalid = [], []
    if not isinstance(badges, (list, tuple)):
        badges = [badges]
    for badge in badges:
        try:
            recipe = self.get_recipe_instance(badge)
            valid.append(recipe)
        except BadgeNotFound:
            logger.debug('Badge "%s" not installed', badge)
            invalid.append(badge)
    return (valid, invalid)
Takes a list of badge slugs and returns a tuple: ``(valid, invalid)``.
def aes_encrypt(self, plain, sec_key, enable_b64=True):
    plain = helper.to_str(plain)
    sec_key = helper.to_str(sec_key)
    # Pad to a multiple of the block size with ascii '\0' (per the docs).
    plain += '\0' * (self.bs - len(plain) % self.bs)
    plain = helper.to_bytes(plain)
    cipher = self.aes_obj(sec_key).encrypt(plain)
    cip = base64.b64encode(cipher) if enable_b64 else cipher
    return helper.to_str(cip)
Encrypt data with ``aes``, then optionally ``base64``-encode the encrypted data.

- Encrypts ``msg`` with ``sec_key``, and finally chooses whether to base64-encode the result.
- The message length must be a multiple of 16; shorter messages are padded with ascii '\\0'.

.. warning:: The message length must be a multiple of 16; shorter messages are padded with ascii '\\0'.

:param plain:
:type plain: str
:param sec_key:
:type sec_key: str
:param enable_b64:
:type enable_b64: bool
:return:
:rtype:
def auto_schedule_hosting_devices(self, plugin, context, agent_host):
    # NOTE: the log-message literals were stripped in the source; the
    # messages below are plausible reconstructions.
    query = context.session.query(bc.Agent)
    query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
                            host=agent_host, admin_state_up=True)
    try:
        cfg_agent_db = query.one()
    except (exc.MultipleResultsFound, exc.NoResultFound):
        LOG.debug('No enabled Cisco cfg agent on host %s', agent_host)
        return
    if cfg_agentschedulers_db.CfgAgentSchedulerDbMixin.is_agent_down(
            cfg_agent_db.heartbeat_timestamp):
        LOG.warning('Cisco cfg agent %s is not alive', cfg_agent_db.id)
    return cfg_agent_db
Schedules unassociated hosting devices to Cisco cfg agent. Schedules hosting devices to agent running on <agent_host>.
def eq_(result, expected, msg=None):
    # NOTE: the message-template literals were stripped in the source; the
    # templates below are plausible reconstructions.
    params = {'expected': expected, 'result': result}
    aka = "\n\naka\n\nExpected:\n%(expected)r\n\nGot:\n%(result)r\n" % params
    default_msg = "\nExpected:\n%(expected)s\n\nGot:\n%(result)s\n" % params
    if (
        (repr(result) != six.text_type(result)) or
        (repr(expected) != six.text_type(expected))
    ):
        default_msg += aka
    assertion_msg = msg or default_msg
    assert result == expected, assertion_msg
Shadow of the Nose builtin which presents easier to read multiline output.
def rgb2gray(image_rgb_array):
    image_gray_array = [0.0] * len(image_rgb_array)
    for index in range(0, len(image_rgb_array), 1):
        # Weighted luma sum: Y = 0.2989 R + 0.5870 G + 0.1140 B
        image_gray_array[index] = (float(image_rgb_array[index][0]) * 0.2989 +
                                   float(image_rgb_array[index][1]) * 0.5870 +
                                   float(image_rgb_array[index][2]) * 0.1140)
    return image_gray_array
! @brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel.
@details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum:

\f[Y = 0.2989R + 0.587G + 0.114B\f]

@param[in] image_rgb_array (list): Image represented by RGB list.

@return (list) Image as gray colored matrix, where one element of list describes pixel.

@code
    colored_image = read_image(file_name);
    gray_image = rgb2gray(colored_image);
@endcode

@see read_image()
def telegram():
    # NOTE: all command literals were stripped in the source; the shell
    # commands below are hypothetical reconstructions that only preserve
    # the original control flow.
    if not exists('~/bin/telegram/Telegram', msg='Install Telegram:'):
        run('mkdir -p /tmp/telegram')
        run('cd /tmp/telegram && wget https://telegram.org/dl/desktop/linux')
        run('cd /tmp/telegram && tar xf linux*')
        with warn_only():
            run('mkdir -p ~/bin')
            run('mv /tmp/telegram/Telegram ~/bin/telegram')
    else:
        print('Telegram seems to be installed already')
    run('ln -snf ~/bin/telegram/Telegram ~/bin/telegram',
        msg="\nCreate executable 'telegram':")
Install the Telegram desktop client for linux (x64).

More info: https://telegram.org, https://desktop.telegram.org/
def set_fan_mode(self, mode):
    # NOTE: the service/variable name literals were stripped in the source;
    # 'Mode', 'NewMode' and 'fanmode' are plausible reconstructions.
    self.set_service_value(
        self.thermostat_fan_service,
        'Mode',
        'NewMode',
        mode)
    self.set_cache_value('fanmode', mode)
Set the fan mode
def fit_polynomial(pixel_data, mask, clip=True):
    mask = np.logical_and(mask, pixel_data > 0)
    if not np.any(mask):
        return pixel_data
    x, y = np.mgrid[0:pixel_data.shape[0], 0:pixel_data.shape[1]]
    x2 = x * x
    y2 = y * y
    xy = x * y
    o = np.ones(pixel_data.shape)
    a = np.array([x[mask], y[mask], x2[mask], y2[mask], xy[mask], o[mask]])
    coeffs = scipy.linalg.lstsq(a.transpose(), pixel_data[mask])[0]
    output_pixels = np.sum([coeff * index for coeff, index in
                            zip(coeffs, [x, y, x2, y2, xy, o])], 0)
    if clip:
        output_pixels[output_pixels > 1] = 1
        output_pixels[output_pixels < 0] = 0
    return output_pixels
Return an "image" which is a polynomial fit to the pixel data Fit the image to the polynomial Ax**2+By**2+Cxy+Dx+Ey+F pixel_data - a two-dimensional numpy array to be fitted mask - a mask of pixels whose intensities should be considered in the least squares fit clip - if True, clip the output array so that pixels less than zero in the fitted image are zero and pixels that are greater than one are one.
def assemble(
    iterable, patterns=None, minimum_items=2, case_sensitive=True,
    assume_padded_when_ambiguous=False
):
    # NOTE: the named-group literals were stripped in the source; 'index'
    # and 'padding' below follow the upstream clique sources. A stray
    # duplicated token has also been removed.
    collection_map = defaultdict(set)
    collections = []
    remainder = []

    flags = 0
    if not case_sensitive:
        flags |= re.IGNORECASE

    compiled_patterns = []
    if patterns is not None:
        if not patterns:
            return collections, list(iterable)
        for pattern in patterns:
            if isinstance(pattern, basestring):
                compiled_patterns.append(re.compile(pattern, flags=flags))
            else:
                compiled_patterns.append(pattern)
    else:
        compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))

    # Group items by their common head, tail and padding.
    for item in iterable:
        matched = False
        for pattern in compiled_patterns:
            for match in pattern.finditer(item):
                index = match.group('index')
                head = item[:match.start('index')]
                tail = item[match.end('index'):]
                if not case_sensitive:
                    head = head.lower()
                    tail = tail.lower()
                padding = match.group('padding')
                if padding:
                    padding = len(index)
                else:
                    padding = 0
                key = (head, tail, padding)
                collection_map[key].add(int(index))
                matched = True
        if not matched:
            remainder.append(item)

    # Form collections.
    merge_candidates = []
    for (head, tail, padding), indexes in collection_map.items():
        collection = Collection(head, tail, padding, indexes)
        collections.append(collection)
        if collection.padding == 0:
            merge_candidates.append(collection)

    # Merge unpadded collections into padded ones where possible.
    fully_merged = []
    for collection in collections:
        if collection.padding == 0:
            continue
        for candidate in merge_candidates:
            if (candidate.head == collection.head and
                    candidate.tail == collection.tail):
                merged_index_count = 0
                for index in candidate.indexes:
                    if len(str(abs(index))) == collection.padding:
                        collection.indexes.add(index)
                        merged_index_count += 1
                if merged_index_count == len(candidate.indexes):
                    fully_merged.append(candidate)

    collections = [collection for collection in collections
                   if collection not in fully_merged]

    # Filter out collections with too few items.
    filtered = []
    remainder_candidates = []
    for collection in collections:
        if len(collection.indexes) >= minimum_items:
            filtered.append(collection)
        else:
            for member in collection:
                remainder_candidates.append(member)

    for candidate in remainder_candidates:
        if candidate in remainder:
            continue
        has_membership = False
        for collection in filtered:
            if candidate in collection:
                has_membership = True
                break
        if not has_membership:
            remainder.append(candidate)

    # Optionally assume padding for ambiguous collections.
    if assume_padded_when_ambiguous:
        for collection in filtered:
            if (not collection.padding and collection.indexes):
                indexes = list(collection.indexes)
                first_index_width = len(str(indexes[0]))
                last_index_width = len(str(indexes[-1]))
                if first_index_width == last_index_width:
                    collection.padding = first_index_width

    return filtered, remainder
Assemble items in *iterable* into discrete collections.

*patterns* may be specified as a list of regular expressions to limit the returned collection possibilities. Use this when interested in collections that only match specific patterns. Each pattern must contain the expression from :py:data:`DIGITS_PATTERN` exactly once. A selection of common expressions are available in :py:data:`PATTERNS`.

.. note:: If a pattern is supplied as a string it will be automatically compiled to a :py:class:`re.RegexObject` instance for convenience.

When *patterns* is not specified, collections are formed by examining all possible groupings of the items in *iterable* based around common numerical components.

*minimum_items* dictates the minimum number of items a collection must have in order to be included in the result. The default is 2, filtering out single item collections.

If *case_sensitive* is False, then items will be treated as part of the same collection when they only differ in casing. To avoid ambiguity, the resulting collection will always be lowercase. For example, "item.0001.dpx" and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".

.. note:: Any compiled *patterns* will also respect the set case sensitivity.

For certain collections it may be ambiguous whether they are padded or not. For example, 1000-1010 can be considered either an unpadded collection or a four padded collection. By default, Clique is conservative and assumes that the collection is unpadded. To change this behaviour, set *assume_padded_when_ambiguous* to True and any ambiguous collection will have a relevant padding set.

.. note:: *assume_padded_when_ambiguous* has no effect on collections that are unambiguous. For example, 1-100 will always be considered unpadded regardless of the *assume_padded_when_ambiguous* setting.

Return tuple of two lists (collections, remainder) where 'collections' is a list of assembled :py:class:`~clique.collection.Collection` instances and 'remainder' is a list of items that did not belong to any collection.
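A short usage sketch with clique's public assemble() API (file names illustrative):

import clique

collections, remainder = clique.assemble(
    ['file.0001.jpg', 'file.0002.jpg', 'file.0003.jpg', 'notes.txt'])
for collection in collections:
    print(repr(collection))  # e.g. <Collection "file.%04d.jpg [1-3]">
print(remainder)             # ['notes.txt']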
def find_lt(array, x):
    i = bisect.bisect_left(array, x)
    if i:
        return array[i - 1]
    raise ValueError
Find rightmost value less than x.

:type array: list
:param array: an iterable object that supports indexing
:param x: a comparable value

Example::

    >>> find_lt([0, 1, 2, 3], 2.5)
    2

Finds the largest value that is less than x.
def serial_udb_extra_f5_send(self, sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD,
                             sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST, force_mavlink1=False):
    return self.send(
        self.serial_udb_extra_f5_encode(sue_YAWKP_AILERON, sue_YAWKD_AILERON, sue_ROLLKP, sue_ROLLKD,
                                        sue_YAW_STABILIZATION_AILERON, sue_AILERON_BOOST),
        force_mavlink1=force_mavlink1)
Backwards compatible version of SERIAL_UDB_EXTRA F5: format

sue_YAWKP_AILERON             : Serial UDB YAWKP_AILERON Gain for Proportional control of navigation (float)
sue_YAWKD_AILERON             : Serial UDB YAWKD_AILERON Gain for Rate control of navigation (float)
sue_ROLLKP                    : Serial UDB Extra ROLLKP Gain for Proportional control of roll stabilization (float)
sue_ROLLKD                    : Serial UDB Extra ROLLKD Gain for Rate control of roll stabilization (float)
sue_YAW_STABILIZATION_AILERON : YAW_STABILIZATION_AILERON Proportional control (float)
sue_AILERON_BOOST             : Gain For Boosting Manual Aileron control When Plane Stabilized (float)
def search(self, query, page=0, order=7, category=0, multipage=False):
    search = Search(self.base_url, query, page, order, category)
    if multipage:
        search.multipage()
    return search
Searches TPB for query and returns a list of paginated Torrents capable of changing query, categories and orders.
def search(self, category, term='', index=0, count=100):
    # NOTE: the SOAP argument-name literals were stripped in the source;
    # 'search', 'id', 'term', 'index' and 'count' are plausible
    # reconstructions.
    search_category = self._get_search_prefix_map().get(category, None)
    if search_category is None:
        raise MusicServiceException(
            "%s does not support the '%s' search category" % (
                self.service_name, category))
    response = self.soap_client.call(
        'search',
        [('id', search_category), ('term', term),
         ('index', index), ('count', count)])
    return parse_response(self, response, category)
Search for an item in a category.

Args:
    category (str): The search category to use. Standard Sonos search categories are 'artists', 'albums', 'tracks', 'playlists', 'genres', 'stations', 'tags'. Not all are available for each music service. Call available_search_categories for a list for this service.
    term (str): The term to search for.
    index (int): The starting index. Default 0.
    count (int): The maximum number of items to return. Default 100.

Returns:
    ~collections.OrderedDict: The search results, or `None`.

See also:
    The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
def image_file(path=None, zscript='self[1:]', xscript='[0,1]', yscript='d[0]',
               g=None, **kwargs):
    # The default script strings above are reconstructed from the
    # docstring; the stripped 'delimiter' key is reconstructed from context.
    if 'delimiter' in kwargs:
        delimiter = kwargs.pop('delimiter')
    else:
        delimiter = None
    d = _data.load(paths=path, delimiter=delimiter)
    if d is None or len(d) == 0:
        return
    default_kwargs = dict(xlabel=str(xscript), ylabel=str(yscript),
                          title=d.path, clabel=str(zscript))
    default_kwargs.update(kwargs)
    X = d(xscript, g)
    Y = d(yscript, g)
    Z = _n.array(d(zscript, g))
    image_data(Z, X, Y, **default_kwargs)
Loads a data file and plots it with color. Data file must have columns of the same length!

Parameters
----------
path=None
    Path to data file.
zscript='self[1:]'
    Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
    Determine the x and y arrays used for setting the axes bounds
g=None
    Optional dictionary of globals for the scripts

See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
def trylock(self):
    if self.locked:
        return True
    if self.lockroutine:
        return False
    waiter = self.scheduler.send(LockEvent(self.context, self.key, self))
    if waiter:
        return False
    else:
        self.locked = True
        return True
Try to acquire lock and return True; if cannot acquire the lock at this moment, return False.
def newDocProp(self, name, value):
    ret = libxml2mod.xmlNewDocProp(self._o, name, value)
    if ret is None:
        raise treeError('xmlNewDocProp() failed')
    __tmp = xmlAttr(_obj=ret)
    return __tmp
Create a new property carried by a document.
def execute_command(self, verb, verb_arguments):
    request = self._build_request(verb, verb_arguments)
    return self._execute(request)
Executes command (ex. add) via a dedicated http object.

Async APIs may take minutes to complete. Therefore, callers are encouraged to leverage concurrent.futures (or similar) to place long running commands on separate threads.

Args:
    verb (str): Method to execute on the component (ex. get, list).
    verb_arguments (dict): key-value pairs to be passed to _build_request.

Returns:
    dict: An async operation Service Response.
def load_rocstories_dataset(dataset_path):
    # NOTE: the encoding and join literals were stripped in the source;
    # 'utf_8' and a single-space join are plausible reconstructions.
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the header row
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
Output a list of tuples(story, 1st continuation, 2nd continuation, label)
def as_table(self, name=None):
    if name is None:
        name = self._id
    return alias(self.subquery(), name=name)
Return an alias to a table
def load_directory(self, directory, ext=None):
    self._say("Loading from directory: " + directory)

    if ext is None:
        # Use the default extensions, per the docs.
        ext = ['.rive', '.rs']
    elif type(ext) == str:
        ext = [ext]

    if not os.path.isdir(directory):
        self._warn("Error: " + directory + " is not a directory.")
        return

    for root, subdirs, files in os.walk(directory):
        for file in files:
            for extension in ext:
                if file.lower().endswith(extension):
                    self.load_file(os.path.join(root, file))
                    break
Load RiveScript documents from a directory.

:param str directory: The directory of RiveScript documents to load replies from.
:param []str ext: List of file extensions to consider as RiveScript documents. The default is ``[".rive", ".rs"]``.
def reply_regexp(self, user, regexp):
    # NOTE: the regexp-fragment literals were stripped in the source; the
    # replacements below follow the upstream rivescript-python sources and
    # should be treated as reconstructions.
    if regexp in self.master._regexc["trigger"]:
        # Already compiled this trigger.
        return self.master._regexc["trigger"][regexp]

    # Handle the zero-width star before the other wildcards.
    regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)

    # Expand @arrays.
    arrays = re.findall(RE.array, regexp)
    for array in arrays:
        rep = ''
        if array in self.master._array:
            rep = r'(?:' + '|'.join(self.expand_array(array)) + ')'
        regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)

    regexp = regexp.replace('*', '(.+?)')
    regexp = regexp.replace('#', r'(\d+?)')
    regexp = regexp.replace('_', r'(\w+?)')
    regexp = re.sub(RE.weight, '', regexp)
    regexp = regexp.replace('<zerowidthstar>', r'(.*?)')

    # Expand [optionals].
    optionals = re.findall(RE.optionals, regexp)
    for match in optionals:
        parts = match.split("|")
        new = []
        for p in parts:
            p = r'(?:\s|\b)+{}(?:\s|\b)+'.format(p.strip())
            new.append(p)
        pipes = '|'.join(new)
        pipes = pipes.replace(r'(.+?)', r'(?:.+?)')
        pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)')
        pipes = pipes.replace(r'(\w+?)', r'(?:\w+?)')
        regexp = re.sub(r'\s*\[' + re.escape(match) + r'\]\s*',
                        '(?:' + pipes + r'|(?:\s|\b)+)', regexp)

    if self.utf8:
        return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE)
    else:
        return re.compile(r'^' + regexp.lower() + r'$')
Prepares a trigger for the regular expression engine.

:param str user: The user ID invoking a reply.
:param str regexp: The original trigger text to be turned into a regexp.

:return regexp: The final regexp object.
def exception_to_signal(sig: Union[SignalException, signal.Signals]):
    if isinstance(sig, SignalException):
        signum = sig.signum
    else:
        signum = sig.value
    signal.signal(signum, signal.SIG_DFL)
Rollback any changes done by :py:func:`signal_to_exception`.
def is_instance_running(self, instance_id):
    instance = self._load_instance(instance_id)
    if instance.update() == "running":
        # If the instance is up and running, ensure it has an IP address.
        if not instance.ip_address and self.request_floating_ip:
            log.debug("Public ip address has to be assigned through "
                      "elasticluster.")
            self._allocate_address(instance)
            instance.update()
        return True
    else:
        return False
Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise
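A minimal polling sketch built on this method; `provider` is a hypothetical object exposing is_instance_running:

import time

def wait_until_running(provider, instance_id, interval=5, attempts=60):
    # Poll until the instance reports "running" or we give up.
    for _ in range(attempts):
        if provider.is_instance_running(instance_id):
            return True
        time.sleep(interval)
    return False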
def organization_subscription_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription"
    api_path = "/api/v2/organization_subscriptions/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, method="DELETE", **kwargs)
https://developer.zendesk.com/rest_api/docs/core/organization_subscriptions#delete-organization-subscription
def inference(self, state_arr, limit=1000): self.__inferencing_flag = True agent_x, agent_y = np.where(state_arr[0] == 1) agent_x, agent_y = agent_x[0], agent_y[0] self.__create_enemy(self.__map_arr) result_list = [(agent_x, agent_y, 0.0)] result_val_list = [agent_x, agent_y] for e in range(self.__enemy_num): result_val_list.append(self.__enemy_pos_list[e][0]) result_val_list.append(self.__enemy_pos_list[e][1]) result_val_list.append(0.0) result_list.append(tuple(result_val_list)) self.t = 0 while self.t < limit: next_action_arr = self.extract_possible_actions(state_arr) next_q_arr = self.function_approximator.inference_q(next_action_arr) action_arr, q = self.select_action(next_action_arr, next_q_arr) self.__move_enemy(action_arr) agent_x, agent_y = np.where(action_arr[0] == 1) agent_x, agent_y = agent_x[0], agent_y[0] result_val_list = [agent_x, agent_y] for e in range(self.__enemy_num): result_val_list.append(self.__enemy_pos_list[e][0]) result_val_list.append(self.__enemy_pos_list[e][1]) try: result_val_list.append(q[0]) except IndexError: result_val_list.append(q) result_list.append(tuple(result_val_list)) state_arr = self.update_state(state_arr, action_arr) self.t += 1 end_flag = self.check_the_end_flag(state_arr) if end_flag is True: break return result_list
Inference. Args: state_arr: `np.ndarray` of state. limit: The maximum number of inference steps. Returns: list of tuples (agent and enemy positions, with Q-values) tracing an optimal route.
def from_mask_and_sub_grid_size(cls, mask, sub_grid_size=1): sub_grid_masked = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size( mask=mask, pixel_scales=mask.pixel_scales, sub_grid_size=sub_grid_size) return SubGrid(sub_grid_masked, mask, sub_grid_size)
Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The centers of every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates. Parameters ---------- mask : Mask The mask whose masked pixels are used to setup the sub-pixel grid_stack. sub_grid_size : int The size (sub_grid_size x sub_grid_size) of each unmasked pixel's sub-grid.
def generateToken(bits=32): if bits == 64: hasher = hashlib.sha256 elif bits == 32: hasher = hashlib.md5 else: raise StandardError() return hasher(nstr(random.getrandbits(256))).hexdigest()
Generates a random token based on the given parameters. :return <str>
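A short usage sketch; the digest length follows from the hash chosen above (md5 for 32, sha256 for 64):

token32 = generateToken()    # md5 hex digest, 32 characters
token64 = generateToken(64)  # sha256 hex digest, 64 characters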
def MempoolCheck(self): txs = [] values = self.MemPool.values() for tx in values: txs.append(tx) for tx in txs: res = self.RemoveTransaction(tx) if res: logger.debug("found tx 0x%s on the blockchain ...removed from mempool" % tx.Hash)
Checks the Mempool and removes any tx found on the Blockchain Implemented to resolve https://github.com/CityOfZion/neo-python/issues/703
def format_h2(s, format="text", indents=0):
    _CHAR = "-"
    if format.startswith("text"):
        return format_underline(s, _CHAR, indents)
    elif format.startswith("markdown"):
        return ["## {}".format(s)]
    elif format.startswith("rest"):
        return format_underline(s, _CHAR, 0)
Encloses string in format text. Args, Returns: see format_h1() >>> print("\\n".join(format_h2("Header 2", indents=2))) Header 2 -------- >>> print("\\n".join(format_h2("Header 2", "markdown", 2))) ## Header 2
def to_dict(self):
    # Keys reconstructed from the attribute names; the originals were
    # stripped in extraction.
    material = {'Data1': self.Data1,
                'Data2': self.Data2,
                'Data3': self.Data3,
                'Data4': list(self.Data4)}
    return {'material': material,
            'length': self.length,
            'instanceHigh': self.instanceHigh,
            'instanceMid': self.instanceMid,
            'instanceLow': self.instanceLow,
            'SMPTELabel': list(self.SMPTELabel)}
MobID representation as dict
def from_voxels(voxels):
    # The original iterated over range(len(dimensions)) (a TypeError, since
    # dimensions is an int) and never initialized `size`; repaired below.
    dimensions = len(voxels[0])
    # Size each axis to the largest index seen, plus one so the maximal
    # voxel itself fits inside the array.
    size = [max(v[d] for v in voxels) + 1 for d in range(dimensions)]
    result = numpy.zeros(size)
    for v in voxels:
        result[tuple(v)] = 1
    return result
Converts a voxel list to an ndarray. Arguments: voxels (tuple[]): A list of coordinates indicating coordinates of populated voxels in an ndarray. Returns: numpy.ndarray The result of the transformation.
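A usage sketch against the repaired version above, which sizes each axis to the largest index plus one:

import numpy
arr = from_voxels([(0, 0, 0), (1, 2, 3)])
assert arr.shape == (2, 3, 4)  # bounding box of the populated voxels
assert arr[1, 2, 3] == 1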
def enable_mp_crash_reporting(): global mp_crash_reporting_enabled multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess mp_crash_reporting_enabled = True
Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess. Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead. This function must be called before any imports of multiprocessing in order for the monkey-patching to work.
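A usage sketch; `worker_module` is hypothetical. The call must precede any import that binds multiprocessing.Process:

enable_mp_crash_reporting()
import worker_module  # hypothetical module that spawns Process objects
worker_module.start()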
def _init_converters(types_map): global _converters _converters = {} for i in _DEFAULT_CONVERTERS: const_val = types_map[i] _converters[const_val] = _DEFAULT_CONVERTERS[i]
Prepares the converters for conversion of java types to python objects. types_map: Mapping of java.sql.Types field name to java.sql.Types field constant value
def export(self, name, columns, points): if name == self.plugins_to_export()[0] and self.buffer != {}: logger.debug("Export stats ({}) to RESTful endpoint ({})".format(listkeys(self.buffer), self.client)) post(self.client, json=self.buffer, allow_redirects=True) self.buffer = {} self.buffer[name] = dict(zip(columns, points))
Export the stats to the configured RESTful endpoint.
def x_values_ref(self, series): top_row = self.series_table_row_offset(series) + 2 bottom_row = top_row + len(series) - 1 return "Sheet1!$A$%d:$A$%d" % (top_row, bottom_row)
The Excel worksheet reference to the X values for this chart (not including the column label).
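A worked example of the reference arithmetic, assuming the series starts at worksheet row offset 0 and holds 3 points:

top_row = 0 + 2                # offset 0, plus 2 to skip the column label row
bottom_row = top_row + 3 - 1   # 3 data points
print("Sheet1!$A$%d:$A$%d" % (top_row, bottom_row))  # Sheet1!$A$2:$A$4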
async def run_async(self): try: await self.run_loop_async() except Exception as err: _logger.error("Run loop failed %r", err) try: _logger.info("Shutting down all pumps %r", self.host.guid) await self.remove_all_pumps_async("Shutdown") except Exception as err: raise Exception("Failed to remove all pumps {!r}".format(err))
Starts the run loop and manages exceptions and cleanup.
def animation_dialog(images, delay_s=1., loop=True, **kwargs):
    def _as_pixbuf(image):
        if isinstance(image, types.StringTypes):
            return gtk.gdk.pixbuf_new_from_file(image)
        else:
            return image

    pixbufs = map(_as_pixbuf, images)

    gtk.gdk.threads_init()
    dialog = gtk.MessageDialog(**kwargs)

    image = gtk.Image()
    content_area = dialog.get_content_area()
    content_area.pack_start(image)
    content_area.show_all()

    stop_animation = threading.Event()

    def _stop_animation(*args):
        stop_animation.set()

    def _animate(dialog):
        def __animate():
            if loop:
                frames = it.cycle(pixbufs)
            else:
                frames = pixbufs
            for pixbuf_i in frames:
                gobject.idle_add(image.set_from_pixbuf, pixbuf_i)
                if stop_animation.wait(delay_s):
                    break

        thread = threading.Thread(target=__animate)
        thread.daemon = True
        thread.start()

    # Signal names below were stripped in extraction; 'destroy' and 'show'
    # are the natural reconstruction: stop on destroy, animate on show.
    dialog.connect('destroy', _stop_animation)
    dialog.connect('show', _animate)
    return dialog
.. versionadded:: v0.19 Parameters ---------- images : list Filepaths to images or :class:`gtk.Pixbuf` instances. delay_s : float, optional Number of seconds to display each frame. Default: ``1.0``. loop : bool, optional If ``True``, restart animation after last image has been displayed. Default: ``True``. Returns ------- gtk.MessageDialog Message dialog with animation displayed in `gtk.Image` widget when dialog is run.
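A usage sketch with PyGTK; the frame filenames are hypothetical, and any gtk.MessageDialog keyword arguments pass straight through:

dialog = animation_dialog(["frame1.png", "frame2.png"], delay_s=0.5,
                          buttons=gtk.BUTTONS_CLOSE)
dialog.run()     # animation starts when the dialog is shown
dialog.destroy()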
def parent_of(self, name): if not self._in_tag(name): return node = self.cur_node while node.tag != name: node = node.getparent() self.cur_node = node.getparent()
Go to the parent of the node with the given name, and set it as cur_node. Useful for creating new paragraphs.