Columns: code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k)
def sort_by(items, attr):
    def key_func(item):
        # Prefer attribute access; fall back to key lookup for mappings.
        try:
            return getattr(item, attr)
        except AttributeError:
            try:
                return item[attr]
            except TypeError:
                # Not subscriptable either: re-run getattr so the original
                # AttributeError propagates with a clear message.
                getattr(item, attr)
    return sorted(items, key=key_func)
General sort filter - sorts by either attribute or key.
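A minimal, self-contained usage sketch of such a filter (the sort_by copy and the Point type below are illustrative, not the library's own code):

from collections import namedtuple

def sort_by(items, attr):
    # Mirrors the filter above: attribute access first, key lookup second.
    def key_func(item):
        try:
            return getattr(item, attr)
        except AttributeError:
            return item[attr]
    return sorted(items, key=key_func)

Point = namedtuple("Point", "x y")               # hypothetical example type
print(sort_by([Point(3, 0), Point(1, 2)], "x"))  # sorts by attribute -> [Point(x=1, y=2), Point(x=3, y=0)]
print(sort_by([{"x": 3}, {"x": 1}], "x"))        # sorts by key       -> [{'x': 1}, {'x': 3}]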
def _make_temp_filename(prefix): temp_location = _get_temp_file_location() temp_file_name = .join([temp_location, str(prefix)+str(_uuid.uuid4())]) return temp_file_name
Generate a temporary file name that will not live beyond the lifetime of unity_server. The caller is expected to clean up the temp file as soon as it is no longer needed, but temp files created using this method will also be cleaned up when unity_server restarts.
def get_language_tabs(self):
    current_language = self.get_current_language()
    if self.object:
        available_languages = list(self.object.get_available_languages())
    else:
        available_languages = []
    return get_language_tabs(self.request, current_language, available_languages)
Determine the language tabs to show.
def create(self, properties): result = self.session.post(self.console.uri + , body=properties) props = copy.deepcopy(properties) props.update(result) name = props.get(self._name_prop, None) uri = props[self._uri_prop] user_role = UserRole(self, uri, name, props) self._name_uri_cache.update(name, uri) return user_role
Create a new (user-defined) User Role in this HMC. Authorization requirements: * Task permission to the "Manage User Roles" task. Parameters: properties (dict): Initial property values. Allowable properties are defined in section 'Request body contents' in section 'Create User Role' in the :term:`HMC API` book. Returns: UserRole: The resource object for the new User Role. The object will have its 'object-uri' property set as returned by the HMC, and will also have the input properties set. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def _maybe_download_corpus(tmp_dir, vocab_type): if vocab_type == text_problems.VocabType.CHARACTER: dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext" "/wikitext-103-raw-v1.zip") dir_name = "wikitext-103-raw" else: dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext" "/wikitext-103-v1.zip") dir_name = "wikitext-103" fname = os.path.basename(dataset_url) compressed_filepath = generator_utils.maybe_download(tmp_dir, fname, dataset_url) zip_ref = zipfile.ZipFile(compressed_filepath, "r") zip_ref.extractall(tmp_dir) zip_ref.close() files = os.path.join(tmp_dir, dir_name, "*") train_file, valid_file, test_file = None, None, None for f in tf.gfile.Glob(files): fname = os.path.basename(f) if "train" in fname: train_file = f elif "valid" in fname: valid_file = f elif "test" in fname: test_file = f assert train_file, "Training file not found" assert valid_file, "Validation file not found" assert test_file, "Testing file not found" return train_file, valid_file, test_file
Download and unpack the corpus. Args: tmp_dir: directory containing the dataset. vocab_type: which vocabulary is being used. Returns: Paths to the train, validation, and test files.
def create(self, name): with contextlib.closing(self.database.cursor()) as cursor: cursor.execute(, (name,)) return cursor.lastrowid
Create a user with the provided name and return its id.
def get_id_count(search_term): params = {: search_term, : , : } tree = send_request(pubmed_search, params) if tree is None: return None else: count = tree.getchildren()[0].text return int(count)
Get the number of citations in Pubmed for a search query. Parameters ---------- search_term : str A term for which the PubMed search should be performed. Returns ------- int or None The number of citations for the query, or None if the query fails.
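A hedged, self-contained sketch of the same idea against the public NCBI E-utilities esearch endpoint; the stripped-out parameter literals and the pubmed_search URL above are assumed to correspond to the values used here:

import requests
import xml.etree.ElementTree as ET

def get_id_count(search_term):
    # Ask esearch for only the citation count of the query.
    url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"
    params = {"db": "pubmed", "term": search_term, "rettype": "count"}
    resp = requests.get(url, params=params)
    if resp.status_code != 200:
        return None
    root = ET.fromstring(resp.content)
    count = root.find("Count")          # <Count>N</Count> inside <eSearchResult>
    return int(count.text) if count is not None else None

print(get_id_count("diabetes"))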
def put_manifest(self, manifest): logger.debug("Putting manifest") text = json.dumps(manifest, indent=2, sort_keys=True) key = self.get_manifest_key() self.put_text(key, text)
Store the manifest.
def color(self): return self.tty_stream if self.options.color is None \ else self.options.color
Whether or not color should be output
def get_filled_structure(self, subgroup=None): groupgroup result = [] object_content = model_to_dict(self.object) if not in dir(self): self.exclude_fields = [] self.exclude_fields.append("id") for field in (self.exclude_fields): if field in object_content.keys(): object_content.pop(field) verbose_names = {} for field in object_content.keys(): verbose_names[field] = self.model._meta.get_field(field).verbose_name gr_object_content = [] if subgroup: group_array = subgroup else: group_array = self.groups for group in group_array: item = {} item["name"] = smart_text(group[0]) item["col"] = group[1] item_elements = group[2:] sublist = [] idx = 0 for item_element in item_elements: if (idx > 1) and (type(item_element) == tuple): sublist.append(self.get_filled_structure([subgroup])) else: filter_field = None if type(item_element) == list: field = item_element[0] if len(item_element) >= 3 and item_element[2]: verbose_names[field] = _(item_element[2]) if len(item_element) >= 9: filter_field = item_element[8] else: field = item_element if field not in verbose_names: if field.startswith() and field.endswith(): label_field = remove_getdisplay(field) if self.model: try: verbose_names[field] = self.model._meta.get_field(label_field).verbose_name except FieldDoesNotExist: verbose_names[field] = _(label_field) else: verbose_names[field] = _(label_field) else: label_field = field verbose_names[field] = _(label_field) args = {} value = None for field_split in field.split(): if value is None: try: verbose_names[field] = self.object._meta.get_field(field_split).verbose_name except AttributeError: pass except FieldDoesNotExist: pass value = getattr(self.object, field_split, None) else: try: verbose_names[field] = value._meta.get_field(field_split).verbose_name except AttributeError: pass except FieldDoesNotExist: pass value = getattr(value, field_split, None) if callable(value): related = (getattr(value, , None) is not None) if related: value = ", ".join([str(x) for x in value.all()]) else: if in value.__code__.co_varnames: args[] = self.request value = value(**args) sublist.append({ "name": _(verbose_names[field]), "value": value, "filter": filter_field, }) gr_object_content.append(field) idx += 1 item["value"] = sublist result.append(item) for field in object_content.keys(): item = {} if field not in gr_object_content: item["name"] = _(verbose_names[field]) item["value"] = getattr(self.object, field) result.append(item) return result
Method in charge of filling a structure containing the object's field values, taking into account the 'group' attribute from the corresponding form object, which is necessary to fill the details form as it is configured in the 'group' attribute.
def wait(self):
    logging.info("waiting for {} jobs to complete".format(len(self.submissions)))
    while not self.shutdown:
        time.sleep(1)
Waits for all submitted jobs to complete.
def register_views(app_name, view_filename, urlpatterns=None): app_module = __import__(app_name) view_module = getattr(app_module, view_filename) views = dir(view_module) for view_name in views: if view_name.endswith(): view = getattr(view_module, view_name) if isinstance(view, object): if urlpatterns: urlpatterns += patterns(, url(r % view_name, view.as_view(), name=view_name), ) else: urlpatterns = patterns(, url(r % view_name, view.as_view(), name=view_name), ) else: pass return urlpatterns
app_name: the app name. view_filename: the file where the views live. urlpatterns: the urlpatterns already present in urls. return: urlpatterns. Only class-based views whose names end with 'View' are imported.
def name(self) -> str: h = self._atomic_partition(self._first_arg_sep)[0] if len(h) == len(self.string): return h[2:-2] return h[2:]
Return template's name (includes whitespace).
def x_upper_limit(self, limit=None): if limit is None: if self._x_upper_limit is None: if self.smallest_x() == self.largest_x(): if int(self.largest_x()) == float(self.largest_x()): return self.largest_x() + 1 else: return math.ceil(self.largest_x()) else: return self.largest_x() else: return self._x_upper_limit else: if not is_numeric(limit): raise TypeError( "upper x limit must be numeric, not " % str(limit) ) if limit <= self.smallest_x(): raise ValueError( "upper x limit must be greater than lower limit (%s), not %s" % ( str(self.smallest_x()), str(limit) ) ) self._x_upper_limit = limit
Returns or sets (if a value is provided) the value at which the x-axis should end. By default this is the highest x value in the associated series. :param limit: If given, the chart's x_upper_limit will be set to this. :raises ValueError: if you try to make the upper limit smaller than the lower limit.
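A minimal sketch of the combined getter/setter pattern described here, using a hypothetical MiniChart class rather than the library's own implementation:

import math

class MiniChart:
    def __init__(self, xs):
        self._xs = xs
        self._x_upper_limit = None

    def x_upper_limit(self, limit=None):
        if limit is None:                          # getter
            if self._x_upper_limit is not None:
                return self._x_upper_limit
            return math.ceil(max(self._xs))
        if limit <= min(self._xs):                 # setter with validation
            raise ValueError("upper x limit must be greater than lower limit")
        self._x_upper_limit = limit

chart = MiniChart([1.2, 3.7])
print(chart.x_upper_limit())   # 4, derived from the data
chart.x_upper_limit(10)
print(chart.x_upper_limit())   # 10, the explicitly set limit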
def beep(self, duration, frequency): cmd = , [Float(min=0.1, max=5.0), Integer(min=500, max=5000)] self._write(cmd, duration, frequency)
Generates a beep. :param duration: The duration in seconds, in the range 0.1 to 5. :param frequency: The frequency in Hz, in the range 500 to 5000.
def power_on_vm(name, datacenter=None, service_instance=None): s name. name Name of the virtual machine datacenter Datacenter of the virtual machine service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt vsphere.power_on_vm name=my_vm Powering on virtual machine %snamesummary.runtime.powerStatesummary.runtime.powerStatepoweredOncommentVirtual machine is already powered onchangespower_onobjectoncommentVirtual machine power on action succeededchangespower_on': True}} return result
Powers on a virtual machine specified by its name. name Name of the virtual machine datacenter Datacenter of the virtual machine service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.power_on_vm name=my_vm
def _grabContentFromUrl(self, url): response = {} try: socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, self.info["host"], int(self.info["port"]), True) s = socks.socksocket() domain = self.getDomainFromUrl(url) s.connect((domain, 80)) message = + url + s.sendall(message) data = "" while True: reply = s.recv(4096) if not reply: break else: data += reply response = self._createDataStructure(data) except socks.ProxyConnectionError, sPCE: errMsg = "ERROR socks.ProxyConnectionError. Something seems to be wrong with the Tor Bundler." raise Exception( errMsg + " " + str(sPCE)) return response
Function that abstracts capturing a URL. This method rewrites the one from Wrapper. :param url: The URL to be processed. :return: The response in JSON format.
def returnSupportedOptions(self, options): accepted_options = {} for option in options: if option == : log.debug("Returning these accepted options: %s", accepted_options) return accepted_options
This method takes a requested options list from a client, and returns the ones that are supported.
def geoocoords_to_tile_coords(cls, lon, lat, zoom):
    n = 2.0 ** zoom
    x = int((lon + 180.0) / 360.0 * n)
    y = int((1.0 - math.log(math.tan(math.radians(lat)) +
                            (1 / math.cos(math.radians(lat)))) / math.pi) / 2.0 * n)
    return x, y
Calculates the tile numbers corresponding to the specified geocoordinates at the specified zoom level Coordinates shall be provided in degrees and using the Mercator Projection (http://en.wikipedia.org/wiki/Mercator_projection) :param lon: longitude :type lon: int or float :param lat: latitude :type lat: int or float :param zoom: zoom level :type zoom: int :return: a tuple (x, y) containing the tile-coordinates
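A self-contained worked example of the same slippy-map tile formula (the coordinates below are just an illustration):

import math

def tile_coords(lon, lat, zoom):
    n = 2.0 ** zoom
    x = int((lon + 180.0) / 360.0 * n)
    lat_rad = math.radians(lat)
    y = int((1.0 - math.log(math.tan(lat_rad) + 1 / math.cos(lat_rad)) / math.pi) / 2.0 * n)
    return x, y

print(tile_coords(13.4, 52.5, 10))   # roughly central Berlin at zoom 10 -> (550, 335)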
def _parse_oracle(lines): config = {} for line in get_active_lines(lines): if in line: line = cleanup.sub(, line) if in line: (key, value) = line.split(, 1) key = key.strip(whitespace + ,"\).lower() for s in value.split()] else: value = value.strip(whitespace + ').lower() config[key] = value return config
Performs the actual file parsing, returning a dict of the config values in a given Oracle DB config file. Despite their differences, the two filetypes are similar enough to allow identical parsing.
def _flow_check_handler_internal(self): integ_flow = self.integ_br_obj.dump_flows_for( in_port=self.int_peer_port_num) ext_flow = self.ext_br_obj.dump_flows_for( in_port=self.phy_peer_port_num) for net_uuid, lvm in six.iteritems(self.local_vlan_map): vdp_vlan = lvm.any_consistent_vlan() flow_required = False if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)): return if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid): LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan " "%(lvid)s not present on Integ bridge", {: vdp_vlan, : lvm.lvid}) flow_required = True if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan): LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan " "%(lvid)s not present on External bridge", {: vdp_vlan, : lvm.lvid}) flow_required = True if flow_required: LOG.info("Programming flows for lvid %(lvid)s vdp vlan" " %(vdp)s", {: lvm.lvid, : vdp_vlan}) self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan)
Periodic handler to check if installed flows are present. This handler runs periodically to check if installed flows are present. This function cannot detect and delete the stale flows, if present. It requires more complexity to delete stale flows. Generally, stale flows are not present. So, that logic is not put here.
def com_google_fonts_check_metadata_italic_style(ttFont, font_metadata): from fontbakery.utils import get_name_entry_strings from fontbakery.constants import MacStyle if font_metadata.style != "italic": yield SKIP, "This check only applies to italic fonts." else: font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME) if len(font_fullname) == 0: yield SKIP, "Font lacks fullname entries in name table." else: font_fullname = font_fullname[0] if not bool(ttFont["head"].macStyle & MacStyle.ITALIC): yield FAIL, Message("bad-macstyle", "METADATA.pb style has been set to italic" " but font macStyle is improperly set.") elif not font_fullname.split("-")[-1].endswith("Italic"): yield FAIL, Message("bad-fullfont-name", ("Font macStyle Italic bit is set" " but nameID {} (\"{}\") is not ended with" " \"Italic\"").format(NameID.FULL_FONT_NAME, font_fullname)) else: yield PASS, ("OK: METADATA.pb font.style \"italic\"" " matches font internals.")
METADATA.pb font.style "italic" matches font internals?
def cli(obj): client = obj[] timezone = obj[] screen = Screen(client, timezone) screen.run()
Display alerts like unix "top" command.
def gateway(self): url = response = requests.get(url, headers=self._headers()) response.raise_for_status() return response.json()
Return the detail of the gateway.
def em_schedule(**kwargs): mdrunner = kwargs.pop(, None) integrators = kwargs.pop(, [, ]) kwargs.pop(, None) nsteps = kwargs.pop(, [100, 1000]) outputs = [.format(i, integrator) for i,integrator in enumerate(integrators)] outputs[-1] = kwargs.pop(, ) files = {: kwargs.pop(, None)} for i, integrator in enumerate(integrators): struct = files[] logger.info("[em %d] energy minimize with %s for maximum %d steps", i, integrator, nsteps[i]) kwargs.update({:struct, :outputs[i], :integrator, : nsteps[i]}) if not integrator == : kwargs[] = mdrunner else: kwargs[] = None logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot " "do parallel runs.", i) files = energy_minimize(**kwargs) return files
Run multiple energy minimizations one after each other. :Keywords: *integrators* list of integrators (from 'l-bfgs', 'cg', 'steep') [['bfgs', 'steep']] *nsteps* list of maximum number of steps; one for each integrator in the *integrators* list [[100,1000]] *kwargs* mostly passed to :func:`gromacs.setup.energy_minimize` :Returns: dictionary with paths to final structure ('struct') and other files :Example: Conduct three minimizations: 1. low-memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) for 30 steps 2. steepest descent for 200 steps 3. finish with L-BFGS for another 30 steps We also do a multi-processor minimization when possible (i.e. for steep and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see :mod:`gromacs.run` for details):: import gromacs.run gromacs.setup.em_schedule(struct='solvate/ionized.gro', mdrunner=gromacs.run.MDrunnerOpenMP64, integrators=['l-bfgs', 'steep', 'l-bfgs'], nsteps=[50,200, 50]) .. Note:: You might have to prepare the mdp file carefully because at the moment one can only modify the *nsteps* parameter on a per-minimizer basis.
def toupper(self): return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache)
Translate characters from lower to upper case for a particular column. :returns: new H2OFrame with all strings in the current frame converted to the uppercase.
def cache_data(self, request, data, key=): request.session[ % (constants.SESSION_KEY, key)] = data
Cache data in the session store. :param request: :attr:`django.http.HttpRequest` :param data: Arbitrary data to store. :param key: `str` The key under which to store the data.
def mark_offer_as_lose(self, offer_id): return self._create_put_request( resource=OFFERS, billomat_id=offer_id, command=LOSE, )
Mark offer as lose :param offer_id: the offer id :return Response
def _reshape_by_device_single(x, num_devices):
    x_shape = list(x.shape)
    batch_size = x_shape[0]
    batch_size_per_device = batch_size // num_devices
    if batch_size_per_device * num_devices != batch_size:
        logging.fatal(
            "We require that num_devices[%d] divides batch_size[%d] evenly.",
            num_devices, batch_size)
    new_shape_prefix = [num_devices, batch_size_per_device]
    return np.reshape(x, new_shape_prefix + x_shape[1:])
Reshape x into a shape [num_devices, ...].
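A quick numpy check of the reshape described above, assuming a batch size divisible by num_devices:

import numpy as np

x = np.arange(12).reshape(6, 2)     # batch_size=6, feature dimension 2
num_devices = 3
per_device = x.shape[0] // num_devices
y = np.reshape(x, [num_devices, per_device] + list(x.shape[1:]))
print(y.shape)                      # (3, 2, 2): [num_devices, batch_size_per_device, ...]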
def E(poly, dist=None, **kws): if not isinstance(poly, (distributions.Dist, polynomials.Poly)): print(type(poly)) print("Approximating expected value...") out = quadrature.quad(poly, dist, veceval=True, **kws) print("done") return out if isinstance(poly, distributions.Dist): dist, poly = poly, polynomials.variable(len(poly)) if not poly.keys: return numpy.zeros(poly.shape, dtype=int) if isinstance(poly, (list, tuple, numpy.ndarray)): return [E(_, dist, **kws) for _ in poly] if poly.dim < len(dist): poly = polynomials.setdim(poly, len(dist)) shape = poly.shape poly = polynomials.flatten(poly) keys = poly.keys mom = dist.mom(numpy.array(keys).T, **kws) A = poly.A if len(dist) == 1: mom = mom[0] out = numpy.zeros(poly.shape) for i in range(len(keys)): out += A[keys[i]]*mom[i] out = numpy.reshape(out, shape) return out
Expected value operator. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (Poly, Dist): Input to take expected value on. dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.E(dist)) [1. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.E(poly, dist)) [1. 1. 0. 0.]
def ask_pascal_16(self, next_rva_ptr):
    length = self.__get_pascal_16_length()
    if length == (next_rva_ptr - (self.rva_ptr + 2)) / 2:
        self.length = length
        return True
    return False
The next RVA is taken to be the one immediately following this one. Such RVA could indicate the natural end of the string and will be checked with the possible length contained in the first word.
def _handle_result_line(self, sline): as_kw = sline[3] a_result = str(sline[5].split()[0]) self._cur_values[as_kw] = { : , : a_result } return 0
Parses the data line and adds it to the dictionary. :param sline: a split data line to parse :returns: the number of rows to jump to parse the next data line, or the error code -1
async def freedomscores(self, root): elem = root.find() result = OrderedDict() result[] = int(elem.find().text) result[] = int(elem.find().text) result[] = int(elem.find().text) return result
Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as percentages. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys of str and values of int Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``.
def get_events(self, service_location_id, appliance_id, start, end, max_number=None): start = self._to_milliseconds(start) end = self._to_milliseconds(end) url = urljoin(URLS[], service_location_id, "events") headers = {"Authorization": "Bearer {}".format(self.access_token)} params = { "from": start, "to": end, "applianceId": appliance_id, "maxNumber": max_number } r = requests.get(url, headers=headers, params=params) r.raise_for_status() return r.json()
Request events for a given appliance Parameters ---------- service_location_id : int appliance_id : int start : int | dt.datetime | pd.Timestamp end : int | dt.datetime | pd.Timestamp start and end support epoch (in milliseconds), datetime and Pandas Timestamp timezone-naive datetimes are assumed to be in UTC max_number : int, optional The maximum number of events that should be returned by this query Default returns all events in the selected period Returns ------- dict
def scaffold(args): from jcvi.formats.base import FileMerger from jcvi.formats.bed import mates from jcvi.formats.contig import frombed from jcvi.formats.fasta import join from jcvi.utils.iter import grouper p = OptionParser(scaffold.__doc__) p.set_rclip(rclip=1) p.add_option("--conf", help="BAMBUS configuration file [default: %default]") p.add_option("--prefix", default=False, action="store_true", help="Only keep links between IDs with same prefix [default: %default]") opts, args = p.parse_args(args) nargs = len(args) if nargs < 3 or nargs % 2 != 1: sys.exit(not p.print_help()) rclip = opts.rclip ctgfasta = args[0] duos = list(grouper(args[1:], 2)) trios = [] for fastafile, bedfile in duos: prefix = bedfile.rsplit(".", 1)[0] matefile = prefix + ".mates" matebedfile = matefile + ".bed" if need_update(bedfile, [matefile, matebedfile]): matesopt = [bedfile, "--lib", "--nointra", "--rclip={0}".format(rclip), "--cutoff={0}".format(opts.cutoff)] if opts.prefix: matesopt += ["--prefix"] matefile, matebedfile = mates(matesopt) trios.append((fastafile, matebedfile, matefile)) bbfasta, bbbed, bbmate = "bambus.reads.fasta", "bambus.bed", "bambus.mates" for files, outfile in zip(zip(*trios), (bbfasta, bbbed, bbmate)): FileMerger(files, outfile=outfile).merge(checkexists=True) ctgfile = "bambus.contig" idsfile = "bambus.ids" frombedInputs = [bbbed, ctgfasta, bbfasta] if need_update(frombedInputs, ctgfile): frombed(frombedInputs) inputfasta = "bambus.contigs.fasta" singletonfasta = "bambus.singletons.fasta" cmd = "faSomeRecords {0} {1} ".format(ctgfasta, idsfile) sh(cmd + inputfasta) sh(cmd + singletonfasta + " -exclude") prefix = "bambus" cmd = "goBambus -c {0} -m {1} -o {2}".format(ctgfile, bbmate, prefix) if opts.conf: cmd += " -C {0}".format(opts.conf) sh(cmd) cmd = "untangle -e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml".\ format(prefix) sh(cmd) final = "final" cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " \ "-merge -detail -oo -sum -o {1}".format(prefix, final) sh(cmd) oofile = final + ".oo" join([inputfasta, "--oo={0}".format(oofile)])
%prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings.
def gen_xml_doc(self): res = self.make_doc() var_tag = text_tag = keys = res.keys() keys.sort() texts = "" vars = "" for key in keys: value = res[key] vars += var_tag % (value, key) texts += text_tag % (key, value) return CONTENT_TMPL % (vars, texts)
Generate the text tags that should be inserted in the content.xml of a full model
def find_ss_regions(dssp_residues, loop_assignments=(, , , )): loops = loop_assignments previous_ele = None fragment = [] fragments = [] for ele in dssp_residues: if previous_ele is None: fragment.append(ele) elif ele[2] != previous_ele[2]: fragments.append(fragment) fragment = [ele] elif previous_ele[1] in loops: if ele[1] in loops: fragment.append(ele) else: fragments.append(fragment) fragment = [ele] else: if ele[1] == previous_ele[1]: fragment.append(ele) else: fragments.append(fragment) fragment = [ele] previous_ele = ele fragments.append(fragment) return fragments
Separates parsed DSSP data into groups of secondary structure. Notes ----- Example: all residues in a single helix/loop/strand will be gathered into a list, then the next secondary structure element will be gathered into a separate list, and so on. Parameters ---------- dssp_residues : [tuple] Each internal list contains: [0] int Residue number [1] str Secondary structure type [2] str Chain identifier [3] str Residue type [4] float Phi torsion angle [5] float Psi torsion angle [6] int dssp solvent accessibility Returns ------- fragments : [[list]] Lists grouped in continuous regions of secondary structure. Innermost list has the same format as above.
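A simplified, self-contained sketch of the grouping behaviour using itertools.groupby on toy tuples of (residue_number, ss_type, chain); unlike the function above, it does not merge the different loop codes into a single fragment:

from itertools import groupby

residues = [(1, "H", "A"), (2, "H", "A"), (3, " ", "A"), (4, " ", "A"), (5, "E", "A")]
fragments = [list(group) for _, group in groupby(residues, key=lambda r: (r[1], r[2]))]
print(fragments)
# [[(1, 'H', 'A'), (2, 'H', 'A')], [(3, ' ', 'A'), (4, ' ', 'A')], [(5, 'E', 'A')]]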
def expand_path_cfg(path_cfg, alias_dict={ }, overriding_kargs={ }): if isinstance(path_cfg, str): return _expand_str(path_cfg, alias_dict, overriding_kargs) if isinstance(path_cfg, dict): return _expand_dict(path_cfg, alias_dict) return _expand_tuple(path_cfg, alias_dict, overriding_kargs)
expand a path config Args: path_cfg (str, tuple, dict): a config for path alias_dict (dict): a dict for aliases overriding_kargs (dict): to be used for recursive call
def upload_headimg(self, account, media_file): return self._post( , params={ : account }, files={ : media_file } )
Upload the avatar for a customer service account. For details see http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html :param account: the full customer service account :param media_file: the avatar file to upload, a File-Object :return: the returned JSON data
def transferCoincidences(network, fromElementName, toElementName): coincidenceHandle = getLockedHandle( runtimeElement=network.getElement(fromElementName), expression="self._cd._W" ) network.getElement(toElementName).setParameter("coincidencesAbove", coincidenceHandle)
Gets the coincidence matrix from one element and sets it on another element (using locked handles, a la nupic.bindings.research.lockHandle). TODO: Generalize to more node types, parameter name pairs, etc. Does not work across processes.
def add_component(self, entity, component): component_type = type(component) relation = self._get_relation(component_type) if entity in relation: msg = "Component {0} can't be added to entity {1} since it already has a component of type {2}.".format(component, entity, component_type) raise DuplicateComponentError(msg) relation[entity] = component self._entities_with(component_type).add(entity)
Add component to entity. Long-hand for :func:`essence.Entity.add`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component: component to add to the entity :type component: :class:`essence.Component`
def calculate_size(name, include_value, local_only):
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += BOOLEAN_SIZE_IN_BYTES
    data_size += BOOLEAN_SIZE_IN_BYTES
    return data_size
Calculates the request payload size
def _get_plot_data(self): _marker_type = self.settings.get(, ) if self.x_col == self._idxname: x_data = self._idx else: x_data = self.tab[self.x_col].data if self.y_col == self._idxname: y_data = self._idx else: y_data = self.tab[self.y_col].data if self.tab.masked: if self.x_col == self._idxname: x_mask = np.ones_like(self._idx, dtype=np.bool) else: x_mask = ~self.tab[self.x_col].mask if self.y_col == self._idxname: y_mask = np.ones_like(self._idx, dtype=np.bool) else: y_mask = ~self.tab[self.y_col].mask mask = x_mask & y_mask x_data = x_data[mask] y_data = y_data[mask] if len(x_data) > 1: i = np.argsort(x_data) x_data = x_data[i] y_data = y_data[i] if not self.w.show_marker.get_state(): _marker_type = None return x_data, y_data, _marker_type
Extract only good data points for plotting.
def file_saved_in_other_editorstack(self, original_filename, filename):
    index = self.has_filename(original_filename)
    if index is None:
        return
    finfo = self.data[index]
    finfo.newly_created = False
    finfo.filename = to_text_string(filename)
    finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
File was just saved in another editorstack, let's synchronize! This avoids file being automatically reloaded. The original filename is passed instead of an index in case the tabs on the editor stacks were moved and are now in a different order - see issue 5703. Filename is passed in case file was just saved as another name.
def parse_pv(header): order_fit = parse_order_fit(header) def parse_with_base(i): key_base = "PV%d_" % i pvi_x = [header[key_base + "0"]] def parse_range(lower, upper): for j in range(lower, upper + 1): pvi_x.append(header[key_base + str(j)]) if order_fit >= 1: parse_range(1, 3) if order_fit >= 2: parse_range(4, 6) if order_fit >= 3: parse_range(7, 10) return pvi_x return [parse_with_base(1), parse_with_base(2)]
Parses the PV array from an astropy FITS header. Args: header: astropy.io.fits.header.Header The header containing the PV values. Returns: cd: 2d array (list(list(float)) [[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]] Note that N depends on the order of the fit. For example, an order 3 fit goes up to PV?_10.
def handle_url(url, session, res): print("Parsing", url, file=sys.stderr) try: data = getPageContent(url, session) except IOError as msg: print("ERROR:", msg, file=sys.stderr) return for match in url_matcher.finditer(data): url = match.group(1) + name = unescape(match.group(2)) name = asciify(name.replace(, ).replace(, )) name = capfirst(name) if name in exclude_comics: continue if contains_case_insensitive(res, name): print("INFO: skipping possible duplicate", repr(name), file=sys.stderr) continue end = match.end() mo = num_matcher.search(data[end:]) if not mo: print("ERROR:", repr(data[end:end+300]), file=sys.stderr) continue num = int(mo.group(1)) url = url_overrides.get(name, url) try: if "/d/" not in url: check_robotstxt(url+"d/", session) else: check_robotstxt(url, session) except IOError: print("INFO: robots.txt denied for comicgenesis", repr(name)) continue else: res[name] = (url, num)
Parse one search result page.
def dumps(self, fd, **kwargs): if 0 <= fd <= 2: data = [self.stdin, self.stdout, self.stderr][fd].concretize(**kwargs) if type(data) is list: data = b.join(data) return data return self.get_fd(fd).concretize(**kwargs)
Returns the concrete content for a file descriptor. BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout, or stderr as a flat string. :param fd: A file descriptor. :return: The concrete content. :rtype: str
def calc_qt_v1(self): con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess flu.qt = max(flu.outuh-con.abstr, 0.)
Calculate the total discharge after possible abstractions. Required control parameter: |Abstr| Required flux sequence: |OutUH| Calculated flux sequence: |QT| Basic equation: :math:`QT = max(OutUH - Abstr, 0)` Examples: Trying to abstract less than available, as much as available, and more than available results in: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> abstr(2.0) >>> fluxes.outuh = 2.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(1.0) >>> fluxes.outuh = 1.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(0.0) >>> fluxes.outuh = 0.5 >>> model.calc_qt_v1() >>> fluxes.qt qt(0.0) Note that "negative abstractions" are allowed: >>> abstr(-2.0) >>> fluxes.outuh = 1.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(2.0)
def _make_index(self):
    out = {}
    for layer in self._layers:
        cls = layer.__class__
        out[cls] = out.get(cls, []) + [layer]
    return out
Perform the index computation. It groups layers by type into a dictionary, to allow quick access.
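A small sketch of grouping objects by their class into a dict, as the index computation above does (the layer classes here are hypothetical):

from collections import defaultdict

class Conv: pass
class Dense: pass

layers = [Conv(), Dense(), Conv()]
index = defaultdict(list)
for layer in layers:
    index[layer.__class__].append(layer)       # group layers by type
print({cls.__name__: len(objs) for cls, objs in index.items()})   # {'Conv': 2, 'Dense': 1}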
def collect_basic_info(): s = sys.version_info _collect(json.dumps({:tuple(s)})) _collect(sys.version) return sys.version
collect basic info about the system, os, python version...
def abort(self, exception=exc.ConnectError): log.warn("Aborting connection to %s:%s", self.host, self.port) def abort_pending(f): exc_info = sys.exc_info() if any(exc_info): f.set_exc_info(exc_info) else: f.set_exception(exception(self.host, self.port)) for pending in self.drain_all_pending(): abort_pending(pending)
Aborts a connection and puts all pending futures into an error state. If ``sys.exc_info()`` is set (i.e. this is being called in an exception handler) then pending futures will have that exc info set. Otherwise the given ``exception`` parameter is used (defaults to ``ConnectError``).
def _hash_filter_fn(self, filter_fn, **kwargs): filter_fn_name = self._get_function_name(filter_fn, default="filter-none") logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs)))) fn_source = str(dill.source.getsource(filter_fn)) pickled_fn_source = pickle.dumps(fn_source) hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11) kw_dict = dict(**kwargs) kw_hash = list() if not kw_dict: kw_hash = ["default"] else: [kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())] closure = [] nonlocals = inspect.getclosurevars(filter_fn).nonlocals for (key, val) in nonlocals.items(): if inspect.isfunction(val): closure.append(self._hash_filter_fn(val)) closure.sort() closure_str = "null" if len(closure) == 0 else "-".join(closure) hashed_fn = ".".join(["-".join([filter_fn_name, str(hashed_fn_source)]), ".".join(kw_hash), closure_str] ) return hashed_fn
Construct a string representing the state of filter_fn. Used to cache filtered variants or effects uniquely, depending on the filter fn values.
def _format_message(value, line_length, indent="", first_indent=None): if indent.find(): indent = indent.replace(, ) result = [] if first_indent is None: first_indent = indent cindent = first_indent tmp = "*" * line_length for ele in value.split(): if ele.find() >= 0: ele = ele.replace(, ) if (len(ele) + len(tmp)) >= line_length: result.append(tmp) tmp = .format(cindent, ele) cindent = indent else: tmp = "{0} {1}".format(tmp, ele) result.append(tmp) result = result[1:] return "\n".join(result)
Return a string with newlines so that the given string fits into this line length. At the start of the line the indent is added. This can be used for commenting the message out within a file or to indent your text. All \\t will be replaced with 4 spaces. @param value: The string to get as a commented multiline comment. @param line_length: The length of the line to fill. @param indent: The indent to use for printing or character to put in front @param first_indent: The first indent might be shorter. If None then the first line uses the same indent as the rest of the string. @return: The string with newlines
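For comparison, the standard library's textwrap offers similar wrapping and indenting; this is not the function's actual implementation, only an illustration of the behaviour described:

import textwrap

msg = "A fairly long message that should be wrapped and indented before being written to a file."
print(textwrap.fill(msg.replace("\t", "    "), width=40,
                    initial_indent="# ", subsequent_indent="#   "))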
def get(self, path, default=_NoDefault, as_type=None, resolve_references=True): value = self._source steps_taken = [] try: for step in path.split(self._separator): steps_taken.append(step) value = value[step] if as_type: return as_type(value) elif isinstance(value, Mapping): namespace = type(self)(separator=self._separator, missing=self._missing) namespace._source = value namespace._root = self._root return namespace elif resolve_references and isinstance(value, str): return self._resolve(value) else: return value except ConfiguredReferenceError: raise except KeyError as e: if default is not _NoDefault: return default else: missing_key = self._separator.join(steps_taken) raise NotConfiguredError(.format(missing_key), key=missing_key) from e
Gets a value for the specified path. :param path: the configuration key to fetch a value for, steps separated by the separator supplied to the constructor (default ``.``) :param default: a value to return if no value is found for the supplied path (``None`` is allowed) :param as_type: an optional callable to apply to the value found for the supplied path (possibly raising exceptions of its own if the value can not be coerced to the expected type) :param resolve_references: whether to resolve references in values :return: the value associated with the supplied configuration key, if available, or a supplied default value if the key was not found :raises ConfigurationError: when no value was found for *path* and *default* was not provided or a reference could not be resolved
def _from_binary_ace_header(cls, binary_stream): type, control_flags, size = cls._REPR.unpack(binary_stream) nw_obj = cls((ACEType(type), ACEControlFlags(control_flags), size)) _MOD_LOGGER.debug("Attempted to unpack ACE Header from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
See base class.
def phrase_pinyin(phrase, style, heteronym, errors=, strict=True): py = [] if phrase in PHRASES_DICT: py = deepcopy(PHRASES_DICT[phrase]) for idx, item in enumerate(py): if heteronym: py[idx] = _remove_dup_items([ _to_fixed(x, style=style, strict=strict) for x in item]) else: py[idx] = [_to_fixed(item[0], style=style, strict=strict)] else: for i in phrase: single = single_pinyin(i, style=style, heteronym=heteronym, errors=errors, strict=strict) if single: py.extend(single) return py
Convert a phrase to pinyin. :param phrase: the phrase :param errors: how to handle characters that have no pinyin :param strict: whether to strictly follow the Scheme for the Chinese Phonetic Alphabet (《汉语拼音方案》) when handling initials and finals :return: list of pinyin :rtype: list
def maybe_infer_dtype_type(element):
    tipo = None
    if hasattr(element, "dtype"):
        tipo = element.dtype
    elif is_list_like(element):
        element = np.asarray(element)
        tipo = element.dtype
    return tipo
Try to infer an object's dtype, for use in arithmetic ops Uses `element.dtype` if that's available. Objects implementing the iterator protocol are cast to a NumPy array, and from there the array's type is used. Parameters ---------- element : object Possibly has a `.dtype` attribute, and possibly the iterator protocol. Returns ------- tipo : type Examples -------- >>> from collections import namedtuple >>> Foo = namedtuple("Foo", "dtype") >>> maybe_infer_dtype_type(Foo(np.dtype("i8"))) numpy.int64
def add_ylabel(self, text=None): y = self.fit.meta[] if not text: text = + y[] + r + y[] + r self.plt.set_ylabel(text)
Add a label to the y-axis.
def publish_tcp(self, topic, data, **kwargs): return self.__tcp_client.publish(topic, data, **kwargs)
Use :meth:`NsqdTCPClient.publish` instead. .. deprecated:: 1.0.0
def connect(self, host=None, port=None, connect=False, **kwargs):
    try:
        self.__connection = MongoClient(host=host, port=port, connect=connect, **kwargs)
    except (AutoReconnect, ConnectionFailure, ServerSelectionTimeoutError):
        raise DatabaseIsDownError("No mongod process is running.")
Explicitly creates the MongoClient; this method must be used in order to specify a non-default host or port to the MongoClient. Takes arguments identical to MongoClient.__init__
def get_types_by_attr(resource, template_id=None): resource_type_templates = [] attr_ids = [] for res_attr in resource.attributes: attr_ids.append(res_attr.attr_id) all_resource_attr_ids = set(attr_ids) all_types = db.DBSession.query(TemplateType).options(joinedload_all()).filter(TemplateType.resource_type==resource.ref_key) if template_id is not None: all_types = all_types.filter(TemplateType.template_id==template_id) all_types = all_types.all() for ttype in all_types: type_attr_ids = [] for typeattr in ttype.typeattrs: type_attr_ids.append(typeattr.attr_id) if set(type_attr_ids).issubset(all_resource_attr_ids): resource_type_templates.append(ttype) return resource_type_templates
Using the attributes of the resource, get all the types that this resource matches. @returns a dictionary, keyed on the template name, with the value being the list of type names which match the resources attributes.
def street_name(self): pattern = self.random_element(self.street_name_formats) return self.generator.parse(pattern)
:example 'Crist Parks'
def isValid(cntxt: Context, m: FixedShapeMap) -> Tuple[bool, List[str]]: if not cntxt.is_valid: return False, cntxt.error_list parse_nodes = [] for nodeshapepair in m: n = nodeshapepair.nodeSelector if not isinstance_(n, Node): return False, [f"{n}: Triple patterns are not implemented"] elif not (next(cntxt.graph.predicate_objects(nodeshapepair.nodeSelector), None) or next(cntxt.graph.subject_predicates(nodeshapepair.nodeSelector), None) or not next(cntxt.graph.triples((None, None, None)), None)): return False, [f"Focus: {nodeshapepair.nodeSelector} not in graph"] else: s = cntxt.shapeExprFor(START if nodeshapepair.shapeLabel is None or nodeshapepair.shapeLabel is START else nodeshapepair.shapeLabel) cntxt.current_node = ParseNode(satisfies, s, n, cntxt) if not s: if nodeshapepair.shapeLabel is START or nodeshapepair.shapeLabel is None: cntxt.fail_reason = "START node is not specified or is invalid" else: cntxt.fail_reason = f"Shape: {nodeshapepair.shapeLabel} not found in Schema" return False, cntxt.process_reasons() parse_nodes.append(cntxt.current_node) if not satisfies(cntxt, n, s): cntxt.current_node.result = False return False, cntxt.process_reasons() else: cntxt.current_node.result = True return True, []
`5.2 Validation Definition <http://shex.io/shex-semantics/#validation>`_ The expression isValid(G, m) indicates that for every nodeSelector/shapeLabel pair (n, s) in m, s has a corresponding shape expression se and satisfies(n, se, G, m). satisfies is defined below for each form of shape expression :param cntxt: evaluation context - includes graph and schema :param m: list of NodeShape pairs to test :return: Success/failure indicator and, if fail, a list of failure reasons
def _read_oem(string): ephems = [] required = (, , , , ) mode = None for line in string.splitlines(): if not line or line.startswith("COMMENT"): continue elif line.startswith("META_START"): mode = "meta" ephem = {: []} ephems.append(ephem) elif line.startswith("META_STOP"): mode = "data" for k in required: if k not in ephem: raise ValueError("Missing field ".format(k)) if ephem[].lower() != "earth": ephem[] = ephem[].title().replace(" ", "") elif mode == "meta": key, _, value = line.partition("=") ephem[key.strip()] = value.strip() elif mode == "data": date, *state_vector = line.split() date = Date.strptime(date, "%Y-%m-%dT%H:%M:%S.%f", scale=ephem[]) state_vector = np.array([float(x) for x in state_vector[:6]]) * 1000 ephem[].append(Orbit(date, state_vector, , ephem[], None)) for i, ephem_dict in enumerate(ephems): if not ephem_dict[]: raise ValueError("Empty ephemeris") method = ephem_dict.get(, ).lower() order = int(ephem_dict.get(, 7)) + 1 ephem = Ephem(ephem_dict[], method=method, order=order) ephem.name = ephem_dict[] ephem.cospar_id = ephem_dict[] ephems[i] = ephem if len(ephems) == 1: return ephems[0] return ephems
Args: string (str): String containing the OEM Return: Ephem:
def socket_monitor_loop(self): try: while True: gevent.socket.wait_read(self.socket.fileno()) self._handle_log_rotations() self.capture_packet() finally: self.clean_up()
Monitor the socket and log captured data.
def setValidityErrorHandler(self, err_func, warn_func, arg=None): libxml2mod.xmlRelaxNGSetValidErrors(self._o, err_func, warn_func, arg)
Register error and warning handlers for RelaxNG validation. These will be called back as f(msg,arg)
def on_response(self, msg): LOGGER.debug("natsd.Requester.on_response: " + str(sys.getsizeof(msg)) + " bytes received") working_response = json.loads(msg.data.decode()) working_properties = DriverTools.json2properties(working_response[]) working_body = b+bytes(working_response[], ) if in working_response else None if DriverTools.MSG_CORRELATION_ID in working_properties: if self.corr_id == working_properties[DriverTools.MSG_CORRELATION_ID]: if DriverTools.MSG_SPLIT_COUNT in working_properties and \ int(working_properties[DriverTools.MSG_SPLIT_COUNT]) > 1: working_body_decoded = base64.b64decode(working_body) if working_body is not None else None if self.split_responses is None: self.split_responses = [] self.split_responses_mid = working_properties[DriverTools.MSG_SPLIT_MID] if working_properties[DriverTools.MSG_SPLIT_MID] == self.split_responses_mid: response = { : working_properties, : working_body_decoded } self.split_responses.insert(int(working_properties[DriverTools.MSG_SPLIT_OID]), response) if self.split_responses.__len__() == int(working_properties[DriverTools.MSG_SPLIT_COUNT]): properties = {} body = b for num in range(0, self.split_responses.__len__()): properties.update(self.split_responses[num][]) body += self.split_responses[num][] self.response = { : properties, : body } self.split_responses = None self.split_responses_mid = None else: LOGGER.warn("natsd.Requester.on_response - discarded response : (" + str(working_properties[DriverTools.MSG_CORRELATION_ID]) + "," + str(working_properties[DriverTools.MSG_SPLIT_MID]) + ")") LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ : working_properties, : working_body_decoded })) else: working_body_decoded = base64.b64decode(working_body) if working_body is not None else \ bytes(json.dumps({}), ) self.response = { : working_properties, : working_body_decoded } else: working_body_decoded = base64.b64decode(working_body) if working_body is not None else None LOGGER.warn("natsd.Requester.on_response - discarded response : " + str(working_properties[DriverTools.MSG_CORRELATION_ID])) LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ : working_properties, : working_body_decoded })) else: working_body_decoded = base64.b64decode(working_body) if working_body is not None else None LOGGER.warn("natsd.Requester.on_response - discarded response (no correlation ID)") LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ : working_properties, : working_body_decoded }))
Set up the response if the correlation id is the expected one.
def _format_dates(self, start, end):
    start = self._split_date(start)
    end = self._split_date(end)
    return start, end
Format start and end dates.
def sphericalAngSep(ra0, dec0, ra1, dec1, radians=False): if radians==False: ra0 = np.radians(ra0) dec0 = np.radians(dec0) ra1 = np.radians(ra1) dec1 = np.radians(dec1) deltaRa= ra1-ra0 deltaDec= dec1-dec0 val = haversine(deltaDec) val += np.cos(dec0) * np.cos(dec1) * haversine(deltaRa) val = min(1, np.sqrt(val)) ; val = 2*np.arcsin(val) if radians==False: val = np.degrees(val) return val
Compute the spherical angular separation between two points on the sky. //Taken from http://www.movable-type.co.uk/scripts/gis-faq-5.1.html NB: For small distances you can probably use sqrt( dDec**2 + cos^2(dec)*dRa**2 ) where dDec = dec1 - dec0 and dRa = ra1 - ra0 and dec1 \approx dec \approx dec0
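A self-contained haversine-based version for a quick sanity check (the coordinates below are illustrative):

import numpy as np

def haversine(theta):
    return np.sin(theta / 2.0) ** 2

def ang_sep_deg(ra0, dec0, ra1, dec1):
    ra0, dec0, ra1, dec1 = map(np.radians, (ra0, dec0, ra1, dec1))
    val = haversine(dec1 - dec0) + np.cos(dec0) * np.cos(dec1) * haversine(ra1 - ra0)
    return np.degrees(2 * np.arcsin(min(1.0, np.sqrt(val))))

print(ang_sep_deg(10.0, 0.0, 11.0, 0.0))   # ~1.0 degree along the equator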
def delete_assessment_part(self, assessment_part_id): if not isinstance(assessment_part_id, ABCId): raise errors.InvalidArgument() collection = JSONClientValidated(, collection=, runtime=self._runtime) if collection.find({: str(assessment_part_id)}).count() != 0: raise errors.IllegalState() collection = JSONClientValidated(, collection=, runtime=self._runtime) try: apls = get_assessment_part_lookup_session(runtime=self._runtime, proxy=self._proxy) apls.use_unsequestered_assessment_part_view() apls.use_federated_bank_view() part = apls.get_assessment_part(assessment_part_id) part.delete() except AttributeError: collection.delete_one({: ObjectId(assessment_part_id.get_identifier())})
Removes an asessment part and all mapped items. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` raise: NotFound - ``assessment_part_id`` not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
def set_value(self, value, status=Sensor.NOMINAL, timestamp=None):
    if timestamp is None:
        timestamp = self._manager.time()
    self.set(timestamp, status, value)
Set sensor value with optional specification of status and timestamp.
def build(self, shutit): if shutit.build[] in (,): if shutit.get_current_shutit_pexpect_session_environment().install_type == : shutit.add_to_bashrc() if not shutit.command_available(): shutit.install() shutit.lsb_release() elif shutit.get_current_shutit_pexpect_session_environment().install_type == : shutit.send(, timeout=9999, exit_values=[, ]) shutit.pause_point( + , level=2) return True
Initializes the target ready for the build, updating package management if running in a container.
def remove_core_element(self, model): assert model.outcome.parent is self.model.state gui_helper_state_machine.delete_core_element_of_model(model)
Remove respective core element of handed outcome model :param OutcomeModel model: Outcome model which core element should be removed :return:
def register_widget(self, widget_cls, **widget_kwargs): if not issubclass(widget_cls, DashboardWidgetBase): raise ImproperlyConfigured( .format(widget_cls)) widget = widget_cls(**widget_kwargs) widget_name = widget.get_name() if widget_name in self.widgets: raise WidgetAlreadyRegistered( .format(widget_cls, widget_name)) self.widgets[widget_name] = widget
Registers the given widget. Widgets must inherit ``DashboardWidgetBase`` and you cannot register the same widget twice. :widget_cls: A class that inherits ``DashboardWidgetBase``.
def dedupe_cols(frame): cols = list(frame.columns) for i, item in enumerate(frame.columns): if item in frame.columns[:i]: cols[i] = "toDROP" frame.columns = cols return frame.drop("toDROP", 1, errors=)
Need to dedupe columns that have the same name.
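An example of the effect on a small frame with a repeated column name; the mask-based line is a standard pandas idiom that achieves the same result as the helper above:

import pandas as pd

frame = pd.DataFrame([[1, 2, 3]], columns=["a", "b", "a"])
deduped = frame.loc[:, ~frame.columns.duplicated()]   # keep the first occurrence of each name
print(list(deduped.columns))                          # ['a', 'b']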
def delete(self, symbol, chunk_range=None, audit=None): if chunk_range is not None: sym = self._get_symbol_info(symbol) df = self.read(symbol, chunk_range=chunk_range, filter_data=False) row_adjust = len(df) if not df.empty: df = CHUNKER_MAP[sym[CHUNKER]].exclude(df, chunk_range) query = {SYMBOL: symbol} query.update(CHUNKER_MAP[sym[CHUNKER]].to_mongo(chunk_range)) self._collection.delete_many(query) self._mdata.delete_many(query) self.update(symbol, df) sym = self._get_symbol_info(symbol) sym[LEN] -= row_adjust sym[CHUNK_COUNT] = mongo_count(self._collection, filter={SYMBOL: symbol}) self._symbols.replace_one({SYMBOL: symbol}, sym) else: query = {SYMBOL: symbol} self._collection.delete_many(query) self._symbols.delete_many(query) self._mdata.delete_many(query) if audit is not None: audit[] = symbol if chunk_range is not None: audit[] = row_adjust audit[] = else: audit[] = self._audit.insert_one(audit)
Delete all chunks for a symbol, or optionally, chunks within a range Parameters ---------- symbol : str symbol name for the item chunk_range: range object a date range to delete audit: dict dict to store in the audit log
def _temporal_distance_pdf(self): temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf() delta_peak_loc_to_probability_mass = {} non_delta_peak_split_points = [temporal_distance_split_points_ordered[0]] non_delta_peak_densities = [] for i in range(0, len(temporal_distance_split_points_ordered) - 1): left = temporal_distance_split_points_ordered[i] right = temporal_distance_split_points_ordered[i + 1] width = right - left prob_mass = norm_cdf[i + 1] - norm_cdf[i] if width == 0.0: delta_peak_loc_to_probability_mass[left] = prob_mass else: non_delta_peak_split_points.append(right) non_delta_peak_densities.append(prob_mass / float(width)) assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - 1) return numpy.array(non_delta_peak_split_points), \ numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass
Temporal distance probability density function. Returns ------- non_delta_peak_split_points: numpy.array non_delta_peak_densities: numpy.array len(density) == len(temporal_distance_split_points_ordered) -1 delta_peak_loc_to_probability_mass : dict
def _request(self, method, resource_uri, **kwargs): data = kwargs.get() response = method(self.API_BASE_URL + resource_uri, json=data, headers=self.headers) response.raise_for_status() return response.json()
Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response
def generate_report( book_url, fund_ids: StringOption( section="Funds", sort_tag="c", documentation_string="Comma-separated list of fund ids.", default_value="8123,8146,8148,8147") ): return render_report(book_url, fund_ids)
Generates the report output
def MediaBoxSize(self): CheckParent(self) val = _fitz.Page_MediaBoxSize(self) val = Point(val) if not bool(val): r = self.rect val = Point(r.width, r.height) return val
Retrieve width, height of /MediaBox.
def close(self): close_command = StandardSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00) self._send_method(close_command, self._close_message_received)
Send CLOSE command to device.
def out_filename(template, n_val, mode): return .format(template.name, n_val, mode.identifier)
Determine the output filename
def update(self, fields=None, async_=None, jira=None, notify=True, **kwargs): if async_ is None: async_ = self._options[] data = {} if fields is not None: data.update(fields) data.update(kwargs) data = json.dumps(data) if not notify: querystring = "?notifyUsers=false" else: querystring = "" r = self._session.put( self.self + querystring, data=data) if in self._options and \ r.status_code == 400: user = None error_list = get_error_list(r) logging.error(error_list) if "The reporter specified is not a user." in error_list: if not in data[]: logging.warning( "autofix: setting reporter to and retrying the update." % self._options[]) data[][] = { : self._options[]} if "Issues must be assigned." in error_list: if not in data[]: logging.warning("autofix: setting assignee to for %s and retrying the update." % ( self._options[], self.key)) data[][] = { : self._options[]} if "Issue type is a sub-task but parent issue key or id not specified." in error_list: logging.warning( "autofix: trying to fix sub-task without parent by converting to it to bug") data[][] = {"name": "Bug"} if "The summary is invalid because it contains newline characters." in error_list: logging.warning("autofix: trying to fix newline in summary") data[][ ] = self.fields.summary.replace("/n", "") for error in error_list: if re.search(r"^User was not found in the system\.", error, re.U): m = re.search( r"^User was not found in the system\.", error, re.U) if m: user = m.groups()[0] else: raise NotImplementedError() if re.search(r"^User does not exist\.", error): m = re.search(r"^User does not exist\.", error) if m: user = m.groups()[0] else: raise NotImplementedError() if user: logging.warning( "Trying to add missing orphan user in order to complete the previous failed operation." % user) jira.add_user(user, , 10100, active=False) if async_: if not hasattr(self._session, ): self._session._async_jobs = set() self._session._async_jobs.add(threaded_requests.put( self.self, data=json.dumps(data))) else: r = self._session.put( self.self, data=json.dumps(data)) time.sleep(self._options[]) self._load(self.self)
Update this resource on the server. Keyword arguments are marshalled into a dict before being sent. If this resource doesn't support ``PUT``, a :py:exc:`.JIRAError` will be raised; subclasses that specialize this method will only raise errors in case of user error. :param fields: Fields which should be updated for the object. :type fields: Optional[Dict[str, Any]] :param async_: If true the request will be added to the queue so it can be executed later using async_run() :type async_: bool :param jira: Instance of JIRA Client :type jira: jira.JIRA :param notify: Whether or not to notify users about the update. (Default: True) :type notify: bool :type kwargs: **Any
def sync(self, old_token=None): token = "http://radicale.org/ns/sync/%s" % self.etag.strip("\"") if old_token: raise ValueError("Sync token are not supported (you can ignore this warning)") return token, self.list()
Get the current sync token and changed items for synchronization. ``old_token`` an old sync token which is used as the base of the delta update. If sync token is missing, all items are returned. ValueError is raised for invalid or old tokens.
def process_callback(self, block=True): try: (callback, args) = self._queue.get(block=block) try: callback(*args) finally: self._queue.task_done() except queue.Empty: return False return True
Dispatch a single callback in the current thread. :param boolean block: If True, blocks waiting for a callback to come. :return: True if a callback was processed; otherwise False.
def stream_file(self, path, fast_lane=True): return Stream(self)
Create a temp file, stream it to the server if online and append its content using the write() method. This makes sure that we have all newest data of this file on the server directly. At the end of the job, the content the server received is stored as git blob on the server. It is then committed locally and pushed. Git detects that the server already has the version (through the continuous streaming) and won't push it again. Very handy for rather large files that will append over time (like channel data, logs) Example: self.log_stream = git.stream_file('log.txt') self.log_stream.write("new line\n"); self.log_stream.write("another line\n");
def add_model(self, model):
    logger.debug(
        'Adding model "{}" to "{}"'.format(model.name, self.name))
    self.models[model.name] = model
Add a `RegressionModel` instance. Parameters ---------- model : `RegressionModel` Should have a ``.name`` attribute matching one of the groupby segments.
def update(self, portfolio, date, perfs=None): self.portfolio = portfolio self.perfs = perfs self.date = date
Updates the portfolio universe with the algorithm state
def drawPoints(self, pointPen, filterRedundantPoints=False): if filterRedundantPoints: pointPen = FilterRedundantPointPen(pointPen) for contour in self.contours: pointPen.beginPath(identifier=contour["identifier"]) for segmentType, pt, smooth, name, identifier in contour["points"]: pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier) pointPen.endPath() for component in self.components: pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
draw self using pointPen
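A small sketch of driving ``drawPoints`` with a point pen; the ``glyph`` object and the minimal pen below are hypothetical, and only the pen methods called by ``drawPoints`` above are assumed:

class PrintingPointPen:
    # Minimal point pen that just reports what drawPoints sends it.
    def beginPath(self, identifier=None):
        print("beginPath", identifier)

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None):
        print("  addPoint", pt, segmentType, smooth, name, identifier)

    def endPath(self):
        print("endPath")

    def addComponent(self, baseGlyph, transformation, identifier=None):
        print("addComponent", baseGlyph, transformation, identifier)

glyph.drawPoints(PrintingPointPen(), filterRedundantPoints=True)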
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock: validate_word(block_hash, title="Block Hash") block_header = self.get_block_header_by_hash(block_hash) return self.get_block_by_header(block_header)
Returns the requested block as specified by block hash.
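A usage sketch; ``chain`` stands in for whatever object implements this method, and ``block_hash`` is a placeholder for a 32-byte hash already known to the chain:

# block_hash must be a 32-byte value (validate_word enforces this).
block = chain.get_block_by_hash(block_hash)
print(block.number)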
def __update_window(self, width, height, message_no, page_no):
    file_exists_label = 'Yes'
    if not os.path.exists(self.__create_file_name(message_no)):
        file_exists_label = 'No'

    # Clear the terminal: scroll the old content away on Windows,
    # otherwise use the ANSI "clear screen and home cursor" sequence
    if PLATFORM == 'win32':
        for _ in range(50):
            print
    else:
        sys.stdout.write('\x1b[2J\x1b[H')

    content = self.messages[message_no].output.rstrip()
    out = content
    if self.args.color:
        out = pygments.highlight(content, XmlLexer(), TerminalFormatter())

    if message_no not in self.pages:
        self._form_pages(message_no, content, out, height, width)
    page_no = max(min(len(self.pages[message_no]) - 1, page_no), 0)
    page_content = self.pages[message_no][page_no]

    max_message = str(len(self.messages) - 1)
    position_string = u'{{0: >{0}}}/{{1}}'.format(len(max_message))
    position_string = position_string.format(message_no, max_message)

    current_max_page = len(self.pages[message_no]) - 1
    pages_string = u'{0}/{1}'.format(page_no, current_max_page)
    menu = (u'Saved to file: {0} | Message: {1} | Page: {2}\n'
            u'{3}').\
        format(file_exists_label, position_string,
               pages_string, u'-' * width)
    print menu
    print page_content
    return page_no
Update the window with the menu and the new text
def _parse_tmx(path): def _get_tuv_lang(tuv): for k, v in tuv.items(): if k.endswith("}lang"): return v raise AssertionError("Language not found in `tuv` attributes.") def _get_tuv_seg(tuv): segs = tuv.findall("seg") assert len(segs) == 1, "Invalid number of segments: %d" % len(segs) return segs[0].text with tf.io.gfile.GFile(path) as f: for _, elem in ElementTree.iterparse(f): if elem.tag == "tu": yield { _get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv") } elem.clear()
Generates examples from TMX file.
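A short sketch of consuming the generator; the file name is a placeholder, and a TMX file containing ``tu``/``tuv``/``seg`` elements is assumed:

for i, example in enumerate(_parse_tmx("corpus.en-de.tmx")):
    # Each example maps a language code to one segment, e.g.
    # {"en": "Hello world.", "de": "Hallo Welt."}
    print(example)
    if i >= 2:
        break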
def query(self, coords, mode='random_sample'):
    # Check that the query mode is supported
    valid_modes = [
        'random_sample',
        'random_sample_per_pix',
        'samples',
        'median',
        'mean']

    if mode not in valid_modes:
        raise ValueError(
            '"{}" is not a valid `mode`. Valid modes are:\n'
            '  {}'.format(mode, valid_modes))

    n_coords_ret = coords.shape[0]

    # Determine if distances have been requested
    has_dist = hasattr(coords.distance, 'kpc')
    d = coords.distance.kpc if has_dist else None

    # Convert coordinates to pixel indices
    pix_idx = self._coords2idx(coords)

    # Flag out-of-bounds coordinates
    mask_idx = (pix_idx == self._n_pix)
    if np.any(mask_idx):
        pix_idx[mask_idx] = 0

    # Choose which samples to extract
    if mode == 'random_sample':
        samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
        n_samp_ret = 1
    elif mode == 'random_sample_per_pix':
        samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
        n_samp_ret = 1
    else:
        samp_idx = slice(None)
        n_samp_ret = self._n_samples

    if has_dist:
        d = coords.distance.pc
        dist_idx_ceil = np.searchsorted(self._dists, d)

        if isinstance(samp_idx, slice):
            ret = np.empty((n_coords_ret, n_samp_ret), dtype='f8')
        else:
            ret = np.empty((n_coords_ret,), dtype='f8')

        # d < nearest distance slice
        idx_near = (dist_idx_ceil == 0)
        if np.any(idx_near):
            a = d[idx_near] / self._dists[0]
            if isinstance(samp_idx, slice):
                ret[idx_near] = a[:,None] * self._data['A0'][pix_idx[idx_near], 0, samp_idx]
            else:
                ret[idx_near] = a[:] * self._data['A0'][pix_idx[idx_near], 0, samp_idx[idx_near]]

        # d > farthest distance slice
        idx_far = (dist_idx_ceil == self._n_dists)
        if np.any(idx_far):
            if isinstance(samp_idx, slice):
                ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx]
            else:
                ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx[idx_far]]

        # Interpolate between the two bracketing distance slices
        idx_btw = ~idx_near & ~idx_far
        if np.any(idx_btw):
            d_ceil = self._dists[dist_idx_ceil[idx_btw]]
            d_floor = self._dists[dist_idx_ceil[idx_btw]-1]
            a = (d_ceil - d[idx_btw]) / (d_ceil - d_floor)
            if isinstance(samp_idx, slice):
                ret[idx_btw] = (
                    (1.-a[:,None]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx]
                    + a[:,None] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1, samp_idx])
            else:
                ret[idx_btw] = (
                    (1.-a[:]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx[idx_btw]]
                    + a[:] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1, samp_idx[idx_btw]])
    else:
        ret = self._data['A0'][pix_idx, :, samp_idx]

    # Reduce the samples in the requested manner
    samp_axis = 1 if has_dist else 2
    if mode == 'median':
        ret = np.median(ret, axis=samp_axis)
    elif mode == 'mean':
        ret = np.mean(ret, axis=samp_axis)

    if np.any(mask_idx):
        ret[mask_idx] = np.nan

    return ret
Returns A0 at the given coordinates. There are several different query modes, which handle the probabilistic nature of the map differently.

Args:
    coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
    mode (Optional[:obj:`str`]): Five different query modes are available: ``'random_sample'``, ``'random_sample_per_pix'``, ``'samples'``, ``'median'`` and ``'mean'``. The ``mode`` determines how the output will reflect the probabilistic nature of the IPHAS dust map.

Returns:
    Monochromatic extinction, A0, at the specified coordinates, in mags. The shape of the output depends on the ``mode``, and on whether ``coords`` contains distances.

    If ``coords`` does not specify distance(s), then the shape of the output begins with ``coords.shape``. If ``coords`` does specify distance(s), then the shape of the output begins with ``coords.shape + ([number of distance bins],)``.

    If ``mode`` is ``'random_sample'``, then at each coordinate/distance, a random sample of reddening is given.

    If ``mode`` is ``'random_sample_per_pix'``, then the sample chosen for each angular pixel of the map will be consistent. For example, if two query coordinates lie in the same map pixel, then the same random sample will be chosen from the map for both query coordinates.

    If ``mode`` is ``'median'``, then at each coordinate/distance, the median reddening is returned.

    If ``mode`` is ``'mean'``, then at each coordinate/distance, the mean reddening is returned.

    Finally, if ``mode`` is ``'samples'``, then at each coordinate/distance, all samples are returned.
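A usage sketch; ``iphas`` is a placeholder for an instance of the class defining ``query``, and the coordinates are arbitrary:

import astropy.units as u
from astropy.coordinates import SkyCoord

coords = SkyCoord(
    l=[120.0, 125.0] * u.deg,
    b=[1.5, -0.8] * u.deg,
    distance=[1.0, 2.5] * u.kpc,
    frame='galactic')

# Median monochromatic extinction A0 at each coordinate/distance.
A0_median = iphas.query(coords, mode='median')

# One random sample of A0 per coordinate instead.
A0_sample = iphas.query(coords, mode='random_sample')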
def running_objects(self): return [obj for obj in self.database_objects if obj.status in [obj.known_statuses.RUNNING]]
Return the currently running objects associated with this workflow.
def report(self, name, ok, msg=None, deltat=20): r = self.reports[name] if time.time() < r.last_report + deltat: r.ok = ok return r.last_report = time.time() if ok and not r.ok: self.say("%s OK" % name) r.ok = ok if not r.ok: self.say(msg)
report a sensor error
def config(scope=None):
    global _cache_config
    config_cmd_line = ['config-get', '--all', '--format=json']
    try:
        # JSONDecodeError only exists on Python 3.5+; fall back to ValueError
        exc_json = json.decoder.JSONDecodeError
    except AttributeError:
        exc_json = ValueError
    try:
        if _cache_config is None:
            config_data = json.loads(
                subprocess.check_output(config_cmd_line).decode('UTF-8'))
            _cache_config = Config(config_data)
        if scope is not None:
            return _cache_config.get(scope)
        return _cache_config
    except (exc_json, UnicodeDecodeError) as e:
        log('Unable to parse output from config-get: config_cmd_line="{}" '
            'message="{}"'
            .format(config_cmd_line, str(e)), level=ERROR)
        return None
Get the juju charm configuration (scope==None) or individual key, (scope=str). The returned value is a Python data structure loaded as JSON from the Juju config command. :param scope: If set, return the value for the specified key. :type scope: Optional[str] :returns: Either the whole config as a Config, or a key from it. :rtype: Any
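A small sketch of calling it from a charm hook; the keys ``port`` and ``debug`` are placeholders for whatever the charm defines in its ``config.yaml``:

# Whole config as a dict-like Config object.
cfg = config()
if cfg is not None:
    port = cfg.get('port', 8080)

# Or fetch a single key directly.
debug = config('debug')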
def remove(self, auto_confirm=False, verbose=False):
    if not self.paths:
        logger.info(
            "Can't uninstall '%s'. No files were found to uninstall.",
            self.dist.project_name)
        return
    dist_name_version = self.dist.project_name + "-" + self.dist.version
    logger.info('Uninstalling %s:', dist_name_version)
    with indent_log():
        if auto_confirm or self._allowed_to_proceed(verbose):
            moved = self._moved_paths
            for path in sorted(compact(compress_for_rename(self.paths))):
                moved.stash(path)
                logger.debug('Removing file or directory %s', path)
            for pth in self.pth.values():
                pth.remove()
            logger.info('Successfully uninstalled %s', dist_name_version)
Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).
def preprocess(self, nb_man, resources, km=None):
    with self.setup_preprocessor(nb_man.nb, resources, km=km):
        if self.log_output:
            self.log.info("Executing notebook with kernel: {}".format(self.kernel_name))
        nb, resources = self.papermill_process(nb_man, resources)
        info_msg = self._wait_for_reply(self.kc.kernel_info())
        nb.metadata['language_info'] = info_msg['content']['language_info']
        self.set_widgets_metadata()

    return nb, resources
Wraps the parent class process call slightly
def match_all(d_SMEFT, parameters=None):
    p = default_parameters.copy()
    if parameters is not None:
        p.update(parameters)
    C = wilson.util.smeftutil.wcxf2arrays_symmetrized(d_SMEFT)
    C['vT'] = 246.22  # electroweak vacuum expectation value in GeV
    C_WET = match_all_array(C, p)
    C_WET = wilson.translate.wet.rotate_down(C_WET, p)
    C_WET = wetutil.unscale_dict_wet(C_WET)
    d_WET = wilson.util.smeftutil.arrays2wcxf(C_WET)
    basis = wcxf.Basis['WET', 'JMS']
    keys = set(d_WET.keys()) & set(basis.all_wcs)
    d_WET = {k: d_WET[k] for k in keys}
    return d_WET
Match the SMEFT Warsaw basis onto the WET JMS basis.
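A usage sketch; the Warsaw-basis coefficient name and value below are arbitrary placeholders chosen only to show the shape of the input and output dictionaries:

# Input: Warsaw-basis Wilson coefficients at the matching scale,
# keyed by their WCxf names.
d_SMEFT = {'lq1_1111': 1e-8}

d_WET = match_all(d_SMEFT)

# Output: WET Wilson coefficients in the JMS basis, restricted to the
# coefficients defined by wcxf.Basis['WET', 'JMS'].
for name, value in sorted(d_WET.items())[:5]:
    print(name, value)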
def print_tensor(td_tensor, indent="| ", max_depth=-1, depth=0): offset = depth * indent line = "td tensor: %s" % td_tensor.name if td_tensor.value is not None: line += " (%s)" % (",".join(str(i) for i in td_tensor.value.shape),) print(offset + line) if td_tensor.op and (max_depth < 0 or max_depth > depth): print_op(td_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1)
print_tensor(td_tensor, indent="| ", max_depth=-1) Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each tensor and each op count as a level.