code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def create(cls, parent=None, **kwargs):
    """Create an object via the API and return it.

    POSTs *kwargs* to the resource URL derived from *parent*, honours
    rate limits, then fills in the new object's id and data from the
    JSON response.

    :param parent: parent resource; its key/route/config seed the child.
    :param kwargs: fields sent as the POST body.
    :raises Exception: if *parent* is None.
    """
    if parent is None:
        raise Exception("Parent class is required")
    route = copy(parent.route)
    if cls.ID_NAME is not None:
        route[cls.ID_NAME] = ""
    obj = cls(key=parent.key, route=route, config=parent.config)
    started_at = datetime.now()
    response = requests.post(obj._url(), auth=(obj.key, ""), data=kwargs)
    cls._delay_for_ratelimits(started_at)
    if response.status_code not in cls.TRUTHY_CODES:
        return cls._handle_request_exception(response)
    payload = response.json()
    obj.route[obj.ID_NAME] = payload.get("id", payload.get(obj.ID_NAME))
    obj.data = payload
    return obj
Create an object and return it
def from_csv(cls, path):
    """Get box vectors from comma-separated values in file *path*.

    The file must contain a single line with either three values
    (orthogonal box) or nine values (triclinic box), in nanometers.

    :param path: path to the CSV file.
    :return: simtk.unit.Quantity with shape [3, 3], unit=nanometers.
    :raises ValueError: if the line does not hold 3 or 9 values.
    """
    with open(path) as f:
        # Materialize to a list: map() has no len() in Python 3, so the
        # original `len(fields)` check always raised TypeError.
        # NOTE(review): the separator was stripped from this block; ','
        # restored per the docstring ("comma-separated") — confirm.
        fields = [float(x) for x in next(f).split(',')]
    if len(fields) == 3:
        return u.Quantity([[fields[0], 0, 0],
                           [0, fields[1], 0],
                           [0, 0, fields[2]]], unit=u.nanometers)
    elif len(fields) == 9:
        return u.Quantity([fields[0:3], fields[3:6], fields[6:9]],
                          unit=u.nanometers)
    else:
        raise ValueError(
            'Expected 3 or 9 comma-separated values, got %d' % len(fields))
Get box vectors from comma-separated values in file `path`. The csv file must contain only one line, which in turn can contain three values (orthogonal vectors) or nine values (triclinic box). The values should be in nanometers. Parameters ---------- path : str Path to CSV file Returns ------- vectors : simtk.unit.Quantity([3, 3], unit=nanometers)
def _bucket_events(self, event_iterable):
    """Group an iterable of events into lists, one list per time bucket.

    A new bucket starts whenever an event's bucket time advances past
    the current bucket's time; events are assumed to arrive in
    non-decreasing bucket order.
    """
    bucket_time = None
    bucket = None
    for event in event_iterable:
        event_time = self._bucket_time(event[TIMESTAMP_FIELD])
        if bucket_time is None or bucket_time < event_time:
            if bucket is not None:
                yield bucket
            bucket_time = event_time
            bucket = []
        bucket.append(event)
    # flush the trailing bucket, if any events were seen
    if bucket:
        yield bucket
Convert an iterable of events into an iterable of lists of events per bucket.
def deliver(self, message, to):
    """Deliver *message* to *to* via SMTP.

    :param message: a MIMEMultipart message to send.
    :param to: recipient address(es).
    :return: None
    """
    s = smtplib.SMTP(self.host, self.port)
    # NOTE(review): the envelope-sender subscript key was stripped from
    # this block; 'From' is the conventional reconstruction — confirm
    # against the original source.
    s.sendmail(message['From'], to, message.as_string())
    s.quit()
Deliver our message Arguments: - `message`: MIMEMultipart Return: None Exceptions: None
def cfloat64_array_to_numpy(cptr, length):
    """Convert a ctypes double pointer array to a numpy array.

    :param cptr: a ctypes POINTER(c_double) to read from.
    :param length: number of elements to copy.
    :return: np.ndarray of dtype float64.
    :raises RuntimeError: if *cptr* is not a double pointer.
    """
    if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
        return np.fromiter(cptr, dtype=np.float64, count=length)
    # give the caller something actionable instead of a bare RuntimeError
    raise RuntimeError('Expected double pointer, got %r' % type(cptr))
Convert a ctypes double pointer array to a numpy array.
# NOTE(review): the string literals in this block (the ORDER BY .format()
# templates and the split/rsplit separators) were stripped by the data
# pipeline, so the line below is NOT valid Python as-is.  It originally
# rewrote outer_fields/inner_select/order for SQL-Server-style slicing
# (aliasing ORDER BY columns not present in the inner select).  Recover
# the templates from the upstream django-mssql/django-pyodbc backend
# before using.
def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name): if order is None: meta = self.query.get_meta() column = meta.pk.db_column or meta.pk.get_attname() order = .format( inner_table_name, self.connection.ops.quote_name(column), ) else: alias_id = 0 new_order = [] for x in order.split(): m = _re_find_order_direction.search(x) if m: direction = m.groups()[0] else: direction = x = _re_find_order_direction.sub(, x) col = x.rsplit(, 1)[-1] if x not in inner_select: alias_id += 1 col = .format( col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote), alias_id, left_sql_quote=self.connection.ops.left_sql_quote, right_sql_quote=self.connection.ops.right_sql_quote, ) inner_select = .format(x, col, inner_select) new_order.append(.format(inner_table_name, col, direction)) order = .join(new_order) return outer_fields, inner_select, order
Apply any necessary fixes to the outer_fields, inner_select, and order strings due to slicing.
def convert_to_adjacency_matrix(matrix):
    """Convert a transition matrix into an adjacency matrix.

    Each column is scaled by the largest denominator among its entries
    (via Fraction.limit_denominator), making the entries integral.

    :param matrix: the matrix to be converted (dense np.matrix or scipy
        sparse matrix).
    :returns: the adjacency matrix (modified in place and returned).
    """
    for col_idx in range(matrix.shape[0]):
        if isspmatrix(matrix):
            column = find(matrix[:, col_idx])[2]
        else:
            column = matrix[:, col_idx].T.tolist()[0]
        scale = max(
            Fraction(value).limit_denominator().denominator
            for value in column
        )
        matrix[:, col_idx] *= scale
    return matrix
Converts transition matrix into adjacency matrix :param matrix: The matrix to be converted :returns: adjacency matrix
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
    """Parse a query string into a multidict, optionally order-preserving.

    Kind of like urlparse.parse_qs, except it returns an ordered dict
    and avoids overriding the built-in 'dict' type.  Adapted from
    <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>.
    """
    result = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
    for key, val in parse_qsl(qs, keep_blank_values, strict_parsing):
        result[key].append(val)
    return result
Kind of like urlparse.parse_qs, except returns an ordered dict. Also avoids replicating that function's bad habit of overriding the built-in 'dict' type. Taken from below with modification: <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
def get_processing_block_ids(self):
    """Get a sorted list of processing block ids from the database.

    :return: sorted list of processing block id strings.
    """
    _processing_block_ids = []
    # NOTE(review): the glob pattern and split separator were stripped
    # from this block; 'processing_block:*' / ':' are reconstructed from
    # the surrounding naming scheme — confirm against the db key layout.
    pattern = 'processing_block:*'
    block_ids = self._db.get_ids(pattern)
    for block_id in block_ids:
        # keep only the id portion after the last separator
        _processing_block_ids.append(block_id.split(':')[-1])
    return sorted(_processing_block_ids)
Get list of processing block ids using the processing block id
# NOTE(review): every string literal in this block (default share names,
# docker image, container-group name prefix, mount points, and the two
# provisioning-state values the busy-wait loops compare against) was
# stripped by the data pipeline, so the line below is NOT valid Python
# as-is.  It originally created an Azure container group mounting the
# asset, polled until provisioning succeeded and the container
# terminated, deleted the VM, and returned the result file.  Also worth
# fixing on reconstruction: the two `while` loops spin with no sleep,
# and `raise Exception` re-raises the class, not an instance.
def exec_container(self, asset_url, algorithm_url, resource_group_name, account_name, account_key, location, share_name_input=, share_name_output=, docker_image=, memory=1.5, cpu=1): try: container_group_name = + str(int(time.time())) result_file = self._create_container_group(resource_group_name=resource_group_name, name=container_group_name, image=docker_image, location=location, memory=memory, cpu=cpu, algorithm=algorithm_url, asset=asset_url, input_mount_point=, output_moint_point=, account_name=account_name, account_key=account_key, share_name_input=share_name_input, share_name_output=share_name_output ) while self.client.container_groups.get(resource_group_name, container_group_name).provisioning_state != : logging.info("Waiting to resources ") while self.client.container_groups.get(resource_group_name, container_group_name). \ containers[0].instance_view.current_state.state != : logging.info("Waiting to terminate") self.delete_vm(container_group_name, resource_group_name) return result_file except Exception: logging.error("There was a problem executing your container") raise Exception
Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm. :param asset_url :param algorithm_url :param resource_group_name: :param account_name: :param account_key: :param share_name_input: :param share_name_output: :param location:
def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE, *args, **kwargs):
    """Create variables in fp32, then cast to a low-precision dtype if needed.

    This is a tf custom getter: when a castable low-precision dtype is
    requested, the variable is created as tf.float32 (so small gradient
    updates are not lost) and then cast to the requested dtype.

    :param getter: the underlying variable getter (tf.get_variable shape).
    :param name: name of the variable to get.
    :param shape: shape of the variable to get.
    :param dtype: requested dtype; low-precision dtypes trigger the cast.
    :return: the variable, cast to *dtype* if it was castable.
    """
    if dtype in CASTABLE_TYPES:
        var = getter(name, shape, tf.float32, *args, **kwargs)
        # NOTE(review): the cast-op name suffix was stripped; '/cast' is
        # restored from the upstream TF resnet reference — confirm.
        return tf.cast(var, dtype=dtype, name=name + '/cast')
    else:
        return getter(name, shape, dtype, *args, **kwargs)
Creates variables in fp32, then casts to fp16 if necessary. This function is a custom getter. A custom getter is a function with the same signature as tf.get_variable, except it has an additional getter parameter. Custom getters can be passed as the `custom_getter` parameter of tf.variable_scope. Then, tf.get_variable will call the custom getter, instead of directly getting a variable itself. This can be used to change the types of variables that are retrieved with tf.get_variable. The `getter` parameter is the underlying variable getter, that would have been called if no custom getter was used. Custom getters typically get a variable with `getter`, then modify it in some way. This custom getter will create an fp32 variable. If a low precision (e.g. float16) variable was requested it will then cast the variable to the requested dtype. The reason we do not directly create variables in low precision dtypes is that applying small gradients to such variables may cause the variable not to change. Args: getter: The underlying variable getter, that has the same signature as tf.get_variable and returns a variable. name: The name of the variable to get. shape: The shape of the variable to get. dtype: The dtype of the variable to get. Note that if this is a low precision dtype, the variable will be created as a tf.float32 variable, then cast to the appropriate dtype *args: Additional arguments to pass unmodified to getter. **kwargs: Additional keyword arguments to pass unmodified to getter. Returns: A variable which is cast to fp16 if necessary.
def do_NOTIFY(self):
    """Serve a UPnP ``NOTIFY`` request.

    Sent by a Sonos device when a state variable changes; the parsed
    event is pushed onto the matching subscription's queue.
    """
    timestamp = time.time()
    headers = requests.structures.CaseInsensitiveDict(self.headers)
    # NOTE(review): the header keys were stripped from this block;
    # 'seq'/'sid'/'content-length' restored per the UPnP eventing spec —
    # confirm against the upstream source.
    seq = headers['seq']
    sid = headers['sid']
    content_length = int(headers['content-length'])
    content = self.rfile.read(content_length)
    with _subscriptions_lock:
        subscription = _subscriptions.get(sid)
    if subscription:
        service = subscription.service
        log.info(
            "Event %s received for %s service on thread %s at %s",
            seq, service.service_id, threading.current_thread(), timestamp)
        log.debug("Event content: %s", content)
        variables = parse_event_xml(content)
        event = Event(sid, seq, service, timestamp, variables)
        service._update_cache_on_event(event)
        subscription.events.put(event)
    else:
        log.info("No service registered for %s", sid)
    self.send_response(200)
    self.end_headers()
Serve a ``NOTIFY`` request. A ``NOTIFY`` request will be sent by a Sonos device when a state variable changes. See the `UPnP Spec §4.3 [pdf] <http://upnp.org/specs/arch/UPnP-arch -DeviceArchitecture-v1.1.pdf>`_ for details.
def _check_pillar(kwargs, pillar=None): if kwargs.get(): return True pillar_dict = pillar if pillar is not None else __pillar__ if in pillar_dict: return False return True
Check the pillar for errors, refuse to run the state if there are errors in the pillar and return the pillar errors
# NOTE(review): several string literals in this generator were stripped
# by the data pipeline (the whitespace-collapse replacement in
# _trim_spaces, the 'br' tag comparison, the attrib key looked up
# against FORWARD_STYLES, the empty-string line resets, and the
# RuntimeError message), so the line below is NOT valid Python as-is.
# It originally walked an LXML tree via tree_token_generator and yielded
# (start_ref, end_ref, indentation_level, text) per visual line, with
# special handling for forwarded-message markers.  Also note the dead
# `counter > max_lines` check before the loop (counter is always 1) and
# the final bare `yield line` which does not match the 4-tuple contract.
def tree_line_generator(el, max_lines=None): def _trim_spaces(text): return MULTIPLE_WHITESPACE_RE.sub(, text).strip() counter = 1 if max_lines != None and counter > max_lines: return line = start_ref = None start_indentation_level = None for token in tree_token_generator(el): if token is None: continue elif isinstance(token, tuple): el, state, indentation_level = token tag_name = el.tag.lower() line_break = (tag_name == and state == BEGIN) is_block = (tag_name not in INLINE_TAGS) is_forward = (is_block and state == BEGIN and el.attrib.get() in FORWARD_STYLES) if is_block or line_break: line = _trim_spaces(line) if line or line_break or is_forward: end_ref = (el, state) yield start_ref, end_ref, start_indentation_level, line counter += 1 if max_lines != None and counter > max_lines: return line = if is_forward: yield (end_ref, end_ref, start_indentation_level, FORWARD_LINE) counter += 1 if max_lines != None and counter > max_lines: return if not line: start_ref = (el, state) start_indentation_level = indentation_level elif isinstance(token, string_class): line += token else: raise RuntimeError(.format(token)) line = _trim_spaces(line) if line: yield line
Internal generator that iterates through an LXML tree and yields a tuple per line. In this context, lines are blocks of text separated by <br> tags or by block elements. The tuples contain the following elements: - A tuple with the element reference (element, position) for the start of the line. The tuple consists of: - The LXML HTML element which references the line - Whether the text starts at the beginning of the referenced element, or after the closing tag - A similar tuple indicating the ending of the line. - The email indentation level, if detected. - The plain (non-HTML) text of the line If max_lines is specified, the generator stops after yielding the given amount of lines. For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields: - ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar') - ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz'). To illustrate the indentation level, the HTML tree '<div><blockquote>hi</blockquote>world</div>' yields: - ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi') - ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
# NOTE(review): all string literals were stripped from this block (the
# default parameter values, the response-dict keys such as
# success/login_complete/captcha_needed/emailauth_needed/
# requires_twofactor/message, the Steam cookie domains, and the cookie
# names), so the line below is NOT valid Python as-is.  It originally
# performed the Steam web login flow: RSA key fetch, credential POST,
# then either cookie/session setup on success or raising the matching
# CaptchaRequired / EmailCodeRequired / TwoFactorCodeRequired /
# LoginIncorrect exception.  Recover the literals from the upstream
# steam.webauth module before using; note the trailing `return None`
# after the if/else is unreachable.
def login(self, password=, captcha=, email_code=, twofactor_code=, language=): if self.logged_on: return self.session if password: self.password = password else: if self.password: password = self.password else: raise LoginIncorrect("password is not specified") if not captcha and self.captcha_code: captcha = self.captcha_code self._load_key() resp = self._send_login(password=password, captcha=captcha, email_code=email_code, twofactor_code=twofactor_code) if resp[] and resp[]: self.logged_on = True self.password = self.captcha_code = self.captcha_gid = -1 for cookie in list(self.session.cookies): for domain in [, , ]: self.session.cookies.set(cookie.name, cookie.value, domain=domain, secure=cookie.secure) self.session_id = generate_session_id() for domain in [, , ]: self.session.cookies.set(, language, domain=domain) self.session.cookies.set(, , domain=domain) self.session.cookies.set(, self.session_id, domain=domain) self._finalize_login(resp) return self.session else: if resp.get(, False): self.captcha_gid = resp[] self.captcha_code = if resp.get(, False): self.password = raise CaptchaRequiredLoginIncorrect(resp[]) else: raise CaptchaRequired(resp[]) elif resp.get(, False): self.steam_id = SteamID(resp[]) raise EmailCodeRequired(resp[]) elif resp.get(, False): raise TwoFactorCodeRequired(resp[]) else: self.password = raise LoginIncorrect(resp[]) return None
Attempts web login and returns a session with cookies set :param password: password, if it wasn't provided on instance init :type password: :class:`str` :param captcha: text response for captcha challenge :type captcha: :class:`str` :param email_code: email code for steam guard :type email_code: :class:`str` :param twofactor_code: 2FA code for steam guard :type twofactor_code: :class:`str` :param language: select language for steam web pages (sets language cookie) :type language: :class:`str` :return: a session on success and :class:`None` otherwise :rtype: :class:`requests.Session`, :class:`None` :raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc :raises LoginIncorrect: wrong username or password :raises CaptchaRequired: when captcha is needed :raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect :raises EmailCodeRequired: when email is needed :raises TwoFactorCodeRequired: when 2FA is needed
def pprint2columns(llist, max_length=60): if len(llist) == 0: return None col_width = max(len(word) for word in llist) + 2 if not len(llist) % 2 == 0: llist += [] if col_width > max_length: for el in llist: print(el) else: column1 = llist[:int(len(llist) / 2)] column2 = llist[int(len(llist) / 2):] for c1, c2 in zip(column1, column2): space = " " * (col_width - len(c1)) print("%s%s%s" % (c1, space, c2))
llist = a list of strings max_length = if a word is longer than that, for single col display > prints a list in two columns, taking care of alignment too
def _get_field(self, extras, field, default=None): long_f = .format(field) if long_f in extras: return extras[long_f] else: self.log.info(, field) return default
Fetches a field from extras, and returns it. This is some Airflow magic. The google_cloud_platform hook type adds custom UI elements to the hook page, which allow admins to specify service_account, key_path, etc. They get formatted as shown below.
# NOTE(review): the dict keys and string literals were stripped from
# this block (the case '_id' key, the vcf-files lookup keys, the
# FILE_TYPE_MAP category/variant-type keys, the query keys, the sort
# key, and the default build '37'), so the line below is NOT valid
# Python as-is.  It originally updated compound variants per case:
# for each chromosome/variant-type/category it streamed sorted variants,
# batched them by coding region, and ran update_compounds /
# update_mongo_compound_variants per region bulk.  Recover the literals
# from the upstream scout adapter before using.
def update_case_compounds(self, case_obj, build=): case_id = case_obj[] categories = set() variant_types = set() for file_type in FILE_TYPE_MAP: if case_obj.get(,{}).get(file_type): categories.add(FILE_TYPE_MAP[file_type][]) variant_types.add(FILE_TYPE_MAP[file_type][]) coding_intervals = self.get_coding_intervals(build=build) for chrom in CHROMOSOMES: intervals = coding_intervals.get(chrom, IntervalTree()) for var_type in variant_types: for category in categories: LOG.info("Updating compounds on chromosome:{0}, type:{1}, category:{2} for case:{3}".format( chrom, var_type, category, case_id)) query = { : var_type, : chrom, } variant_objs = self.variants( case_id=case_id, query=query, category=category, nr_of_variants=-1, sort_key= ) bulk = {} current_region = None special = False for var_obj in variant_objs: var_id = var_obj[] var_chrom = var_obj[] var_start = var_obj[] var_end = var_obj[] + 1 update_bulk = True new_region = None genomic_regions = coding_intervals.get(var_chrom, IntervalTree()).search(var_start, var_end) if genomic_regions: new_region = genomic_regions.pop().data if new_region and (new_region == current_region): update_bulk = False current_region = new_region if update_bulk and bulk: self.update_compounds(bulk) self.update_mongo_compound_variants(bulk) bulk = {} if new_region: bulk[var_id] = var_obj if not bulk: continue self.update_compounds(bulk) self.update_mongo_compound_variants(bulk) LOG.info("All compounds updated") return
Update the compounds for a case Loop over all coding intervals to get coordinates for all potential compound positions. Update all variants within a gene with a bulk operation.
def get_comment_group_for_path(self, pathname, default_content_type=None):
    """Obtain the comment group for the given *pathname*.

    :param pathname: the path for which the comment group is obtained.
    :param default_content_type: fallback content type used when the
        type cannot be guessed from the path.
    :return: the comment group for the pathname.
    :raises ValueError: if no content type can be determined.
    :raises KeyError: if no comment group exists for the content type.
    """
    content_type = self.guess_content_type(pathname)
    if content_type:
        try:
            return self.get_comment_group(content_type)
        except KeyError:
            raise KeyError(
                "No comment groups for content type `%s` for file `%s` found" % (
                    content_type, pathname))
    # guess failed: fall back to the caller-supplied default, if any
    if default_content_type:
        return self.get_comment_group(default_content_type)
    raise ValueError(
        "No content type defined for file path: %s" % pathname)
Obtains the comment group for a specified pathname. :param pathname: The path for which the comment group will be obtained. :return: Returns the comment group for the specified pathname or raises a ``ValueError`` if a content type is not found or raises a ``KeyError`` if a comment group is not found. Usage: >>> db = ContentTypesDatabase() >>> db.add_config(db._test_config, 'test_config.yaml') >>> g = db.get_comment_group_for_path >>> g("foobar.py") [['#', '']] >>> g("foobar.js") [['/*', '*/'], ['//', '']] >>> g('foobar.rst') Traceback (most recent call last): ... KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found' # If the content type cannot be determined, we assume the content # type to be ``python`` in this case. >>> g('foobar.f37993ajdha73', default_content_type='python') [['#', '']] >>> g("foobar.f37993ajdha73") Traceback (most recent call last): ... ValueError: No content type defined for file path: foobar.f37993ajdha73 >>> g("foobar.f37993ajdha73", default_content_type=None) Traceback (most recent call last): ... ValueError: No content type defined for file path: foobar.f37993ajdha73
def as_single_element(self):
    """Process the response as a single-element response.

    Used for calls like config_get or system_counters_get.

    :return: a bunch populated from the sole child element, from the
        root element when there are multiple children, or None when
        there is no response tree.
    """
    if self.as_return_etree is None:
        return None
    # list()/indexing replaces getchildren(), which is deprecated and
    # removed from ElementTree in Python 3.9.
    children = list(self.as_return_etree)
    if len(children) == 1:
        return _populate_bunch_with_element(children[0])
    return _populate_bunch_with_element(self.as_return_etree)
Processes the response as a single-element response, like config_get or system_counters_get. If there is more than one element in the response or no elements this raises a ResponseError
# Compiles a dict of symbols that can hold a value in place: local vars,
# module members/interfaces, members and interfaces of dependency
# modules, executable members when inside a Function/Subroutine, and the
# cached builtins.  Left byte-identical: the lookup/merge order is
# load-bearing for shadowing.
# NOTE(review): `self.element.members` (inside the Function/Subroutine
# branch) looks inconsistent with the `self.context.element` checks just
# above it — possibly should be `self.context.element.members`; confirm
# against the upstream source before changing.
def _complete_values(self, symbol = ""): result = {} moddict = self._generic_filter_execs(self.context.module) self._cond_update(result, moddict, symbol) self._cond_update(result, self.context.module.interfaces, symbol) for depend in self.context.module.dependencies: if depend in self.context.module.parent.modules: filtdict = self._generic_filter_execs(self.context.module.parent.modules[depend]) self._cond_update(result, filtdict, symbol) self._cond_update(result, self.context.module.parent.modules[depend].interfaces, symbol) if (isinstance(self.context.element, Function) or isinstance(self.context.element, Subroutine)): self._cond_update(result, self.element.members, symbol) if self.context.module is not None: self._cond_update(result, self.context.module.members, symbol) for execkey in self.context.module.executables: iexec = self.context.module.executables[execkey] if isinstance(iexec, Function) and self._symbol_in(symbol, iexec.name): result[iexec.name] = iexec if symbol == "": self._cond_update(result, cache.common_builtin, symbol) else: self._cond_update(result, cache.builtin, symbol) return result
Compiles a list of possible symbols that can hold a value in place. These consist of local vars, global vars, and functions.
def _construct_axes_dict(self, axes=None, **kwargs): d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d
Return an axes dictionary for myself.
def shapes(self, simplify=None, predicate=None):
    """Return geodata as a list of Shapely shapes.

    :param simplify: Integer or None. Simplify the geometry to a
        tolerance, in the units of the geometry.
    :param predicate: A single-argument function selecting which records
        to include in the output.
    :return: A list of Shapely objects.
    """
    from shapely.wkt import loads
    keep = predicate or (lambda row: True)
    geometries = (loads(row.geometry) for row in self if keep(row))
    if simplify:
        return [geom.simplify(simplify) for geom in geometries]
    return list(geometries)
Return geodata as a list of Shapely shapes :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param predicate: A single-argument function to select which records to include in the output. :return: A list of Shapely objects
def CSWAP(control, target_1, target_2):
    """Produce a controlled-SWAP (Fredkin) gate.

    The two target qubits' states are swapped when the control qubit is
    in the ``|1>`` state.

    :param control: The control qubit.
    :param target_1: The first target qubit.
    :param target_2: The second target qubit.
    :return: a CSWAP Gate acting on the three qubits.
    """
    packed = [unpack_qubit(q) for q in (control, target_1, target_2)]
    return Gate(name="CSWAP", params=[], qubits=packed)
Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits:: CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]] :param control: The control qubit. :param target-1: The first target qubit. :param target-2: The second target qubit. The two target states are swapped if the control is in the ``|1>`` state.
# Computes per-channel complexity indices (shannon, sampen, multiscale,
# spectral, svd, correlation, higushi, petrosian, fisher, hurst, dfa,
# optional lyapunov) over one or more time windows of an eeg/epochs
# object, averages them per epoch, and concatenates one prefixed column
# set per window into a single DataFrame.  Left byte-identical: the
# window/prefix bookkeeping is intricate, and the progress print below
# contains a garbled control character from the data pipeline —
# NOTE(review): restore the original progress-print literal from the
# upstream neurokit source before using.
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="Complexity"): data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central) if isinstance(data, dict) is False: data = {0: data} if isinstance(times, tuple): times = list(times) if isinstance(times, list): if isinstance(times[0], list) is False: times = [times] else: times = [[0, None]] if isinstance(names, str): prefix = [names] * len(times) if len(times) > 1: for time_index, time_window in enumerate(times): prefix[time_index] = prefix[time_index] + "_%.2f_%.2f" %(time_window[0], time_window[1]) else: prefix = names complexity_all = pd.DataFrame() for time_index, time_window in enumerate(times): if len(times) > 1 and verbose is True: print("Computing complexity features... window " + str(time_window) + "/" + str(len(times))) complexity_features = {} index = 0 for epoch_index, epoch in data.items(): if len(times) == 1 and verbose is True: print("Computing complexity features... 
" + str(round(index/len(data.items())*100, 2)) + "%") index +=1 df = epoch[time_window[0]:time_window[1]].copy() complexity_features[epoch_index] = {} for channel in df: signal = df[channel].values features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e) for key, feature in features.items(): if key in complexity_features[epoch_index].keys(): complexity_features[epoch_index][key].append(feature) else: complexity_features[epoch_index][key] = [feature] for epoch_index, epoch in complexity_features.items(): for feature in epoch: complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean() complexity_features = pd.DataFrame.from_dict(complexity_features, orient="index") complexity_features.columns = [prefix[time_index] + "_" + s for s in complexity_features.columns] complexity_all = pd.concat([complexity_all, complexity_features], axis=1) return(complexity_all)
Compute complexity indices of epochs or raw object. DOCS INCOMPLETE :(
def respond_redirect(self, location='/'):
    """Respond to the client with a 301 and a Location header redirect.

    :param str location: the new location to redirect the client to.
    """
    # NOTE(review): the header names and the default location were
    # stripped from this block; 'Content-Length'/'Location' and '/' are
    # the conventional reconstruction — confirm against the original.
    self.send_response(301)
    self.send_header('Content-Length', 0)
    self.send_header('Location', location)
    self.end_headers()
Respond to the client with a 301 message and redirect them with a Location header. :param str location: The new location to redirect the client to.
def create_project(self, name, description):
    """Create a new project with the specified name and description.

    :param name: str: name of the project to create.
    :param description: str: description of the project to create.
    :return: Project
    """
    response = self.data_service.create_project(name, description)
    return self._create_item_response(response, Project)
Create a new project with the specified name and description :param name: str: name of the project to create :param description: str: description of the project to create :return: Project
def get_random_string():
    """Make a random string usable as a bsub job ID.

    Re-rolls until the hex string is not purely numeric, so distinct
    jobs do not end up with identical-looking numeric IDs.
    """
    candidate = ("%8x" % random.getrandbits(32)).strip()
    while is_number(candidate):
        candidate = ("%8x" % random.getrandbits(32)).strip()
    return candidate
make a random string, which we can use for bsub job IDs, so that different jobs do not have the same job IDs.
def extract_upgrade_scripts(self):
    """Extract the OpenQuake upgrade scripts from links in the GitHub page.

    Yields the parsed script name for each link matching the pattern.
    """
    # NOTE(review): the link template was stripped from this block;
    # '>\\s*{0}\\s*<' (link text between tags) is reconstructed from the
    # upstream upgrader — confirm against the original source.
    link_pattern = '>\\s*{0}\\s*<'.format(self.pattern[1:-1])
    # decode: urlopen().read() returns bytes, but the pattern is str and
    # mixing the two raises TypeError under Python 3
    page = urllib.request.urlopen(self.upgrades_url).read().decode('utf-8')
    for mo in re.finditer(link_pattern, page):
        scriptname = mo.group(0)[1:-1].strip()
        yield self.parse_script_name(scriptname)
Extract the OpenQuake upgrade scripts from the links in the GitHub page
def timedelta_seconds(value: datetime.timedelta) -> int:
    """Return the full number of seconds from a timedelta.

    By default, ``timedelta.seconds`` covers only the sub-day part; this
    includes the day component as well.

    :param value: Timedelta instance.
    """
    day_seconds = value.days * SECONDS_PER_DAY
    return day_seconds + value.seconds
Return full number of seconds from timedelta. By default, Python returns only one day seconds, not all timedelta seconds. :param value: Timedelta instance.
# NOTE(review): all seven dict keys, the constant string values, the
# suffix/prefix fragments around HEADING_TYPES[name], and the NotFound
# message prefix were stripped from this block, so the line below is NOT
# valid Python as-is.  It originally returned a dictionary
# representation of a heading Type (authority / namespace / identifier /
# domain / display-name / display-label / description per
# primordium.type.primitives.Type) keyed off HEADING_TYPES.  Recover the
# literals from the upstream dlkit/abstract-osid type source before use.
def get_type_data(name): name = name.upper() try: return { : , : , : name, : , : HEADING_TYPES[name] + , : HEADING_TYPES[name], : ( + HEADING_TYPES[name] + ) } except KeyError: raise NotFound( + name)
Return dictionary representation of type. Can be used to initialize primordium.type.primitives.Type
def anno_parser(func):
    "Look at params (annotated with `Param`) in func and return an `ArgumentParser`"
    p = ArgumentParser(description=func.__doc__)
    for name, v in inspect.signature(func).parameters.items():
        param = func.__annotations__.get(name, Param())
        kwargs = param.kwargs
        if v.default != inspect.Parameter.empty:
            # restored key (stripped by the data pipeline): carry the
            # signature default into argparse
            kwargs['default'] = v.default
        p.add_argument(f"{param.pre}{name}", **kwargs)
    return p
Look at params (annotated with `Param`) in func and return an `ArgumentParser`
# Sets an environment variable only if it is unset or already equal to
# the requested value; otherwise raises Contradiction.
# NOTE(review): the `self.__dict__[]` subscript key was stripped by the
# data pipeline, so the line below is NOT valid Python as-is — recover
# the attribute name (the dict the getter reads from) from the upstream
# source before using.
def set_environment_variable(self, key, val): if self.get_environment_variable(key) in [None, val]: self.__dict__[][key] = val else: raise Contradiction("Could not set environment variable %s" % (key))
Sets a variable if that variable is not already set
def is50or60(msg, spd_ref, trk_ref, alt_ref):
    """Use reference ground speed and track to distinguish BDS50 from BDS60.

    Args:
        msg (String): 28 bytes hexadecimal message string
        spd_ref (float): reference speed (ADS-B ground speed), kts
        trk_ref (float): reference track (ADS-B track angle), deg
        alt_ref (float): reference altitude (ADS-B altitude), ft

    Returns:
        String or None: BDS version whose decoded velocity vector is
        closest to the reference, or None if nothing matches.
    """
    def vxy(v, angle):
        # decompose speed along heading into (east, north) components
        vx = v * np.sin(np.radians(angle))
        vy = v * np.cos(np.radians(angle))
        return vx, vy

    if not (bds50.is50(msg) and bds60.is60(msg)):
        return None

    h50 = bds50.trk50(msg)
    v50 = bds50.gs50(msg)
    if h50 is None or v50 is None:
        return None

    h60 = bds60.hdg60(msg)
    m60 = bds60.mach60(msg)
    i60 = bds60.ias60(msg)
    if h60 is None or (m60 is None and i60 is None):
        return None

    m60 = np.nan if m60 is None else m60
    i60 = np.nan if i60 is None else i60

    XY5 = vxy(v50 * aero.kts, h50)
    XY6m = vxy(aero.mach2tas(m60, alt_ref * aero.ft), h60)
    XY6i = vxy(aero.cas2tas(i60 * aero.kts, alt_ref * aero.ft), h60)

    # NOTE(review): the candidate labels were stripped from this block;
    # restored from the upstream pyModeS implementation (one entry per
    # candidate vector: BDS50, BDS60-from-mach, BDS60-from-ias).
    allbds = ["BDS50", "BDS60", "BDS60"]

    X = np.array([XY5, XY6m, XY6i])
    Mu = np.array(vxy(spd_ref * aero.kts, trk_ref))

    try:
        dist = np.linalg.norm(X - Mu, axis=1)
        BDS = allbds[np.nanargmin(dist)]
    except ValueError:
        return None

    return BDS
Use reference ground speed and trk to determine BDS50 and DBS60. Args: msg (String): 28 bytes hexadecimal message string spd_ref (float): reference speed (ADS-B ground speed), kts trk_ref (float): reference track (ADS-B track angle), deg alt_ref (float): reference altitude (ADS-B altitude), ft Returns: String or None: BDS version, or possible versions, or None if nothing matches.
# Tornado-style coroutine handler (note the `yield`s): looks an object
# up by a header-named field or by bson id, writes it as JSON, or raises
# a 404/400/500 error through self.raise_error.
# NOTE(review): the final `self.raise_error()` call lost its status/
# message arguments to the data pipeline, and the handler is shadowing
# the builtin `id`; recover the original arguments from the upstream
# source before using.
def get(self, id): try: if self.request.headers.get("Id"): object_ = yield self.client.find_one({self.request.headers.get("Id"): id}) else: object_ = yield self.client.find_one_by_id(id) if object_: self.write(object_) return self.raise_error(404, "%s/%s not found" % (self.object_name, id)) except InvalidId as ex: self.raise_error(400, message="Your ID is malformed: %s" % id) except Exception as ex: self.logger.error(ex) self.raise_error()
Get an by object by unique identifier :id string id: the bson id of an object :rtype: JSON
def organization_field_show(self, id, **kwargs):
    """Show a single organization field.

    https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field

    :param id: the organization field id.
    :param kwargs: extra options forwarded to the API call.
    """
    # (the docstring above was fused into the code by the data pipeline;
    # restored as a proper docstring)
    api_path = "/api/v2/organization_fields/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field
def shift_right(self, times=1):
    """Find the Location shifted right by *times* files.

    :param times: number of files to shift right (default 1).
    :rtype: Location
    :raises IndexError: if the shift moves off the board.
    """
    # The original caught IndexError only to re-raise IndexError(e),
    # which loses the traceback for no benefit; let it propagate.
    return Location(self._rank, self._file + times)
Finds Location shifted right by 1 :rtype: Location
def find_le_index(self, k):
    """Return the index of the last item with a key <= k.

    :param k: key to search at or below.
    :raises ValueError: if no key is at or below *k*.
    """
    i = bisect_right(self._keys, k)
    if i:
        return i - 1
    # NOTE(review): the message template was stripped; reconstructed.
    raise ValueError('No item found with key at or below: %r' % (k,))
Return last item with a key <= k. Raise ValueError if not found.
def get_metadata():
    """Get a pandas.DataFrame with metadata about the PWMs.

    Columns:
    - PWM_id: id of the PWM (pass to get_pwm_list() to fetch the pwm)
    - info1, info2: additional information about the motifs
    - consensus: PWM consensus sequence
    """
    motifs = _load_motifs()
    names = sorted(motifs.keys())
    df = pd.Series(names).str.split(expand=True)
    df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True)
    df["consensus"] = pd.Series(
        [PWM(motifs[name]).get_consensus() for name in names])
    return df
Get pandas.DataFrame with metadata about the PWM's. Columns: - PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm - info1 - additional information about the motifs - info2 - consensus: PWM consensus sequence
def clearContents(cls):
    """Clear contents of the general pasteboard.

    Future enhancement can include specifying which clipboard to clear.

    Returns: True on success; callers should expect to catch exceptions,
    probably from AppKit (ValueError).
    """
    # NOTE(review): the debug message literal was stripped from this
    # block; reconstructed — confirm against the original source.
    logging.debug('Clearing contents of general pasteboard.')
    pb = AppKit.NSPasteboard.generalPasteboard()
    pb.clearContents()
    return True
Clear contents of general pasteboard. Future enhancement can include specifying which clipboard to clear Returns: True on success; caller should expect to catch exceptions, probably from AppKit (ValueError)
def unpack(self, source):
    """Read the table entries from the file-like object *source*.

    .. note:: Advanced usage only; this is normally called for you when
       loading a ClassFile.

    :param source: any file-like object providing ``read()``.
    """
    import struct
    # NOTE(review): the struct format strings were stripped from this
    # block; '>H' (u2 count) and '>HI' (u2 name_index, u4 length) are
    # reconstructed from the JVM class-file attribute layout — confirm
    # against the original source.
    count = struct.unpack('>H', source.read(2))[0]
    for _ in range(count):
        name_index, length = struct.unpack('>HI', source.read(6))
        info_blob = source.read(length)
        self._table.append((name_index, info_blob))
Read the ConstantPool from the file-like object `source`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param source: Any file-like object providing `read()`
# Compiles one theme (or all themes when theme_id is None): runs
# convert_less from each theme's static root to a per-theme output path,
# logging progress via clint's puts/colored/indent.
# NOTE(review): the two path templates built with `% theme.id` were
# stripped by the data pipeline, so the line below is NOT valid Python
# as-is — recover the LESS input and precompiled-CSS output filename
# templates from the upstream engineer project before using.
def compile_theme(theme_id=None): from engineer.processors import convert_less from engineer.themes import ThemeManager if theme_id is None: themes = ThemeManager.themes().values() else: themes = [ThemeManager.theme(theme_id)] with(indent(2)): puts(colored.yellow("Compiling %s themes." % len(themes))) for theme in themes: theme_output_path = (theme.static_root / ( % theme.id)).normpath() puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path))) with indent(4): puts("Compiling...") convert_less(theme.static_root / ( % theme.id), theme_output_path, minify=True) puts(colored.green("Done.", bold=True))
Compiles a theme.
# Visits a CallFunc AST node and returns a fresh astroid Call node:
# converts the legacy starargs/kwargs attributes (Python < 3.5 ASTs)
# into Starred/Keyword children appended to args/keywords, then wires
# everything through postinit.  Left byte-identical: the parent/lineno
# wiring and the append order of the converted star/keyword arguments
# are load-bearing for downstream inference.
def visit_call(self, node, parent): newnode = nodes.Call(node.lineno, node.col_offset, parent) starargs = _visit_or_none(node, "starargs", self, newnode) kwargs = _visit_or_none(node, "kwargs", self, newnode) args = [self.visit(child, newnode) for child in node.args] if node.keywords: keywords = [self.visit(child, newnode) for child in node.keywords] else: keywords = None if starargs: new_starargs = nodes.Starred( col_offset=starargs.col_offset, lineno=starargs.lineno, parent=starargs.parent, ) new_starargs.postinit(value=starargs) args.append(new_starargs) if kwargs: new_kwargs = nodes.Keyword( arg=None, col_offset=kwargs.col_offset, lineno=kwargs.lineno, parent=kwargs.parent, ) new_kwargs.postinit(value=kwargs) if keywords: keywords.append(new_kwargs) else: keywords = [new_kwargs] newnode.postinit(self.visit(node.func, newnode), args, keywords) return newnode
visit a CallFunc node by returning a fresh instance of it
def get_beam(header):
    """Create a Beam object from a fits header.

    BPA may be missing and is then assumed to be zero.  If BMAJ or BMIN
    is missing, None is returned instead of a beam object.

    Parameters
    ----------
    header : HDUHeader
        The fits header.

    Returns
    -------
    beam : Beam or None
        Beam object, with a, b, and pa in degrees.
    """
    if "BPA" in header:
        bpa = header["BPA"]
    else:
        log.warning("BPA not present in fits header, using 0")
        bpa = 0

    if "BMAJ" in header:
        bmaj = header["BMAJ"]
    else:
        log.warning("BMAJ not present in fits header.")
        bmaj = None

    if "BMIN" in header:
        bmin = header["BMIN"]
    else:
        log.warning("BMIN not present in fits header.")
        bmin = None

    if None in (bmaj, bmin, bpa):
        return None
    return Beam(bmaj, bmin, bpa)
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. if BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees.
def safe_join(base, *paths):
    """Join one or more path components to the base path component intelligently.

    Returns a normalized, absolute version of the final path.  The final
    path must be located inside of the base path component, otherwise a
    ValueError is raised.
    """
    joined = abspath(os.path.join(base, *[p for p in paths]))
    root = abspath(base)
    # The character right after the base prefix must be a separator (or
    # nothing), otherwise "/base-evil" would pass a plain prefix test.
    boundary = joined[len(root):len(root) + 1]
    inside = (os.path.normcase(joined).startswith(os.path.normcase(root))
              and boundary in ("", os.path.sep))
    if not inside:
        raise ValueError("The joined path (%s) is located outside of the base "
                         "path component (%s)" % (joined, root))
    return joined
Joins one or more path components to the base path component intelligently. Returns a normalized, absolute version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised).
def device_characteristics_str(self, indent):
    """Convenience to-string method describing this device."""
    lines = [
        "{}\n".format(self.label),
        indent + "MAC Address: {}\n".format(self.mac_addr),
        indent + "IP Address: {}\n".format(self.ip_addr),
        indent + "Port: {}\n".format(self.port),
        indent + "Power: {}\n".format(str_map(self.power_level)),
        indent + "Location: {}\n".format(self.location),
        indent + "Group: {}\n".format(self.group),
    ]
    return "".join(lines)
Convenience to string method.
def set(self, name, value, index=-1):
    """Assign ``value`` to the child ``name`` at position ``index``.

    NOTE(review): the two ``reference[]`` subscripts below lost their key
    literals in extraction (likely 'ref' and 'name') -- restore from the
    original source.

    :param name: the child name (e.g. PID)
    :param value: an Element, ElementProxy, string or BaseDataType child value
    :param index: the child position
    :raises ChildNotValid: when the value type or name does not match
    """
    if isinstance(value, ElementProxy):
        # Unwrap a proxy into its ER7 string representation.
        value = value[0].to_er7()
    name = name.upper()
    reference = None if name is None else self.element.find_child_reference(name)
    child_ref, child_name = (None, None) if reference is None else (reference[], reference[])
    if isinstance(value, basestring):
        child = self.element.parse_child(value, child_name=child_name, reference=child_ref)
    elif isinstance(value, Element):
        child = value
    elif isinstance(value, BaseDataType):
        child = self.create_element(name, False, reference)
        child.value = value
    else:
        raise ChildNotValid(value, child_name)
    if child.name != child_name:
        raise ChildNotValid(value, child_name)
    # Replace in place when a child already exists at `index`.
    child_to_remove = self.child_at_index(child_name, index)
    if child_to_remove is None:
        self.append(child)
    else:
        self.replace_child(child_to_remove, child)
    self.element.set_parent_to_traversal()
Assign the ``value`` to the child having the given ``name`` at the ``index`` position :type name: ``str`` :param name: the child name (e.g. PID) :type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` :param value: the child value :type index: ``int`` :param index: the child position (e.g. 1)
def create_perturb_params(countsmat, transmat=None):
    """Compute transition probabilities and their standard errors.

    Transition probabilities come from row-normalizing the counts matrix.
    Each count is treated as a Bernoulli process with p = t_ij, giving a
    standard deviation (t_ij - t_ij**2)**0.5, which is divided by the
    square root of the row-summed counts to obtain the standard error.

    Parameters
    ----------
    countsmat : np.ndarray
        The MSM counts matrix.
    transmat : np.ndarray, optional
        A transition matrix to use instead of the row-normalized counts
        (e.g. MLE symmetrized).

    Returns
    -------
    transmat : np.ndarray
        The MSM transition matrix.
    scale : np.ndarray
        Standard error for each transition probability.
    """
    norm = np.sum(countsmat, axis=1)
    # `if not transmat:` raises "truth value of an array is ambiguous" for
    # any ndarray argument; an explicit identity test is required here.
    if transmat is None:
        transmat = (countsmat.transpose() / norm).transpose()
    counts = (np.ones((len(transmat), len(transmat))) * norm).transpose()
    # Small epsilon keeps the scale strictly positive for p in {0, 1}.
    scale = ((transmat - transmat ** 2) ** 0.5 / counts ** 0.5) + 10 ** -15
    return transmat, scale
Computes transition probabilities and standard errors of the transition probabilities due to finite sampling using the MSM counts matrix. First, the transition probabilities are computed by dividing the each element c_ij by the row-sumemd counts of row i. THe standard errors are then computed by first computing the standard deviation of the transition probability, treating each count as a Bernoulli process with p = t_ij (std = (t_ij - t_ij ^2)^0.5). This is then divided by the square root of the row-summed counts of row i to obtain the standard error. Parameters: ---------- countsmat: np.ndarray The msm counts matrix transmat: np.ndarray If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations, which will be divided by the row-summed counts in the original supplied counts matrix. Returns: ----------- transmat, np.ndarray: The MSM transition matrix scale, np.ndarray: The matrix of standard errors for each transition probability
def _resolve_graph(self, distribution_names=None, leaf_name=):
    """Create a tuple of (name, dependency-names) pairs for the model graph.

    NOTE(review): the default string literal for ``leaf_name`` and both
    ValueError messages were lost in extraction -- restore from the
    original source.

    Args:
        distribution_names: list of str or None names for each model
            element (None entries are expanded into appropriate strings).
        leaf_name: str used when no maker depends on a model element.

    Returns:
        graph: tuple of (str, tuple) pairs -- each distribution name and
        the names of its dependencies.
    """
    if distribution_names is None or any(self._dist_fn_args):
        distribution_names = _resolve_distribution_names(
            self._dist_fn_args, distribution_names, leaf_name)
    # Names must be unique and cover every wrapped distribution factory.
    if len(set(distribution_names)) != len(distribution_names):
        raise ValueError(.format(distribution_names))
    if len(distribution_names) != len(self._dist_fn_wrapped):
        raise ValueError()
    return tuple(zip(distribution_names,
                     tuple(() if a is None else a for a in self._dist_fn_args)))
Creates a `tuple` of `tuple`s of dependencies. This function is **experimental**. That said, we encourage its use and ask that you report problems to `tfprobability@tensorflow.org`. Args: distribution_names: `list` of `str` or `None` names corresponding to each of `model` elements. (`None`s are expanding into the appropriate `str`.) leaf_name: `str` used when no maker depends on a particular `model` element. Returns: graph: `tuple` of `(str tuple)` pairs representing the name of each distribution (maker) and the names of its dependencies. #### Example ```python d = tfd.JointDistributionSequential([ tfd.Independent(tfd.Exponential(rate=[100, 120]), 1), lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]), tfd.Normal(loc=0, scale=2.), lambda n, g: tfd.Normal(loc=n, scale=g), ]) d._resolve_graph() # ==> ( # ('e', ()), # ('g', ('e',)), # ('n', ()), # ('x', ('n', 'g')), # ) ```
def process_actions(self, actions):
    """Process the audit actions and build the notification payloads.

    NOTE(review): every bare ``action[]`` subscript and dict-literal key
    below lost its string literal during extraction -- restore from the
    original source before relying on this code.

    Args:
        actions (list): List of actions we want to take.

    Returns:
        dict: mapping of NotificationContact -> grouped actions to notify.
    """
    notices = {}
    notification_contacts = {}
    for action in actions:
        resource = action[]
        action_status = ActionStatus.SUCCEED
        try:
            if action[] == AuditActions.REMOVE:
                action_status = self.process_action(
                    resource,
                    AuditActions.REMOVE
                )
                if action_status == ActionStatus.SUCCEED:
                    db.session.delete(action[].issue)
            elif action[] == AuditActions.STOP:
                action_status = self.process_action(
                    resource,
                    AuditActions.STOP
                )
                if action_status == ActionStatus.SUCCEED:
                    action[].update({
                        : action[],
                        : action[],
                        : action[],
                        : action[]
                    })
            elif action[] == AuditActions.FIXED:
                db.session.delete(action[].issue)
            elif action[] == AuditActions.ALERT:
                action[].update({
                    : action[],
                    : action[],
                    : action[],
                    : action[]
                })
            db.session.commit()
            if action_status == ActionStatus.SUCCEED:
                # De-duplicate owners by converting each dict to a hashable
                # tuple of items and back.
                for owner in [
                    dict(t) for t in {tuple(d.items()) for d in (action[] + self.permanent_emails)}
                ]:
                    if owner[] not in notification_contacts:
                        contact = NotificationContact(type=owner[], value=owner[])
                        notification_contacts[owner[]] = contact
                        notices[contact] = {
                            : [],
                            : []
                        }
                    else:
                        contact = notification_contacts[owner[]]
                    if action[] == AuditActions.FIXED:
                        notices[contact][].append(action)
                    else:
                        notices[contact][].append(action)
        except Exception as ex:
            self.log.exception(.format(
                action[].account.account_name,
                action[].id,
                action[],
                ex
            ))
    return notices
Process the actions we want to take Args: actions (`list`): List of actions we want to take Returns: `list` of notifications
def predict(self, timeseriesX, n, m):
    """Calculate the dependent TimeSeries Y for the given parameters.

    Applies y = m*x + n to every entry of the independent series.

    :param TimeSeries timeseriesX: the independent TimeSeries.
    :param float n: intercept calculated during regression.
    :param float m: slope calculated during regression.
    :return TimeSeries: predicted values; same length as timeseriesX.
    """
    predicted = [[ts_entry[0], m * ts_entry[1] + n] for ts_entry in timeseriesX]
    return TimeSeries.from_twodim_list(predicted)
Calculates the dependent timeseries Y for the given parameters and
independent timeseries. (y = m*x + n)

:param TimeSeries timeseriesX: the independent TimeSeries.
:param float n: The intercept with the y axis that was calculated during regression.
:param float m: The slope of the function that was calculated during regression.

:return TimeSeries timeseries_y: the predicted values for the dependent
    TimeSeries. Its length and first dimension will be equal to those of
    timeseriesX.
def _is_pid_running_on_windows(pid):
    """Check if PID is running, for Windows systems."""
    import ctypes.wintypes

    kernel32 = ctypes.windll.kernel32
    # Desired access 1 == PROCESS_TERMINATE; handle is 0 on failure.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False

    # If the process exited, GetExitCodeProcess() returns its exit code;
    # a live process reports the _STILL_ALIVE sentinel instead.
    exit_code = ctypes.wintypes.DWORD()
    ret = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code))
    # NOTE(review): ret == 0 means GetExitCodeProcess *failed*, yet the pid
    # is then treated as alive -- confirm this is intentional.
    is_alive = (ret == 0 or exit_code.value == _STILL_ALIVE)
    kernel32.CloseHandle(handle)

    return is_alive
Check if PID is running for Windows systems
def router_fabric_virtual_gateway_address_family_ipv6_gateway_mac_address(self, **kwargs):
    """Auto-generated NETCONF config builder for the fabric-virtual-gateway
    IPv6 gateway MAC address.

    NOTE(review): both ``kwargs.pop()`` calls lost their key literals in
    extraction (likely 'gateway_mac_address' and 'callback').
    """
    config = ET.Element("config")
    router = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    fabric_virtual_gateway = ET.SubElement(router, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
    address_family = ET.SubElement(fabric_virtual_gateway, "address-family")
    ipv6 = ET.SubElement(address_family, "ipv6")
    gateway_mac_address = ET.SubElement(ipv6, "gateway-mac-address")
    gateway_mac_address.text = kwargs.pop()
    callback = kwargs.pop(, self._callback)
    return callback(config)
Auto Generated Code
def get_topology(self):
    """Get the converted topology ready for JSON encoding.

    NOTE(review): every dict key literal in this body was lost in
    extraction (e.g. 'name', 'links', 'nodes', 'servers', 'notes',
    'shapes', 'images') -- restore from the original source.

    :return: converted topology assembled into a single dict
    :rtype: dict
    """
    topology = {: self._name,
                : ,
                : {},
                : ,
                : }
    if self._links:
        topology[][] = self._links
    if self._nodes:
        topology[][] = self._nodes
    if self._servers:
        topology[][] = self._servers
    if self._notes:
        topology[][] = self._notes
    if self._shapes[]:
        topology[][] = self._shapes[]
    if self._shapes[]:
        topology[][] = \
            self._shapes[]
    if self._images:
        topology[][] = self._images
    return topology
Get the converted topology ready for JSON encoding :return: converted topology assembled into a single dict :rtype: dict
def datastore(self, domain, data_type, mapping=None):
    """Get an instance of the DataStore module bound to this instance.

    Args:
        domain (str): "system", "organization", or "local" -- controls
            which Apps may access the stored data.
        data_type (str): The data type descriptor (e.g., tc:whois:cache).
        mapping: Optional mapping passed through to TcExDataStore.

    Returns:
        TcExDataStore: A new DataStore instance.
    """
    # Imported lazily so the module is only loaded when actually used.
    from .tcex_datastore import TcExDataStore
    return TcExDataStore(self, domain, data_type, mapping)
Get instance of the DataStore module. Args: domain (str): The domain can be either "system", "organization", or "local". When using "organization" the data store can be accessed by any Application in the entire org, while "local" access is restricted to the App writing the data. The "system" option should not be used in almost all cases. data_type (str): The data type descriptor (e.g., tc:whois:cache). Returns: object: An instance of the DataStore Class.
def update_avatar(self):
    """Update the user's avatar image (fetched asynchronously).

    NOTE(review): several string literals (log format, profile keys,
    tooltip separator/default) were lost in extraction -- restore from
    the original source.
    """
    def do_update_avatar(info, error=None):
        # Callback receiving (uk, uname, img_path) once the avatar is cached.
        if error or not info:
            logger.error( % (info, error))
        else:
            uk, uname, img_path = info
            self.img_avatar.set_from_file(img_path)
            self.img_avatar.props.tooltip_text = .join([
                self.profile[],
                uname,
            ])
    if not self.profile[]:
        return
    self.img_avatar.props.tooltip_text =
    cache_path = Config.get_cache_path(self.profile[])
    gutil.async_call(gutil.update_avatar, self.cookie, self.tokens,
                     cache_path, callback=do_update_avatar)
更新用户头像
def clusterdown_wrapper(func):
    """Wrapper for CLUSTERDOWN error handling.

    Retries the wrapped coroutine up to 3 times when the cluster reports
    it is down, then raises ClusterDownError if it still fails.
    """
    @wraps(func)
    async def retry_on_clusterdown(*args, **kwargs):
        attempts_left = 3
        while attempts_left > 0:
            attempts_left -= 1
            try:
                return await func(*args, **kwargs)
            except ClusterDownError:
                # Cluster may recover between attempts; retry silently.
                pass
        raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
    return retry_on_clusterdown
Wrapper for CLUSTERDOWN error handling. If the cluster reports it is down it is assumed that: - connection_pool was disconnected - connection_pool was reseted - refereh_table_asap set to True It will try 3 times to rerun the command and raises ClusterDownException if it continues to fail.
def get_journal_abstracts(self, refresh=True):
    """Return ScopusAbstract objects that belong to a Journal.

    NOTE(review): the string literal compared against ``aggregationType``
    was lost in extraction (likely 'Journal').
    """
    return [abstract for abstract in self.get_abstracts(refresh=refresh) if
            abstract.aggregationType == ]
Return a list of ScopusAbstract objects using ScopusSearch, but only if belonging to a Journal.
def _is_in_max_difference(value_1, value_2, max_difference):
    """Return True when the two values differ by at most *max_difference*.

    The smaller value is always subtracted from the larger one so the
    difference never underflows for unsigned types (np.uint); works in
    plain Python and in numba mode.
    """
    if value_1 > value_2:
        return value_1 - value_2 <= max_difference
    return value_2 - value_1 <= max_difference
Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode. Circumvents numba bug #1653
def start_connect(self):
    """Try to connect to the Heron Server.

    ``loop()`` needs to be called after this; the connect is initiated
    asynchronously and completes inside the event loop.
    """
    Log.debug("In start_connect() of %s" % self._get_classname())
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    # Mark the handshake as in progress before the non-blocking connect.
    self._connecting = True
    self.connect(self.endpoint)
Tries to connect to the Heron Server ``loop()`` method needs to be called after this.
def pdu_to_function_code_or_raise_error(resp_pdu):
    """Parse a response PDU and return its function code, or raise.

    NOTE(review): both struct.unpack() calls lost their format-string
    literals in extraction (likely '>B').

    :param resp_pdu: PDU of response.
    :return: Function code of the response.
    :raises ModbusError: when the response contains an error code.
    """
    function_code = struct.unpack(, resp_pdu[0:1])[0]
    if function_code not in function_code_to_function_map.keys():
        # An unknown function code means the second byte is an error code.
        error_code = struct.unpack(, resp_pdu[1:2])[0]
        raise error_code_to_exception_map[error_code]
    return function_code
Parse response PDU and return of :class:`ModbusFunction` or raise error. :param resp_pdu: PDU of response. :return: Subclass of :class:`ModbusFunction` matching the response. :raises ModbusError: When response contains error code.
def fit(self, X, y, cv=None, class_weight=):
    """Fit the wrapped classifier to X and outcomes y.

    NOTE(review): the default string literal for ``class_weight`` was lost
    in extraction (syntax is currently invalid) -- likely 'auto' or
    'balanced'; confirm against the original source.
    """
    self.X = X
    self.y = y
    self.set_class_weight(class_weight=class_weight, y=y)
    self.clf = self.clf.fit(X, y)
    return self.clf
Fits X to outcomes y, using clf
def new(cls, settings, *args, **kwargs):
    """Create a new cloud Instance subclass chosen from the settings.

    NOTE(review): the settings keys and the cloud-name string literals
    compared below were lost in extraction -- restore from the original
    source.
    """
    logger.debug( % settings[])
    cloud = settings[]
    if cloud == :
        self = BareInstance(settings=settings, *args, **kwargs)
    elif cloud == :
        self = AWSInstance(settings=settings, *args, **kwargs)
    elif cloud == :
        self = GCPInstance(settings=settings, *args, **kwargs)
    else:
        raise DSBException( % cloud)
    return self
Create a new Cloud instance based on the Settings
def _build_command(self, cmd_1, cmd_2=None, select=False, select_command=None):
    """Construct the complete legacy command for this group.

    :param cmd_1: Light command 1.
    :param cmd_2: Light command 2.
    :param select: If the command requires selection.
    :param select_command: Selection command bytes.
    :return: The complete CommandLegacy instance.
    """
    return CommandLegacy(cmd_1, cmd_2, self._group_number, select, select_command)
Constructs the complete command. :param cmd_1: Light command 1. :param cmd_2: Light command 2. :param select: If command requires selection. :param select_command: Selection command bytes. :return: The complete command.
def get_tshark_interfaces(tshark_path=None):
    """Return the interface numbers reported by ``tshark -D``.

    Used internally to capture on multiple interfaces.

    NOTE(review): the '-D' argument literal and the os.devnull open mode
    were lost in extraction.
    """
    parameters = [get_process_path(tshark_path), ]
    with open(os.devnull, ) as null:
        tshark_interfaces = subprocess.check_output(parameters, stderr=null).decode("utf-8")

    # Each output line starts with "<number>. <name>"; keep the number token.
    return [line.split()[0] for line in tshark_interfaces.splitlines()]
Returns a list of interface numbers from the output tshark -D. Used internally to capture on multiple interfaces.
def remove_threadlocal(self, name):
    """Remove the entry called *name* from the current thread's context.

    Args:
        name (str | unicode): key to drop; the payload object itself is
            released once its context becomes empty.
    """
    with self._lock:
        payload = self._tpayload
        if payload is None:
            return
        payload.context.pop(name, None)
        if not payload.context:
            # Last entry gone: drop the whole per-thread payload.
            self._tpayload = None
Args: name (str | unicode): Remove entry with `name` from current thread's context
def _update_parent_attachments(self):
    """Try to sync the parent's 'has_attachments' flag with our count."""
    try:
        self._parent.has_attachments = len(self.__attachments) > 0
    except AttributeError:
        # No parent (or one without the property): nothing to update.
        pass
Tries to update the parent property 'has_attachments'
def update_flagfile(flags_path, new_threshold):
    """Update the ``resign_threshold`` flag in the flagfile at *flags_path*.

    The threshold is stored negative; changes smaller than 0.001 are
    skipped.

    NOTE(review): the mode literal of the final GFile() call was lost in
    extraction (likely 'w').
    """
    if abs(new_threshold) > 1:
        raise ValueError("Invalid new percentile for resign threshold")

    with tf.gfile.GFile(flags_path) as f:
        lines = f.read()
    if new_threshold > 0:
        # Thresholds are stored as negative values.
        new_threshold *= -1
    if not RESIGN_FLAG_REGEX.search(lines):
        print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path))
        sys.exit(1)

    old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1)
    lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines)
    if abs(float(old_threshold[0]) - new_threshold) < 0.001:
        print("Not updating percentiles; {} ~= {:.3f}".format(
            old_threshold[0], new_threshold), flush=True)
    else:
        print("Updated percentile from {} to {:.3f}".format(
            old_threshold[0], new_threshold), flush=True)
        with tf.gfile.GFile(flags_path, ) as f:
            f.write(lines)
Updates the flagfile at `flags_path`, changing the value for `resign_threshold` to `new_threshold`
def _from_binary_reparse(cls, binary_stream):
    """Build a REPARSE_POINT attribute from its binary form.  See base class."""
    reparse_tag, data_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])

    # The low 16 bits of the tag carry the type, the top nibble the flags.
    reparse_type = ReparseType(reparse_tag & 0x0000FFFF)
    reparse_flags = ReparseFlags((reparse_tag & 0xF0000000) >> 28)
    guid = None

    if reparse_flags & ReparseFlags.IS_MICROSOFT:
        # Microsoft reparse points: decode known types, keep raw bytes
        # for everything else.
        if reparse_type is ReparseType.SYMLINK:
            data = SymbolicLink.create_from_binary(binary_stream[cls._REPR.size:])
        elif reparse_type is ReparseType.MOUNT_POINT:
            data = JunctionOrMount.create_from_binary(binary_stream[cls._REPR.size:])
        else:
            data = binary_stream[cls._REPR.size:].tobytes()
    else:
        # Third-party reparse points embed a 16-byte GUID before the data.
        guid = UUID(bytes_le=binary_stream[cls._REPR.size:cls._REPR.size+16].tobytes())
        data = binary_stream[cls._REPR.size+16:].tobytes()

    nw_obj = cls((reparse_type, reparse_flags, data_len, guid, data))
    _MOD_LOGGER.debug("Attempted to unpack REPARSE_POINT from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)

    return nw_obj
See base class.
def stop(self):
    """Shut the server down and wait for the server thread to join."""
    # Order matters: stop serving, release the socket, then join the
    # thread that was running serve_forever().
    self._server.shutdown()
    self._server.server_close()
    self._thread.join()
    self.running = False
Shuts the server down and waits for server thread to join
def _set_internal_compiler_error(self):
    """Mark this issue as compiler-generated: add the note to the
    description and lower the severity."""
    self.severity = "Low"
    note = " This issue is reported for internal compiler generated code."
    self.description_tail += note
    self.description = "{}\n{}".format(self.description_head, self.description_tail)
    self.code = ""
Adds the false positive to description and changes severity to low
def unauthorized(cls, errors=None):
    """Shortcut API for an HTTP 401 `Unauthorized` response.

    NOTE(review): the content-type and status-line string literals were
    lost in extraction (syntax is currently invalid).

    Args:
        errors (list): Response key/value data.

    Returns:
        WSResponse-style JSON payload.
    """
    if cls.expose_status:
        cls.response.content_type =
        cls.response._status_line =
    return cls(401, errors=errors).to_json
Shortcut API for HTTP 401 `Unauthorized` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
def _duplicate_example(self, request):
    """Duplicate the example at the request's 'index' arg.

    NOTE(review): several string literals (the arg name, error payload and
    content types passed to http_util.Respond) were lost in extraction.

    Args:
        request: A request that should contain 'index'.

    Returns:
        An empty response (or a 400 error for an out-of-range index).
    """
    index = int(request.args.get())
    if index >= len(self.examples):
        return http_util.Respond(request, {: },
                                 , code=400)
    new_example = self.example_class()
    new_example.CopyFrom(self.examples[index])
    self.examples.append(new_example)
    # Remember that the appended copy differs from what was loaded.
    self.updated_example_indices.add(len(self.examples) - 1)
    self.generate_sprite([ex.SerializeToString() for ex in self.examples])
    return http_util.Respond(request, {}, )
Duplicates the specified example. Args: request: A request that should contain 'index'. Returns: An empty response.
def write_table_file(self, table_file=None):
    """Write self._table to self._table_file as FITS, overwriting any file.

    Raises:
        RuntimeError: when there is no table, or no output file configured.
    """
    if self._table is None:
        raise RuntimeError("No table to write")
    if table_file is not None:
        self._table_file = table_file
    if self._table_file is None:
        raise RuntimeError("No output file specified for table")
    # NOTE(review): namelist=[] looks like a stripped string literal --
    # confirm the intended HDU name list against the original source.
    write_tables_to_fits(self._table_file, [self._table], clobber=True,
                         namelist=[])
Write the table to self._table_file
def getConnectionInfo(self, wanInterfaceId=1, timeout=1):
    """Execute the GetInfo action to obtain WAN connection information.

    :param int wanInterfaceId: the id of the WAN device
    :param float timeout: timeout to wait for the action to be executed
    :return: WAN connection information
    :rtype: ConnectionInfo
    """
    service = Wan.getServiceType("getConnectionInfo") + str(wanInterfaceId)
    control_url = self.getControlURL(service)
    raw_results = self.execute(control_url, service, "GetInfo", timeout=timeout)
    return ConnectionInfo(raw_results)
Execute GetInfo action to get WAN connection information's. :param int wanInterfaceId: the id of the WAN device :param float timeout: the timeout to wait for the action to be executed :return: WAN connection information's. :rtype: ConnectionInfo
def validate(self):
    """Return a {key: validator} dict for all non-deprecated parameters."""
    deprecated = self._all_deprecated
    return {key: val[1]
            for key, val in six.iteritems(self.defaultParams)
            if key not in deprecated}
Dictionary with validation methods as values
def _get_string_match_value(self, string, string_match_type):
    """Map *string* to a match value for the given string-match Type.

    NOTE(review): the get_type_data() arguments and the regex prefix /
    suffix string literals were lost in extraction -- the four branches
    presumably correspond to exact, ignore-case, wildcard and
    wildcard-ignore-case matching; restore from the original source.
    """
    if string_match_type == Type(**get_type_data()):
        return string
    elif string_match_type == Type(**get_type_data()):
        return re.compile( + string, re.I)
    elif string_match_type == Type(**get_type_data()):
        return re.compile( + string + )
    elif string_match_type == Type(**get_type_data()):
        return re.compile( + string + , re.I)
Gets the match value
def _grow_overlaps(dna, melting_temp, require_even, length_max, overlap_min,
                   min_exception):
    """Grow equidistant oligo overlaps until they meet the constraints.

    :param dna: Input sequence (coral.DNA).
    :param melting_temp: Ideal Tm of the overlaps, in degrees C.
    :param require_even: Require an even number of oligonucleotides.
    :param length_max: Maximum oligo size.
    :param overlap_min: Minimum overlap size.
    :param min_exception: Allow overlaps below overlap_min to keep growing
        above melting_temp.
    :returns: (oligos, overlaps, overlap_tms, overlap_indices)

    NOTE(review): as written, ``maxed`` is read before any assignment on
    the first pass of the inner while-loop, and ``tm_met`` is never set
    True, so the outer loop can only exit via ``break``; parts of the
    original implementation appear to have been lost in extraction --
    compare against the upstream source before fixing.
    """
    # Start with the smallest oligo count that can cover the sequence.
    oligo_n = len(dna) // length_max + 1
    if require_even:
        oligo_increment = 2
        if oligo_n % 2 == 1:
            oligo_n += 1
    else:
        oligo_increment = 1
    while float(len(dna)) / oligo_n > length_max:
        oligo_n += oligo_increment
    tm_met = False
    len_met = False
    while(not tm_met or not len_met):
        # Place overlap centers at equal intervals for the current count.
        overlap_n = oligo_n - 1
        overlap_interval = float(len(dna)) / oligo_n
        starts = [int(overlap_interval * (i + 1)) for i in range(overlap_n)]
        ends = [index + 1 for index in starts]
        overlaps = [dna[start:end] for start, end in zip(starts, ends)]
        overlap_tms = [coral.analysis.tm(overlap) for overlap in overlaps]
        index = overlap_tms.index(min(overlap_tms))
        oligo_starts = [0] + starts
        oligo_ends = ends + [len(dna)]
        oligo_indices = [oligo_starts, oligo_ends]
        oligos = [dna[start:end] for start, end in zip(*oligo_indices)]
        if min_exception:
            len_met = all([len(x) >= overlap_min for x in overlaps])
            if len_met:
                break
        else:
            while not len_met and not maxed:
                # Grow the shortest overlap until all reach overlap_min or
                # some oligo hits length_max.
                overlaps = _recalculate_overlaps(dna, overlaps, oligo_indices)
                overlap_lens = [len(overlap) for overlap in overlaps]
                index = overlap_lens.index(min(overlap_lens))
                oligos = _expand_overlap(dna, oligo_indices, index, oligos,
                                         length_max)
                maxed = any([len(x) == length_max for x in oligos])
                len_met = all([len(x) >= overlap_min for x in overlaps])
            overlap_tms[index] = coral.analysis.tm(overlaps[index])
        oligo_n += oligo_increment
    overlap_indices = [(oligo_indices[0][x + 1], oligo_indices[1][x]) for x in
                       range(overlap_n)]
    return oligos, overlaps, overlap_tms, overlap_indices
Grows equidistant overlaps until they meet specified constraints. :param dna: Input sequence. :type dna: coral.DNA :param melting_temp: Ideal Tm of the overlaps, in degrees C. :type melting_temp: float :param require_even: Require that the number of oligonucleotides is even. :type require_even: bool :param length_max: Maximum oligo size (e.g. 60bp price point cutoff) range. :type length_range: int :param overlap_min: Minimum overlap size. :type overlap_min: int :param min_exception: In order to meet melting_temp and overlap_min settings, allow overlaps less than overlap_min to continue growing above melting_temp. :type min_exception: bool :returns: Oligos, their overlapping regions, overlap Tms, and overlap indices. :rtype: tuple
def _extract_datasets_to_harvest(cls, report): assert isinstance(report, string_types + (list,)) if (isinstance(report, list) and all([isinstance(x, tuple) and len(x) == 2 for x in report])): return report table = readers.read_table(report) table_keys = table[0].keys() expected_keys = ["catalog_metadata_url", "dataset_title", "dataset_accrualPeriodicity"] for key in expected_keys: if key not in table_keys: raise KeyError(.format(key)) if "harvest" in table_keys: datasets_to_harvest = [ (row["catalog_metadata_url"], row["dataset_title"]) for row in table if int(row["harvest"])] else: datasets_to_harvest = [ (row["catalog_metadata_url"], row["dataset_title"]) for row in table] return datasets_to_harvest
Extrae de un reporte los datos necesarios para reconocer qué datasets marcar para cosecha en cualquier generador. Args: report (str o list): Reporte (lista de dicts) o path a uno. Returns: list: Lista de tuplas con los títulos de catálogo y dataset de cada reporte extraído.
def _getCachedValue(obj, relicFunc, resultType):
    """Return obj.cached, computing it via relicFunc on first use.

    Common implementation for orderG1/G2/Gt and generatorG1/G2/Gt: the
    relic function fills a freshly-created *resultType* by reference and
    the result is memoized on *obj*.
    """
    cached = obj.cached
    if not cached:
        cached = resultType()
        relicFunc(byref(cached))
        obj.cached = cached
    return cached
Retrieves a value from obj.cached (if not None) or calls @relicFunc and caches the result (of @resultType) int obj.cached. This is a common implementation for orderG1/G2/Gt and generatotG1/G2/Gt
def sem(self, ddof=1):
    """Compute the standard error of the mean of groups, excluding
    missing values.

    For multiple groupings, the result index will be a MultiIndex.

    Parameters
    ----------
    ddof : integer, default 1
        degrees of freedom
    """
    spread = self.std(ddof=ddof)
    return spread / np.sqrt(self.count())
Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
def IsAllSpent(self):
    """Flag indicating whether the entire balance has been spent.

    Returns:
        bool: False as soon as any item is still CoinState.Confirmed.
    """
    return not any(item == CoinState.Confirmed for item in self.Items)
Flag indicating whether the entire balance has been spent.

Returns:
    bool:
def _from_docstring_rst(doc):
    """Reformat a docstring from reStructuredText markup."""
    def format_fn(line, status):
        # Bold ":data:"-style lines and request a following blank line.
        if re_from_data.match(line):
            line = re_from_data.sub(r"**\1** ", line)
            status["add_line"] = True

        line = re_from_defaults.sub(r"*\1*", line)

        if status["listing"]:
            # Inside a listing: turn param/status/item markers into bullet
            # entries; anything else is an indented continuation line.
            if re_from_param.match(line):
                m = re_from_param.match(line)
                line = " - ``{}`` {}".format(m.group(1), m.group(3))
            elif re_from_status.match(line):
                m = re_from_status.match(line)
                line = " - ``{}`` {}".format(m.group(1), m.group(3))
            elif re_from_item.match(line):
                line = re_from_item.sub(r" -", line)
            else:
                line = " " * 4 + line.lstrip()

        # Collapse single backticks into literal double-backtick markup.
        line = re_lone_backtick.sub("``", line)
        return line

    return _reformat_docstring(doc, format_fn, code_newline="\n")
format from docstring to ReStructured Text
def hash_data(salt, value, hash_alg=None):
    """Hash *value* together with *salt* using *hash_alg*; return hex digest.

    NOTE(review): the default-algorithm string literal after ``or`` was
    lost in extraction (the original documented SHA512 as the default),
    leaving a syntax error on the first line of the body.

    :param salt: hash salt
    :param hash_alg: name of the hashlib algorithm to use
    :param value: value to hash together with the salt
    :return: hex-encoded hash of value + salt
    """
    hash_alg = hash_alg or
    hasher = hashlib.new(hash_alg)
    hasher.update(value.encode())
    hasher.update(salt.encode())
    value_hashed = hasher.hexdigest()
    return value_hashed
Hashes a value together with a salt with the given hash algorithm. :type salt: str :type hash_alg: str :type value: str :param salt: hash salt :param hash_alg: the hash algorithm to use (default: SHA512) :param value: value to hash together with the salt :return: hashed value
def listTheExtras(self, deleteAlso):
    """List (and optionally delete) extra/unknown loaded parameters.

    Uses ConfigObj's get_extra_values() to find extras and returns a
    descriptive string (empty when none).  When *deleteAlso* is True the
    extra items are also removed from this object.

    NOTE(review): the hasattr() attribute-name literal and the initial
    value of ``retval`` were lost in extraction.
    """
    extras = configobj.get_extra_values(self)
    # Augment each tuple with a flag: top-level entry that is a section.
    expanded = [ (x+ \
        ( bool(len(x[0])<1 and hasattr(self[x[1]], )), ) \
        ) for x in extras]
    retval =
    if expanded:
        retval = flattened2str(expanded, extra=1)
    if deleteAlso:
        for tup_to_del in extras:
            target = self
            # Walk down to the sub-dict that owns the extra key.
            location = tup_to_del[0]
            for subdict in location:
                target = target[subdict]
            target.pop(tup_to_del[1])
    return retval
Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items.
def get_resampled_top_edge(self, angle_var=0.1):
    """Compute a simplified representation of the fault's top edge.

    Walks the first mesh row and keeps only points where the direction
    changes by more than *angle_var* degrees.

    :param float angle_var:
        Maximum deviation (in degrees) admitted without creating a new
        segment.
    :returns:
        A Line representing the rupture surface's top edge.
    """
    mesh = self.mesh
    top_edge = [Point(mesh.lons[0][0], mesh.lats[0][0], mesh.depths[0][0])]

    # NOTE(review): mesh.triangulate() is recomputed on every iteration;
    # consider hoisting it if the call is pure and expensive.
    for i in range(len(mesh.triangulate()[1][0]) - 1):
        v1 = numpy.asarray(mesh.triangulate()[1][0][i])
        v2 = numpy.asarray(mesh.triangulate()[1][0][i + 1])
        # Angle between consecutive edge vectors via atan2(|v1 x v2|, v1.v2).
        cosang = numpy.dot(v1, v2)
        sinang = numpy.linalg.norm(numpy.cross(v1, v2))
        angle = math.degrees(numpy.arctan2(sinang, cosang))

        if abs(angle) > angle_var:
            top_edge.append(Point(mesh.lons[0][i + 1],
                                  mesh.lats[0][i + 1],
                                  mesh.depths[0][i + 1]))

    top_edge.append(Point(mesh.lons[0][-1], mesh.lats[0][-1], mesh.depths[0][-1]))
    line_top_edge = Line(top_edge)

    return line_top_edge
This methods computes a simplified representation of a fault top edge by removing the points that are not describing a change of direction, provided a certain tolerance angle. :param float angle_var: Number representing the maximum deviation (in degrees) admitted without the creation of a new segment :returns: A :class:`~openquake.hazardlib.geo.line.Line` representing the rupture surface's top edge.
def collect_num(self):
    """Return the number of times this answer was collected (bookmarked).

    :return: collection count, 0 when the counter element is absent
    :rtype: int
    """
    counter = self.soup.find("a", {
        "data-za-a": "click_answer_collected_count"
    })
    return 0 if counter is None else int(counter.get_text())
获取答案收藏数 :return: 答案收藏数量 :rtype: int
def db_optimize(name, table=None, **connection_args):
    """Optimize the full database or just a given table.

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.db_optimize dbname

    NOTE(review): both log.info() calls were garbled in extraction (the
    format-string literals are missing, leaving invalid syntax).
    """
    ret = []
    if table is None:
        # No table given: optimize every table in the database.
        tables = db_tables(name, **connection_args)
        for table in tables:
            log.info(%s\%s\, name, table)
            ret.append(__optimize_table(name, table, **connection_args))
    else:
        log.info(%s\%s\, name, table)
        ret = __optimize_table(name, table, **connection_args)
    return ret
Optimizes the full database or just a given table CLI Example: .. code-block:: bash salt '*' mysql.db_optimize dbname
def __dict_invert(self, data):
    """Helper for merge: invert a dict whose values are lists (or
    whitespace-separated strings), mapping each list element back to its
    original key.  Later keys win on duplicate elements."""
    inverted = {}
    for key, members in data.items():
        if isinstance(members, str):
            members = members.split()
        for member in members:
            inverted[member] = key
    return inverted
Helper function for merge. Takes a dictionary whose values are lists and returns a dict with the elements of each list as keys and the original keys as values.
def _find_own_cgroups():
    """Yield (subsystem, cgroup) pairs describing this process's cgroups.

    Each process is in exactly one cgroup per hierarchy.

    NOTE(review): the file-path literal (presumably '/proc/self/cgroup'),
    the open mode and the logging message were lost in extraction.
    """
    try:
        with open(, ) as ownCgroupsFile:
            for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile):
                yield cgroup
    except IOError:
        # Best effort: log and yield nothing if the proc file is missing.
        logging.exception()
For all subsystems, return the information in which (sub-)cgroup this process is in. (Each process is in exactly cgroup in each hierarchy.) @return a generator of tuples (subsystem, cgroup)
def missing(self, field, last=True):
    """Append a 'missing' sort clause for *field*.

    Numeric fields support specific handling for docs that lack the
    field; the missing value sorts either last or first.

    NOTE(review): the dict-key/value string literals (presumably
    'missing', '_last', '_first') were lost in extraction.

    :param field: field name to sort on
    :param last: True to sort missing docs last, False for first
    :return: self, for chaining
    """
    if last:
        self.append({field: {: }})
    else:
        self.append({field: {: }})
    return self
Numeric fields support specific handling for missing fields in a doc. The missing value can be _last, _first, or a custom value (that will be used for missing docs as the sort value). missing('price') > {"price" : {"missing": "_last" } } missing('price',False) > {"price" : {"missing": "_first"} }
def Read(self, file_object):
    """Reads dependency definitions.

    Args:
        file_object (file): file-like object to read from.

    Yields:
        DependencyDefinition: dependency definition.
    """
    config_parser = configparser.RawConfigParser()
    # readfp() was deprecated since Python 3.2 and removed in 3.12;
    # read_file() is the direct, behavior-identical replacement.
    config_parser.read_file(file_object)

    for section_name in config_parser.sections():
        dependency_definition = DependencyDefinition(section_name)
        for value_name in self._VALUE_NAMES:
            value = self._GetConfigValue(config_parser, section_name, value_name)
            setattr(dependency_definition, value_name, value)
        yield dependency_definition
Reads dependency definitions. Args: file_object (file): file-like object to read from. Yields: DependencyDefinition: dependency definition.
def create_environment(self, env_name, version_label=None,
                       solution_stack_name=None, cname_prefix=None,
                       description=None, option_settings=None,
                       tier_name=, tier_type=, tier_version=):
    """Create a new Elastic Beanstalk environment.

    NOTE(review): the default string literals for tier_name, tier_type
    and tier_version were lost in extraction (syntax is currently
    invalid) -- restore from the original source.
    """
    out("Creating environment: " + str(env_name) + ", tier_name:" +
        str(tier_name) + ", tier_type:" + str(tier_type))
    self.ebs.create_environment(self.app_name, env_name,
                                version_label=version_label,
                                solution_stack_name=solution_stack_name,
                                cname_prefix=cname_prefix,
                                description=description,
                                option_settings=option_settings,
                                tier_type=tier_type,
                                tier_name=tier_name,
                                tier_version=tier_version)
Creates a new environment
def _autoinsert_quotes(self, key):
    """Control how to automatically insert quotes in various situations.

    NOTE(review): this body was badly corrupted during extraction -- the
    char lookup dict, several condition lines and many string literals
    are missing or fused together.  Restore from the original source
    before relying on it; the surviving branches are kept verbatim below.
    """
    char = {Qt.Key_QuoteDbl: , Qt.Key_Apostrophe: soleolsolcursorsolcursorsolcursorcursoreol,:;)]}')):
        self.editor.insert_text(char)
    elif (unmatched_quotes_in_line(line_text) and
            (not last_three == 3*char)):
        self.editor.insert_text(char)
    elif self.editor.next_char() == char:
        # Next char is already the quote: step over it instead of inserting.
        cursor.movePosition(QTextCursor.NextCharacter,
                            QTextCursor.KeepAnchor, 1)
        cursor.clearSelection()
        self.editor.setTextCursor(cursor)
    elif last_three == 3*char:
        # Triple quote typed: complete it and place the caret inside.
        self.editor.insert_text(3*char)
        cursor = self.editor.textCursor()
        cursor.movePosition(QTextCursor.PreviousCharacter,
                            QTextCursor.KeepAnchor, 3)
        cursor.clearSelection()
        self.editor.setTextCursor(cursor)
    elif last_two == 2*char:
        self.editor.insert_text(char)
        self.editor.delayed_popup_docstring()
    else:
        # Insert an opening/closing pair and move the caret between them.
        self.editor.insert_text(2*char)
        cursor = self.editor.textCursor()
        cursor.movePosition(QTextCursor.PreviousCharacter)
        self.editor.setTextCursor(cursor)
Control how to automatically insert quotes in various situations.
def key_exists(key_id, region=None, key=None, keyid=None, profile=None):
    """Check for the existence of a KMS key.

    CLI example::

        salt myminion boto_kms.key_exists 'alias/mykey'

    NOTE(review): the result-dict key literals (presumably 'result' and
    'error') and the __utils__ key were lost in extraction.
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    r = {}
    try:
        key = conn.describe_key(key_id)
        # The key description succeeded, so the key exists.
        r[] = True
    except boto.exception.BotoServerError as e:
        if isinstance(e, boto.kms.exceptions.NotFoundException):
            r[] = False
            return r
        # Any other error is reported rather than interpreted.
        r[] = __utils__[](e)
    return r
Check for the existence of a key. CLI example:: salt myminion boto_kms.key_exists 'alias/mykey'
def _collapse_outgroup(tree, taxdicts):
    """Collapse the outgroup taxa into a single 'outgroup' leaf for
    easier viewing in an ete Tree; returns the pruned tree and updated
    taxdicts."""
    # All tests must share the same outgroup for the collapse to be valid.
    outg = taxdicts[0]["p4"]
    if not all([i["p4"] == outg for i in taxdicts]):
        raise Exception("no good")

    # Work on a copy so the caller's tree is untouched.
    tre = ete.Tree(tree.write(format=1))
    keep = [name for name in tre.get_leaf_names() if name not in outg]
    keep.append(outg[0])
    tre.prune(keep)
    tre.search_nodes(name=outg[0])[0].name = "outgroup"
    tre.ladderize()

    # Point every test's outgroup at the collapsed leaf.
    newtaxdicts = []
    for test in copy.deepcopy(taxdicts):
        test["p4"] = ["outgroup"]
        newtaxdicts.append(test)

    return tre, newtaxdicts
collapse outgroup in ete Tree for easier viewing
def _shuffled(seq):
    """Deterministically shuffle identically under both py2 + py3.

    Returns a new list; the input iterable is not modified.
    """
    fixed_random = random.Random()
    if six.PY2:
        fixed_random.seed(FIXED_RANDOM_SEED)
    else:
        fixed_random.seed(FIXED_RANDOM_SEED, version=1)

    seq = list(seq)
    # random.shuffle() lost its `random` parameter in Python 3.11; inline
    # the historical Fisher-Yates variant that shuffle(seq, random=...)
    # used, so the resulting permutation is identical on every version.
    for i in reversed(range(1, len(seq))):
        j = int(fixed_random.random() * (i + 1))
        seq[i], seq[j] = seq[j], seq[i]
    return seq
Deterministically shuffle identically under both py2 + py3.
def bind(self, fn: Callable[[Any], ]) -> :
    r"""Chain continuation passing functions.

    Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c

    NOTE(review): the annotation types after ``Callable[[Any], `` and the
    return annotation were lost in extraction (both presumably 'Cont'),
    leaving invalid syntax in the signature.
    """
    return Cont(lambda c: self.run(lambda a: fn(a).run(c)))
r"""Chain continuation passing functions. Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
def brpoplpush(self, source, destination, timeout=0):
    """Emulate brpoplpush: blocking-pop the tail of *source* and push the
    value onto the head of *destination*; returns the value or None."""
    popped = self.brpop(source, timeout)
    if popped is None:
        return None
    _key, value = popped
    self.lpush(destination, value)
    return value
Emulate brpoplpush
def get_agile_board(self, board_id):
    """Get agile board info by id.

    NOTE(review): the URL format-string literal was lost in extraction
    (likely the agile-board REST endpoint path).

    :param board_id: id of the board to fetch
    :return: parsed response of the GET request
    """
    url = .format(str(board_id))
    return self.get(url)
Get agile board info by id :param board_id: :return: