def _copy_new_parent(self, parent):
    if self.parent == "undefined":
        param = copy.copy(self)
        param.parent = parent.uid
        return param
    else:
        raise ValueError("Cannot copy from non-dummy parent %s." % parent)
Copy the current param to a new parent; the current param must be a dummy param.
def _format_char(char):
    if char is None:
        return -1
    if isinstance(char, _STRTYPES) and len(char) == 1:
        return ord(char)
    try:
        return int(char)
    except:
        raise TypeError('char must be a single-character string, integer, '
                        'or None\nReceived: ' + repr(char))
Prepares a single character for passing to ctypes calls. It needs to return an integer, but None can also be passed, which will keep the current character instead of overwriting it. This is called often and needs to be optimized wherever possible.
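A quick usage sketch (assuming `_STRTYPES` covers plain `str`): a one-character string maps to its code point, integers pass through, and None becomes -1.

>>> _format_char('A')
65
>>> _format_char(66)
66
>>> _format_char(None)
-1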
def _contigs_dict_to_file(self, contigs, fname):
    f = pyfastaq.utils.open_file_write(fname)
    for contig in sorted(contigs, key=lambda x: len(contigs[x]), reverse=True):
        print(contigs[contig], file=f)
    pyfastaq.utils.close(f)
Writes dictionary of contigs to file
def patch(*args, **kwargs):
    from caliendo.patch import patch as p
    return p(*args, **kwargs)
Deprecated. Patch should now be imported from caliendo.patch.patch
def transform(self, transform, desc=None):
    if desc is None:
        desc = u'transform({})'.format(getattr(transform, '__name__', ''))
    return self.replace(
        transforms=self.transforms + [transform],
        desc_stack=self.desc_stack + [desc]
    )
Create a copy of this query, transformed by `transform`.

Args:
    transform (callable): Callable that takes an iterable of values
        and returns an iterable of transformed values.

Keyword Args:
    desc (str): A description of the transform, to use in log messages.
        Defaults to the name of the `transform` function.

Returns:
    Query
def transform(self, X, y=None):
    insuffix = X._libsuffix
    cast_fn = utils.get_lib_fn('locallyBlurAntsImage%s' % (insuffix))
    casted_ptr = cast_fn(X.pointer, self.iters, self.conductance)
    return iio.ANTsImage(pixeltype=X.pixeltype,
                         dimension=X.dimension,
                         components=X.components,
                         pointer=casted_ptr)
Locally blur an image by applying a gradient anisotropic diffusion filter.

Arguments
---------
X : ANTsImage
    image to transform

y : ANTsImage (optional)
    another image to transform.

Example
-------
>>> import ants
>>> blur = ants.contrib.LocallyBlurIntensity(1,5)
>>> img2d = ants.image_read(ants.get_data('r16'))
>>> img2d_b = blur.transform(img2d)
>>> ants.plot(img2d)
>>> ants.plot(img2d_b)
>>> img3d = ants.image_read(ants.get_data('mni'))
>>> img3d_b = blur.transform(img3d)
>>> ants.plot(img3d)
>>> ants.plot(img3d_b)
def has_roles(self, *requirements):
    user_manager = current_app.user_manager
    role_names = user_manager.db_manager.get_user_roles(self)
    for requirement in requirements:
        if isinstance(requirement, (list, tuple)):
            tuple_of_role_names = requirement
            authorized = False
            for role_name in tuple_of_role_names:
                if role_name in role_names:
                    authorized = True
                    break
            if not authorized:
                return False
        else:
            role_name = requirement
            if role_name not in role_names:
                return False
    return True
Return True if the user has all of the specified roles. Return False otherwise.

has_roles() accepts a list of requirements:
    has_roles(requirement1, requirement2, requirement3).

Each requirement is either a role_name, or a tuple_of_role_names.
    role_name example:   'manager'
    tuple_of_role_names: ('funny', 'witty', 'hilarious')

A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.

For example:
    has_roles('a', ('b', 'c'), 'd')
Translates to:
    User has role 'a' AND (role 'b' OR role 'c') AND role 'd'.
def value(self):
    if self.filter_.get('field') is not None:
        try:
            result = getattr(self.model, self.filter_['field'])
        except AttributeError:
            raise InvalidFilters("{} has no attribute {}".format(
                self.model.__name__, self.filter_['field']))
        else:
            return result
    else:
        if 'val' not in self.filter_:
            raise InvalidFilters("Can't find value or field in a filter")
        return self.filter_['val']
Get the value to filter on.

:return: the value to filter on
def results(self):
    return self.report(self.parameters(),
                       self.metadata(),
                       self.experimentalResults())
Return a complete results dict. Only really makes sense for
recently-executed experimental runs.

:returns: the results dict
def setupMovie(self):
    if self.state == self.INIT:
        self.sendRtspRequest(self.SETUP)
Setup button handler.
def transform_dot(self, node, results):
    module_dot = results.get("bare_with_attr")
    member = results.get("member")
    new_name = None
    if isinstance(member, list):
        member = member[0]
    for change in MAPPING[module_dot.value]:
        if member.value in change[1]:
            new_name = change[0]
            break
    if new_name:
        module_dot.replace(Name(new_name, prefix=module_dot.prefix))
    else:
        self.cannot_convert(node, "This is an invalid module element")
Transform for calls to module members in code.
def release_value_set(self):
    if self._remotelib:
        self._remotelib.run_keyword('release_value_set', [self._my_id], {})
    else:
        _PabotLib.release_value_set(self, self._my_id)
Release a reserved value set so that other executions can also use it.
def connect_discussion_signals():
    post_save.connect(
        count_discussions_handler, sender=comment_model,
        dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
    post_delete.connect(
        count_discussions_handler, sender=comment_model,
        dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
    comment_was_flagged.connect(
        count_discussions_handler, sender=comment_model,
        dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
    comment_was_posted.connect(
        count_comments_handler, sender=comment_model,
        dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
    pingback_was_posted.connect(
        count_pingbacks_handler, sender=comment_model,
        dispatch_uid=PINGBACK_WF_COUNT_PINGBACKS)
    trackback_was_posted.connect(
        count_trackbacks_handler, sender=comment_model,
        dispatch_uid=TRACKBACK_WF_COUNT_TRACKBACKS)
Connect all the signals on the Comment model to maintain a valid discussion count on each entry when an action is done on the comments.
def pop_context(self):
    processor = getattr(self, 'processor', None)
    if processor is not None:
        pop_context = getattr(processor, 'pop_context', None)
        if pop_context is None:
            pop_context = getattr(processor, 'pop', None)
        if pop_context is not None:
            return pop_context()
    if self._pop_next:
        self._pop_next = False
Pops the last set of keyword arguments provided to the processor.
def add_title(self, title, subtitle=None, source=None):
    title_entry = self._sourced_dict(
        source,
        title=title,
    )
    if subtitle is not None:
        title_entry['subtitle'] = subtitle
    self._append_to('titles', title_entry)
Add title.

:param title: title for the current document
:type title: string

:param subtitle: subtitle for the current document
:type subtitle: string

:param source: source for the given title
:type source: string
def get_cardinality(self, node=None):
    if node:
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                if node == variable:
                    return cardinality
    else:
        cardinalities = defaultdict(int)
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                cardinalities[variable] = cardinality
        return cardinalities
Returns the cardinality of the node.

Parameters
----------
node: any hashable python object (optional)
    The node whose cardinality we want. If node is not specified returns
    a dictionary with the given variable as keys and their respective
    cardinality as values.

Returns
-------
int or dict :
    If node is specified returns the cardinality of the node.
    If node is not specified returns a dictionary with the given
    variable as keys and their respective cardinality as values.

Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
...                         values=np.random.rand(4))
>>> student.add_node(('Alice', 'Bob'))
>>> student.add_factors(factor)
>>> student.get_cardinality()
defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
>>> student.get_cardinality(node='Alice')
2
def _channel_exists_and_not_settled(
        self,
        participant1: Address,
        participant2: Address,
        block_identifier: BlockSpecification,
        channel_identifier: ChannelID = None,
) -> bool:
    try:
        channel_state = self._get_channel_state(
            participant1=participant1,
            participant2=participant2,
            block_identifier=block_identifier,
            channel_identifier=channel_identifier,
        )
    except RaidenRecoverableError:
        return False
    exists_and_not_settled = (
        channel_state > ChannelState.NONEXISTENT and
        channel_state < ChannelState.SETTLED
    )
    return exists_and_not_settled
Returns True if the channel exists and is in a non-settled state.
def tableiswritable(tablename):
    result = True
    try:
        t = table(tablename, readonly=False, ack=False)
        result = t.iswritable()
    except:
        result = False
    return result
Test if a table is writable.
def save(self, path):
    with open(path, 'w', newline='') as fh:
        writer = csv.writer(fh, delimiter=' ')
        rows = list(self.items())
        rows.sort(key=lambda x: x[0])
        writer.writerows(rows)
Saves this catalogue's data to `path`.

:param path: file path to save catalogue data to
:type path: `str`
def verify_firebase_token(id_token, request, audience=None):
    return verify_token(
        id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL)
Verifies an ID Token issued by Firebase Authentication.

Args:
    id_token (Union[str, bytes]): The encoded token.
    request (google.auth.transport.Request): The object used to make
        HTTP requests.
    audience (str): The audience that this token is intended for. This is
        typically your Firebase application ID. If None then the audience
        is not verified.

Returns:
    Mapping[str, Any]: The decoded token.
def _get_serv(ret=None):
    _options = _get_options(ret)
    host = _options.get('host')
    port = _options.get('port')
    database = _options.get('db')
    user = _options.get('user')
    password = _options.get('password')
    version = _get_version(host, port, user, password)
    if version and "v0.8" in version:
        return influxdb.influxdb08.InfluxDBClient(host=host,
                                                  port=port,
                                                  username=user,
                                                  password=password,
                                                  database=database)
    else:
        return influxdb.InfluxDBClient(host=host,
                                       port=port,
                                       username=user,
                                       password=password,
                                       database=database)
Return an influxdb client object
def authentication(self, username, password):
    _auth_text = '{}:{}'.format(username, password)
    if int(sys.version[0]) > 2:
        _auth_bin = base64.encodebytes(_auth_text.encode())
        _auth = _auth_bin.decode()
        _auth = _auth.replace('\n', '')
        self._auth = _auth
    else:
        _auth = base64.encodestring(_auth_text)
        self._auth = str(_auth).replace('\n', '')
    _LOGGER.debug('Authentication string is: {}:***'.format(username))
Configures the user authentication for eAPI.

This method configures the username and password combination to use for
authenticating to eAPI.

Args:
    username (str): The username to use to authenticate the eAPI
        connection with
    password (str): The password in clear text to use to authenticate
        the eAPI connection with
def _cfactory(attr, func, argtypes, restype, errcheck=None):
    meth = getattr(attr, func)
    meth.argtypes = argtypes
    meth.restype = restype
    if errcheck:
        meth.errcheck = errcheck
Factory to create a ctypes function and automatically manage errors.
def unlink(self, key, *keys):
    return wait_convert(self.execute(b'UNLINK', key, *keys), int)
Delete a key asynchronously in another thread.
def run(self):
    salt.utils.process.appendproctitle(self.__class__.__name__)
    salt.daemons.masterapi.clean_fsbackend(self.opts)
    for interval in self.buckets:
        self.update_threads[interval] = threading.Thread(
            target=self.update_fileserver,
            args=(interval, self.buckets[interval]),
        )
        self.update_threads[interval].start()
    while True:
        time.sleep(60)
Start the update threads
def day_publications_card(date):
    d = date.strftime(app_settings.DATE_FORMAT)
    card_title = 'Reading on {}'.format(d)
    return {
        'card_title': card_title,
        'publication_list': day_publications(date=date),
    }
Displays Publications that were being read on `date`. `date` is a date object.
def clone(self):
    clone = copy(self)
    clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
    return clone
Returns a shallow copy of the current instance, except that all variables are deep-cloned.
def single_node_env(args):
    if isinstance(args, list):
        sys.argv = args
    elif args.argv:
        sys.argv = args.argv
    num_gpus = args.num_gpus if 'num_gpus' in args else 1
    util.single_node_env(num_gpus)
Sets up environment for a single-node TF session.

Args:
    :args: command line arguments as either argparse args or argv list
def get_version():
    reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    with open('requests_kerberos/__init__.py') as fd:
        matches = list(filter(lambda x: x, map(reg.match, fd)))
    if not matches:
        raise RuntimeError(
            'Could not find the version information for requests_kerberos'
        )
    return matches[0].group(1)
Simple function to extract the current version using regular expressions.
def swo_start(self, swo_speed=9600):
    if self.swo_enabled():
        self.swo_stop()
    info = structs.JLinkSWOStartInfo()
    info.Speed = swo_speed
    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.START,
                                         ctypes.byref(info))
    if res < 0:
        raise errors.JLinkException(res)
    self._swo_enabled = True
    return None
Starts collecting SWO data.

Note:
    If SWO is already enabled, it will first stop SWO before enabling it
    again.

Args:
    self (JLink): the ``JLink`` instance
    swo_speed (int): the frequency in Hz used by the target to communicate

Returns:
    ``None``

Raises:
    JLinkException: on error
def _parse_tag(self, el):
    ns = None
    tag = el.tag
    if tag[0] == '{':
        ns, tag = tag[1:].split('}', 1)
        if self.ns_map and ns in self.ns_map:
            ns = self.ns_map[ns]
    return ns, tag
Extract the namespace and tag from an element.
def download(self, file_name, save_as=None):
    self._check_session()
    try:
        if save_as:
            save_as = os.path.normpath(save_as)
            save_dir = os.path.dirname(save_as)
            if save_dir:
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                elif not os.path.isdir(save_dir):
                    raise RuntimeError(save_dir + " is not a directory")
        status, save_path, bytes = self._rest.download_file(
            'files', file_name, save_as, 'application/octet-stream')
    except resthttp.RestHttpError as e:
        raise RuntimeError('failed to download "%s": %s' % (file_name, e))
    return save_path, bytes
Download the specified file from the server.

Arguments:
    file_name -- Name of file resource to save.
    save_as   -- Optional path name to write file to. If not specified,
                 then file named by the last part of the resource path
                 is downloaded to current directory.

Return:
    (save_path, bytes)
    save_path -- Path where downloaded file was saved.
    bytes     -- Bytes downloaded.
def _peek(self, chars=1):
    line = self._socket.recv(chars, socket.MSG_PEEK)
    logger.debug('Server sent (peek): ' + line.rstrip())
    return line
Peek at the data in the server response.

Peeking should only be done when the response can be predicted. Make sure
that the socket will not block by requesting too much data from it while
peeking.

Args:
    chars -- the number of characters to peek.
def good_port_ranges(ports=None, min_range_len=20, border=3):
    min_range_len += border * 2
    if ports is None:
        ports = available_ports()
    ranges = utils.to_ranges(list(ports))
    lengths = sorted([(r[1] - r[0], r) for r in ranges], reverse=True)
    long_ranges = [l[1] for l in lengths if l[0] >= min_range_len]
    without_borders = [(low + border, high - border)
                       for low, high in long_ranges]
    return without_borders
Returns a list of 'good' port ranges. Such ranges are large and don't contain ephemeral or well-known ports. Range borders are also excluded.
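A minimal usage sketch, assuming `utils.to_ranges` turns a sorted sequence of ints into inclusive (low, high) pairs: a contiguous 100-port block is kept and trimmed by the 3-port border on each side.

>>> good_port_ranges(ports=range(20000, 20100))
[(20003, 20096)]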
def locate(self, pattern):
    top_matches = self.top.locate(pattern)
    bottom_matches = self.bottom.locate(pattern)
    return [top_matches, bottom_matches]
Find sequences matching a pattern. For a circular sequence, the search
extends over the origin.

:param pattern: str or NucleicAcidSequence for which to find matches.
:type pattern: str or coral.DNA
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
         sequence (for linear DNA) or twice as long as the input
         sequence (for circular DNA).
def get_multiple_devices(self, rids):
    headers = {
        'User-Agent': self.user_agent(),
        'Content-Type': self.content_type()
    }
    headers.update(self.headers())
    url = self.portals_url() + '/users/_this/devices/' + \
        str(rids).replace("'", "").replace(' ', '')
    r = requests.get(url, headers=headers, auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    else:
        print("get_multiple_devices: Something went wrong: <{0}>: {1}".format(
            r.status_code, r.reason))
        r.raise_for_status()
Implements the 'Get Multiple Devices' API.

Param rids: a python list object of device rids.

http://docs.exosite.com/portals/#get-multiple-devices
def _update_record_with_id(self, identifier, rtype, content):
    record = self._create_request_record(identifier, rtype, None, content,
                                         self._get_lexicon_option('ttl'),
                                         self._get_lexicon_option('priority'))
    self._request_modify_dns_record(record)
Updates existing record with no sub-domain name changes
def is1d(a: Collection) -> bool:
    "Return `True` if `a` is one-dimensional"
    return len(a.shape) == 1 if hasattr(a, 'shape') else True
Return `True` if `a` is one-dimensional
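Usage: anything without a `shape` attribute is treated as one-dimensional by default.

>>> import numpy as np
>>> is1d(np.zeros((3, 4)))
False
>>> is1d(np.zeros(3))
True
>>> is1d([1, 2, 3])
True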
def get_ips_from_string(ip_str):
    ip_list = []
    for ip in ip_str.split(','):
        clean_ip = ip.strip().lower()
        if clean_ip:
            ip_list.append(clean_ip)
    ip_count = len(ip_list)
    if ip_count > 0:
        if is_valid_ip(ip_list[0]) and is_valid_ip(ip_list[-1]):
            return ip_list, ip_count
    return [], 0
Given a string, returns a list of one or more valid IP addresses together with their count.
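A usage sketch, assuming `is_valid_ip` accepts dotted-quad addresses; note that only the first and last entries are actually validated.

>>> get_ips_from_string('203.0.113.7, 198.51.100.2')
(['203.0.113.7', '198.51.100.2'], 2)
>>> get_ips_from_string('')
([], 0)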
def content_status(self, content_status):
    if content_status is None:
        raise ValueError("Invalid value for `content_status`, must not be `None`")
    allowed_values = ["INVALID", "NOT_LOADED", "HIDDEN", "VISIBLE"]
    if content_status not in allowed_values:
        raise ValueError(
            "Invalid value for `content_status` ({0}), must be one of {1}"
            .format(content_status, allowed_values)
        )
    self._content_status = content_status
Sets the content_status of this IntegrationStatus.

Status of integration content, e.g. dashboards  # noqa: E501

:param content_status: The content_status of this IntegrationStatus.  # noqa: E501
:type: str
def sanitize(s, strict=True):
    allowed = ''.join(
        [
            'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
            'abcdefghijklmnopqrstuvwxyz',
            '0123456789',
        ]
    )
    if not strict:
        allowed += '-_.'
    s = str(s).replace(' ', '_')
    return ''.join([i for i in s if i in allowed])
Sanitize a string.

Spaces are converted to underscore; if strict=True they are then removed.

Parameters
----------
s : str
    String to sanitize
strict : bool
    If True, only alphanumeric characters are allowed. If False, a
    limited set of additional characters (-._) will be allowed.
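Usage: spaces become underscores first, and strict mode then drops those underscores along with any other non-alphanumeric characters.

>>> sanitize('Hello World!')
'HelloWorld'
>>> sanitize('Hello World!', strict=False)
'Hello_World'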
def get_tag(self, name):
    res = self.get_request('/tag/' + name)
    return Tag(cloud_manager=self, **res['tag'])
Return the tag as Tag object.
def profile_list(**kwargs):
    ctx = Context(**kwargs)
    ctx.execute_action('profile:list', **{
        'storage': ctx.repo.create_secure_service('storage'),
    })
Show uploaded profiles.
def _select_in_voltage_range(self, min_voltage=None, max_voltage=None):
    min_voltage = min_voltage if min_voltage is not None \
        else self.min_voltage
    max_voltage = max_voltage if max_voltage is not None \
        else self.max_voltage
    return list(filter(lambda p: min_voltage <= p.voltage <= max_voltage,
                       self.voltage_pairs))
Selects VoltagePairs within a certain voltage range.

Args:
    min_voltage (float): The minimum allowable voltage for a given step.
    max_voltage (float): The maximum allowable voltage for a given step.

Returns:
    A list of VoltagePair objects
def runSharedFeatures(noiseLevel=None, profile=False):
    exp = L4L2Experiment(
        "shared_features",
        enableLateralSP=True,
        enableFeedForwardSP=True
    )
    pairs = createThreeObjects()
    objects = createObjectMachine(
        machineType="simple",
        numInputBits=20,
        sensorInputSize=1024,
        externalInputSize=1024
    )
    for object in pairs:
        objects.addObject(object)
    exp.learnObjects(objects.provideObjectsToLearn())
    if profile:
        exp.printProfile()
    inferConfig = {
        "numSteps": 10,
        "noiseLevel": noiseLevel,
        "pairs": {
            0: zip(range(10), range(10))
        }
    }
    exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
    if profile:
        exp.printProfile()
    exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
    )
Runs a simple experiment where three objects share a number of (location,
feature) pairs.

Parameters:
----------------------------
@param noiseLevel (float)
    Noise level to add to the locations and features during inference

@param profile (bool)
    If True, the network will be profiled after learning and inference
def search_user(self, user_name, quiet=False, limit=9):
    result = self.search(user_name, search_type=1002, limit=limit)
    if result['result']['userprofileCount'] <= 0:
        LOG.warning('User %s does not exist!', user_name)
        raise SearchNotFound('user {} does not exist'.format(user_name))
    else:
        users = result['result']['userprofiles']
        if quiet:
            user_id, user_name = users[0]['userId'], users[0]['nickname']
            user = User(user_id, user_name)
            return user
        else:
            return self.display.select_one_user(users)
Search user by user name.

:params user_name: user name.
:params quiet: automatically select the best one.
:params limit: user count returned by weapi.
:return: a User object.
def controlled(self, control_qubit):
    control_qubit = unpack_qubit(control_qubit)
    self.modifiers.insert(0, "CONTROLLED")
    self.qubits.insert(0, control_qubit)
    return self
Add the CONTROLLED modifier to the gate with the given control qubit.
def get_group_usage_link(self):
    first_element = self.group_list[0]
    usage_link = getattr(first_element.form, 'usage_link', None)
    return usage_link
Get the usage link for the group element.
def rename(self, src_basename, dest_basename, datadir="outdir"):
    directory = {
        "indir": self.indir,
        "outdir": self.outdir,
        "tmpdir": self.tmpdir,
    }[datadir]
    src = directory.path_in(src_basename)
    dest = directory.path_in(dest_basename)
    os.rename(src, dest)
Rename a file located in datadir. src_basename and dest_basename are the basename of the source file and of the destination file, respectively.
def parse_number(self):
    value = self.current_token.value
    suffix = value[-1].lower()
    try:
        if suffix in NUMBER_SUFFIXES:
            return NUMBER_SUFFIXES[suffix](value[:-1])
        return Double(value) if '.' in value else Int(value)
    except (OutOfRange, ValueError):
        return String(value)
Parse a number from the token stream.
def recalculate(self, amount, billing_info):
    order = Order(pk=-1)
    order.amount = amount
    order.currency = self.get_currency()
    country = getattr(billing_info, 'country', None)
    if country is not None:
        country = country.code
    tax_number = getattr(billing_info, 'tax_number', None)
    tax_session_key = "tax_%s_%s" % (tax_number, country)
    tax = self.request.session.get(tax_session_key)
    if tax is None:
        taxation_policy = getattr(settings, 'PLANS_TAXATION_POLICY', None)
        if not taxation_policy:
            raise ImproperlyConfigured('PLANS_TAXATION_POLICY is not set')
        taxation_policy = import_name(taxation_policy)
        tax = str(taxation_policy.get_tax_rate(tax_number, country))
        self.request.session[tax_session_key] = tax
    order.tax = Decimal(tax) if tax != 'None' else None
    return order
Calculates and returns a pre-filled Order.
def remove_entry(data, entry):
    file_field = entry['fields'].get('file')
    if file_field:
        try:
            os.remove(file_field)
        except IOError:
            click.echo('This entry\'s file was missing')
    data.remove(entry)
Remove an entry in place.
def cast2theano_var(self, array_like, name=None):
    array = np.asarray(array_like)
    args = (name, array.dtype)
    ndim = array.ndim
    if ndim == 0:
        return T.scalar(*args)
    elif ndim == 1:
        return T.vector(*args)
    elif ndim == 2:
        return T.matrix(*args)
    elif ndim == 3:
        return T.tensor3(*args)
    elif ndim == 4:
        return T.tensor4(*args)
    else:
        raise ValueError('extheano.jit.Compiler: Unsupported type or shape')
Cast `numpy.ndarray` into `theano.tensor` keeping `dtype` and `ndim` compatible
def copy(self, extra=None):
    if extra is None:
        extra = dict()
    newTVS = Params.copy(self, extra)
    if self.isSet(self.estimator):
        newTVS.setEstimator(self.getEstimator().copy(extra))
    if self.isSet(self.evaluator):
        newTVS.setEvaluator(self.getEvaluator().copy(extra))
    return newTVS
Creates a copy of this instance with a randomly generated uid and some
extra params. This copy creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.

:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
def estimate(phenotype, G=None, K=None, covariates=None, overdispersion=True):
    logger = logging.getLogger(__name__)
    logger.info('Heritability estimation has started.')
    G, K = _background_standardize(G, K)
    if G is None and K is None:
        raise Exception('G and K cannot be all None.')
    Q0, Q1, S0 = _background_decomposition(G, K)
    if covariates is None:
        logger.debug('Inserting offset covariate.')
        covariates = ones((phenotype.sample_size, 1))
    logger.debug('Constructing EP.')
    from limix_inference.glmm import ExpFamEP
    ep = ExpFamEP(phenotype.to_likelihood(), covariates, Q0, Q1, S0,
                  overdispersion)
    logger.debug('EP optimization.')
    ep.learn()
    h2 = ep.heritability
    logger.info('Found heritability before correction: %.5f.', h2)
    return h2
Estimate the so-called narrow-sense heritability.

It supports Bernoulli and Binomial phenotypes (see `outcome_type`). The
user must specify only one of the parameters G, K, and QS for defining
the genetic background.

Let :math:`N` be the sample size, :math:`S` the number of covariates, and
:math:`P_b` the number of genetic markers used for Kinship estimation.

:param numpy.ndarray y: Phenotype. The domain has to be the non-negative
                        integers. Dimension (:math:`N\\times 0`).
:param numpy.ndarray G: Genetic markers matrix used internally for kinship
                        estimation. Dimension (:math:`N\\times P_b`).
:param numpy.ndarray K: Kinship matrix. Dimension (:math:`N\\times N`).
:param tuple QS: Economic eigen decomposition of the Kinship matrix.
:param numpy.ndarray covariate: Covariates. Default is an offset.
                                Dimension (:math:`N\\times S`).
:param object outcome_type: Either :class:`limix_qep.Bernoulli` (default)
                            or a :class:`limix_qep.Binomial` instance.
:param float prevalence: Population rate of cases for dichotomous
                         phenotypes. Typically useful for case-control
                         studies.
:return: a tuple containing the estimated heritability and additional
         information, respectively.
def checkout_deploy_branch(deploy_branch, canpush=True):
    create_deploy_branch(deploy_branch, push=canpush)
    remote_branch = "doctr_remote/{}".format(deploy_branch)
    print("Checking out doctr working branch tracking", remote_branch)
    clear_working_branch()
    if run(['git', 'rev-parse', '--verify', remote_branch], exit=False) == 0:
        extra_args = ['--track', remote_branch]
    else:
        extra_args = []
    run(['git', 'checkout', '-b', DOCTR_WORKING_BRANCH] + extra_args)
    print("Done")
    return canpush
Checkout the deploy branch, creating it if it doesn't exist.
def get_hour_dirs(root=None):
    root = root or selfplay_dir()
    return list(filter(lambda s: re.match(r"\d{4}-\d{2}-\d{2}-\d{2}", s),
                       gfile.ListDirectory(root)))
Gets the directories under selfplay_dir that match YYYY-MM-DD-HH.
def _find(self, id_):
    cur = self._connection.cursor()
    cur.execute(
        'SELECT fileNumber, offset FROM sequences WHERE id = ?', (id_,))
    row = cur.fetchone()
    if row is None:
        return None
    else:
        return self._getFilename(row[0]), row[1]
Find the filename and offset of a sequence, given its id.

@param id_: A C{str} sequence id.
@return: A 2-tuple, containing the C{str} file name and C{int} offset
         within that file of the sequence.
def cast_scalar_to_array(shape, value, dtype=None):
    if dtype is None:
        dtype, fill_value = infer_dtype_from_scalar(value)
    else:
        fill_value = value
    values = np.empty(shape, dtype=dtype)
    values.fill(fill_value)
    return values
Create np.ndarray of specified shape and dtype, filled with values.

Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
    dtype to coerce

Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
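Usage: with an explicit dtype the scalar is used as-is as the fill value.

>>> import numpy as np
>>> cast_scalar_to_array((2, 2), 1.5, dtype=np.float64)
array([[1.5, 1.5],
       [1.5, 1.5]])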
def get_dict_from_response(response):
    if getattr(response, '_resp') and response._resp.code > 400:
        raise OAuthResponseError(
            'Application mis-configuration in Globus', None, response
        )
    return response.data
Check for errors in the response and return the resulting JSON.
def request_finished(key):
    with event_lock:
        threads[key] = threads[key][1:]
        if threads[key]:
            threads[key][0].run()
Remove finished Thread from queue.

:param key: data source key
def _safemembers(members):
    base = _resolved(".")
    for finfo in members:
        if _badpath(finfo.name, base):
            print(finfo.name, "is blocked (illegal path)")
        elif finfo.issym() and _badlink(finfo, base):
            print(finfo.name, "is blocked: Symlink to", finfo.linkname)
        elif finfo.islnk() and _badlink(finfo, base):
            print(finfo.name, "is blocked: Hard link to", finfo.linkname)
        else:
            yield finfo
Check members of a tar archive for safety.

Ensure that they do not contain paths or links outside of where we need
them - this would only happen if the archive wasn't made by eqcorrscan.

:type members: :class:`tarfile.TarFile`
:param members: an open tarfile.
def get_pdos(dos, lm_orbitals=None, atoms=None, elements=None):
    if not elements:
        symbols = dos.structure.symbol_set
        elements = dict(zip(symbols, [None] * len(symbols)))
    pdos = {}
    for el in elements:
        if atoms and el not in atoms:
            continue
        element_sites = [site for site in dos.structure.sites
                         if site.specie == get_el_sp(el)]
        sites = [site for i, site in enumerate(element_sites)
                 if not atoms or (el in atoms and i in atoms[el])]
        lm = lm_orbitals[el] if (lm_orbitals and el in lm_orbitals) else None
        orbitals = elements[el] if elements and el in elements else None
        pdos[el] = get_element_pdos(dos, el, sites, lm, orbitals)
    return pdos
Extract the projected density of states from a CompleteDos object.

Args:
    dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The
        density of states.
    elements (:obj:`dict`, optional): The elements and orbitals to extract
        from the projected density of states. Should be provided as a
        :obj:`dict` with the keys as the element names and corresponding
        values as a :obj:`tuple` of orbitals. For example, the following
        would extract the Bi s, px, py and d orbitals::

            {'Bi': ('s', 'px', 'py', 'd')}

        If an element is included with an empty :obj:`tuple`, all orbitals
        for that species will be extracted. If ``elements`` is not set or
        set to ``None``, all elements for all species will be extracted.
    lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into
        their lm contributions (e.g. p -> px, py, pz). Should be provided
        as a :obj:`dict`, with the elements names as keys and a
        :obj:`tuple` of orbitals as the corresponding values. For example,
        the following would be used to decompose the oxygen p and d
        orbitals::

            {'O': ('p', 'd')}

    atoms (:obj:`dict`, optional): Which atomic sites to use when
        calculating the projected density of states. Should be provided
        as a :obj:`dict`, with the element names as keys and a
        :obj:`tuple` of :obj:`int` specifying the atomic indices as the
        corresponding values. The elemental projected density of states
        will be summed only over the atom indices specified. If an element
        is included with an empty :obj:`tuple`, then all sites for that
        element will be included. The indices are 0 based for each element
        specified in the POSCAR. For example, the following will calculate
        the density of states for the first 4 Sn atoms and all O atoms in
        the structure::

            {'Sn': (1, 2, 3, 4), 'O': ()}

        If ``atoms`` is not set or set to ``None`` then all atomic sites
        for all elements will be considered.

Returns:
    dict: The projected density of states. Formatted as a :obj:`dict` of
    :obj:`dict` mapping the elements and their orbitals to
    :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example::

        {
            'Bi': {'s': Dos, 'p': Dos ... },
            'S': {'s': Dos}
        }
def _convert_dict_inputs(inputs, tensor_info_map):
    dict_inputs = _prepare_dict_inputs(inputs, tensor_info_map)
    return tensor_info.convert_dict_to_compatible_tensor(dict_inputs,
                                                         tensor_info_map)
Converts from inputs into dict of input tensors.

This handles:
  - putting inputs into a dict, per _prepare_dict_inputs(),
  - converting all input values into tensors compatible with the
    expected input tensor (dtype, shape).
  - check sparse/non-sparse tensor types.

Args:
  inputs: inputs fed to Module.__call__().
  tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
    describing the signature inputs.

Returns:
  A dict of tensors to feed to the signature instantiation.

Raises:
  TypeError: If it fails to convert the input values into a dict of
    tensors to feed to the signature instantiation.
def run_type(self):
    METAGGA_TYPES = {"TPSS", "RTPSS", "M06L", "MBJL", "SCAN", "MS0", "MS1",
                     "MS2"}
    if self.parameters.get("LHFCALC", False):
        rt = "HF"
    elif self.parameters.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
        rt = self.parameters["METAGGA"].strip().upper()
    elif self.parameters.get("LUSE_VDW", False):
        vdw_gga = {"RE": "DF", "OR": "optPBE", "BO": "optB88",
                   "MK": "optB86b", "ML": "DF2"}
        gga = self.parameters.get("GGA").upper()
        rt = "vdW-" + vdw_gga[gga]
    elif self.potcar_symbols[0].split()[0] == 'PAW':
        rt = "LDA"
    else:
        rt = "GGA"
    if self.is_hubbard:
        rt += "+U"
    return rt
Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs. TODO: Fix for other functional types like PW91, other vdW types, etc.
def _process_input_wcs_single(fname, wcskey, updatewcs):
    if wcskey in ['', ' ', 'INDEF', None]:
        if updatewcs:
            uw.updatewcs(fname, checkfiles=False)
    else:
        numext = fileutil.countExtn(fname)
        extlist = []
        for extn in range(1, numext + 1):
            extlist.append(('SCI', extn))
        if wcskey in string.ascii_uppercase:
            wkey = wcskey
            wname = ' '
        else:
            wname = wcskey
            wkey = ' '
        altwcs.restoreWCS(fname, extlist, wcskey=wkey, wcsname=wname)
    if wcskey not in ['', ' ', 'INDEF', None] or updatewcs:
        wcscorr.init_wcscorr(fname)
See docs for _process_input_wcs. This is separated to be spawned in parallel.
def run_once(function, state={}, errors={}):
    @six.wraps(function)
    def _wrapper(*args, **kwargs):
        if function in errors:
            six.reraise(*errors[function])
        try:
            return state[function]
        except KeyError:
            try:
                state[function] = result = function(*args, **kwargs)
                return result
            except Exception:
                errors[function] = sys.exc_info()
                raise
    return _wrapper
A memoization decorator, whose purpose is to cache calls.
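Usage: the wrapped function runs once, and later calls return the cached result (note the cache is keyed on the function, not on the arguments).

>>> @run_once
... def expensive():
...     print('computing')
...     return 42
>>> expensive()
computing
42
>>> expensive()
42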
def preview(self):
    if self._preview is None:
        from twilio.rest.preview import Preview
        self._preview = Preview(self)
    return self._preview
Access the Preview Twilio Domain.

:returns: Preview Twilio Domain
:rtype: twilio.rest.preview.Preview
def _create_local_agent_channel(self, io_loop):
    logger.info('Initializing Jaeger Tracer with UDP reporter')
    return LocalAgentSender(
        host=self.local_agent_reporting_host,
        sampling_port=self.local_agent_sampling_port,
        reporting_port=self.local_agent_reporting_port,
        throttling_port=self.throttler_port,
        io_loop=io_loop
    )
Create an out-of-process channel communicating to local jaeger-agent.

Spans are submitted as SOCK_DGRAM Thrift, sampling strategy is polled
via JSON HTTP.

:param self: instance of Config
def _time_request(self, uri, method, **kwargs):
    start_time = time.time()
    resp, body = self.request(uri, method, **kwargs)
    self.times.append(("%s %s" % (method, uri), start_time, time.time()))
    return resp, body
Wraps the request call and records the elapsed time.
def get_lon_variable(nc):
    if 'longitude' in nc.variables:
        return 'longitude'
    longitudes = nc.get_variables_by_attributes(standard_name="longitude")
    if longitudes:
        return longitudes[0].name
    return None
Returns the variable for longitude.

:param netCDF4.Dataset nc: netCDF dataset
def delete(self, uri, default_response=None):
    url = self.api_url + uri
    response = requests.delete(
        url,
        headers=self.headers,
        verify=self.verify_ssl,
        auth=self.auth,
        timeout=self.timeout)
    return self.success_or_raise(response, default_response=default_response)
Call DELETE on the Gitlab server.

>>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False)
>>> gitlab.login(user='root', password='5iveL!fe')
>>> gitlab.delete('/users/5')

:param uri: String with the URI you wish to delete
:param default_response: Return value if JSONDecodeError
:return: Dictionary containing response data
:raise: HttpError: If invalid response returned
def authorized(remote_app=None):
    if remote_app not in current_oauthclient.handlers:
        return abort(404)
    state_token = request.args.get('state')
    try:
        assert state_token
        state = serializer.loads(state_token)
        assert state['sid'] == _create_identifier()
        assert state['app'] == remote_app
        set_session_next_url(remote_app, state['next'])
    except (AssertionError, BadData):
        if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or (
                not (current_app.debug or current_app.testing)):
            abort(403)
    try:
        handler = current_oauthclient.handlers[remote_app]()
    except OAuthException as e:
        if e.type == 'invalid_response':
            abort(500)
        else:
            raise
    return handler
Authorized handler callback.
def process_associations(self, limit):
    myfile = '/'.join((self.rawdir, self.files['data']['file']))
    f = gzip.open(myfile, 'rb')
    filereader = io.TextIOWrapper(f, newline="")
    filereader.readline()
    for event, elem in ET.iterparse(filereader):
        self.process_xml_table(
            elem, 'Article_Breed', self._process_article_breed_row, limit)
        self.process_xml_table(
            elem, 'Article_Phene', self._process_article_phene_row, limit)
        self.process_xml_table(
            elem, 'Breed_Phene', self._process_breed_phene_row, limit)
        self.process_xml_table(
            elem, 'Lida_Links', self._process_lida_links_row, limit)
        self.process_xml_table(
            elem, 'Phene_Gene', self._process_phene_gene_row, limit)
        self.process_xml_table(
            elem, 'Group_MPO', self._process_group_mpo_row, limit)
    f.close()
    return
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.

:param limit:
:return:
def upgrade(self, package, force=False):
    self.install(package, upgrade=True, force=force)
Shortcut method to upgrade a package. If `force` is set to True, the package and all of its dependencies will be reinstalled, otherwise if the package is up to date, this command is a no-op.
def offset(self, offset):
    span = self
    if offset > 0:
        for i in iter_range(offset):
            span = span.next_period()
    elif offset < 0:
        for i in iter_range(-offset):
            span = span.prev_period()
    return span
Offset the date range by the given amount of periods.

This differs from :meth:`~spans.types.OffsetableRangeMixin.offset` on
:class:`spans.types.daterange` by not accepting a ``timedelta`` object.
Instead it expects an integer to adjust the typed date range by. The
given value may be negative as well.

:param offset: Number of periods to offset this range by. A period is
               either a day, week, american week, month, quarter or
               year, depending on this range's period type.
:return: New offset :class:`~spans.types.PeriodRange`
def state_size(self) -> Sequence[Shape]:
    return self._sizes(self._compiler.rddl.state_size)
Returns the MDP state size.
def fill_n(self, values, weights=None, dropna: bool = True):
    values = np.asarray(values)
    if dropna:
        values = values[~np.isnan(values)]
    if self._binning.is_adaptive():
        map = self._binning.force_bin_existence(values)
        self._reshape_data(self._binning.bin_count, map)
    if weights:
        weights = np.asarray(weights)
        self._coerce_dtype(weights.dtype)
    (frequencies, errors2, underflow, overflow, stats) = \
        calculate_frequencies(values, self._binning, dtype=self.dtype,
                              weights=weights, validate_bins=False)
    self._frequencies += frequencies
    self._errors2 += errors2
    if self.keep_missed:
        self.underflow += underflow
        self.overflow += overflow
    if self._stats:
        for key in self._stats:
            self._stats[key] += stats.get(key, 0.0)
Update histograms with a set of values.

Parameters
----------
values: array_like
weights: Optional[array_like]
dropna: Optional[bool]
    If true (default), all nan's are skipped.
def get_queue_acl(self, queue_name, timeout=None):
    _validate_not_none('queue_name', queue_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = _get_path(queue_name)
    request.query = [
        ('comp', 'acl'),
        ('timeout', _int_to_str(timeout)),
    ]
    response = self._perform_request(request)
    return _convert_xml_to_signed_identifiers(response.body)
Returns details about any stored access policies specified on the queue
that may be used with Shared Access Signatures.

:param str queue_name: The name of an existing queue.
:param int timeout: The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
def wait_until_element_has_focus(self, locator, timeout=None):
    self._info("Waiting for focus on '%s'" % (locator))
    self._wait_until_no_error(timeout, self._check_element_focus_exp, True,
                              locator, timeout)
Waits until the element identified by `locator` has focus.
You might rather want to use `Element Focus Should Be Set`.

| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
def distinct(iterable, keyfunc=None):
    seen = set()
    for item in iterable:
        key = item if keyfunc is None else keyfunc(item)
        if key not in seen:
            seen.add(key)
            yield item
Yields distinct items from `iterable` in the order that they appear.
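Usage: duplicates are dropped while order is preserved; a key function can define what counts as a duplicate.

>>> list(distinct([1, 2, 1, 3, 2]))
[1, 2, 3]
>>> list(distinct(['a', 'A', 'b'], keyfunc=str.lower))
['a', 'b']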
def get_resources(self):
    json_resources = self.rest_client.make_request(
        self.resource_url)['resources']
    return [RestResource(resource, self.rest_client)
            for resource in json_resources]
Retrieves a list of all known Streams high-level REST resources.

Returns:
    :py:obj:`list` of :py:class:`~.rest_primitives.RestResource`: List
    of all Streams high-level REST resources.
def to_pascal_case(s):
    return re.sub(r'(?!^)_([a-zA-Z])',
                  lambda m: m.group(1).upper(),
                  s.capitalize())
Transform underscore separated string to pascal case
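Usage: the first character is capitalized and each underscore-letter pair collapses into an uppercase letter.

>>> to_pascal_case('foo_bar_baz')
'FooBarBaz'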
def argset(name, *args, **kwargs):
    def _arg(f):
        if not hasattr(f, '_subcommand_argsets'):
            f._subcommand_argsets = {}
        f._subcommand_argsets.setdefault(name, []).append((args, kwargs))
        return f
    return _arg
Decorator to add sets of required mutually exclusive args to subcommands.
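A small sketch of how the decorator accumulates argument sets on the wrapped function; `cmd` here is a hypothetical subcommand handler.

>>> @argset('verbosity', '-v', action='store_true')
... @argset('verbosity', '-q', action='store_true')
... def cmd(args):
...     pass
>>> len(cmd._subcommand_argsets['verbosity'])
2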
def read_features_and_groups(features_path, groups_path):
    "Reader for data and groups"
    try:
        if not pexists(features_path):
            raise ValueError('non-existent features file')
        if not pexists(groups_path):
            raise ValueError('non-existent groups file')
        if isinstance(features_path, str):
            features = np.genfromtxt(features_path, dtype=float)
        else:
            raise ValueError('features input must be a file path ')
        if isinstance(groups_path, str):
            groups = np.genfromtxt(groups_path, dtype=str)
        else:
            raise ValueError('groups input must be a file path ')
    except:
        raise IOError('error reading the specified features and/or groups.')
    if len(features) != len(groups):
        raise ValueError("lengths of features and groups do not match!")
    return features, groups
Reader for data and groups
def get_method_name(method):
    name = get_object_name(method)
    if name.startswith("__") and not name.endswith("__"):
        name = "_{0}{1}".format(get_object_name(method.im_class), name)
    return name
Returns given method name.

:param method: Method to retrieve the name.
:type method: object
:return: Method name.
:rtype: unicode
def setup(addr, user, remote_path, local_key=None):
    port = find_port(addr, user)
    if not port or not is_alive(addr, user):
        port = new_port()
        scp(addr, user, __file__, '~/unixpipe', local_key)
        ssh_call = ['ssh',
                    '-fL%d:127.0.0.1:12042' % port,
                    '-o', 'ExitOnForwardFailure=yes',
                    '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % port,
                    '-o', 'ControlMaster=auto',
                    '%s@%s' % (user, addr,),
                    'python', '~/unixpipe', 'server', remote_path]
        if local_key:
            ssh_call.insert(1, local_key)
            ssh_call.insert(1, '-i')
        subprocess.call(ssh_call)
        time.sleep(1)
    return port
Setup the tunnel
def stamp_allowing_unusual_version_table(
        config: Config,
        revision: str,
        sql: bool = False,
        tag: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> None:
    script = ScriptDirectory.from_config(config)
    starting_rev = None
    if ":" in revision:
        if not sql:
            raise CommandError("Range revision not allowed")
        starting_rev, revision = revision.split(':', 2)

    def do_stamp(rev: str, context):
        return script._stamp_revs(revision, rev)

    with EnvironmentContext(config,
                            script,
                            fn=do_stamp,
                            as_sql=sql,
                            destination_rev=revision,
                            starting_rev=starting_rev,
                            tag=tag,
                            version_table=version_table):
        script.run_env()
Stamps the Alembic version table with the given revision; don't run any
migrations.

This function is a clone of ``alembic.command.stamp()``, but allowing
``version_table`` to change.

See http://alembic.zzzcomputing.com/en/latest/api/commands.html#alembic.command.stamp
def from_file(cls, path, format='infer'):
    format = _infer_format(path, format=format)
    origin = os.path.abspath(os.path.dirname(path))
    with open(path) as f:
        data = f.read()
    if format == 'json':
        obj = json.loads(data)
    else:
        obj = yaml.safe_load(data)
    return cls.from_dict(obj, _origin=origin)
Create an instance from a json or yaml file.

Parameters
----------
path : str
    The path to the file to load.
format : {'infer', 'json', 'yaml'}, optional
    The file format. By default the format is inferred from the file
    extension.
def _register_bounds_validator_if_needed(parser, name, flag_values):
    if parser.lower_bound is not None or parser.upper_bound is not None:

        def checker(value):
            if value is not None and parser.is_outside_bounds(value):
                message = '%s is not %s' % (value, parser.syntactic_help)
                raise _exceptions.ValidationError(message)
            return True

        _validators.register_validator(name, checker, flag_values=flag_values)
Enforces lower and upper bounds for numeric flags.

Args:
  parser: NumericParser (either FloatParser or IntegerParser), provides
    lower and upper bounds, and help text to display.
  name: str, name of the flag
  flag_values: FlagValues.
def _set_int(self, commands, name):
    if name in commands:
        try:
            value = int(commands[name])
            setattr(self, name, value)
        except ValueError:
            pass
set integer value from commands
def list_source_code(self, context: int = 5) -> None:
    self._verify_entrypoint_selected()
    current_trace_frame = self.trace_tuples[
        self.current_trace_frame_index
    ].trace_frame
    filename = os.path.join(self.repository_directory,
                            current_trace_frame.filename)
    file_lines: List[str] = []
    try:
        with open(filename, "r") as file:
            file_lines = file.readlines()
    except FileNotFoundError:
        self.warning(f"Couldn't open {filename}.")
        return
    self._output_file_lines(current_trace_frame, file_lines, context)
Show source code around the current trace frame location.

Parameters:
    context: int    number of lines to show above and below
                    trace location (default: 5)
def complete_server(self, text, line, begidx, endidx):
    return [i for i in PsiturkShell.server_commands if i.startswith(text)]
Tab-complete server command
def contexts(self):
    if not hasattr(self, "_contexts"):
        cs = {}
        for cr in self.doc["contexts"]:
            cs[cr["name"]] = copy.deepcopy(cr["context"])
        self._contexts = cs
    return self._contexts
Returns known contexts by exposing as a read-only property.
def valid_hash_value(hashname):
    custom_hash_prefix_re = re.compile(r"^x_")
    if hashname in enums.HASH_ALGO_OV or custom_hash_prefix_re.match(hashname):
        return True
    else:
        return False
Return true if given value is a valid, recommended hash name according to the STIX 2 specification.
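A usage sketch, assuming `enums.HASH_ALGO_OV` contains the open-vocabulary names such as 'SHA-256'; custom names must carry the `x_` prefix.

>>> valid_hash_value('SHA-256')
True
>>> valid_hash_value('x_custom_hash')
True
>>> valid_hash_value('md5-custom')
False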
def number_of_statements(self, n):
    stmt_type = n.type
    if stmt_type == syms.compound_stmt:
        return 1
    elif stmt_type == syms.stmt:
        return self.number_of_statements(n.children[0])
    elif stmt_type == syms.simple_stmt:
        return len(n.children) // 2
    else:
        raise AssertionError("non-statement node")
Compute the number of AST statements contained in a node.
def describe_processors(cls):
    for processor in cls.post_processors(cls):
        yield {'name': processor.__name__,
               'description': processor.__doc__,
               'processor': processor}
List all postprocessors and their description
def inspect(self):
    timeout_manager = future_timeout_manager(self.sync_timeout)
    sensor_index_before = copy.copy(self._sensors_index)
    request_index_before = copy.copy(self._requests_index)
    try:
        request_changes = yield self.inspect_requests(
            timeout=timeout_manager.remaining())
        sensor_changes = yield self.inspect_sensors(
            timeout=timeout_manager.remaining())
    except Exception:
        self._sensors_index = sensor_index_before
        self._requests_index = request_index_before
        raise
    model_changes = AttrDict()
    if request_changes:
        model_changes.requests = request_changes
    if sensor_changes:
        model_changes.sensors = sensor_changes
    if model_changes:
        raise Return(model_changes)
Inspect device requests and sensors, update model.

Returns
-------
Tornado future that resolves with:

model_changes : Nested AttrDict or None
    Contains sets of added/removed request/sensor names

    Example structure:

        {'requests': {
            'added': set(['req1', 'req2']),
            'removed': set(['req10', 'req20'])},
         'sensors': {
            'added': set(['sens1', 'sens2']),
            'removed': set(['sens10', 'sens20'])}}

    If there are no changes keys may be omitted. If an item is in both
    the 'added' and 'removed' sets that means that it changed.

    If neither request nor sensor changes are present, None is returned
    instead of a nested structure.
def make_certificate_authority(**name):
    key = make_pkey()
    csr = make_certificate_signing_request(key, **name)
    crt = make_certificate(
        csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60,
        exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
    return key, crt
Make a certificate authority.

A certificate authority can sign certificates. For clients to be able to
validate certificates signed by your certificate authority, they must
trust the certificate returned by this function.

:param name: Key word arguments containing subject name parts: C, ST, L,
             O, OU, CN.
:return: A root self-signed certificate to act as an authority.
:rtype: :class:`OpenSSL.crypto.X509`
def create(self, key=None, label=None):
    key = '%s' % key
    url = self.bitbucket.url('SET_SSH_KEY')
    return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth,
                                   key=key, label=label)
Associate an ssh key with your account and return it.