Columns: text (string, lengths 78 to 104k) | score (float64, range 0 to 0.18)
def create(self, max_wait=300, allocated_storage=None,
           encryption_at_rest=None, restore_to_time=None, **kwargs):
    """
    Create an instance of the PostgreSQL service with the typical
    starting settings.

    :param max_wait: service is created asynchronously, so will only
        wait this number of seconds before giving up.
    :param allocated_storage: int for GBs to be allocated for storage
    :param encryption_at_rest: boolean for encrypting data that is stored
    :param restore_to_time: UTC date within recovery period for db backup
        to be used when initiating
    """
    # MAINT: Add these if there is demand for it and validated
    if allocated_storage or encryption_at_rest or restore_to_time:
        raise NotImplementedError()

    # Will need to wait for the service to be provisioned before can add
    # service keys and get env details.
    self.service.create(async=True, create_keys=False)
    while self._create_in_progress() and max_wait > 0:
        if max_wait % 5 == 0:
            logging.warning('Can take {}s for create to finish.'.format(max_wait))
        time.sleep(1)
        max_wait -= 1

    # Now get the service env (via service keys)
    cfg = self.service._get_service_config()
    self.service.settings.save(cfg)

    hostname = predix.config.get_env_key(self.use_class, 'hostname')
    os.environ[hostname] = self.service.settings.data['hostname']

    password = predix.config.get_env_key(self.use_class, 'password')
    os.environ[password] = self.service.settings.data['password']

    port = predix.config.get_env_key(self.use_class, 'port')
    os.environ[port] = str(self.service.settings.data['port'])

    username = predix.config.get_env_key(self.use_class, 'username')
    os.environ[username] = self.service.settings.data['username']

    uri = predix.config.get_env_key(self.use_class, 'uri')
    os.environ[uri] = self.service.settings.data['uri']
0.002408
def run(self, records):
    """Runs the batch upload

    :param records: an iterable containing queue entries
    """
    self_name = type(self).__name__
    for i, batch in enumerate(grouper(records, self.BATCH_SIZE, skip_missing=True), 1):
        self.logger.info('%s processing batch %d', self_name, i)
        try:
            for j, proc_batch in enumerate(grouper(
                    process_records(batch).iteritems(), self.BATCH_SIZE,
                    skip_missing=True), 1):
                self.logger.info('%s uploading chunk #%d (batch %d)',
                                 self_name, j, i)
                self.upload_records({k: v for k, v in proc_batch}, from_queue=True)
        except Exception:
            self.logger.exception('%s could not upload batch', self_name)
            return
        self.logger.info('%s finished batch %d', self_name, i)
        self.processed_records(batch)
    self.logger.info('%s finished', self_name)
0.006154
def get_acmg(acmg_terms):
    """Use the algorithm described in the ACMG paper to get an ACMG classification

    Args:
        acmg_terms(set(str)): A collection of prediction terms

    Returns:
        prediction(str): One of 'uncertain_significance', 'benign',
            'likely_benign', 'likely_pathogenic' or 'pathogenic'
    """
    prediction = 'uncertain_significance'
    # This variable indicates if Pathogenicity Very Strong exists
    pvs = False
    # Collection of terms with Pathogenicity Strong
    ps_terms = []
    # Collection of terms with Pathogenicity moderate
    pm_terms = []
    # Collection of terms with Pathogenicity supporting
    pp_terms = []
    # This variable indicates if Benign impact stand-alone exists
    ba = False
    # Collection of terms with Benign evidence Strong
    bs_terms = []
    # Collection of terms with supporting Benign evidence
    bp_terms = []

    for term in acmg_terms:
        if term.startswith('PVS'):
            pvs = True
        elif term.startswith('PS'):
            ps_terms.append(term)
        elif term.startswith('PM'):
            pm_terms.append(term)
        elif term.startswith('PP'):
            pp_terms.append(term)
        elif term.startswith('BA'):
            ba = True
        elif term.startswith('BS'):
            bs_terms.append(term)
        elif term.startswith('BP'):
            bp_terms.append(term)

    # We need to start by checking for Pathogenicity
    pathogenic = is_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
    likely_pathogenic = is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
    benign = is_benign(ba, bs_terms)
    likely_benign = is_likely_benign(bs_terms, bp_terms)

    if (pathogenic or likely_pathogenic):
        if (benign or likely_benign):
            prediction = 'uncertain_significance'
        elif pathogenic:
            prediction = 'pathogenic'
        else:
            prediction = 'likely_pathogenic'
    else:
        if benign:
            prediction = 'benign'
        if likely_benign:
            prediction = 'likely_benign'

    return prediction
0.000463
def _ancestors_or_self(
        self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's ancestors including itself."""
    res = [] if qname and self.qual_name != qname else [self]
    return res + self.up()._ancestors(qname)
0.006757
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """This successively appends each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element
    in the list into a single list of values. It uses the :func:`itertools.chain`
    function to achieve this. This function is particularly useful for specifying
    multiple types of simulations with different parameters.

    Args:
        variables: The variables object
        parent: Unused

    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Create a single list containing all the values
    yield list(
        chain.from_iterable(
            variable_matrix(item, parent, "product") for item in variables
        )
    )
0.007143
def _render_batch(self,
                  non_fluents: NonFluents,
                  states: Fluents,
                  actions: Fluents,
                  interms: Fluents,
                  rewards: np.array,
                  horizon: Optional[int] = None) -> None:
    '''Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`
    for given `horizon`.

    Args:
        states (Sequence[Tuple[str, np.array]]): A state trajectory.
        actions (Sequence[Tuple[str, np.array]]): An action trajectory.
        interms (Sequence[Tuple[str, np.array]]): An interm state trajectory.
        rewards (np.array): Sequence of rewards (1-dimensional array).
        horizon (Optional[int]): Number of timesteps.
    '''
    if horizon is None:
        horizon = len(states[0][1])
    self._render_round_init(horizon, non_fluents)

    for t in range(horizon):
        s = [(s[0], s[1][t]) for s in states]
        f = [(f[0], f[1][t]) for f in interms]
        a = [(a[0], a[1][t]) for a in actions]
        r = rewards[t]
        self._render_timestep(t, s, a, f, r)

    self._render_round_end(rewards)
0.006029
def load_name(self, load):
    '''
    Return the primary name associated with the load, if an empty
    string is returned then the load does not match the function
    '''
    if 'eauth' not in load:
        return ''
    fstr = '{0}.auth'.format(load['eauth'])
    if fstr not in self.auth:
        return ''
    try:
        pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])['args'][0]
        return load[pname_arg]
    except IndexError:
        return ''
0.003831
def zpk(self, zeros, poles, gain, analog=True, **kwargs):
    """Filter this `TimeSeries` by applying a zero-pole-gain filter

    Parameters
    ----------
    zeros : `array-like`
        list of zero frequencies (in Hertz)

    poles : `array-like`
        list of pole frequencies (in Hertz)

    gain : `float`
        DC gain of filter

    analog : `bool`, optional
        type of ZPK being applied, if `analog=True` all parameters
        will be converted in the Z-domain for digital filtering

    Returns
    -------
    timeseries : `TimeSeries`
        the filtered version of the input data

    See Also
    --------
    TimeSeries.filter
        for details on how a digital ZPK-format filter is applied

    Examples
    --------
    To apply a zpk filter with five zeros at 100 Hz, and five poles at
    1 Hz (giving an overall DC gain of 1e-10)::

        >>> data2 = data.zpk([100]*5, [1]*5, 1e-10)
    """
    return self.filter(zeros, poles, gain, analog=analog, **kwargs)
0.00182
def _has_x(self, kwargs):
    '''Returns True if x is explicitly defined in kwargs'''
    return (('x' in kwargs) or
            (self._element_x in kwargs) or
            (self._type == 3 and self._element_1mx in kwargs))
0.009009
def record_xml_output(rec, tags=None, order_fn=None):
    """Generate the XML for record 'rec'.

    :param rec: record
    :param tags: list of tags to be printed
    :return: string
    """
    if tags is None:
        tags = []
    if isinstance(tags, str):
        tags = [tags]
    if tags and '001' not in tags:
        # Add the missing controlfield.
        tags.append('001')

    marcxml = ['<record>']

    # Add the tag 'tag' to each field in rec[tag]
    fields = []
    if rec is not None:
        for tag in rec:
            if not tags or tag in tags:
                for field in rec[tag]:
                    fields.append((tag, field))
        if order_fn is None:
            record_order_fields(fields)
        else:
            record_order_fields(fields, order_fn)
        for field in fields:
            marcxml.append(field_xml_output(field[1], field[0]))
    marcxml.append('</record>')
    return '\n'.join(marcxml)
0.00106
def _create_reference_value_options(self, keys, finished_keys):
    """this method steps through the option definitions looking for
    alt paths.  On finding one, it creates the 'reference_value_from'
    links within the option definitions and populates it with copied
    options."""
    # a set of known reference_value_from_links
    set_of_reference_value_option_names = set()
    for key in keys:
        if key in finished_keys:
            continue
        an_option = self.option_definitions[key]
        if an_option.reference_value_from:
            fully_qualified_reference_name = '.'.join((
                an_option.reference_value_from,
                an_option.name
            ))
            if fully_qualified_reference_name in keys:
                # this referenced value has already been defined
                # no need to repeat it - skip on to the next key
                continue
            reference_option = an_option.copy()
            reference_option.reference_value_from = None
            reference_option.name = fully_qualified_reference_name
            # wait, aren't we setting a fully qualified dotted name into
            # the name field?  Yes, 'add_option' below sees that
            # full pathname and does the right thing with it to ensure
            # that the reference_option is created within the
            # correct namespace
            set_of_reference_value_option_names.add(
                fully_qualified_reference_name
            )
            self.option_definitions.add_option(reference_option)

    for a_reference_value_option_name in set_of_reference_value_option_names:
        for x in range(a_reference_value_option_name.count('.')):
            namespace_path = \
                a_reference_value_option_name.rsplit('.', x + 1)[0]
            self.option_definitions[namespace_path].ref_value_namespace()

    return set_of_reference_value_option_names
0.002426
def add_changes_markup(dom, ins_nodes, del_nodes):
    """
    Add <ins> and <del> tags to the dom to show changes.
    """
    # add markup for inserted and deleted sections
    for node in reversed(del_nodes):
        # diff algorithm deletes nodes in reverse order, so un-reverse the
        # order for this iteration
        insert_or_append(node.orig_parent, node, node.orig_next_sibling)
        wrap(node, 'del')
    for node in ins_nodes:
        wrap(node, 'ins')
    # Perform post-processing and cleanup.
    remove_nesting(dom, 'del')
    remove_nesting(dom, 'ins')
    sort_del_before_ins(dom)
    merge_adjacent(dom, 'del')
    merge_adjacent(dom, 'ins')
0.001495
def rename(self, old_table, new_table):
    """
    Rename a table.

    You must have ALTER and DROP privileges for the original table,
    and CREATE and INSERT privileges for the new table.
    """
    try:
        # Preferred syntax; fall back to ALTER TABLE ... RENAME if it fails
        self.execute('RENAME TABLE {0} TO {1}'.format(wrap(old_table), wrap(new_table)))
    except Exception:
        self.execute('ALTER TABLE {0} RENAME {1}'.format(wrap(old_table), wrap(new_table)))
    self._printer('Renamed {0} to {1}'.format(wrap(old_table), wrap(new_table)))
    return old_table, new_table
0.010327
def grind_hash_for_weapon(hashcode):
    """ Grinds the given hashcode for a weapon to draw on the pixelmap.
    Utilizes the second six characters from the hashcode."""
    weaponlist = init_weapon_list()
    # The second six characters of the hash
    # control the weapon decision.
    weapon_control = hashcode[ASPECT_CONTROL_LEN:(ASPECT_CONTROL_LEN * 2)]
    # Decimal value of the hash chunk to map.
    hash_dec_value = int(weapon_control, HEX_BASE)
    decision = map_decision(MAX_DECISION_VALUE, len(weaponlist), hash_dec_value)
    return choose_weapon(decision, weaponlist)
0.003384
def format_string(m, l, capture, is_bytes):
    """Perform a string format."""

    for fmt_type, value in capture[1:]:
        if fmt_type == FMT_ATTR:
            # Attribute
            l = getattr(l, value)
        elif fmt_type == FMT_INDEX:
            # Index
            l = l[value]
        elif fmt_type == FMT_CONV:
            if is_bytes:
                # Conversion
                if value in ('r', 'a'):
                    l = repr(l).encode('ascii', 'backslashreplace')
                elif value == 's':
                    # If the object is not string or byte string already
                    l = _to_bstr(l)
            else:
                # Conversion
                if value == 'a':
                    l = ascii(l)
                elif value == 'r':
                    l = repr(l)
                elif value == 's':
                    # If the object is not string or byte string already
                    l = str(l)
        elif fmt_type == FMT_SPEC:
            # Integers and floats don't have an explicit 's' format type.
            if value[3] and value[3] == 's':
                if isinstance(l, int):  # pragma: no cover
                    raise ValueError("Unknown format code 's' for object of type 'int'")
                if isinstance(l, float):  # pragma: no cover
                    raise ValueError("Unknown format code 's' for object of type 'float'")
            # Ensure object is a byte string
            l = _to_bstr(l) if is_bytes else str(l)

            spec_type = value[1]
            if spec_type == '^':
                l = l.center(value[2], value[0])
            elif spec_type == ">":
                l = l.rjust(value[2], value[0])
            else:
                l = l.ljust(value[2], value[0])

    # Make sure the final object is a byte string
    return _to_bstr(l) if is_bytes else str(l)
0.008039
def to_xdr_object(self):
    """Creates an XDR Operation object that represents this
    :class:`CreateAccount`.
    """
    destination = account_xdr_object(self.destination)
    create_account_op = Xdr.types.CreateAccountOp(
        destination, Operation.to_xdr_amount(self.starting_balance))
    self.body.type = Xdr.const.CREATE_ACCOUNT
    self.body.createAccountOp = create_account_op
    return super(CreateAccount, self).to_xdr_object()
0.004141
def _input_as_lines(self, data):
    """Writes data to tempfile and sets -infile parameter

    data -- list of lines, ready to be written to file
    """
    if data:
        self.Parameters['--in']\
            .on(super(Clearcut, self)._input_as_lines(data))
    return ''
0.013289
def _update_file(self, seek_to_end=True):
    """Open the file for tailing"""
    try:
        self.close()
        self._file = self.open()
    except IOError:
        pass
    else:
        if not self._file:
            return

        self.active = True
        try:
            st = os.stat(self._filename)
        except EnvironmentError, err:
            if err.errno == errno.ENOENT:
                self._log_info('file removed')
                self.close()

        fid = self.get_file_id(st)
        if not self._fid:
            self._fid = fid

        if fid != self._fid:
            self._log_info('file rotated')
            self.close()
        elif seek_to_end:
            self._seek_to_end()
0.002472
def close(self):
    """Close a port on dummy_serial."""
    if VERBOSE:
        _print_out('\nDummy_serial: Closing port\n')

    if not self._isOpen:
        raise IOError('Dummy_serial: The port is already closed')

    self._isOpen = False
    self.port = None
0.009868
def compose(self, bbox=None, **kwargs):
    """
    Compose the artboard.

    See :py:func:`~psd_tools.compose` for available extra arguments.

    :param bbox: Viewport tuple (left, top, right, bottom).
    :return: :py:class:`PIL.Image`, or `None` if there is no pixel.
    """
    from psd_tools.api.composer import compose
    return compose(self, bbox=bbox or self.bbox, **kwargs)
0.004785
def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_stateful_set_scale  # noqa: E501

    partially update scale of the specified StatefulSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Scale
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
    else:
        (data) = self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
        return data
0.001255
def dict2resource(raw, top=None, options=None, session=None):
    """Convert a dictionary into a Jira Resource object.

    Recursively walks a dict structure, transforming the properties into
    attributes on a new ``Resource`` object of the appropriate type
    (if a ``self`` link is present) or a ``PropertyHolder`` object
    (if no ``self`` link is present).
    """
    if top is None:
        top = PropertyHolder(raw)

    seqs = tuple, list, set, frozenset
    for i, j in iteritems(raw):
        if isinstance(j, dict):
            if 'self' in j:
                resource = cls_for_resource(j['self'])(options, session, j)
                setattr(top, i, resource)
            elif i == 'timetracking':
                setattr(top, 'timetracking', TimeTracking(options, session, j))
            else:
                setattr(
                    top, i, dict2resource(j, options=options, session=session))
        elif isinstance(j, seqs):
            seq_list = []
            for seq_elem in j:
                if isinstance(seq_elem, dict):
                    if 'self' in seq_elem:
                        resource = cls_for_resource(seq_elem['self'])(
                            options, session, seq_elem)
                        seq_list.append(resource)
                    else:
                        seq_list.append(
                            dict2resource(seq_elem, options=options, session=session))
                else:
                    seq_list.append(seq_elem)
            setattr(top, i, seq_list)
        else:
            setattr(top, i, j)
    return top
0.002513
def save(self):
    """Update the configuration file on disk with the current contents
    of self.contents.  Previous contents are overwritten.
    """
    try:
        with open(self.path, "w") as f:
            f.writelines(self.contents)
    except IOError as e:
        raise InternalError("Could not write RCFile contents",
                            name=self.name, path=self.path,
                            error_message=str(e))
0.009639
def _isValidTrigger(block, ch):
    """check if the trigger characters are in the right context,
    otherwise running the indenter might be annoying to the user
    """
    if ch == "" or ch == "\n":
        return True  # Explicit align or new line

    match = rxUnindent.match(block.text())
    return match is not None and \
        match.group(3) == ""
0.010204
def queue(self):
    """
    Get a queue of notifications

    Use it with Python with
    """
    queue = NotificationQueue()
    self._listeners.add(queue)
    yield queue
    self._listeners.remove(queue)
0.008368
def addMPCandHumanWealth(self, solution):
    '''
    Take a solution and add human wealth and the bounding MPCs to it.

    Parameters
    ----------
    solution : ConsumerSolution
        The solution to this period's consumption-saving problem.

    Returns
    -------
    solution : ConsumerSolution
        The solution to this period's consumption-saving problem, but now
        with human wealth and the bounding MPCs.
    '''
    solution.hNrm = self.hNrmNow
    solution.MPCmin = self.MPCminNow
    solution.MPCmax = self.MPCmaxEff
    return solution
0.006329
def is_directory(value, **kwargs):
    """Indicate whether ``value`` is a directory that exists on the local
    filesystem.

    :param value: The value to evaluate.

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator
    """
    try:
        value = validators.directory_exists(value, **kwargs)
    except SyntaxError as error:
        raise error
    except Exception:
        return False

    return True
0.004862
def create_extraction_file(bim_filename, out_prefix):
    """Creates an extraction file (keeping only markers on autosomes).

    :param bim_filename: the name of the BIM file.
    :param out_prefix: the prefix for the output file.

    :type bim_filename: str
    :type out_prefix: str
    """
    o_file = None
    try:
        o_file = open(out_prefix + ".to_extract", "w")
    except IOError:
        raise ProgramError("{}: cannot write file".format(
            out_prefix + ".to_extract"
        ))

    # Reading the BIM file and extracts only the markers on autosome
    autosomes = set(map(str, range(1, 23)))
    nb_markers = 0
    header = dict(zip(["chrom", "name", "cm", "pos", "a1", "a2"], range(6)))
    with open(bim_filename, "r") as i_file:
        for line in i_file:
            row = line.rstrip("\r\n").split()
            if row[header["chrom"]] in autosomes:
                print >>o_file, row[header["name"]]
                nb_markers += 1

    # Closing the file
    o_file.close()

    logger.info("  - {:,d} markers will be used for contamination "
                "estimation".format(nb_markers))
0.000886
def decode_chain_list(in_bytes):
    """Convert a list of bytes to a list of strings.
    Each string is of length mmtf.CHAIN_LEN.

    :param in_bytes: the input bytes
    :return: the decoded list of strings"""
    bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN)))
    return [s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE) for s in bstrings]
0.01023
def build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject,
                                                object_type: Type[T],
                                                logger: Logger = None) -> Parser:
    """
    Returns the most appropriate parser to use to parse object obj_on_filesystem
    as an object of type object_type

    :param obj_on_filesystem: the filesystem object to parse
    :param object_type: the type of object that the parser is expected to produce
    :param logger:
    :return:
    """
    pass
0.012868
def top(num_processes=5, interval=3):
    '''
    Return a list of top CPU consuming processes during the interval.

    num_processes = return the top N CPU consuming processes
    interval = the number of seconds to sample CPU usage over

    CLI Examples:

    .. code-block:: bash

        salt '*' ps.top

        salt '*' ps.top 5 10
    '''
    result = []
    start_usage = {}
    for pid in psutil.pids():
        try:
            process = psutil.Process(pid)
            user, system = process.cpu_times()
        except ValueError:
            user, system, _, _ = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        start_usage[process] = user + system
    time.sleep(interval)
    usage = set()
    for process, start in six.iteritems(start_usage):
        try:
            user, system = process.cpu_times()
        except ValueError:
            user, system, _, _ = process.cpu_times()
        except psutil.NoSuchProcess:
            continue
        now = user + system
        diff = now - start
        usage.add((diff, process))

    for idx, (diff, process) in enumerate(reversed(sorted(usage))):
        if num_processes and idx >= num_processes:
            break
        if not _get_proc_cmdline(process):
            cmdline = _get_proc_name(process)
        else:
            cmdline = _get_proc_cmdline(process)
        info = {'cmd': cmdline,
                'user': _get_proc_username(process),
                'status': _get_proc_status(process),
                'pid': _get_proc_pid(process),
                'create_time': _get_proc_create_time(process),
                'cpu': {},
                'mem': {},
                }
        for key, value in six.iteritems(process.cpu_times()._asdict()):
            info['cpu'][key] = value
        for key, value in six.iteritems(process.memory_info()._asdict()):
            info['mem'][key] = value
        result.append(info)

    return result
0.001025
def columbus_day(year, country='usa'):
    '''in USA: 2nd Monday in Oct
       Elsewhere: Oct 12'''
    if country == 'usa':
        return nth_day_of_month(2, MON, OCT, year)
    else:
        return (year, OCT, 12)
0.00463
def load_dialect_impl(self, dialect):
    # type: (DefaultDialect) -> TypeEngine
    """Select impl by dialect."""
    if self.__use_json(dialect):
        return dialect.type_descriptor(self.__json_type)
    return dialect.type_descriptor(sqlalchemy.UnicodeText)
0.007246
def AddLogFileOptions(self, argument_group):
    """Adds the log file option to the argument group.

    Args:
      argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    argument_group.add_argument(
        '--logfile', '--log_file', '--log-file', action='store',
        metavar='FILENAME', dest='log_file', type=str, default='', help=(
            'Path of the file in which to store log messages, by default '
            'this file will be named: "{0:s}-YYYYMMDDThhmmss.log.gz". Note '
            'that the file will be gzip compressed if the extension is '
            '".gz".').format(self.NAME))
0.00159
def gaussian1d_moments(data, mask=None):
    """
    Estimate 1D Gaussian parameters from the moments of 1D data.

    This function can be useful for providing initial parameter values
    when fitting a 1D Gaussian to the ``data``.

    Parameters
    ----------
    data : array_like (1D)
        The 1D array.

    mask : array_like (1D bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    amplitude, mean, stddev : float
        The estimated parameters of a 1D Gaussian.
    """
    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains non-finite values (e.g. NaNs or '
                      'infs), which were automatically masked.',
                      AstropyUserWarning)
    else:
        data = np.ma.array(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    data.fill_value = 0.
    data = data.filled()

    x = np.arange(data.size)
    x_mean = np.sum(x * data) / np.sum(data)
    x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data)))
    amplitude = np.ptp(data)

    return amplitude, x_mean, x_stddev
0.000717
def plot_gaussian_2D(mu, lmbda, color='b', centermarker=True, label='',
                     alpha=1., ax=None, artists=None):
    '''
    Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a
    covariance matrix.
    '''
    assert len(mu) == 2
    ax = ax if ax else plt.gca()

    # TODO use artists!

    t = np.hstack([np.arange(0, 2*np.pi, 0.01), 0])
    circle = np.vstack([np.sin(t), np.cos(t)])
    ellipse = np.dot(np.linalg.cholesky(lmbda), circle)

    if artists is None:
        point = ax.scatter([mu[0]], [mu[1]], marker='D', color=color, s=4,
                           alpha=alpha) \
            if centermarker else None
        line, = ax.plot(ellipse[0, :] + mu[0], ellipse[1, :] + mu[1],
                        linestyle='-', linewidth=2, color=color, label=label,
                        alpha=alpha)
    else:
        line, point = artists
        if centermarker:
            point.set_offsets(np.atleast_2d(mu))
        line.set_xdata(ellipse[0, :] + mu[0])
        line.set_ydata(ellipse[1, :] + mu[1])
        line.set_alpha(alpha)
        line.set_color(color)

    return line, point
0.027158
def eagerload_includes(self, query, qs):
    """Use eagerload feature of sqlalchemy to optimize data retrieval
    for include querystring parameter

    :param Query query: sqlalchemy queryset
    :param QueryStringManager qs: a querystring manager to retrieve information from url
    :return Query: the query with includes eagerloaded
    """
    for include in qs.include:
        joinload_object = None

        if '.' in include:
            current_schema = self.resource.schema
            for obj in include.split('.'):
                try:
                    field = get_model_field(current_schema, obj)
                except Exception as e:
                    raise InvalidInclude(str(e))

                if joinload_object is None:
                    joinload_object = joinedload(field)
                else:
                    joinload_object = joinload_object.joinedload(field)

                related_schema_cls = get_related_schema(current_schema, obj)

                if isinstance(related_schema_cls, SchemaABC):
                    related_schema_cls = related_schema_cls.__class__
                else:
                    related_schema_cls = class_registry.get_class(related_schema_cls)

                current_schema = related_schema_cls
        else:
            try:
                field = get_model_field(self.resource.schema, include)
            except Exception as e:
                raise InvalidInclude(str(e))

            joinload_object = joinedload(field)

        query = query.options(joinload_object)

    return query
0.00355
def pad_repeat_border(data, padwidth):
    """
    Similar to `pad`, except the border value from ``data`` is used to pad.

    Parameters
    ----------
    data : ndarray
        Numpy array of any dimension and type.
    padwidth : int or tuple
        If int, it will pad using this amount at the beginning and end of all
        dimensions. If it is a tuple (of same length as `ndim`), then the
        padding amount will be specified per axis.

    Examples
    --------
    >>> import deepdish as dd
    >>> import numpy as np

    Pad an array by repeating its borders:

    >>> shape = (3, 4)
    >>> x = np.arange(np.prod(shape)).reshape(shape)
    >>> dd.util.pad_repeat_border(x, 2)
    array([[ 0,  0,  0,  1,  2,  3,  3,  3],
           [ 0,  0,  0,  1,  2,  3,  3,  3],
           [ 0,  0,  0,  1,  2,  3,  3,  3],
           [ 4,  4,  4,  5,  6,  7,  7,  7],
           [ 8,  8,  8,  9, 10, 11, 11, 11],
           [ 8,  8,  8,  9, 10, 11, 11, 11],
           [ 8,  8,  8,  9, 10, 11, 11, 11]])
    """
    data = np.asarray(data)
    shape = data.shape
    if isinstance(padwidth, int):
        padwidth = (padwidth,)*len(shape)

    padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
                             enumerate(shape)))
    new_data = np.empty(padded_shape, dtype=data.dtype)
    new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data

    for i, pw in enumerate(padwidth):
        if pw > 0:
            selection = [slice(None)] * data.ndim
            selection2 = [slice(None)] * data.ndim

            # Lower boundary
            selection[i] = slice(0, pw)
            selection2[i] = slice(pw, pw+1)
            new_data[tuple(selection)] = new_data[tuple(selection2)]

            # Upper boundary
            selection[i] = slice(-pw, None)
            selection2[i] = slice(-pw-1, -pw)
            new_data[tuple(selection)] = new_data[tuple(selection2)]

    return new_data
0.000513
def sort_dictionary_list(dict_list, sort_key):
    """
    sorts a list of dictionaries based on the value of the sort_key

    dict_list - a list of dictionaries
    sort_key - a string that identifies the key to sort the dictionaries with.

    Test sorting a list of dictionaries:
    >>> sort_dictionary_list([{'b' : 1, 'value' : 2}, {'c' : 2, 'value' : 3}, {'a' : 3, 'value' : 1}], 'value')
    [{'a': 3, 'value': 1}, {'b': 1, 'value': 2}, {'c': 2, 'value': 3}]
    """
    if not dict_list or len(dict_list) == 0:
        return dict_list

    dict_list.sort(key=itemgetter(sort_key))
    return dict_list
0.003231
def _atomicModification(func):
    """Decorator
    Make document modification atomic
    """
    def wrapper(*args, **kwargs):
        self = args[0]
        with self._qpart:
            func(*args, **kwargs)
    return wrapper
0.007663
def stop_NoteContainer(self, nc, channel=1):
    """Stop playing the notes in NoteContainer nc."""
    self.notify_listeners(self.MSG_PLAY_NC, {'notes': nc, 'channel': channel})
    if nc is None:
        return True
    for note in nc:
        if not self.stop_Note(note, channel):
            return False
    return True
0.008174
def less_naive(gold_schemes):
    """find 'less naive' baseline (most common scheme of a given length in subcorpus)"""
    best_schemes = defaultdict(lambda: defaultdict(int))
    for g in gold_schemes:
        best_schemes[len(g)][tuple(g)] += 1
    for i in best_schemes:
        best_schemes[i] = tuple(max(best_schemes[i].items(), key=lambda x: x[1])[0])
    naive_schemes = []
    for g in gold_schemes:
        naive_schemes.append(best_schemes[len(g)])
    return naive_schemes
0.006173
def update_metadata_queue(self, agent: BaseAgent):
    """
    Adds a new instance of AgentMetadata into the `agent_metadata_queue` using `agent` data.

    :param agent: An instance of an agent.
    """
    pids = {os.getpid(), *agent.get_extra_pids()}

    helper_process_request = agent.get_helper_process_request()

    self.agent_metadata_queue.put(
        AgentMetadata(self.index, self.name, self.team, pids, helper_process_request))
0.008715
def end(self):
    """End access to the SD interface and close the HDF file.

    Args::

        no argument

    Returns::

        None

    The instance should not be used afterwards.
    The 'end()' method is implicitly called when the
    SD instance is deleted.

    C library equivalent : SDend
    """
    status = _C.SDend(self._id)
    _checkErr('end', status, "cannot execute")
    self._id = None
0.003937
def getPythonVarName(name):
    """Get the python variable name
    """
    return SUB_REGEX.sub('', name.replace('+', '_')
                         .replace('-', '_')
                         .replace('.', '_')
                         .replace(' ', '')
                         .replace('/', '_')).upper()
0.009852
def bury(self, priority=None):
    """Bury this job."""
    if self.reserved:
        self.conn.bury(self.jid, priority or self._priority())
        self.reserved = False
0.010753
def get_consensus_at(self, block_id):
    """
    Get the consensus hash at a given block.
    Return the consensus hash if we have one for this block.
    Return None if we don't.
    """
    query = 'SELECT consensus_hash FROM snapshots WHERE block_id = ?;'
    args = (block_id,)

    con = self.db_open(self.impl, self.working_dir)
    rows = self.db_query_execute(con, query, args, verbose=False)
    res = None

    for r in rows:
        res = r['consensus_hash']

    con.close()
    return res
0.003591
def generate_code_challenge(verifier):
    """
    source: https://github.com/openstack/deb-python-oauth2client

    Creates a 'code_challenge' as described in section 4.2 of RFC 7636
    by taking the sha256 hash of the verifier and then urlsafe
    base64-encoding it.

    Args:
        verifier: bytestring, representing a code_verifier as generated by
            generate_code_verifier().

    Returns:
        Bytestring, representing a urlsafe base64-encoded sha256 hash digest,
        without '=' padding.
    """
    digest = hashlib.sha256(verifier.encode('utf-8')).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('utf-8')
0.001515
def encode_sentence(obj):
    """Encode a single sentence."""
    warnings.warn("deprecated. Please use bioc.biocxml.encoder.encode_sentence",
                  DeprecationWarning)
    return bioc.biocxml.encoder.encode_sentence(obj)
0.009174
def read_raster_no_crs(input_file, indexes=None, gdal_opts=None):
    """
    Wrapper function around rasterio.open().read().

    Parameters
    ----------
    input_file : str
        Path to file
    indexes : int or list
        Band index or list of band indexes to be read.

    Returns
    -------
    MaskedArray

    Raises
    ------
    FileNotFoundError if file cannot be found.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            with rasterio.Env(
                **get_gdal_options(
                    gdal_opts, is_remote=path_is_remote(input_file, s3=True)
                )
            ):
                with rasterio.open(input_file, "r") as src:
                    return src.read(indexes=indexes, masked=True)
        except RasterioIOError as e:
            for i in ("does not exist in the file system",
                      "No such file or directory"):
                if i in str(e):
                    raise FileNotFoundError("%s not found" % input_file)
            else:
                raise
0.001885
def get_privkey(self, address: AddressHex, password: str) -> PrivateKey:
    """Find the keystore file for an account, unlock it and get the private key

    Args:
        address: The Ethereum address for which to find the keyfile in the system
        password: Mostly for testing purposes. A password can be provided
                  as the function argument here. If it's not then the
                  user is interactively queried for one.
    Returns
        The private key associated with the address
    """
    address = add_0x_prefix(address).lower()

    if not self.address_in_keystore(address):
        raise ValueError('Keystore file not found for %s' % address)

    with open(self.accounts[address]) as data_file:
        data = json.load(data_file)

    acc = Account(data, password, self.accounts[address])
    return acc.privkey
0.004329
def native(self):
    """
    The native Python datatype representation of this value

    :return:
        An integer or None
    """
    if self.contents is None:
        return None

    if self._native is None:
        self._native = self.__int__()
        if self._map is not None and self._native in self._map:
            self._native = self._map[self._native]
    return self._native
0.004545
def get_remainder_set(self, j):
    """Return the set of children with indices less than j of all ancestors
    of j. The set C from (arXiv:1701.07072).

    :param int j: fermionic site index
    :return: children of j-ancestors, with indices less than j
    :rtype: list(FenwickNode)
    """
    result = []
    ancestors = self.get_update_set(j)

    # This runs in O(log(N)log(N)) where N is the number of qubits.
    for a in ancestors:
        for c in a.children:
            if c.index < j:
                result.append(c)

    return result
0.003295
def parse_expression(val, acceptable_types, name=None, raise_type=ValueError):
    """Attempts to parse the given `val` as a python expression of the
    specified `acceptable_types`.

    :param string val: A string containing a python expression.
    :param acceptable_types: The acceptable types of the parsed object.
    :type acceptable_types: type|tuple of types. The tuple may be nested;
                            ie anything `isinstance` accepts.
    :param string name: An optional logical name for the value being parsed;
                        ie if the literal val represents a person's age, 'age'.
    :param type raise_type: The type of exception to raise for all failures;
                            ValueError by default.
    :raises: If `val` is not a valid python literal expression or it is but
             evaluates to an object that is not an instance of one of the
             `acceptable_types`.
    """
    def format_type(typ):
        return typ.__name__

    if not isinstance(val, string_types):
        raise raise_type('The raw `val` is not a string.  Given {} of type {}.'
                         .format(val, format_type(type(val))))

    def get_name():
        return repr(name) if name else 'value'

    def format_raw_value():
        lines = val.splitlines()
        for line_number in range(0, len(lines)):
            lines[line_number] = "{line_number:{width}}: {line}".format(
                line_number=line_number + 1,
                line=lines[line_number],
                width=len(str(len(lines))))
        return '\n'.join(lines)

    try:
        parsed_value = eval(val)
    except Exception as e:
        raise raise_type(dedent("""\
            The {name} cannot be evaluated as a literal expression: {error}
            Given raw value:
            {value}
            """.format(name=get_name(), error=e, value=format_raw_value())))

    if not isinstance(parsed_value, acceptable_types):
        def iter_types(types):
            if isinstance(types, type):
                yield types
            elif isinstance(types, tuple):
                for item in types:
                    for typ in iter_types(item):
                        yield typ
            else:
                raise ValueError('The given acceptable_types is not a valid type (tuple): {}'
                                 .format(acceptable_types))
        raise raise_type(dedent("""\
            The {name} is not of the expected type(s): {types}:
            Given the following raw value that evaluated to type {type}:
            {value}
            """.format(name=get_name(),
                       types=', '.join(format_type(t) for t in iter_types(acceptable_types)),
                       type=format_type(type(parsed_value)),
                       value=format_raw_value())))
    return parsed_value
0.008494
def _eval_wrapper(self, fit_key, q, chiA, chiB, **kwargs):
    """Evaluates the surfinBH7dq2 model. """
    chiA = np.array(chiA)
    chiB = np.array(chiB)

    # Warn/Exit if extrapolating
    allow_extrap = kwargs.pop('allow_extrap', False)
    self._check_param_limits(q, chiA, chiB, allow_extrap)

    omega0 = kwargs.pop('omega0', None)
    PN_approximant = kwargs.pop('PN_approximant', 'SpinTaylorT4')
    PN_dt = kwargs.pop('PN_dt', 0.1)
    PN_spin_order = kwargs.pop('PN_spin_order', 7)
    PN_phase_order = kwargs.pop('PN_phase_order', 7)
    omega_switch = kwargs.pop('omega_switch', 0.018)
    self._check_unused_kwargs(kwargs)

    if omega0 is None:
        # If omega0 is not given, assume chiA, chiB are the coorbital frame
        # spins at t=-100 M.
        x = np.concatenate(([q], chiA, chiB))
    else:
        # If omega0 is given, evolve the spins from omega0
        # to t = -100 M from the peak.
        chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \
            orbphase_fitnode \
            = self._evolve_spins(q, chiA, chiB, omega0,
                                 PN_approximant, PN_dt, PN_spin_order,
                                 PN_phase_order, omega_switch)
        # x should contain coorbital frame spins at t=-100M
        x = np.concatenate(([q], chiA_coorb_fitnode, chiB_coorb_fitnode))

    def eval_vector_fit(x, fit_key):
        res = self._evaluate_fits(x, fit_key)
        fit_val = res.T[0]
        fit_err = res.T[1]
        if omega0 is not None:
            # If spins were given in inertial frame at omega0,
            # transform vectors and errors back to the same frame.
            fit_val = utils.transform_vector_coorb_to_inertial(
                fit_val, orbphase_fitnode, quat_fitnode)
            fit_err = utils.transform_error_coorb_to_inertial(
                fit_val, fit_err, orbphase_fitnode, quat_fitnode)
        return fit_val, fit_err

    if fit_key == 'mf' or fit_key == 'all':
        mf, mf_err = self._evaluate_fits(x, 'mf')
        if fit_key == 'mf':
            return mf, mf_err

    if fit_key == 'chif' or fit_key == 'all':
        chif, chif_err = eval_vector_fit(x, 'chif')
        if fit_key == 'chif':
            return chif, chif_err

    if fit_key == 'vf' or fit_key == 'all':
        vf, vf_err = eval_vector_fit(x, 'vf')
        if fit_key == 'vf':
            return vf, vf_err

    if fit_key == 'all':
        return mf, chif, vf, mf_err, chif_err, vf_err
0.003019
def random_string(length=8, charset=None):
    '''
    Generates a string with random characters. If no charset is specified,
    only letters and digits are used.

    Args:
        length (int)     length of the returned string
        charset (string) list of characters to choose from

    Returns:
        (str) with random characters from charset

    Raises:
        -
    '''
    if length < 1:
        raise ValueError('Length must be > 0')

    if not charset:
        charset = string.letters + string.digits

    return ''.join(random.choice(charset) for unused in xrange(length))
0.001712
def subscribe_sns_topic_to_sqs(self, region):
    """Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed

    Args:
        region (`str`): Name of the AWS region

    Returns:
        `str`
    """
    sns = self.session.resource('sns', region_name=region)

    topic = sns.Topic('arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name))
    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)

    auditlog(
        event='cloudtrail.subscribe_sns_topic_to_sqs',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )

    return topic.attributes['TopicArn']
0.005222
def maskQuality(self, umi, umi_quals):
    '''mask low quality bases and return masked umi'''
    masked_umi = mask_umi(umi, umi_quals,
                          self.quality_encoding,
                          self.quality_filter_mask)
    if masked_umi != umi:
        self.read_counts['UMI masked'] += 1
        return masked_umi
    else:
        return umi
0.005038
def tree(self):
    """Tree with branch lengths in codon substitutions per site.

    The tree is a `Bio.Phylo.BaseTree.Tree` object.

    This is the current tree after whatever optimizations have
    been performed so far.
    """
    bs = self.model.branchScale
    for node in self._tree.find_clades():
        if node != self._tree.root:
            node.branch_length = self.t[self.name_to_nodeindex[node]] * bs
    return self._tree
0.004175
def PCO_protocol_dispatcher(s):
    """Choose the correct PCO element."""
    proto_num = orb(s[0]) * 256 + orb(s[1])
    cls = PCO_PROTOCOL_CLASSES.get(proto_num, Raw)
    return cls(s)
0.005376
def append_line(filename, **line):
    """Safely (i.e. with locking) append a line to the given file,
    serialized as JSON.
    """
    global lock
    data = json.dumps(line, separators=(',', ':')) + '\n'
    with lock:
        with file(filename, 'a') as fp:
            fp.seek(0, SEEK_END)
            fp.write(data)
0.003096
def prt_results(self, goea_results):
    """Print GOEA results to the screen or to a file."""
    # objaart = self.prepgrp.get_objaart(goea_results) if self.prepgrp is not None else None
    if self.args.outfile is None:
        self._prt_results(goea_results)
    else:
        # Users can print to both tab-separated file and xlsx file in one run.
        outfiles = self.args.outfile.split(",")
        grpwr = self.prepgrp.get_objgrpwr(goea_results) if self.prepgrp else None
        if grpwr is None:
            self.prt_outfiles_flat(goea_results, outfiles)
        else:
            grpwr.prt_outfiles_grouped(outfiles)
0.007407
def preprocess_incoming_content(content, encrypt_func, max_size_bytes):
    """
    Apply preprocessing steps to file/notebook content that we're going to
    write to the database.

    Applies ``encrypt_func`` to ``content`` and checks that the result is
    smaller than ``max_size_bytes``.
    """
    encrypted = encrypt_func(content)
    if max_size_bytes != UNLIMITED and len(encrypted) > max_size_bytes:
        raise FileTooLarge()
    return encrypted
0.002169
def delete(self, request, *args, **kwargs):
    """Delete auth token when `delete` request was issued."""
    # Logic repeated from DRF because one cannot easily reuse it
    auth = get_authorization_header(request).split()
    if not auth or auth[0].lower() != b'token':
        return response.Response(status=status.HTTP_400_BAD_REQUEST)
    if len(auth) == 1:
        msg = 'Invalid token header. No credentials provided.'
        return response.Response(msg, status=status.HTTP_400_BAD_REQUEST)
    elif len(auth) > 2:
        msg = 'Invalid token header. Token string should not contain spaces.'
        return response.Response(msg, status=status.HTTP_400_BAD_REQUEST)
    try:
        token = self.model.objects.get(key=auth[1])
    except self.model.DoesNotExist:
        pass
    else:
        token.delete()
        signals.user_logged_out.send(
            type(self), user=token.user, request=request,
        )
    return response.Response(status=status.HTTP_204_NO_CONTENT)
0.002708
def _confirm_pos(self, pos):
    """look up widget for pos and default to None"""
    candidate = None
    if self._get_node(self._treelist, pos) is not None:
        candidate = pos
    return candidate
0.008969
async def _quit(self):
    """Quits the bot."""
    await self.bot.responses.failure(message="Bot shutting down")
    await self.bot.logout()
0.013072
def setOverlayAlpha(self, ulOverlayHandle, fAlpha):
    """Sets the alpha of the overlay quad. Use 1.0 for 100 percent
    opacity to 0.0 for 0 percent opacity."""
    fn = self.function_table.setOverlayAlpha
    result = fn(ulOverlayHandle, fAlpha)
    return result
0.010714
def publish(self, topic="/controller", qos=0, payload=None): """ publish(self, topic, payload=None, qos=0, retain=False) Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to indicate success or MQTT_ERR_NO_CONN if the client is not currently connected. mid is the message ID for the publish request. The mid value can be used to track the publish request by checking against the mid argument in the on_publish() callback if it is defined. """ result = self.client.publish(topic, payload=json.dumps(payload), qos=qos) if result[0] == mqtt.MQTT_ERR_NO_CONN: raise RuntimeError("No connection") return result[1]
0.002509
def runSearchFeatures(self, request):
    """
    Returns a SearchFeaturesResponse for the specified
    SearchFeaturesRequest object.

    :param request: JSON string representing searchFeaturesRequest
    :return: JSON string representing searchFeatureResponse
    """
    return self.runSearchRequest(
        request, protocol.SearchFeaturesRequest,
        protocol.SearchFeaturesResponse,
        self.featuresGenerator)
0.004292
def random_string(length):
    """
    Return a pseudo-random string of specified length.
    """
    valid_chars = string_ascii_letters + string_digits
    return ''.join(random.choice(valid_chars) for i in range(length))
0.004484
def _cancel_send_messages(self, d):
    """Cancel a `send_messages` request

    First check if the request is in a waiting batch, if so, great, remove
    it from the batch. If it's not found, we errback() the deferred and
    the downstream processing steps take care of aborting further
    processing.
    We check if there's a current _batch_send_d to determine where in the
    chain we were (getting partitions, or already sent request to Kafka)
    and errback differently.
    """
    # Is the request in question in an unsent batch?
    for req in self._batch_reqs:
        if req.deferred == d:
            # Found the request, remove it and return.
            msgs = req.messages
            self._waitingMsgCount -= len(msgs)
            for m in (_m for _m in msgs if _m is not None):
                self._waitingByteCount -= len(m)
            # This _should_ be safe as we abort the iteration upon removal
            self._batch_reqs.remove(req)
            d.errback(CancelledError(request_sent=False))
            return

    # If it wasn't found in the unsent batch. We just rely on the
    # downstream processing of the request to check if the deferred
    # has been called and skip further processing for this request
    # Errback the deferred with whether or not we sent the request
    # to Kafka already
    d.errback(
        CancelledError(request_sent=(self._batch_send_d is not None)))
    return
0.001294
def convert_padding(builder, layer, input_names, output_names, keras_layer):
    """
    Convert padding layer from keras to coreml.
    Keras only supports zero padding at this time.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    _check_data_format(keras_layer)
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])

    is_1d = isinstance(keras_layer, _keras.layers.ZeroPadding1D)

    padding = keras_layer.padding
    top = left = bottom = right = 0
    if is_1d:
        if type(padding) is int:
            left = right = padding
        elif type(padding) is tuple:
            if type(padding[0]) is int:
                left, right = padding
            elif type(padding[0]) is tuple and len(padding[0]) == 2:
                left, right = padding[0]
            else:
                raise ValueError("Unrecognized padding option: %s" % (str(padding)))
        else:
            raise ValueError("Unrecognized padding option: %s" % (str(padding)))
    else:
        if type(padding) is int:
            top = left = bottom = right = padding
        elif type(padding) is tuple:
            if type(padding[0]) is int:
                top, left = padding
                bottom, right = padding
            elif type(padding[0]) is tuple:
                top, bottom = padding[0]
                left, right = padding[1]
            else:
                raise ValueError("Unrecognized padding option: %s" % (str(padding)))
        else:
            raise ValueError("Unrecognized padding option: %s" % (str(padding)))

    # Now add the layer
    builder.add_padding(name=layer,
                        left=left, right=right, top=top, bottom=bottom,
                        value=0,
                        input_name=input_name, output_name=output_name
                        )
0.008448
def revcomp(sequence):
    "returns reverse complement of a string"
    sequence = sequence[::-1].strip()\
        .replace("A", "t")\
        .replace("T", "a")\
        .replace("C", "g")\
        .replace("G", "c").upper()
    return sequence
0.00304
def filter(args):
    """
    %prog filter gffile > filtered.gff

    Filter the gff file based on criteria below:
    (1) feature attribute values: [Identity, Coverage].
        You can get this type of gff by using gmap
        $ gmap -f 2 ....
    (2) Total bp length of child features
    """
    p = OptionParser(filter.__doc__)
    p.add_option("--type", default="mRNA",
                 help="The feature to scan for the attributes [default: %default]")
    g1 = OptionGroup(p, "Filter by identity/coverage attribute values")
    g1.add_option("--id", default=95, type="float",
                  help="Minimum identity [default: %default]")
    g1.add_option("--coverage", default=90, type="float",
                  help="Minimum coverage [default: %default]")
    g1.add_option("--nocase", default=False, action="store_true",
                  help="Case insensitive lookup of attribute names [default: %default]")
    p.add_option_group(g1)
    g2 = OptionGroup(p, "Filter by child feature bp length")
    g2.add_option("--child_ftype", default=None, type="str",
                  help="Child featuretype to consider")
    g2.add_option("--child_bp", default=None, type="int",
                  help="Filter by total bp of children of chosen ftype")
    p.add_option_group(g2)
    p.set_outfile()

    opts, args = p.parse_args(args)
    otype, oid, ocov = opts.type, opts.id, opts.coverage
    cftype, clenbp = opts.child_ftype, opts.child_bp

    id_attr, cov_attr = "Identity", "Coverage"
    if opts.nocase:
        id_attr, cov_attr = id_attr.lower(), cov_attr.lower()

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args

    gffdb = make_index(gffile)
    bad = set()
    ptype = None
    for g in gffdb.features_of_type(otype, order_by=('seqid', 'start')):
        if not ptype:
            parent = list(gffdb.parents(g))
            ptype = parent[0].featuretype \
                if len(parent) > 0 else otype
        if cftype and clenbp:
            if gffdb.children_bp(g, child_featuretype=cftype) < clenbp:
                bad.add(g.id)
        elif oid and ocov:
            identity = float(g.attributes[id_attr][0])
            coverage = float(g.attributes[cov_attr][0])
            if identity < oid or coverage < ocov:
                bad.add(g.id)

    logging.debug("{0} bad accns marked.".format(len(bad)))

    fw = must_open(opts.outfile, "w")
    for g in gffdb.features_of_type(ptype, order_by=('seqid', 'start')):
        if ptype != otype:
            feats = list(gffdb.children(g, featuretype=otype, order_by=('start')))
            ok_feats = [f for f in feats if f.id not in bad]
            if len(ok_feats) > 0:
                print(g, file=fw)
                for feat in ok_feats:
                    print(feat, file=fw)
                    for child in gffdb.children(feat, order_by=('start')):
                        print(child, file=fw)
        else:
            if g.id not in bad:
                print(g, file=fw)
                for child in gffdb.children(g, order_by=('start')):
                    print(child, file=fw)
    fw.close()
0.002895
def aside_view_declaration(self, view_name):
    """
    Find and return a function object if one is an aside_view for the given view_name

    Aside methods declare their view provision via @XBlockAside.aside_for(view_name)
    This function finds those declarations for a block.

    Arguments:
        view_name (string): the name of the view requested.

    Returns:
        either the function or None
    """
    if view_name in self._combined_asides:  # pylint: disable=unsupported-membership-test
        return getattr(self, self._combined_asides[view_name])  # pylint: disable=unsubscriptable-object
    else:
        return None
0.008683
def ls(dataset_uri):
    """
    List the overlays in the dataset.
    """
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    for overlay_name in dataset.list_overlay_names():
        click.secho(overlay_name)
0.00463
def get_shape(self, ds_id, ds_info):
    """Return data array shape for item specified.
    """
    var_path = ds_info.get('file_key', '{}'.format(ds_id.name))
    if var_path + '/shape' not in self:
        # loading a scalar value
        shape = 1
    else:
        shape = self[var_path + "/shape"]
        if "index" in ds_info:
            shape = shape[1:]
        if "pressure_index" in ds_info:
            shape = shape[:-1]
    return shape
0.003976
def _get(self, end_point, params=None, **kwargs):
    """Send a HTTP GET request to a Todoist API end-point.

    :param end_point: The Todoist API end-point.
    :type end_point: str
    :param params: The required request parameters.
    :type params: dict
    :param kwargs: Any optional parameters.
    :type kwargs: dict
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`
    """
    return self._request(requests.get, end_point, params, **kwargs)
0.003766
def categorize_by_attr(self, attribute):
    '''
    Function to categorize a FileList by a File object
    attribute (eg. 'segment', 'ifo', 'description').

    Parameters
    -----------
    attribute : string
       File object attribute to categorize FileList

    Returns
    --------
    keys : list
       A list of values for an attribute
    groups : list
       A list of FileLists
    '''
    # need to sort FileList otherwise using groupby without sorting does
    # 'AAABBBCCDDAABB' -> ['AAA','BBB','CC','DD','AA','BB']
    # and using groupby with sorting does
    # 'AAABBBCCDDAABB' -> ['AAAAA','BBBBB','CC','DD']
    flist = sorted(self, key=attrgetter(attribute), reverse=True)

    # use groupby to create lists
    groups = []
    keys = []
    for k, g in groupby(flist, attrgetter(attribute)):
        groups.append(FileList(g))
        keys.append(k)

    return keys, groups
0.001996
def _make_input(self, action, old_quat):
    """
    Helper function that returns a dictionary with keys dpos, rotation from a
    raw input array. The first three elements are taken to be displacement in
    position, and a quaternion indicating the change in rotation with respect
    to @old_quat.
    """
    return {
        "dpos": action[:3],
        # IK controller takes an absolute orientation in robot base frame
        "rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
    }
0.007449
def _create_select_window_handler(self, window):
    " Return a mouse handler that selects the given window when clicking. "
    def handler(mouse_event):
        if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
            self.pymux.arrangement.set_active_window(window)
            self.pymux.invalidate()
        else:
            return NotImplemented  # Event not handled here.
    return handler
0.004535
def service(self, service):
    """
    Sets the service of this TrustedCertificateInternalResp.
    Service name where the certificate is to be used.

    :param service: The service of this TrustedCertificateInternalResp.
    :type: str
    """
    if service is None:
        raise ValueError("Invalid value for `service`, must not be `None`")
    allowed_values = ["lwm2m", "bootstrap"]
    if service not in allowed_values:
        raise ValueError(
            "Invalid value for `service` ({0}), must be one of {1}"
            .format(service, allowed_values)
        )

    self._service = service
0.003003
def get_stp_mst_detail_output_msti_port_auto_edge(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    auto_edge = ET.SubElement(port, "auto-edge")
    auto_edge.text = kwargs.pop('auto_edge')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.002837
def kp_pan_set(self, viewer, event, data_x, data_y, msg=True):
    """Sets the pan position under the cursor."""
    if self.canpan:
        self._panset(viewer, data_x, data_y, msg=msg)
    return True
0.009174
def com_google_fonts_check_metadata_match_name_familyname(family_metadata, font_metadata):
    """METADATA.pb: Check font name is the same as family name."""
    if font_metadata.name != family_metadata.name:
        yield FAIL, ("METADATA.pb: {}: Family name \"{}\""
                     " does not match"
                     " font name: \"{}\"").format(font_metadata.filename,
                                                  family_metadata.name,
                                                  font_metadata.name)
    else:
        yield PASS, "Font name is the same as family name."
0.008897
def getCountry(self, default=None):
    """Return the Country from the Physical or Postal Address
    """
    physical_address = self.getPhysicalAddress().get("country", default)
    postal_address = self.getPostalAddress().get("country", default)
    return physical_address or postal_address
0.00639
def _read_hdr_file(ktlx_file):
    """Reads header of one KTLX file.

    Parameters
    ----------
    ktlx_file : Path
        name of one of the ktlx files inside the directory (absolute path)

    Returns
    -------
    dict
        dict with information about the file

    Notes
    -----
    p.3: says long, but python-long requires 8 bytes, so we use f.read(4)

    GUID is correct, BUT little/big endian problems somewhere
    """
    with ktlx_file.open('rb') as f:
        hdr = {}
        assert f.tell() == 0

        hdr['file_guid'] = hexlify(f.read(16))
        hdr['file_schema'], = unpack('<H', f.read(2))
        if not hdr['file_schema'] in (1, 3, 7, 8, 9):
            raise NotImplementedError('Reading header not implemented for ' +
                                      'file_schema ' + str(hdr['file_schema']))

        hdr['base_schema'], = unpack('<H', f.read(2))
        if not hdr['base_schema'] == 1:  # p.3: base_schema 0 is rare, I think
            raise NotImplementedError('Reading header not implemented for ' +
                                      'base_schema ' + str(hdr['base_schema']))

        hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',
                                                             f.read(4))[0])
        hdr['patient_id'], = unpack('<i', f.read(4))
        hdr['study_id'], = unpack('<i', f.read(4))
        hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))
        hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))
        hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))
        hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))
        assert f.tell() == 352

        if hdr['file_schema'] >= 7:
            hdr['sample_freq'], = unpack('<d', f.read(8))
            n_chan, = unpack('<i', f.read(4))
            hdr['num_channels'] = n_chan
            hdr['deltabits'], = unpack('<i', f.read(4))
            hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],
                                      f.read(hdr['num_channels'] * 4))

            f.seek(4464)
            hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))
            hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))
            hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))
            hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))
            hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))
            hdr['discardbits'], = unpack('<i', f.read(4))

        if hdr['file_schema'] >= 8:
            hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]
            hdr['frequency_factor'] = unpack('<' + 'h' * 1024,
                                             f.read(2048))[:n_chan]

    return hdr
0.000359
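A hedged usage sketch; the file path is hypothetical, and note that sample_freq and num_channels are only populated for file_schema >= 7.

from pathlib import Path

hdr = _read_hdr_file(Path('/data/eeg/recording.erd'))  # hypothetical path
print(hdr['creation_time'], hdr['pat_last_name'])
if hdr['file_schema'] >= 7:
    print(hdr['sample_freq'], hdr['num_channels'])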
def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) splitresult = urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = splitresult if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' result = ParseResult(scheme, netloc, url, params, query, fragment) return _coerce_result(result)
0.001342
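For illustration, the stdlib-compatible behavior splits the params off the last path segment while leaving netloc intact:

res = urlparse('http://user@example.com:8080/path;type=a?x=1#frag')
# ParseResult(scheme='http', netloc='user@example.com:8080', path='/path',
#             params='type=a', query='x=1', fragment='frag')
print(res.hostname, res.port)  # example.com 8080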
def _get_url(url):
    """Retrieve the requested URL as a streamed response, wrapping any
    requests error in FetcherException."""
    try:
        data = HTTP_SESSION.get(url, stream=True)
        data.raise_for_status()
    except requests.exceptions.RequestException as exc:
        raise FetcherException(exc)

    return data
0.003984
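A sketch of how a caller might consume the streamed response; the URL and chunk size are hypothetical.

try:
    resp = _get_url('https://example.com/testcases/crash.zip')
    with open('crash.zip', 'wb') as out:
        for chunk in resp.iter_content(chunk_size=8192):
            out.write(chunk)
except FetcherException as exc:
    print('fetch failed:', exc)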
def _contentful_user_agent(self):
        """
        Sets the X-Contentful-User-Agent header.
        """
        header = {}
        from . import __version__
        header['sdk'] = {
            'name': 'contentful-management.py',
            'version': __version__
        }
        header['app'] = {
            'name': self.application_name,
            'version': self.application_version
        }
        header['integration'] = {
            'name': self.integration_name,
            'version': self.integration_version
        }
        header['platform'] = {
            'name': 'python',
            'version': platform.python_version()
        }

        os_name = platform.system()
        if os_name == 'Darwin':
            os_name = 'macOS'
        elif not os_name or os_name == 'Java':
            os_name = None
        elif os_name not in ['macOS', 'Windows']:
            # Every other reported system (e.g. 'FreeBSD') is mapped to Linux.
            os_name = 'Linux'
        header['os'] = {
            'name': os_name,
            'version': platform.release()
        }

        def format_header(key, values):
            # Build one "key name/version;" fragment of the user agent.
            fragment = "{0} {1}".format(key, values['name'])
            if values['version'] is not None:
                fragment = "{0}/{1}".format(fragment, values['version'])
            return "{0};".format(fragment)

        result = []
        for k, values in header.items():
            if not values['name']:
                continue
            result.append(format_header(k, values))
        return ' '.join(result)
0.001338
def launch_shell(username, hostname, password, port=22):
    """ Launches an ssh shell """
    if not username or not hostname or not password:
        return False

    # The password, a scratch file name, and the connection details are
    # interpolated straight into a shell command via sshCmdLine, so all of
    # these values must be trusted input.
    with tempfile.NamedTemporaryFile() as tmpFile:
        os.system(sshCmdLine.format(password, tmpFile.name, username,
                                    hostname, port))

    return True
0.002755
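sshCmdLine is not shown in the snippet; a hypothetical sshpass-style template with the same positional arguments (password, temp file, username, hostname, port) could look like this:

# Hypothetical module-level template consumed by launch_shell above:
sshCmdLine = 'sshpass -p {0} ssh -o UserKnownHostsFile={1} {2}@{3} -p {4}'

launch_shell('alice', 'host.example.com', 's3cret', port=2222)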
def _ensure_array_list(arrays): """Ensures that every element in a list is an instance of a numpy array.""" # Note: the isinstance test is needed below so that instances of FieldArray # are not converted to numpy arrays return [numpy.array(arr, ndmin=1) if not isinstance(arr, numpy.ndarray) else arr for arr in arrays]
0.002882
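Scalars and nested lists are promoted to at-least-1-d arrays, while existing arrays pass through untouched:

import numpy

mixed = [1.5, [1, 2, 3], numpy.arange(4)]
print(_ensure_array_list(mixed))
# [array([1.5]), array([1, 2, 3]), array([0, 1, 2, 3])]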
def characters(self, numberOfCharacters):
        """Return the next `numberOfCharacters` characters of the source,
        starting at the current index, without advancing it."""
        return self.code[self.index:self.index + numberOfCharacters]
0.011429
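A minimal sketch of the peek behavior, using a hypothetical stand-in for the tokenizer class that normally owns this method:

class Source:
    """Tiny stand-in for the object that carries `code` and `index`."""
    def __init__(self, code):
        self.code = code
        self.index = 4

Source.characters = characters  # hypothetical: attach the method for this sketch
src = Source("let total = 0")
print(src.characters(5))  # 'total'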
def search_all(self):
        '''a "show all" search that doesn't require a query'''

        # This should be your API's url for a search
        url = '...'

        # _paginate_get is what it sounds like: a GET that walks through
        # multiple pages of results
        results = self._paginate_get(url)

        if len(results) == 0:
            bot.info("No container collections found.")
            sys.exit(1)

        bot.info("Collections")

        # Here is how to create a simple table. You must, of course, parse
        # your custom result and pick the fields you think are important!
        rows = []
        for result in results:
            if "containers" in result:
                for c in result['containers']:
                    rows.append([ c['uri'],
                                  c['detail'] ])

        bot.table(rows)
        return rows
0.004866
def _filterByPaddingNum(cls, iterable, num): """ Yield only path elements from iterable which have a frame padding that matches the given target padding number Args: iterable (collections.Iterable): num (int): Yields: str: """ _check = DISK_RE.match for item in iterable: # Add a filter for paths that don't match the frame # padding of a given number matches = _check(item) if not matches: if num <= 0: # Not a sequence pattern, but we were asked # to match on a zero padding yield item continue frame = matches.group(3) or '' if not frame: if num <= 0: # No frame value was parsed, but we were asked # to match on a zero padding yield item continue # We have a frame number if frame[0] == '0' or frame[:2] == '-0': if len(frame) == num: # A frame leading with '0' is explicitly # padded and can only be a match if its exactly # the target padding number yield item continue if len(frame) >= num: # A frame that does not lead with '0' can match # a padding width >= to the target padding number yield item continue
0.001256
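Assuming fileseq's module-level DISK_RE matches these names and captures the frame number in group 3, filtering for an explicit 4-digit padding behaves like this (hypothetical file names):

names = ['shot.0100.exr', 'shot.100.exr', 'shot.00100.exr', 'shot.12345.exr']
print(list(_filterByPaddingNum(names, 4)))
# ['shot.0100.exr', 'shot.12345.exr']  -- '100' is too short, '00100' is
# explicitly 5-padded, while '12345' (no leading zero) satisfies width >= 4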
def _parser_options():
    """Parses the options and arguments from the command line."""
    # Some of the details can come from the config file; the rest are
    # declared in the module-level script_options dict.
    import argparse
    from acorn import base
    pdescr = "ACORN setup and custom configuration"
    parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)
    for arg, options in script_options.items():
        parser.add_argument(arg, **options)

    args = base.exhandler(examples, parser)
    if args is None:
        return

    return args
0.007435
def params(self, **kwargs): """ Specify query params to be used when executing the search. All the keyword arguments will override the current values. See https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search for all available parameters. Example:: s = Search() s = s.params(routing='user-1', preference='local') """ s = self._clone() s._params.update(kwargs) return s
0.003891
def setup_service(api_name, api_version, credentials=None): """Configures genomics API client. Args: api_name: Name of the Google API (for example: "genomics") api_version: Version of the API (for example: "v2alpha1") credentials: Credentials to be used for the gcloud API calls. Returns: A configured Google Genomics API client with appropriate credentials. """ if not credentials: credentials = oauth2client.client.GoogleCredentials.get_application_default( ) return apiclient.discovery.build( api_name, api_version, credentials=credentials)
0.008518
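A usage sketch; the API name and version mirror the docstring's examples, and credentials default to the application-default ones from the environment:

service = setup_service('genomics', 'v2alpha1')
# Explicit credentials may also be supplied:
# creds = oauth2client.client.GoogleCredentials.get_application_default()
# service = setup_service('genomics', 'v2alpha1', credentials=creds)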
def visible_devices(self):
        """Unify all visible devices across all connected adapters

        Returns:
            dict: A dictionary mapping UUIDs to device information dictionaries
        """

        devs = {}

        for device_id, adapters in self._devices.items():
            dev = None
            max_signal = None
            best_adapter = None

            for adapter_id, devinfo in adapters.items():
                connstring = "adapter/{0}/{1}".format(adapter_id, devinfo['connection_string'])

                if dev is None:
                    dev = copy.deepcopy(devinfo)
                    del dev['connection_string']

                if 'adapters' not in dev:
                    dev['adapters'] = []
                    best_adapter = adapter_id

                dev['adapters'].append((adapter_id, devinfo['signal_strength'], connstring))

                if max_signal is None:
                    max_signal = devinfo['signal_strength']
                elif devinfo['signal_strength'] > max_signal:
                    max_signal = devinfo['signal_strength']
                    best_adapter = adapter_id

            # If the device was seen by no adapter it is about to be
            # expired, so don't return it
            if dev is None:
                continue

            dev['connection_string'] = "device/%x" % dev['uuid']
            dev['adapters'] = sorted(dev['adapters'], key=lambda x: x[1], reverse=True)
            dev['best_adapter'] = best_adapter
            dev['signal_strength'] = max_signal

            devs[device_id] = dev

        return devs
0.003115
def query(self, query='', name='', sortlist='', columns='',
              limit=0, offset=0, style='Python'):
        """Query the table and return the result as a reference table.

        This method queries the table. It forms a
        `TaQL <../../doc/199.html>`_
        command from the given arguments and executes it using the
        :func:`taql` function.
        The result is returned in a so-called reference table which
        references the selected columns and rows in the original table.
        Usually a reference table is temporary, but it can be made
        persistent by giving it a name.
        Note that a reference table is handled as any table, thus can be
        queried again.

        All arguments are optional, but at least one of `query`, `name`,
        `sortlist`, and `columns` should be used.
        See the `TaQL note <../../doc/199.html>`_ for the detailed
        description of the arguments representing the various parts
        of a TaQL command.

        `query`
          The WHERE part of a TaQL command.
        `name`
          The name of the reference table if it is to be made persistent.
        `sortlist`
          The ORDERBY part of a TaQL command. It is a single string in which
          commas have to be used to separate sort keys.
        `columns`
          The columns to be selected (projection in data base terms). It is a
          single string in which commas have to be used to separate column
          names. Apart from column names, expressions can be given as well.
        `limit`
          If > 0, maximum number of rows to be selected.
        `offset`
          If > 0, ignore the first N matches.
        `style`
          The TaQL syntax style to be used (defaults to Python).

        """
        if not query and not sortlist and not columns and \
                limit <= 0 and offset <= 0:
            raise ValueError('No selection done (arguments query, ' +
                             'sortlist, columns, limit, and offset are empty)')
        command = 'select '
        if columns:
            command += columns
        command += ' from $1'
        if query:
            command += ' where ' + query
        if sortlist:
            command += ' orderby ' + sortlist
        if limit > 0:
            command += ' limit %d' % limit
        if offset > 0:
            command += ' offset %d' % offset
        if name:
            command += ' giving ' + name
        return tablecommand(command, style, [self])
0.001198
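A hedged sketch against a hypothetical casacore MeasurementSet, combining several TaQL parts; `table` is assumed to come from casacore.tables.

t = table('observation.ms')  # hypothetical table on disk
sel = t.query(query='ANTENNA1 == 0',
              columns='TIME, ANTENNA2',
              sortlist='TIME',
              limit=100)
print(sel.nrows())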