Dataset columns: text (string, lengths 75 to 104k); avg_line_len (float64, range 7.91 to 980); score (float64, range 0 to 0.18).
def _do_create_keywords(args, kw):
    """This converts any arguments after the action argument into
    their equivalent keywords and adds them to the kw argument.
    """
    v = kw.get('varlist', ())
    # prevent varlist="FOO" from being interpreted as ['F', 'O', 'O']
    if is_String(v):
        v = (v,)
    kw['varlist'] = tuple(v)
    if args:
        # turn positional args into equivalent keywords
        cmdstrfunc = args[0]
        if cmdstrfunc is None or is_String(cmdstrfunc):
            kw['cmdstr'] = cmdstrfunc
        elif callable(cmdstrfunc):
            kw['strfunction'] = cmdstrfunc
        else:
            raise SCons.Errors.UserError(
                'Invalid command display variable type. '
                'You must either pass a string or a callback which '
                'accepts (target, source, env) as parameters.')
        if len(args) > 1:
            kw['varlist'] = tuple(SCons.Util.flatten(args[1:])) + kw['varlist']
    if kw.get('strfunction', _null) is not _null \
            and kw.get('cmdstr', _null) is not _null:
        raise SCons.Errors.UserError(
            'Cannot have both strfunction and cmdstr args to Action()')
avg_line_len: 44.384615 | score: 0.002545
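A minimal usage sketch (hypothetical call, assuming SCons' is_String, SCons.Util.flatten and the _null sentinel are in scope as in the source): positional Action() arguments are folded into the cmdstr and varlist keywords.

# Hypothetical illustration of the keyword normalization above.
kw = {}
_do_create_keywords(['$CMDSTR', 'CFLAGS', 'CPPFLAGS'], kw)
assert kw == {'cmdstr': '$CMDSTR', 'varlist': ('CFLAGS', 'CPPFLAGS')}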
def pixlen(text, widthlist, fontsize):
    """Calculate the length of text in pixels, given a list of (font-specific)
    glyph widths and the fontsize. Parameter 'widthlist' should have been
    created by 'doc._getCharWidths()'."""
    pl = 0.0
    for t in text:
        pl += widthlist[ord(t)]
    return pl * fontsize
avg_line_len: 39.375 | score: 0.003106
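A sketch of how pixlen composes widths and fontsize, using a made-up uniform width table rather than a real doc._getCharWidths() result:

# Made-up width table: every glyph is 0.5 units wide at fontsize 1.
widths = [0.5] * 256
assert pixlen("abc", widths, 12) == 18.0  # 3 glyphs * 0.5 * 12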
def copy(self, outpoint=None, stack_script=None,
         redeem_script=None, sequence=None):
    ''' TxIn -> TxIn '''
    return TxIn(
        outpoint=outpoint if outpoint is not None else self.outpoint,
        stack_script=(stack_script if stack_script is not None
                      else self.stack_script),
        redeem_script=(redeem_script if redeem_script is not None
                       else self.redeem_script),
        sequence=sequence if sequence is not None else self.sequence)
avg_line_len: 45.083333 | score: 0.005435
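A hedged usage sketch; tx_in and new_sequence are placeholders, not names from the source:

# tx_in is a hypothetical existing TxIn; the copy overrides only the
# sequence field, and every other field carries over from tx_in.
bumped = tx_in.copy(sequence=new_sequence)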
def create(cls, scheduled_analysis, tags=None, json_report_objects=None,
           raw_report_objects=None, additional_metadata=None,
           analysis_date=None):
    """
    Create a new report.

    For convenience :func:`~mass_api_client.resources.scheduled_analysis.ScheduledAnalysis.create_report`
    of class :class:`.ScheduledAnalysis` can be used instead.

    :param scheduled_analysis: The :class:`.ScheduledAnalysis` this report was created for
    :param tags: A list of strings
    :param json_report_objects: A dictionary of JSON reports, where the key is the object name.
    :param raw_report_objects: A dictionary of binary file reports, where the key is the file name.
    :param additional_metadata: A dictionary of additional metadata for the report.
    :param analysis_date: A datetime object of the time the report was generated. Defaults to current time.
    :return: The newly created report object
    """
    if tags is None:
        tags = []
    if additional_metadata is None:
        additional_metadata = {}
    if analysis_date is None:
        analysis_date = datetime.datetime.now()
    url = cls._creation_point.format(scheduled_analysis=scheduled_analysis.id)
    return cls._create(url=url, analysis_date=analysis_date,
                       additional_json_files=json_report_objects,
                       additional_binary_files=raw_report_objects, tags=tags,
                       additional_metadata=additional_metadata,
                       force_multipart=True)
avg_line_len: 52.740741 | score: 0.008276
def parse_bamPEFragmentSizeDistribution(self):
    """Find bamPEFragmentSize output. Supports the --outRawFragmentLengths option"""
    self.deeptools_bamPEFragmentSizeDistribution = dict()
    for f in self.find_log_files('deeptools/bamPEFragmentSizeDistribution', filehandles=False):
        parsed_data = self.parseBamPEFDistributionFile(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_bamPEFragmentSizeDistribution:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_bamPEFragmentSizeDistribution[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section='bamPEFragmentSizeDistribution')

    if len(self.deeptools_bamPEFragmentSizeDistribution) > 0:
        config = {
            'id': 'fragment_size_distribution_plot',
            'title': 'deeptools: Fragment Size Distribution Plot',
            'ylab': 'Occurrence',
            'xlab': 'Fragment Size (bp)',
            'smooth_points': 50,
            'xmax': 1000,
            'xDecimals': False,
            'tt_label': '<b>Fragment Size (bp) {point.x}</b>: {point.y} Occurrence',
        }
        self.add_section(
            name='Fragment size distribution',
            anchor='fragment_size_distribution',
            description="Distribution of paired-end fragment sizes",
            plot=linegraph.plot(self.deeptools_bamPEFragmentSizeDistribution, config)
        )

    return len(self.deeptools_bamPEFragmentSizeDistribution)
avg_line_len: 49.875 | score: 0.007376
def is_email_simple(value):
    """Return True if value looks like an email address."""
    # An @ must be in the middle of the value.
    if '@' not in value or value.startswith('@') or value.endswith('@'):
        return False
    try:
        p1, p2 = value.split('@')
    except ValueError:
        # value contains more than one @.
        return False
    # Dot must be in p2 (e.g. example.com)
    if '.' not in p2 or p2.startswith('.'):
        return False
    return True
avg_line_len: 37.142857 | score: 0.003752
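A few illustrative calls (a sketch, not part of the original module); each result follows directly from the checks above:

assert is_email_simple('user@example.com') is True
assert is_email_simple('@example.com') is False   # @ at the start
assert is_email_simple('a@b@c.com') is False      # more than one @
assert is_email_simple('user@invalid') is False   # no dot after the @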
def edges_to_coo(edges, count=None, data=None):
    """
    Given an edge list, return a boolean scipy.sparse.coo_matrix
    representing the edges in matrix form.

    Parameters
    ------------
    edges : (n,2) int
      Edges of a graph
    count : int
      The total number of nodes in the graph
      if None: count = edges.max() + 1
    data : (n,) any
      Assign data to each edge, if None will
      be bool True for each specified edge

    Returns
    ------------
    matrix: (count, count) scipy.sparse.coo_matrix
      Sparse COO
    """
    edges = np.asanyarray(edges, dtype=np.int64)
    if not (len(edges) == 0 or util.is_shape(edges, (-1, 2))):
        raise ValueError('edges must be (n,2)!')

    # if count isn't specified just set it to largest
    # value referenced in edges
    if count is None:
        count = edges.max() + 1
    count = int(count)

    # if no data is specified set every specified edge
    # to True
    if data is None:
        data = np.ones(len(edges), dtype=bool)

    matrix = coo_matrix((data, edges.T),
                        dtype=data.dtype,
                        shape=(count, count))
    return matrix
avg_line_len: 27.829268 | score: 0.000847
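A small sketch, assuming numpy, scipy.sparse.coo_matrix and the surrounding module's util helper are importable as in the source: count is inferred from the largest node index.

import numpy as np
edges = np.array([[0, 1], [1, 2], [2, 3]])
m = edges_to_coo(edges)           # count defaults to edges.max() + 1
assert m.shape == (4, 4) and m.dtype == bool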
def logpdf_link(self, link_f, y, Y_metadata=None):
    """
    Log Likelihood Function given link(f)

    .. math::
        \\ln p(y_{i}|\\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!

    :param link_f: latent variables (link(f))
    :type link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata which is not used in poisson distribution
    :returns: likelihood evaluated for this point
    :rtype: float
    """
    return -link_f + y*np.log(link_f) - special.gammaln(y+1)
avg_line_len: 34.235294 | score: 0.006689
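A worked numeric check of the Poisson log-density formula above (a sketch; numpy and scipy.special are assumed imported as in the source):

import numpy as np
from scipy import special
lam, y = 2.0, 3.0
# -lambda + y*ln(lambda) - ln(y!) = -2 + 3*ln 2 - ln 6 ~= -1.7123
val = -lam + y * np.log(lam) - special.gammaln(y + 1)
assert np.isclose(val, -1.712318, atol=1e-5)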
def raise_msg_to_str(msg):
    """msg is a return arg from a raise. Join with new lines."""
    if not is_string_like(msg):
        msg = '\n'.join(map(str, msg))
    return msg
avg_line_len: 34.6 | score: 0.00565
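A behavior sketch, assuming the module's is_string_like helper: non-string iterables are joined with newlines, strings pass through unchanged.

assert raise_msg_to_str(('first', 'second')) == 'first\nsecond'
assert raise_msg_to_str('already a string') == 'already a string'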
def parse_selectors(model, fields=None, exclude=None, key_map=None, **options):
    """Validates fields are valid and maps pseudo-fields to actual fields
    for a given model class.
    """
    fields = fields or DEFAULT_SELECTORS
    exclude = exclude or ()
    key_map = key_map or {}
    validated = []

    for alias in fields:
        # Map the output key name to the actual field/accessor name for
        # the model
        actual = key_map.get(alias, alias)

        # Validate the field exists
        cleaned = resolver.get_field(model, actual)

        if cleaned is None:
            raise AttributeError('The "{0}" attribute could not be found '
                                 'on the model "{1}"'.format(actual, model))

        # Mapped value, so use the original name listed in `fields`
        if type(cleaned) is list:
            validated.extend(cleaned)
        elif alias != actual:
            validated.append(alias)
        else:
            validated.append(cleaned)

    return tuple([x for x in validated if x not in exclude])
avg_line_len: 34.3 | score: 0.000945
def get_formset(self):
    """Provide the formset corresponding to this DataTable.

    Use this to validate the formset and to get the submitted data back.
    """
    if self._formset is None:
        self._formset = self.formset_class(
            self.request.POST or None,
            initial=self._get_formset_data(),
            prefix=self._meta.name)
    return self._formset
avg_line_len: 37.272727 | score: 0.004762
def get_conn(self):
    """
    Returns a Google Cloud Storage service object.
    """
    if not self._conn:
        self._conn = storage.Client(credentials=self._get_credentials())
    return self._conn
avg_line_len: 27.75 | score: 0.008734
def __update_offsets(self, fileobj, atoms, delta, offset):
    """Update offset tables in all 'stco' and 'co64' atoms."""
    if delta == 0:
        return
    moov = atoms[b"moov"]
    for atom in moov.findall(b'stco', True):
        self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
    for atom in moov.findall(b'co64', True):
        self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
    try:
        for atom in atoms[b"moof"].findall(b'tfhd', True):
            self.__update_tfhd(fileobj, atom, delta, offset)
    except KeyError:
        pass
avg_line_len: 44.214286 | score: 0.003165
def _var_uint_field_handler(handler, ctx):
    """Handler co-routine for variable unsigned integer fields.

    Invokes the given ``handler`` function with the read field and context,
    then immediately yields to the resulting co-routine.
    """
    _, self = yield
    queue = ctx.queue
    value = 0
    while True:
        if len(queue) == 0:
            # We don't know when the field ends, so read at least one byte.
            yield ctx.read_data_transition(1, self)
        octet = queue.read_byte()
        value <<= _VAR_INT_VALUE_BITS
        value |= octet & _VAR_INT_VALUE_MASK
        if octet & _VAR_INT_SIGNAL_MASK:
            break
    yield ctx.immediate_transition(handler(value, ctx))
avg_line_len: 36.578947 | score: 0.001403
def update_last_wm_layers(self, service_id, num_layers=10):
    """
    Update and index the last added and deleted layers (num_layers) in a
    WorldMap service.
    """
    from hypermap.aggregator.models import Service
    LOGGER.debug(
        'Updating the index for the last %s added and %s deleted layers in WorldMap service'
        % (num_layers, num_layers)
    )
    service = Service.objects.get(id=service_id)
    # TODO raise error if service type is not WM type
    if service.type == 'Hypermap:WorldMapLegacy':
        from hypermap.aggregator.models import update_layers_wm_legacy as update_layers_wm
    if service.type == 'Hypermap:WorldMap':
        from hypermap.aggregator.models import update_layers_geonode_wm as update_layers_wm
    update_layers_wm(service, num_layers)

    # Remove from the search engine the last num_layers that were deleted
    LOGGER.debug('Removing the index for the last %s deleted layers' % num_layers)
    layer_to_unindex = service.layer_set.filter(was_deleted=True).order_by('-last_updated')[0:num_layers]
    for layer in layer_to_unindex:
        if not settings.REGISTRY_SKIP_CELERY:
            unindex_layer(layer.id, use_cache=True)
        else:
            unindex_layer(layer.id)

    # Add/update in the search engine the last num_layers that were added
    LOGGER.debug('Adding/Updating the index for the last %s added layers' % num_layers)
    layer_to_index = service.layer_set.filter(was_deleted=False).order_by('-last_updated')[0:num_layers]
    for layer in layer_to_index:
        if not settings.REGISTRY_SKIP_CELERY:
            index_layer(layer.id, use_cache=True)
        else:
            index_layer(layer.id)
avg_line_len: 45.5 | score: 0.00538
def _initialize_transactions_queues(self, chain_state: ChainState):
    """Initialize the pending transaction queue from the previous run.

    Note:
        This will only send the transactions which don't have their
        side-effects applied. Transactions which another node may have sent
        already will be detected by the alarm task's first run and cleared
        from the queue (e.g. A monitoring service update transfer).
    """
    assert self.alarm.is_primed(), f'AlarmTask not primed. node:{self!r}'

    pending_transactions = views.get_pending_transactions(chain_state)

    log.debug(
        'Processing pending transactions',
        num_pending_transactions=len(pending_transactions),
        node=pex(self.address),
    )

    for transaction in pending_transactions:
        try:
            self.raiden_event_handler.on_raiden_event(self, transaction)
        except RaidenRecoverableError as e:
            log.error(str(e))
        except InvalidDBData:
            raise
        except RaidenUnrecoverableError as e:
            log_unrecoverable = (
                self.config['environment_type'] == Environment.PRODUCTION
                and not self.config['unrecoverable_error_should_crash']
            )
            if log_unrecoverable:
                log.error(str(e))
            else:
                raise
avg_line_len: 41 | score: 0.002042
def p_enum_constant(t):
    """enum_constant : ID EQUALS value"""
    global name_dict, error_occurred
    id = t[1]
    value = t[3]
    lineno = t.lineno(1)
    if id_unique(id, 'enum', lineno):
        info = name_dict[id] = const_info(id, value, lineno, enum=True)
        if not (value[0].isdigit() or value[0] == '-'):
            # We have a name instead of a constant, make sure it is defined
            if value not in name_dict:
                error_occurred = True
                print("ERROR - can't dereference {0:s} at line {1:d}".format(
                    value, lineno))
            elif not isinstance(name_dict[value], const_info):
                error_occurred = True
                print(
                    "ERROR - reference to {0:s} at line {1:d} is not a constant".
                    format(value, lineno))
            else:
                info.positive = name_dict[value].positive
        t[0] = [info]
    else:
        t[0] = []
avg_line_len: 39.208333 | score: 0.002075
def addFASTAFilteringCommandLineOptions(parser):
    """
    Add standard FASTA filtering command-line options to an argparse parser.

    These are options that can be used to select or omit entire FASTA
    records, NOT options that change them (for that see
    addFASTAEditingCommandLineOptions).

    @param parser: An C{argparse.ArgumentParser} instance.
    """
    parser.add_argument(
        '--minLength', type=int,
        help='The minimum sequence length')

    parser.add_argument(
        '--maxLength', type=int,
        help='The maximum sequence length')

    parser.add_argument(
        '--whitelist', action='append',
        help='Sequence titles (ids) that should be whitelisted')

    parser.add_argument(
        '--blacklist', action='append',
        help='Sequence titles (ids) that should be blacklisted')

    parser.add_argument(
        '--whitelistFile',
        help=('The name of a file that contains sequence titles (ids) that '
              'should be whitelisted, one per line'))

    parser.add_argument(
        '--blacklistFile',
        help=('The name of a file that contains sequence titles (ids) that '
              'should be blacklisted, one per line'))

    parser.add_argument(
        '--titleRegex', help='A regex that sequence titles (ids) must match.')

    parser.add_argument(
        '--negativeTitleRegex',
        help='A regex that sequence titles (ids) must not match.')

    # A mutually exclusive group for --keepSequences and --removeSequences.
    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        '--keepSequences',
        help=('Specify (1-based) ranges of sequence numbers that should be '
              'kept. E.g., --keepSequences 1-3,5 will output just the 1st, '
              '2nd, 3rd, and 5th sequences. All others will be omitted.'))

    group.add_argument(
        '--removeSequences',
        help=('Specify (1-based) ranges of sequence numbers that should be '
              'removed. E.g., --removeSequences 1-3,5 will output all but '
              'the 1st, 2nd, 3rd, and 5th sequences. All others will be '
              'output.'))

    parser.add_argument(
        '--head', type=int, metavar='N',
        help='Only the first N sequences will be printed.')

    parser.add_argument(
        '--removeDuplicates', action='store_true', default=False,
        help=('Duplicate reads will be removed, based only on '
              'sequence identity. The first occurrence is kept.'))

    parser.add_argument(
        '--removeDuplicatesById', action='store_true', default=False,
        help=('Duplicate reads will be removed, based only on '
              'read id. The first occurrence is kept.'))

    # See the docstring for dark.reads.Reads.filter for more detail on
    # randomSubset.
    parser.add_argument(
        '--randomSubset', type=int,
        help=('An integer giving the number of sequences that should be '
              'kept. These will be selected at random.'))

    # See the docstring for dark.reads.Reads.filter for more detail on
    # trueLength.
    parser.add_argument(
        '--trueLength', type=int,
        help=('The number of reads in the FASTA input. Only to be used with '
              'randomSubset'))

    parser.add_argument(
        '--sampleFraction', type=float,
        help=('A [0.0, 1.0] C{float} indicating a fraction of the reads that '
              'should be allowed to pass through the filter. The sample size '
              'will only be approximately the product of the sample fraction '
              'and the number of reads. The sample is taken at random.'))

    parser.add_argument(
        '--sequenceNumbersFile',
        help=('A file of (1-based) sequence numbers to retain. Numbers must '
              'be one per line.'))
avg_line_len: 38.072165 | score: 0.000264
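A quick usage sketch with the standard-library argparse:

import argparse
parser = argparse.ArgumentParser()
addFASTAFilteringCommandLineOptions(parser)
args = parser.parse_args(['--minLength', '100', '--head', '10'])
assert args.minLength == 100 and args.head == 10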
def begin(self):
    """Enter a transaction explicitly.

    No data will be written until the transaction has been committed.
    """
    if not hasattr(self.local, 'tx'):
        self.local.tx = []
    self.local.tx.append(self.executable.begin())
avg_line_len: 33.25 | score: 0.007326
def Expand(self):
    """Reads the contents of the current node and the full subtree. It
    then makes the subtree available until the next xmlTextReaderRead()
    call """
    ret = libxml2mod.xmlTextReaderExpand(self._o)
    if ret is None:
        raise treeError('xmlTextReaderExpand() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
avg_line_len: 45.375 | score: 0.010811
def GetComponentsFromGT(self, modes, innode):
    """Obtain components from graph table for the given observation mode
    keywords and starting node.

    .. note::

        This prints extra information to screen if
        ``pysynphot.tables.DEBUG`` is set to `True`.

    Parameters
    ----------
    modes : list of str
        List of individual keywords within the observation mode.

    innode : int
        Starting node, usually 1.

    Returns
    -------
    components, thcomponents : list of str
        List of optical and thermal component names.

    Raises
    ------
    KeyError
        No matches found for one of the keywords.

    ValueError
        Incomplete observation mode or unused keyword(s) detected.

    """
    components = []
    thcomponents = []
    outnode = 0
    inmodes = set(modes)
    used_modes = set()
    count = 0

    while outnode >= 0:
        if (DEBUG and (outnode < 0)):
            print("outnode == %d: stop condition" % outnode)

        previous_outnode = outnode
        nodes = N.where(self.innodes == innode)

        # If there are no entries with this innode, we're done
        if nodes[0].size == 0:
            if DEBUG:
                print("no such innode %d: stop condition" % innode)
            #return (components,thcomponents)
            break

        # Find the entry corresponding to the component named
        # 'default', because that's the one we'll use if we don't
        # match anything in the modes list
        defaultindex = N.where(self.keywords[nodes] == 'default')

        if 'default' in self.keywords[nodes]:
            dfi = N.where(self.keywords[nodes] == 'default')[0][0]
            outnode = self.outnodes[nodes[0][dfi]]
            component = self.compnames[nodes[0][dfi]]
            thcomponent = self.thcompnames[nodes[0][dfi]]
            used_default = True
        else:
            # There's no default, so fail if you don't match anything
            # in the keyword matching step.
            outnode = -2
            component = thcomponent = None

        # Now try and match something from the modes list
        for mode in modes:
            if mode in self.keywords[nodes]:
                used_modes.add(mode)
                index = N.where(self.keywords[nodes] == mode)
                if len(index[0]) > 1:
                    raise KeyError('%d matches found for %s' %
                                   (len(index[0]), mode))
                idx = index[0][0]
                component = self.compnames[nodes[0][idx]]
                thcomponent = self.thcompnames[nodes[0][idx]]
                outnode = self.outnodes[nodes[0][idx]]
                used_default = False

        if DEBUG:
            print("Innode %d Outnode %d Compname %s" %
                  (innode, outnode, component))

        components.append(component)
        thcomponents.append(thcomponent)

        innode = outnode

        if outnode == previous_outnode:
            if DEBUG:
                print("Innode: %d Outnode:%d Used default: %s" %
                      (innode, outnode, used_default))
            count += 1
            if count > 3:
                if DEBUG:
                    print("same outnode %d > 3 times: stop condition" % outnode)
                break

    if (outnode < 0):
        if DEBUG:
            print("outnode == %d: stop condition" % outnode)
        raise ValueError("Incomplete obsmode %s" % str(modes))

    # Check for unused modes
    if inmodes != used_modes:
        unused = str(inmodes.difference(used_modes))
        raise ValueError("Warning: unused keywords %s" % unused)

    return (components, thcomponents)
avg_line_len: 33.883929 | score: 0.008449
def about():
    """
    About box for aps. Gives version numbers for
    aps, NumPy, SciPy, Cython, and MatPlotLib.
    """
    print("")
    print("aps: APS Journals API in Python for Humans")
    print("Copyright (c) 2017 and later.")
    print("Xiao Shang")
    print("")
    print("aps Version: %s" % aps.__version__)
    print("Numpy Version: %s" % numpy.__version__)
    print("Scipy Version: %s" % scipy.__version__)
    try:
        import Cython
        cython_ver = Cython.__version__
    except:
        cython_ver = 'None'
    print("Cython Version: %s" % cython_ver)
    try:
        import matplotlib
        matplotlib_ver = matplotlib.__version__
    except:
        matplotlib_ver = 'None'
    print("Matplotlib Version: %s" % matplotlib_ver)
    print("Python Version: %d.%d.%d" % sys.version_info[0:3])
    print("Number of CPUs: %s" % hardware_info()['cpus'])
    # print("BLAS Info: %s" % _blas_info())
    print("Platform Info: %s (%s)" % (platform.system(), platform.machine()))
    aps_install_path = os.path.dirname(inspect.getsourcefile(aps))
    print("Installation path: %s" % aps_install_path)
    print("")
avg_line_len: 36.090909 | score: 0.002453
def download_data_dictionary(request, dataset_id):
    """Generates and returns compiled data dictionary from database.
    Returned as a CSV response.
    """
    dataset = Dataset.objects.get(pk=dataset_id)
    dataDict = dataset.data_dictionary
    fields = DataDictionaryField.objects.filter(
        parent_dict=dataDict
    ).order_by('columnIndex')
    response = HttpResponse(content_type='text/csv')
    csvName = slugify(dataset.title + ' data dict') + '.csv'
    response['Content-Disposition'] = 'attachment; filename=%s' % (csvName)
    csvWriter = writer(response)
    metaHeader = [
        'Data Dictionary for {0} prepared by {1}'.format(
            dataset.title,
            dataset.uploaded_by
        )
    ]
    csvWriter.writerow(metaHeader)
    trueHeader = ['Column Index', 'Heading', 'Description', 'Data Type']
    csvWriter.writerow(trueHeader)
    for field in fields:
        mappedIndex = field.COLUMN_INDEX_CHOICES[field.columnIndex - 1][1]
        csvWriter.writerow(
            [mappedIndex, field.heading, field.description, field.dataType]
        )
    return response
avg_line_len: 32.666667 | score: 0.000901
def get_file_language(filename, text=None):
    """Get file language from filename"""
    ext = osp.splitext(filename)[1]
    if ext.startswith('.'):
        ext = ext[1:]  # file extension with leading dot
    language = ext
    if not ext:
        if text is None:
            text, _enc = encoding.read(filename)
        for line in text.splitlines():
            if not line.strip():
                continue
            if line.startswith('#!'):
                shebang = line[2:]
                if 'python' in shebang:
                    language = 'python'
            else:
                break
    return language
avg_line_len: 32.684211 | score: 0.007825
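Two illustrative calls (a sketch, assuming the module's osp (os.path) and encoding helpers are in scope): the extension wins when present; otherwise the shebang of the given text is inspected, so no file is read in the second call.

assert get_file_language('script.py') == 'py'
assert get_file_language('tool', text='#!/usr/bin/env python\nprint(1)\n') == 'python'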
def get_conn(self):
    """
    Returns an SFTP connection object
    """
    if self.conn is None:
        cnopts = pysftp.CnOpts()
        if self.no_host_key_check:
            cnopts.hostkeys = None
        cnopts.compression = self.compress
        conn_params = {
            'host': self.remote_host,
            'port': self.port,
            'username': self.username,
            'cnopts': cnopts
        }
        if self.password and self.password.strip():
            conn_params['password'] = self.password
        if self.key_file:
            conn_params['private_key'] = self.key_file
        if self.private_key_pass:
            conn_params['private_key_pass'] = self.private_key_pass
        self.conn = pysftp.Connection(**conn_params)
    return self.conn
avg_line_len: 35.125 | score: 0.002309
def variance(self, bins=None, low=None, high=None, nbins=500,
             log=False, norm=False, density=False):
    """Calculate the `SpectralVariance` of this `Spectrogram`.

    Parameters
    ----------
    bins : `~numpy.ndarray`, optional, default `None`
        array of histogram bin edges, including the rightmost edge

    low : `float`, optional, default: `None`
        left edge of lowest amplitude bin, only read if ``bins`` is not
        given

    high : `float`, optional, default: `None`
        right edge of highest amplitude bin, only read if ``bins`` is not
        given

    nbins : `int`, optional, default: `500`
        number of bins to generate, only read if ``bins`` is not given

    log : `bool`, optional, default: `False`
        calculate amplitude bins over a logarithmic scale, only read if
        ``bins`` is not given

    norm : `bool`, optional, default: `False`
        normalise bin counts to a unit sum

    density : `bool`, optional, default: `False`
        normalise bin counts to a unit integral

    Returns
    -------
    specvar : `SpectralVariance`
        2D-array of spectral frequency-amplitude counts

    See Also
    --------
    :func:`numpy.histogram`
        for details on specifying bins and weights
    """
    from ..frequencyseries import SpectralVariance
    return SpectralVariance.from_spectrogram(
        self, bins=bins, low=low, high=high, nbins=nbins, log=log,
        norm=norm, density=density)
avg_line_len: 40.487179 | score: 0.001855
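A hypothetical call, assuming spec is an existing gwpy Spectrogram instance: histogram each frequency bin's amplitudes into 100 logarithmic bins, normalised to a unit sum.

# `spec` is a placeholder Spectrogram; bounds here are illustrative.
var = spec.variance(low=1e-24, high=1e-19, nbins=100, log=True, norm=True)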
def find(query):
    """ Search by Name, SMILES, InChI, InChIKey, etc.

    Returns first 100 Compounds.
    """
    assert isinstance(query, str), 'query not a string object'
    searchurl = 'http://www.chemspider.com/Search.asmx/SimpleSearch?query=%s&token=%s' % (urlquote(query), TOKEN)
    response = urlopen(searchurl)
    tree = ET.parse(response)
    elem = tree.getroot()
    csid_tags = elem.getiterator('{http://www.chemspider.com/}int')
    compoundlist = []
    for tag in csid_tags:
        compoundlist.append(Compound(tag.text))
    return compoundlist if compoundlist else None
avg_line_len: 49.166667 | score: 0.006656
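A hypothetical usage, assuming a valid ChemSpider TOKEN is configured and network access is available:

hits = find('aspirin')      # None if nothing matched
if hits:
    first = hits[0]         # a Compound built from the first CSID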
def image_destroy(image):
    """Wraps openjpeg library function opj_image_destroy."""
    OPENJPEG.opj_image_destroy.argtypes = [ctypes.POINTER(ImageType)]
    OPENJPEG.opj_image_destroy(image)
avg_line_len: 47.75 | score: 0.005155
def named_relations(self, name, neg=False):
    '''Returns list of named Relations.

    <name> may be string or list.
    '''
    if self.internalLinks and not neg:
        if isinstance(name, six.string_types):
            return filter(lambda x: x.name == name, self.internalLinks)
        elif isinstance(name, list):
            return filter(lambda x: x.name in name, self.internalLinks)
        else:
            return None  # should raise error
    elif self.internalLinks and neg:
        if isinstance(name, six.string_types):
            return filter(lambda x: x.name != name, self.internalLinks)
        elif isinstance(name, list):
            return filter(lambda x: x.name not in name, self.internalLinks)
        else:
            return None  # should raise error
    else:
        return []
avg_line_len: 31.935484 | score: 0.007843
def ping():
    '''
    Is the chassis responding?

    :return: Returns False if the chassis didn't respond, True otherwise.
    '''
    r = __salt__['dracr.system_info'](host=DETAILS['host'],
                                      admin_username=DETAILS['admin_username'],
                                      admin_password=DETAILS['admin_password'])
    if r.get('retcode', 0) == 1:
        return False
    else:
        return True
    # Unreachable: both branches above return before this point.
    try:
        return r['dict'].get('ret', False)
    except Exception:
        return False
avg_line_len: 28.666667 | score: 0.001876
def purge_old_request_logs(delete_before_days=7):
    """
    Purges old request logs from the database table
    """
    delete_before_date = timezone.now() - timedelta(days=delete_before_days)
    logs_deleted = RequestLog.objects.filter(
        created_on__lte=delete_before_date).delete()
    return logs_deleted
avg_line_len: 38.75 | score: 0.003155
def split_by_folder(self, train:str='train', valid:str='valid')->'ItemLists': "Split the data depending on the folder (`train` or `valid`) in which the filenames are." return self.split_by_idxs(self._get_by_folder(train), self._get_by_folder(valid))
[ "def", "split_by_folder", "(", "self", ",", "train", ":", "str", "=", "'train'", ",", "valid", ":", "str", "=", "'valid'", ")", "->", "'ItemLists'", ":", "return", "self", ".", "split_by_idxs", "(", "self", ".", "_get_by_folder", "(", "train", ")", ",", "self", ".", "_get_by_folder", "(", "valid", ")", ")" ]
87.666667
0.041509
def data_to_df(self, sysbase=False): """ Return a pandas.DataFrame of device parameters. :param sysbase: save per unit values in system base """ p_dict_comp = self.data_to_dict(sysbase=sysbase) self._check_pd() self.param_df = pd.DataFrame(data=p_dict_comp).set_index('idx') return self.param_df
[ "def", "data_to_df", "(", "self", ",", "sysbase", "=", "False", ")", ":", "p_dict_comp", "=", "self", ".", "data_to_dict", "(", "sysbase", "=", "sysbase", ")", "self", ".", "_check_pd", "(", ")", "self", ".", "param_df", "=", "pd", ".", "DataFrame", "(", "data", "=", "p_dict_comp", ")", ".", "set_index", "(", "'idx'", ")", "return", "self", ".", "param_df" ]
29.25
0.005525
def run(self, galaxy_data, results=None, mask=None): """ Run this phase. Parameters ---------- galaxy_data mask: Mask The default masks passed in by the pipeline results: autofit.tools.pipeline.ResultsCollection An object describing the results of the last phase or None if no phase has been executed Returns ------- result: AbstractPhase.Result A result object comprising the best fit model and other hyper. """ analysis = self.make_analysis(galaxy_data=galaxy_data, results=results, mask=mask) result = self.run_analysis(analysis) return self.make_result(result, analysis)
[ "def", "run", "(", "self", ",", "galaxy_data", ",", "results", "=", "None", ",", "mask", "=", "None", ")", ":", "analysis", "=", "self", ".", "make_analysis", "(", "galaxy_data", "=", "galaxy_data", ",", "results", "=", "results", ",", "mask", "=", "mask", ")", "result", "=", "self", ".", "run_analysis", "(", "analysis", ")", "return", "self", ".", "make_result", "(", "result", ",", "analysis", ")" ]
33.571429
0.005517
def _download_video(self, video_url, video_name):
    """Download a video from the remote node

    :param video_url: video url
    :param video_name: video name
    """
    filename = '{0:0=2d}_{1}'.format(DriverWrappersPool.videos_number, video_name)
    filename = '{}.mp4'.format(get_valid_filename(filename))
    filepath = os.path.join(DriverWrappersPool.videos_directory, filename)
    if not os.path.exists(DriverWrappersPool.videos_directory):
        os.makedirs(DriverWrappersPool.videos_directory)
    response = requests.get(video_url)
    # Use a context manager so the file handle is closed promptly
    with open(filepath, 'wb') as video_file:
        video_file.write(response.content)
    self.logger.info("Video saved in '%s'", filepath)
    DriverWrappersPool.videos_number += 1
[ "def", "_download_video", "(", "self", ",", "video_url", ",", "video_name", ")", ":", "filename", "=", "'{0:0=2d}_{1}'", ".", "format", "(", "DriverWrappersPool", ".", "videos_number", ",", "video_name", ")", "filename", "=", "'{}.mp4'", ".", "format", "(", "get_valid_filename", "(", "filename", ")", ")", "filepath", "=", "os", ".", "path", ".", "join", "(", "DriverWrappersPool", ".", "videos_directory", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "DriverWrappersPool", ".", "videos_directory", ")", ":", "os", ".", "makedirs", "(", "DriverWrappersPool", ".", "videos_directory", ")", "response", "=", "requests", ".", "get", "(", "video_url", ")", "open", "(", "filepath", ",", "'wb'", ")", ".", "write", "(", "response", ".", "content", ")", "self", ".", "logger", ".", "info", "(", "\"Video saved in '%s'\"", ",", "filepath", ")", "DriverWrappersPool", ".", "videos_number", "+=", "1" ]
48.733333
0.004027
def is_dict(value, **kwargs): """Indicate whether ``value`` is a valid :class:`dict <python:dict>` .. note:: This will return ``True`` even if ``value`` is an empty :class:`dict <python:dict>`. :param value: The value to evaluate. :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator """ if isinstance(value, dict): return True try: value = validators.dict(value, **kwargs) except SyntaxError as error: raise error except Exception: return False return True
[ "def", "is_dict", "(", "value", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "True", "try", ":", "value", "=", "validators", ".", "dict", "(", "value", ",", "*", "*", "kwargs", ")", "except", "SyntaxError", "as", "error", ":", "raise", "error", "except", "Exception", ":", "return", "False", "return", "True" ]
25.928571
0.002656
def _save_owner_cover_photo(session, hash, photo): """ https://vk.com/dev/photos.saveOwnerCoverPhoto """ response = session.fetch('photos.saveOwnerCoverPhoto', hash=hash, photo=photo) return response
[ "def", "_save_owner_cover_photo", "(", "session", ",", "hash", ",", "photo", ")", ":", "response", "=", "session", ".", "fetch", "(", "'photos.saveOwnerCoverPhoto'", ",", "hash", "=", "hash", ",", "photo", "=", "photo", ")", "return", "response" ]
39
0.012552
def init_db(drop_all=False): """Initialize the database, optionally dropping existing tables.""" if drop_all: Base.metadata.drop_all(bind=engine) Base.metadata.create_all(bind=engine) return session
[ "def", "init_db", "(", "drop_all", "=", "False", ")", ":", "if", "drop_all", ":", "Base", ".", "metadata", ".", "drop_all", "(", "bind", "=", "engine", ")", "Base", ".", "metadata", ".", "create_all", "(", "bind", "=", "engine", ")", "return", "session" ]
31
0.004484
def get_headers_from_environ(environ): """Get a wsgiref.headers.Headers object with headers from the environment. Headers in environ are prefixed with 'HTTP_', are all uppercase, and have had dashes replaced with underscores. This strips the HTTP_ prefix and changes underscores back to dashes before adding them to the returned set of headers. Args: environ: An environ dict for the request as defined in PEP-333. Returns: A wsgiref.headers.Headers object that's been filled in with any HTTP headers found in environ. """ headers = wsgiref.headers.Headers([]) for header, value in environ.iteritems(): if header.startswith('HTTP_'): headers[header[5:].replace('_', '-')] = value # Content-Type is special; it does not start with 'HTTP_'. if 'CONTENT_TYPE' in environ: headers['CONTENT-TYPE'] = environ['CONTENT_TYPE'] return headers
[ "def", "get_headers_from_environ", "(", "environ", ")", ":", "headers", "=", "wsgiref", ".", "headers", ".", "Headers", "(", "[", "]", ")", "for", "header", ",", "value", "in", "environ", ".", "iteritems", "(", ")", ":", "if", "header", ".", "startswith", "(", "'HTTP_'", ")", ":", "headers", "[", "header", "[", "5", ":", "]", ".", "replace", "(", "'_'", ",", "'-'", ")", "]", "=", "value", "# Content-Type is special; it does not start with 'HTTP_'.", "if", "'CONTENT_TYPE'", "in", "environ", ":", "headers", "[", "'CONTENT-TYPE'", "]", "=", "environ", "[", "'CONTENT_TYPE'", "]", "return", "headers" ]
37.695652
0.008999
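A quick illustration of the header-name transformation the record above describes. The environ keys and values are hypothetical, and the loop is written with .items() since the original's iteritems() is Python 2 only:

import wsgiref.headers

# Hypothetical PEP-333 environ; only HTTP_* keys and CONTENT_TYPE survive.
environ = {
    'HTTP_X_CUSTOM_HEADER': 'abc',
    'HTTP_ACCEPT': 'application/json',
    'CONTENT_TYPE': 'text/plain',
    'PATH_INFO': '/',  # ignored: neither HTTP_* nor CONTENT_TYPE
}

headers = wsgiref.headers.Headers([])
for header, value in environ.items():
    if header.startswith('HTTP_'):
        # HTTP_X_CUSTOM_HEADER -> X-CUSTOM-HEADER
        headers[header[5:].replace('_', '-')] = value
if 'CONTENT_TYPE' in environ:
    headers['CONTENT-TYPE'] = environ['CONTENT_TYPE']

print(headers['X-CUSTOM-HEADER'])  # abc
print(headers['CONTENT-TYPE'])     # text/plain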
def match(self, field_name, field_value, **options): """ Returns first match found in :any:`get_all` >>> airtable.match('Name', 'John') {'fields': {'Name': 'John'} } Args: field_name (``str``): Name of field to match (column name). field_value (``str``): Value of field to match. Keyword Args: max_records (``int``, optional): The maximum total number of records that will be returned. See :any:`MaxRecordsParam` view (``str``, optional): The name or ID of a view. See :any:`ViewParam`. fields (``str``, ``list``, optional): Name of field or fields to be retrieved. Default is all fields. See :any:`FieldsParam`. sort (``list``, optional): List of fields to sort by. Default order is ascending. See :any:`SortParam`. Returns: record (``dict``): First record to match the field_value provided """ from_name_and_value = AirtableParams.FormulaParam.from_name_and_value formula = from_name_and_value(field_name, field_value) options['formula'] = formula for record in self.get_all(**options): return record else: return {}
[ "def", "match", "(", "self", ",", "field_name", ",", "field_value", ",", "*", "*", "options", ")", ":", "from_name_and_value", "=", "AirtableParams", ".", "FormulaParam", ".", "from_name_and_value", "formula", "=", "from_name_and_value", "(", "field_name", ",", "field_value", ")", "options", "[", "'formula'", "]", "=", "formula", "for", "record", "in", "self", ".", "get_all", "(", "*", "*", "options", ")", ":", "return", "record", "else", ":", "return", "{", "}" ]
41.870968
0.001506
def snapshots_create(container, name=None, remote_addr=None,
                     cert=None, key=None, verify_cert=True):
    '''
    Create a snapshot for a container

    container :
        The name of the container to create the snapshot for.

    name :
        The name of the snapshot.

    remote_addr :
        A URL to a remote server. The 'cert' and 'key' fields must also be
        provided if 'remote_addr' is defined.

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Verify the SSL certificate.  Default: True

    CLI Examples:

    .. code-block:: bash

        $ salt '*' lxd.snapshots_create test-container test-snapshot
    '''
    cont = container_get(
        container, remote_addr, cert, key, verify_cert, _raw=True
    )
    if not name:
        name = datetime.now().strftime('%Y%m%d%H%M%S')

    cont.snapshots.create(name)

    for c in snapshots_all(container).get(container):
        if c.get('name') == name:
            return {'name': name}

    return {'name': False}
[ "def", "snapshots_create", "(", "container", ",", "name", "=", "None", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "cont", "=", "container_get", "(", "container", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "if", "not", "name", ":", "name", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y%m%d%H%M%S'", ")", "cont", ".", "snapshots", ".", "create", "(", "name", ")", "for", "c", "in", "snapshots_all", "(", "container", ")", ".", "get", "(", "container", ")", ":", "if", "c", ".", "get", "(", "'name'", ")", "==", "name", ":", "return", "{", "'name'", ":", "name", "}", "return", "{", "'name'", ":", "False", "}" ]
22.830189
0.000792
def eval( self, expression: z3.ExprRef, model_completion: bool = False ) -> Union[None, z3.ExprRef]: """ Evaluate the expression using this model :param expression: The expression to evaluate :param model_completion: Use the default value if the model has no interpretation of the given expression :return: The evaluated expression """ for internal_model in self.raw: is_last_model = self.raw.index(internal_model) == len(self.raw) - 1 is_relevant_model = expression.decl() in list(internal_model.decls()) if is_relevant_model or is_last_model: return internal_model.eval(expression, model_completion) return None
[ "def", "eval", "(", "self", ",", "expression", ":", "z3", ".", "ExprRef", ",", "model_completion", ":", "bool", "=", "False", ")", "->", "Union", "[", "None", ",", "z3", ".", "ExprRef", "]", ":", "for", "internal_model", "in", "self", ".", "raw", ":", "is_last_model", "=", "self", ".", "raw", ".", "index", "(", "internal_model", ")", "==", "len", "(", "self", ".", "raw", ")", "-", "1", "is_relevant_model", "=", "expression", ".", "decl", "(", ")", "in", "list", "(", "internal_model", ".", "decls", "(", ")", ")", "if", "is_relevant_model", "or", "is_last_model", ":", "return", "internal_model", ".", "eval", "(", "expression", ",", "model_completion", ")", "return", "None" ]
48
0.006812
def file_dict(*packages, **kwargs):
    '''
    .. versionchanged:: 2016.3.0

    List the files that belong to a package.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.file_dict nginx
        salt '*' pkg.file_dict nginx varnish
    '''
    errors = []
    files = {}

    for package in packages:
        cmd = ['pkg_info', '-qL', package]
        ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
        files[package] = []
        for line in ret['stderr'].splitlines():
            errors.append(line)
        for line in ret['stdout'].splitlines():
            if line.startswith('/'):
                files[package].append(line)
            else:
                continue  # unexpected string

    ret = {'errors': errors, 'files': files}
    for field in list(ret):
        if not ret[field] or ret[field] == '':
            del ret[field]
    return ret
[ "def", "file_dict", "(", "*", "packages", ",", "*", "*", "kwargs", ")", ":", "errors", "=", "[", "]", "files", "=", "{", "}", "for", "package", "in", "packages", ":", "cmd", "=", "[", "'pkg_info'", ",", "'-qL'", ",", "package", "]", "ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "output_loglevel", "=", "'trace'", ")", "files", "[", "package", "]", "=", "[", "]", "for", "line", "in", "ret", "[", "'stderr'", "]", ".", "splitlines", "(", ")", ":", "errors", ".", "append", "(", "line", ")", "for", "line", "in", "ret", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "'/'", ")", ":", "files", "[", "package", "]", ".", "append", "(", "line", ")", "else", ":", "continue", "# unexpected string", "ret", "=", "{", "'errors'", ":", "errors", ",", "'files'", ":", "files", "}", "for", "field", "in", "list", "(", "ret", ")", ":", "if", "not", "ret", "[", "field", "]", "or", "ret", "[", "field", "]", "==", "''", ":", "del", "ret", "[", "field", "]", "return", "ret" ]
25.323529
0.001119
def checkout_with_fetch(git_folder, refspec, repository="origin"):
    """Fetch the refspec, and checkout FETCH_HEAD.
    Beware that you will be in detached HEAD mode.
    """
    _LOGGER.info("Trying to fetch and checkout %s", refspec)
    repo = Repo(str(git_folder))
    repo.git.fetch(repository, refspec)  # FETCH_HEAD should be set
    repo.git.checkout("FETCH_HEAD")
    _LOGGER.info("Fetch and checkout success for %s", refspec)
[ "def", "checkout_with_fetch", "(", "git_folder", ",", "refspec", ",", "repository", "=", "\"origin\"", ")", ":", "_LOGGER", ".", "info", "(", "\"Trying to fetch and checkout %s\"", ",", "refspec", ")", "repo", "=", "Repo", "(", "str", "(", "git_folder", ")", ")", "repo", ".", "git", ".", "fetch", "(", "repository", ",", "refspec", ")", "# FETCH_HEAD should be set", "repo", ".", "git", ".", "checkout", "(", "\"FETCH_HEAD\"", ")", "_LOGGER", ".", "info", "(", "\"Fetch and checkout success for %s\"", ",", "refspec", ")" ]
47.666667
0.002288
def to_array_with_default(value, default_value):
    """
    Converts value into an array object with the specified default.
    Single values are converted into arrays with a single element.

    :param value: the value to convert.
    :param default_value: default array object.
    :return: array object or the default array when value is None.
    """
    result = ArrayConverter.to_nullable_array(value)
    return result if result is not None else default_value
[ "def", "to_array_with_default", "(", "value", ",", "default_value", ")", ":", "result", "=", "ArrayConverter", ".", "to_nullable_array", "(", "value", ")", "return", "result", "if", "result", "!=", "None", "else", "default_value" ]
36.615385
0.006148
def _pkg_names(pkg): """ Given a namespace package, yield the components of that package. >>> names = Installer._pkg_names('a.b.c') >>> set(names) == set(['a', 'a.b', 'a.b.c']) True """ parts = pkg.split('.') while parts: yield '.'.join(parts) parts.pop()
[ "def", "_pkg_names", "(", "pkg", ")", ":", "parts", "=", "pkg", ".", "split", "(", "'.'", ")", "while", "parts", ":", "yield", "'.'", ".", "join", "(", "parts", ")", "parts", ".", "pop", "(", ")" ]
26.153846
0.005682
def getWaveletData(eda):
    '''
    This function computes the wavelet coefficients

    INPUT:
        eda:            DataFrame, index is a list of timestamps at 8Hz, columns include EDA, filtered_eda

    OUTPUT:
        wave1Second:    DataFrame, index is a list of timestamps at 1Hz, columns include OneSecond_feature1, OneSecond_feature2, OneSecond_feature3
        waveHalfSecond: DataFrame, index is a list of timestamps at 2Hz, columns include HalfSecond_feature1, HalfSecond_feature2
    '''
    # The sampling rate is 8Hz per the docstring; the original left it
    # undefined, so it is pinned down here.
    sampling_rate = 8

    # Create the wavelet dataframe indices at 1Hz and 2Hz (reconstructed:
    # assumes eda carries a DatetimeIndex starting at the first sample).
    startTime = eda.index[0]
    oneSecond = pd.date_range(start=startTime, periods=len(eda), freq='1s')
    halfSecond = pd.date_range(start=startTime, periods=len(eda), freq='500L')

    # Compute wavelets
    cA_n, cD_3, cD_2, cD_1 = pywt.wavedec(eda, 'Haar', level=3)  # 3 = 1Hz, 2 = 2Hz, 1 = 4Hz

    # Wavelet 1 second window
    N = int(len(eda) / sampling_rate)
    coeff1 = np.max(abs(np.reshape(cD_1[0:4 * N], (N, 4))), axis=1)
    coeff2 = np.max(abs(np.reshape(cD_2[0:2 * N], (N, 2))), axis=1)
    coeff3 = abs(cD_3[0:N])
    wave1Second = pd.DataFrame({'OneSecond_feature1': coeff1, 'OneSecond_feature2': coeff2, 'OneSecond_feature3': coeff3})
    wave1Second.index = oneSecond[:len(wave1Second)]

    # Wavelet half second window (the original indexed an undefined 'data';
    # eda is used here instead)
    N = int(np.floor((len(eda) / sampling_rate) * 2))
    coeff1 = np.max(abs(np.reshape(cD_1[0:2 * N], (N, 2))), axis=1)
    coeff2 = abs(cD_2[0:N])
    waveHalfSecond = pd.DataFrame({'HalfSecond_feature1': coeff1, 'HalfSecond_feature2': coeff2})
    waveHalfSecond.index = halfSecond[:len(waveHalfSecond)]

    return wave1Second, waveHalfSecond
[ "def", "getWaveletData", "(", "eda", ")", ":", "# Create wavelet dataframes", "oneSecond", "=", "halfSecond", "=", "# Compute wavelets", "cA_n", ",", "cD_3", ",", "cD_2", ",", "cD_1", "=", "pywt", ".", "wavedec", "(", "eda", ",", "'Haar'", ",", "level", "=", "3", ")", "#3 = 1Hz, 2 = 2Hz, 1=4Hz", "# Wavelet 1 second window", "N", "=", "int", "(", "len", "(", "eda", ")", "/", "sampling_rate", ")", "coeff1", "=", "np", ".", "max", "(", "abs", "(", "np", ".", "reshape", "(", "cD_1", "[", "0", ":", "4", "*", "N", "]", ",", "(", "N", ",", "4", ")", ")", ")", ",", "axis", "=", "1", ")", "coeff2", "=", "np", ".", "max", "(", "abs", "(", "np", ".", "reshape", "(", "cD_2", "[", "0", ":", "2", "*", "N", "]", ",", "(", "N", ",", "2", ")", ")", ")", ",", "axis", "=", "1", ")", "coeff3", "=", "abs", "(", "cD_3", "[", "0", ":", "N", "]", ")", "wave1Second", "=", "pd", ".", "DataFrame", "(", "{", "'OneSecond_feature1'", ":", "coeff1", ",", "'OneSecond_feature2'", ":", "coeff2", ",", "'OneSecond_feature3'", ":", "coeff3", "}", ")", "wave1Second", ".", "index", "=", "oneSecond", "[", ":", "len", "(", "wave1Second", ")", "]", "# Wavelet Half second window", "N", "=", "int", "(", "np", ".", "floor", "(", "(", "len", "(", "data", ")", "/", "8.0", ")", "*", "2", ")", ")", "coeff1", "=", "np", ".", "max", "(", "abs", "(", "np", ".", "reshape", "(", "cD_1", "[", "0", ":", "2", "*", "N", "]", ",", "(", "N", ",", "2", ")", ")", ")", ",", "axis", "=", "1", ")", "coeff2", "=", "abs", "(", "cD_2", "[", "0", ":", "N", "]", ")", "waveHalfSecond", "=", "pd", ".", "DataFrame", "(", "{", "'HalfSecond_feature1'", ":", "coeff1", ",", "'HalfSecond_feature2'", ":", "coeff2", "}", ")", "waveHalfSecond", ".", "index", "=", "halfSecond", "[", ":", "len", "(", "waveHalfSecond", ")", "]", "return", "wave1Second", ",", "waveHalfSecond" ]
42.454545
0.017446
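A minimal sketch of the frequency bands pywt.wavedec produces for an 8Hz signal, which is what the feature windows above rely on. The signal here is synthetic, and lowercase 'haar' is used since that is the canonical pywt wavelet name:

import numpy as np
import pywt

# Ten seconds of a synthetic 8Hz signal (80 samples).
eda = np.sin(np.linspace(0, 4 * np.pi, 80))

# Three-level Haar decomposition: cD_1 is the ~4Hz detail band, cD_2 ~2Hz,
# cD_3 ~1Hz -- matching the 4/2/1 reshape factors used in getWaveletData.
cA_3, cD_3, cD_2, cD_1 = pywt.wavedec(eda, 'haar', level=3)
print(len(cD_1), len(cD_2), len(cD_3))  # 40 20 10 -- halved at each level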
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0): """ Returns a list of matching WebElements that are visible. If "limit" is set and > 0, will only return that many elements. """ self.wait_for_ready_state_complete() if page_utils.is_xpath_selector(selector): by = By.XPATH if page_utils.is_link_text_selector(selector): selector = page_utils.get_link_text_from_selector(selector) by = By.LINK_TEXT v_elems = page_actions.find_visible_elements(self.driver, selector, by) if limit and limit > 0 and len(v_elems) > limit: v_elems = v_elems[:limit] return v_elems
[ "def", "find_visible_elements", "(", "self", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "limit", "=", "0", ")", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "if", "page_utils", ".", "is_xpath_selector", "(", "selector", ")", ":", "by", "=", "By", ".", "XPATH", "if", "page_utils", ".", "is_link_text_selector", "(", "selector", ")", ":", "selector", "=", "page_utils", ".", "get_link_text_from_selector", "(", "selector", ")", "by", "=", "By", ".", "LINK_TEXT", "v_elems", "=", "page_actions", ".", "find_visible_elements", "(", "self", ".", "driver", ",", "selector", ",", "by", ")", "if", "limit", "and", "limit", ">", "0", "and", "len", "(", "v_elems", ")", ">", "limit", ":", "v_elems", "=", "v_elems", "[", ":", "limit", "]", "return", "v_elems" ]
52.692308
0.002869
def history(self):
    """
    Returns a list of changesets in which this file was changed
    """
    if self.changeset is None:
        raise NodeError('Unable to get changeset for this FileNode')
    return self.changeset.get_file_history(self.path)
[ "def", "history", "(", "self", ")", ":", "if", "self", ".", "changeset", "is", "None", ":", "raise", "NodeError", "(", "'Unable to get changeset for this FileNode'", ")", "return", "self", ".", "changeset", ".", "get_file_history", "(", "self", ".", "path", ")" ]
40.285714
0.006944
def list_leases(self, prefix): """Retrieve a list of lease ids. Supported methods: LIST: /sys/leases/lookup/{prefix}. Produces: 200 application/json :param prefix: Lease prefix to filter list by. :type prefix: str | unicode :return: The JSON response of the request. :rtype: dict """ api_path = '/v1/sys/leases/lookup/{prefix}'.format(prefix=prefix) response = self._adapter.list( url=api_path, ) return response.json()
[ "def", "list_leases", "(", "self", ",", "prefix", ")", ":", "api_path", "=", "'/v1/sys/leases/lookup/{prefix}'", ".", "format", "(", "prefix", "=", "prefix", ")", "response", "=", "self", ".", "_adapter", ".", "list", "(", "url", "=", "api_path", ",", ")", "return", "response", ".", "json", "(", ")" ]
32.375
0.003752
def _get_timethresh_heuristics(self):
    """
    Reasonably decent heuristics for how much time to wait before
    updating progress.
    """
    if self.length > 1E5:
        time_thresh = 2.5
    elif self.length > 1E4:
        time_thresh = 2.0
    elif self.length > 1E3:
        time_thresh = 1.0
    else:
        time_thresh = 0.5
    return time_thresh
[ "def", "_get_timethresh_heuristics", "(", "self", ")", ":", "if", "self", ".", "length", ">", "1E5", ":", "time_thresh", "=", "2.5", "elif", "self", ".", "length", ">", "1E4", ":", "time_thresh", "=", "2.0", "elif", "self", ".", "length", ">", "1E3", ":", "time_thresh", "=", "1.0", "else", ":", "time_thresh", "=", "0.5", "return", "time_thresh" ]
28.5
0.004854
def next_tokens_in_sequence(observed, current):
    """ Given the observed list of tokens and the current list,
        finds out which tokens should be emitted next """
    idx = 0
    for word in current:
        if observed[idx:].count(word) != 0:
            found_pos = observed.index(word, idx)
            idx = max(idx + 1, found_pos)
        # otherwise, don't increment idx
    if idx < len(observed):
        return observed[idx:]
    else:
        return []
[ "def", "next_tokens_in_sequence", "(", "observed", ",", "current", ")", ":", "idx", "=", "0", "for", "word", "in", "current", ":", "if", "observed", "[", "idx", ":", "]", ".", "count", "(", "word", ")", "!=", "0", ":", "found_pos", "=", "observed", ".", "index", "(", "word", ",", "idx", ")", "idx", "=", "max", "(", "idx", "+", "1", ",", "found_pos", ")", "# otherwise, don't increment idx", "if", "idx", "<", "len", "(", "observed", ")", ":", "return", "observed", "[", "idx", ":", "]", "else", ":", "return", "[", "]" ]
32.785714
0.002119
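Two worked calls, assuming next_tokens_in_sequence from the record above is in scope, to make the alignment behaviour concrete:

# 'current' has matched up to 'b' in the observed stream, so 'c' and 'd'
# remain to be emitted.
print(next_tokens_in_sequence(['a', 'b', 'c', 'd'], ['a', 'b']))  # ['c', 'd']

# Tokens in 'current' that never appear in 'observed' are skipped without
# advancing the index.
print(next_tokens_in_sequence(['a', 'b'], ['x', 'a', 'b']))  # []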
def transform_tensor(self, tensor): """ Applies rotation portion to a tensor. Note that tensor has to be in full form, not the Voigt form. Args: tensor (numpy array): a rank n tensor Returns: Transformed tensor. """ dim = tensor.shape rank = len(dim) assert all([i == 3 for i in dim]) # Build einstein sum string lc = string.ascii_lowercase indices = lc[:rank], lc[rank:2 * rank] einsum_string = ','.join([a + i for a, i in zip(*indices)]) einsum_string += ',{}->{}'.format(*indices[::-1]) einsum_args = [self.rotation_matrix] * rank + [tensor] return np.einsum(einsum_string, *einsum_args)
[ "def", "transform_tensor", "(", "self", ",", "tensor", ")", ":", "dim", "=", "tensor", ".", "shape", "rank", "=", "len", "(", "dim", ")", "assert", "all", "(", "[", "i", "==", "3", "for", "i", "in", "dim", "]", ")", "# Build einstein sum string", "lc", "=", "string", ".", "ascii_lowercase", "indices", "=", "lc", "[", ":", "rank", "]", ",", "lc", "[", "rank", ":", "2", "*", "rank", "]", "einsum_string", "=", "','", ".", "join", "(", "[", "a", "+", "i", "for", "a", ",", "i", "in", "zip", "(", "*", "indices", ")", "]", ")", "einsum_string", "+=", "',{}->{}'", ".", "format", "(", "*", "indices", "[", ":", ":", "-", "1", "]", ")", "einsum_args", "=", "[", "self", ".", "rotation_matrix", "]", "*", "rank", "+", "[", "tensor", "]", "return", "np", ".", "einsum", "(", "einsum_string", ",", "*", "einsum_args", ")" ]
32.909091
0.002685
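Tracing the einsum-string construction above for a rank-2 tensor gives 'ac,bd,cd->ab', which is the familiar R T R^T transformation; a small self-contained check (the rotation matrix is chosen arbitrarily):

import numpy as np

# 90-degree rotation about z.
rot = np.array([[0., -1., 0.],
                [1.,  0., 0.],
                [0.,  0., 1.]])
tensor = np.arange(9.).reshape(3, 3)

# out_ab = sum_cd rot_ac * rot_bd * tensor_cd, i.e. rot @ tensor @ rot.T
by_einsum = np.einsum('ac,bd,cd->ab', rot, rot, tensor)
by_matmul = rot @ tensor @ rot.T
print(np.allclose(by_einsum, by_matmul))  # True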
def rsolve(A, y): """ Robust solve Ax=y. """ from numpy_sugar.linalg import rsolve as _rsolve try: beta = _rsolve(A, y) except LinAlgError: msg = "Could not converge to solve Ax=y." msg += " Setting x to zero." warnings.warn(msg, RuntimeWarning) beta = zeros(A.shape[0]) return beta
[ "def", "rsolve", "(", "A", ",", "y", ")", ":", "from", "numpy_sugar", ".", "linalg", "import", "rsolve", "as", "_rsolve", "try", ":", "beta", "=", "_rsolve", "(", "A", ",", "y", ")", "except", "LinAlgError", ":", "msg", "=", "\"Could not converge to solve Ax=y.\"", "msg", "+=", "\" Setting x to zero.\"", "warnings", ".", "warn", "(", "msg", ",", "RuntimeWarning", ")", "beta", "=", "zeros", "(", "A", ".", "shape", "[", "0", "]", ")", "return", "beta" ]
22.533333
0.002841
def get_incoming_properties_per_page(self, per_page=1000, page=1, params=None): """ Get incoming properties per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=INCOMING_PROPERTIES, per_page=per_page, page=page, params=params)
[ "def", "get_incoming_properties_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "INCOMING_PROPERTIES", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "params", ")" ]
44
0.006682
def top_answers(self):
    """Get the top answers under this topic.

    :return: the topic's top answers, returned as a generator.
    :rtype: Answer.Iterable
    """
    from .question import Question
    from .answer import Answer
    from .author import Author, ANONYMOUS

    top_answers_url = Topic_Top_Answers_Url.format(self.id)
    params = {'page': 1}
    while True:
        # past page 50: return immediately
        if params['page'] > 50:
            return
        res = self._session.get(top_answers_url, params=params)
        params['page'] += 1
        soup = BeautifulSoup(res.content)
        # fewer than 50 pages: we hit an error page, so return
        if soup.find('div', class_='error') is not None:
            return
        questions = soup.find_all('a', class_='question_link')
        answers = soup.find_all('a', class_='answer-date-link')
        authors = soup.find_all('div', class_='zm-item-answer-author-info')
        upvotes = soup.find_all('a', class_='zm-item-vote-count')
        for ans, up, q, au in zip(answers, upvotes, questions, authors):
            answer_url = Zhihu_URL + ans['href']
            question_url = Zhihu_URL + q['href']
            question_title = q.text.strip()
            upvote = up.text
            if upvote.isdigit():
                upvote = int(upvote)
            else:
                upvote = None
            question = Question(question_url, question_title,
                                session=self._session)
            if au.a is None:
                author = ANONYMOUS
            else:
                author_url = Zhihu_URL + au.a['href']
                author_name = au.a.text
                author_motto = au.strong['title'] if au.strong else ''
                author = Author(author_url, author_name, author_motto,
                                session=self._session)
            yield Answer(answer_url, question, author, upvote,
                         session=self._session)
[ "def", "top_answers", "(", "self", ")", ":", "from", ".", "question", "import", "Question", "from", ".", "answer", "import", "Answer", "from", ".", "author", "import", "Author", ",", "ANONYMOUS", "top_answers_url", "=", "Topic_Top_Answers_Url", ".", "format", "(", "self", ".", "id", ")", "params", "=", "{", "'page'", ":", "1", "}", "while", "True", ":", "# 超出50页直接返回", "if", "params", "[", "'page'", "]", ">", "50", ":", "return", "res", "=", "self", ".", "_session", ".", "get", "(", "top_answers_url", ",", "params", "=", "params", ")", "params", "[", "'page'", "]", "+=", "1", "soup", "=", "BeautifulSoup", "(", "res", ".", "content", ")", "# 不够50页,来到错误页面 返回", "if", "soup", ".", "find", "(", "'div'", ",", "class_", "=", "'error'", ")", "is", "not", "None", ":", "return", "questions", "=", "soup", ".", "find_all", "(", "'a'", ",", "class_", "=", "'question_link'", ")", "answers", "=", "soup", ".", "find_all", "(", "'a'", ",", "class_", "=", "'answer-date-link'", ")", "authors", "=", "soup", ".", "find_all", "(", "'div'", ",", "class_", "=", "'zm-item-answer-author-info'", ")", "upvotes", "=", "soup", ".", "find_all", "(", "'a'", ",", "class_", "=", "'zm-item-vote-count'", ")", "for", "ans", ",", "up", ",", "q", ",", "au", "in", "zip", "(", "answers", ",", "upvotes", ",", "questions", ",", "authors", ")", ":", "answer_url", "=", "Zhihu_URL", "+", "ans", "[", "'href'", "]", "question_url", "=", "Zhihu_URL", "+", "q", "[", "'href'", "]", "question_title", "=", "q", ".", "text", ".", "strip", "(", ")", "upvote", "=", "up", ".", "text", "if", "upvote", ".", "isdigit", "(", ")", ":", "upvote", "=", "int", "(", "upvote", ")", "else", ":", "upvote", "=", "None", "question", "=", "Question", "(", "question_url", ",", "question_title", ",", "session", "=", "self", ".", "_session", ")", "if", "au", ".", "a", "is", "None", ":", "author", "=", "ANONYMOUS", "else", ":", "author_url", "=", "Zhihu_URL", "+", "au", ".", "a", "[", "'href'", "]", "author_name", "=", "au", ".", "a", ".", "text", "author_motto", "=", "au", ".", "strong", "[", "'title'", "]", "if", "au", ".", "strong", "else", "''", "author", "=", "Author", "(", "author_url", ",", "author_name", ",", "author_motto", ",", "session", "=", "self", ".", "_session", ")", "yield", "Answer", "(", "answer_url", ",", "question", ",", "author", ",", "upvote", ",", "session", "=", "self", ".", "_session", ")" ]
42.255319
0.000984
def run_suite(case, config, summary): """ Run the full suite of numerics tests """ m = importlib.import_module(config['module']) m.set_up() config["name"] = case analysis_data = {} bundle = livvkit.numerics_model_module model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) plot_dir = os.path.join(livvkit.output_dir, "numerics", "imgs") config["plot_dir"] = plot_dir functions.mkdir_p(plot_dir) model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) for mscale in sorted(model_cases): bscale = bench_cases[mscale] if mscale in bench_cases else [] for mproc in model_cases[mscale]: full_name = '-'.join([mscale, mproc]) bpath = (os.path.join(bench_dir, mscale, mproc.replace("-", os.path.sep)) if mproc in bscale else "") mpath = os.path.join(model_dir, mscale, mproc.replace("-", os.path.sep)) model_data = functions.find_file(mpath, "*" + config["output_ext"]) bench_data = functions.find_file(bpath, "*" + config["output_ext"]) analysis_data[full_name] = bundle.get_plot_data(model_data, bench_data, m.setup[case], config) try: el = m.run(config, analysis_data) except KeyError: el = elements.error("Numerics Plots", "Missing data") result = elements.page(case, config['description'], element_list=el) summary[case] = _summarize_result(m, analysis_data, config) _print_summary(m, case, summary[case]) functions.create_page_from_template("numerics.html", os.path.join(livvkit.index_dir, "numerics", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "numerics"), case + ".json")
[ "def", "run_suite", "(", "case", ",", "config", ",", "summary", ")", ":", "m", "=", "importlib", ".", "import_module", "(", "config", "[", "'module'", "]", ")", "m", ".", "set_up", "(", ")", "config", "[", "\"name\"", "]", "=", "case", "analysis_data", "=", "{", "}", "bundle", "=", "livvkit", ".", "numerics_model_module", "model_dir", "=", "os", ".", "path", ".", "join", "(", "livvkit", ".", "model_dir", ",", "config", "[", "'data_dir'", "]", ",", "case", ")", "bench_dir", "=", "os", ".", "path", ".", "join", "(", "livvkit", ".", "bench_dir", ",", "config", "[", "'data_dir'", "]", ",", "case", ")", "plot_dir", "=", "os", ".", "path", ".", "join", "(", "livvkit", ".", "output_dir", ",", "\"numerics\"", ",", "\"imgs\"", ")", "config", "[", "\"plot_dir\"", "]", "=", "plot_dir", "functions", ".", "mkdir_p", "(", "plot_dir", ")", "model_cases", "=", "functions", ".", "collect_cases", "(", "model_dir", ")", "bench_cases", "=", "functions", ".", "collect_cases", "(", "bench_dir", ")", "for", "mscale", "in", "sorted", "(", "model_cases", ")", ":", "bscale", "=", "bench_cases", "[", "mscale", "]", "if", "mscale", "in", "bench_cases", "else", "[", "]", "for", "mproc", "in", "model_cases", "[", "mscale", "]", ":", "full_name", "=", "'-'", ".", "join", "(", "[", "mscale", ",", "mproc", "]", ")", "bpath", "=", "(", "os", ".", "path", ".", "join", "(", "bench_dir", ",", "mscale", ",", "mproc", ".", "replace", "(", "\"-\"", ",", "os", ".", "path", ".", "sep", ")", ")", "if", "mproc", "in", "bscale", "else", "\"\"", ")", "mpath", "=", "os", ".", "path", ".", "join", "(", "model_dir", ",", "mscale", ",", "mproc", ".", "replace", "(", "\"-\"", ",", "os", ".", "path", ".", "sep", ")", ")", "model_data", "=", "functions", ".", "find_file", "(", "mpath", ",", "\"*\"", "+", "config", "[", "\"output_ext\"", "]", ")", "bench_data", "=", "functions", ".", "find_file", "(", "bpath", ",", "\"*\"", "+", "config", "[", "\"output_ext\"", "]", ")", "analysis_data", "[", "full_name", "]", "=", "bundle", ".", "get_plot_data", "(", "model_data", ",", "bench_data", ",", "m", ".", "setup", "[", "case", "]", ",", "config", ")", "try", ":", "el", "=", "m", ".", "run", "(", "config", ",", "analysis_data", ")", "except", "KeyError", ":", "el", "=", "elements", ".", "error", "(", "\"Numerics Plots\"", ",", "\"Missing data\"", ")", "result", "=", "elements", ".", "page", "(", "case", ",", "config", "[", "'description'", "]", ",", "element_list", "=", "el", ")", "summary", "[", "case", "]", "=", "_summarize_result", "(", "m", ",", "analysis_data", ",", "config", ")", "_print_summary", "(", "m", ",", "case", ",", "summary", "[", "case", "]", ")", "functions", ".", "create_page_from_template", "(", "\"numerics.html\"", ",", "os", ".", "path", ".", "join", "(", "livvkit", ".", "index_dir", ",", "\"numerics\"", ",", "case", "+", "\".html\"", ")", ")", "functions", ".", "write_json", "(", "result", ",", "os", ".", "path", ".", "join", "(", "livvkit", ".", "output_dir", ",", "\"numerics\"", ")", ",", "case", "+", "\".json\"", ")" ]
53.421053
0.002419
def magic_set(obj): """ Adds a function/method to an object. Uses the name of the first argument as a hint about whether it is a method (``self``), class method (``cls`` or ``klass``), or static method (anything else). Works on both instances and classes. >>> class color: ... def __init__(self, r, g, b): ... self.r, self.g, self.b = r, g, b >>> c = color(0, 1, 0) >>> c # doctest: +ELLIPSIS <__main__.color instance at ...> >>> @magic_set(color) ... def __repr__(self): ... return '<color %s %s %s>' % (self.r, self.g, self.b) >>> c <color 0 1 0> >>> @magic_set(color) ... def red(cls): ... return cls(1, 0, 0) >>> color.red() <color 1 0 0> >>> c.red() <color 1 0 0> >>> @magic_set(color) ... def name(): ... return 'color' >>> color.name() 'color' >>> @magic_set(c) ... def name(self): ... return 'red' >>> c.name() 'red' >>> @magic_set(c) ... def name(cls): ... return cls.__name__ >>> c.name() 'color' >>> @magic_set(c) ... def pr(obj): ... print obj >>> c.pr(1) 1 """ def decorator(func): is_class = isinstance(obj, six.class_types) args, varargs, varkw, defaults = inspect.getargspec(func) if not args or args[0] not in ('self', 'cls', 'klass'): # Static function/method if is_class: replacement = staticmethod(func) else: replacement = func elif args[0] == 'self': if is_class: replacement = func else: def replacement(*args, **kw): return func(obj, *args, **kw) try: replacement.__name__ = func.__name__ except: pass else: if is_class: replacement = classmethod(func) else: def replacement(*args, **kw): return func(obj.__class__, *args, **kw) try: replacement.__name__ = func.__name__ except: pass setattr(obj, func.__name__, replacement) return replacement return decorator
[ "def", "magic_set", "(", "obj", ")", ":", "def", "decorator", "(", "func", ")", ":", "is_class", "=", "isinstance", "(", "obj", ",", "six", ".", "class_types", ")", "args", ",", "varargs", ",", "varkw", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "func", ")", "if", "not", "args", "or", "args", "[", "0", "]", "not", "in", "(", "'self'", ",", "'cls'", ",", "'klass'", ")", ":", "# Static function/method", "if", "is_class", ":", "replacement", "=", "staticmethod", "(", "func", ")", "else", ":", "replacement", "=", "func", "elif", "args", "[", "0", "]", "==", "'self'", ":", "if", "is_class", ":", "replacement", "=", "func", "else", ":", "def", "replacement", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "func", "(", "obj", ",", "*", "args", ",", "*", "*", "kw", ")", "try", ":", "replacement", ".", "__name__", "=", "func", ".", "__name__", "except", ":", "pass", "else", ":", "if", "is_class", ":", "replacement", "=", "classmethod", "(", "func", ")", "else", ":", "def", "replacement", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "func", "(", "obj", ".", "__class__", ",", "*", "args", ",", "*", "*", "kw", ")", "try", ":", "replacement", ".", "__name__", "=", "func", ".", "__name__", "except", ":", "pass", "setattr", "(", "obj", ",", "func", ".", "__name__", ",", "replacement", ")", "return", "replacement", "return", "decorator" ]
26.153846
0.001417
def plot_carriers(self, temp=300):
    """
    Plot the carrier concentration as a function of the Fermi level

    Args:
        temp: the temperature

    Returns:
        a matplotlib object
    """
    import matplotlib.pyplot as plt
    plt.semilogy(self._bz.mu_steps,
                 abs(self._bz._carrier_conc[temp] / (self._bz.vol * 1e-24)),
                 linewidth=3.0, color='r')
    self._plot_bg_limits()
    self._plot_doping(temp)
    plt.xlim(-0.5, self._bz.gap + 0.5)
    plt.ylim(1e14, 1e22)
    plt.ylabel("carrier concentration (cm-3)", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    return plt
[ "def", "plot_carriers", "(", "self", ",", "temp", "=", "300", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "semilogy", "(", "self", ".", "_bz", ".", "mu_steps", ",", "abs", "(", "self", ".", "_bz", ".", "_carrier_conc", "[", "temp", "]", "/", "(", "self", ".", "_bz", ".", "vol", "*", "1e-24", ")", ")", ",", "linewidth", "=", "3.0", ",", "color", "=", "'r'", ")", "self", ".", "_plot_bg_limits", "(", ")", "self", ".", "_plot_doping", "(", "temp", ")", "plt", ".", "xlim", "(", "-", "0.5", ",", "self", ".", "_bz", ".", "gap", "+", "0.5", ")", "plt", ".", "ylim", "(", "1e14", ",", "1e22", ")", "plt", ".", "ylabel", "(", "\"carrier concentration (cm-3)\"", ",", "fontsize", "=", "30.0", ")", "plt", ".", "xlabel", "(", "\"E-E$_f$ (eV)\"", ",", "fontsize", "=", "30", ")", "plt", ".", "xticks", "(", "fontsize", "=", "25", ")", "plt", ".", "yticks", "(", "fontsize", "=", "25", ")", "return", "plt" ]
32.217391
0.003932
def preprX(*attributes, address=True, full_name=False, pretty=False,
           keyless=False, **kwargs):
    """ `Creates prettier object representations`
        @*attributes: (#str) instance attributes within the object you
            wish to display. Attributes can be recursive
            e.g. |one.two.three| for access to |self.one.two.three|
        @address: (#bool) |True| to include the memory address
        @full_name: (#bool) |True| to include the full path to the
            object vs. the qualified name
        @pretty: (#bool) |True| to allow bolding and coloring
        @keyless: (#bool) |True| to display the values of @attributes
            without their attribute names
        ..
            class Foo(object):
                def __init__(self, bar, baz=None):
                    self.bar = bar
                    self.baz = baz
                __repr__ = preprX('bar', 'baz', address=False)

            foo = Foo('foobar')
            repr(foo)
        ..
        |<Foo:bar=`foobar`, baz=None>|
    """
    def _format(obj, attribute):
        try:
            if keyless:
                val = getattr_in(obj, attribute)
                if val is not None:
                    return repr(val)
            else:
                return '%s=%s' % (attribute, repr(getattr_in(obj, attribute)))
        except AttributeError:
            return None

    def prep(obj, address=address, full_name=full_name, pretty=pretty,
             keyless=keyless, **kwargs):
        if address:
            address = ":%s" % hex(id(obj))
        else:
            address = ""
        data = list(filter(lambda x: x is not None,
                           map(lambda a: _format(obj, a), attributes)))
        if data:
            data = ':%s' % ', '.join(data)
        else:
            data = ''
        return stdout_encode("<%s%s%s>" % (get_obj_name(obj), data, address))
    return prep
[ "def", "preprX", "(", "*", "attributes", ",", "address", "=", "True", ",", "full_name", "=", "False", ",", "pretty", "=", "False", ",", "keyless", "=", "False", ",", "*", "*", "kwargs", ")", ":", "def", "_format", "(", "obj", ",", "attribute", ")", ":", "try", ":", "if", "keyless", ":", "val", "=", "getattr_in", "(", "obj", ",", "attribute", ")", "if", "val", "is", "not", "None", ":", "return", "repr", "(", "val", ")", "else", ":", "return", "'%s=%s'", "%", "(", "attribute", ",", "repr", "(", "getattr_in", "(", "obj", ",", "attribute", ")", ")", ")", "except", "AttributeError", ":", "return", "None", "def", "prep", "(", "obj", ",", "address", "=", "address", ",", "full_name", "=", "full_name", ",", "pretty", "=", "pretty", ",", "keyless", "=", "keyless", ",", "*", "*", "kwargs", ")", ":", "if", "address", ":", "address", "=", "\":%s\"", "%", "hex", "(", "id", "(", "obj", ")", ")", "else", ":", "address", "=", "\"\"", "data", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "map", "(", "lambda", "a", ":", "_format", "(", "obj", ",", "a", ")", ",", "attributes", ")", ")", ")", "if", "data", ":", "data", "=", "':%s'", "%", "', '", ".", "join", "(", "data", ")", "else", ":", "data", "=", "''", "return", "stdout_encode", "(", "\"<%s%s%s>\"", "%", "(", "get_obj_name", "(", "obj", ")", ",", "data", ",", "address", ")", ")", "return", "prep" ]
35.716981
0.000514
def __access(self, ts): """ Record an API access. """ with self.connection: self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)", (ts, self.domain))
[ "def", "__access", "(", "self", ",", "ts", ")", ":", "with", "self", ".", "connection", ":", "self", ".", "connection", ".", "execute", "(", "\"INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)\"", ",", "(", "ts", ",", "self", ".", "domain", ")", ")" ]
47
0.012552
def get_column(self, col): """ Loop over files getting the requested dataset values from each Parameters ---------- col : string Name of the dataset to be returned Returns ------- numpy array Values from the dataset, filtered if requested and concatenated in order of file list """ logging.info('getting %s' % col) vals = [] for f in self.files: d = FileData(f, group=self.group, columnlist=self.columns, filter_func=self.filter_func) vals.append(d.get_column(col)) # Close each file since h5py has an upper limit on the number of # open file objects (approx. 1000) d.close() logging.info('- got %i values' % sum(len(v) for v in vals)) return np.concatenate(vals)
[ "def", "get_column", "(", "self", ",", "col", ")", ":", "logging", ".", "info", "(", "'getting %s'", "%", "col", ")", "vals", "=", "[", "]", "for", "f", "in", "self", ".", "files", ":", "d", "=", "FileData", "(", "f", ",", "group", "=", "self", ".", "group", ",", "columnlist", "=", "self", ".", "columns", ",", "filter_func", "=", "self", ".", "filter_func", ")", "vals", ".", "append", "(", "d", ".", "get_column", "(", "col", ")", ")", "# Close each file since h5py has an upper limit on the number of", "# open file objects (approx. 1000)", "d", ".", "close", "(", ")", "logging", ".", "info", "(", "'- got %i values'", "%", "sum", "(", "len", "(", "v", ")", "for", "v", "in", "vals", ")", ")", "return", "np", ".", "concatenate", "(", "vals", ")" ]
33.576923
0.002227
def build_param_schema(schema, param_type):
    """Turn a swagger endpoint schema into an equivalent one to validate our
    request.

    As an example, this would take this swagger schema:
        {
            "paramType": "query",
            "name": "query",
            "description": "Location to query",
            "type": "string",
            "required": true
        }
    To this jsonschema:
        {
            "type": "object",
            "additionalProperties": "False",
            "properties": {
                "description": "Location to query",
                "type": "string",
                "required": true
            }
        }
    Which we can then validate against a JSON object we construct from the
    pyramid request.
    """
    properties = filter_params_by_type(schema, param_type)
    if not properties:
        return

    # Generate a jsonschema that describes the set of all query parameters. We
    # can then validate this against dict(request.params).
    return {
        'type': 'object',
        'properties': dict((p['name'], p) for p in properties),
        # Allow extra headers. Most HTTP requests will have headers which
        # are outside the scope of the spec (like `Host`, or `User-Agent`)
        'additionalProperties': param_type == 'header',
    }
[ "def", "build_param_schema", "(", "schema", ",", "param_type", ")", ":", "properties", "=", "filter_params_by_type", "(", "schema", ",", "param_type", ")", "if", "not", "properties", ":", "return", "# Generate a jsonschema that describes the set of all query parameters. We", "# can then validate this against dict(request.params).", "return", "{", "'type'", ":", "'object'", ",", "'properties'", ":", "dict", "(", "(", "p", "[", "'name'", "]", ",", "p", ")", "for", "p", "in", "properties", ")", ",", "# Allow extra headers. Most HTTP requests will have headers which", "# are outside the scope of the spec (like `Host`, or `User-Agent`)", "'additionalProperties'", ":", "param_type", "==", "'header'", ",", "}" ]
33.631579
0.00076
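A sketch of how the generated schema would be consumed, per the docstring's note about validating against dict(request.params). The schema below is a hypothetical output of build_param_schema, and jsonschema is one obvious validator choice rather than necessarily what the surrounding codebase uses:

import jsonschema

# Hypothetical build_param_schema output for two query parameters.
schema = {
    'type': 'object',
    'properties': {
        'query': {'type': 'string'},
        'limit': {'type': 'integer'},
    },
    'additionalProperties': False,
}

jsonschema.validate({'query': 'Boston', 'limit': 5}, schema)  # passes
try:
    jsonschema.validate({'query': 'Boston', 'extra': 'x'}, schema)
except jsonschema.ValidationError as err:
    # Additional properties are not allowed ('extra' was unexpected)
    print(err.message)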
def get_values_fix_params(self, exp, rep, tag, which='last', **kwargs): """ this function uses get_value(..) but returns all values where the subexperiments match the additional kwargs arguments. if alpha=1.0, beta=0.01 is given, then only those experiment values are returned, as a list. """ subexps = self.get_exps(exp) tagvalues = ['%s%s'%(k, convert_param_to_dirname(kwargs[k])) for k in kwargs] values = [self.get_value(se, rep, tag, which) for se in subexps if all(map(lambda tv: tv in se, tagvalues))] params = [self.get_params(se) for se in subexps if all(map(lambda tv: tv in se, tagvalues))] return values, params
[ "def", "get_values_fix_params", "(", "self", ",", "exp", ",", "rep", ",", "tag", ",", "which", "=", "'last'", ",", "*", "*", "kwargs", ")", ":", "subexps", "=", "self", ".", "get_exps", "(", "exp", ")", "tagvalues", "=", "[", "'%s%s'", "%", "(", "k", ",", "convert_param_to_dirname", "(", "kwargs", "[", "k", "]", ")", ")", "for", "k", "in", "kwargs", "]", "values", "=", "[", "self", ".", "get_value", "(", "se", ",", "rep", ",", "tag", ",", "which", ")", "for", "se", "in", "subexps", "if", "all", "(", "map", "(", "lambda", "tv", ":", "tv", "in", "se", ",", "tagvalues", ")", ")", "]", "params", "=", "[", "self", ".", "get_params", "(", "se", ")", "for", "se", "in", "subexps", "if", "all", "(", "map", "(", "lambda", "tv", ":", "tv", "in", "se", ",", "tagvalues", ")", ")", "]", "return", "values", ",", "params" ]
55.538462
0.012262
def is_taint_related_to_ip(self, simrun_addr, stmt_idx, taint_type, simrun_whitelist=None): """ Query in taint graph to check if a specific taint will taint the IP in the future or not. The taint is specified with the tuple (simrun_addr, stmt_idx, taint_type). :param simrun_addr: Address of the SimRun. :param stmt_idx: Statement ID. :param taint_type: Type of the taint, might be one of the following: 'reg', 'tmp', 'mem'. :param simrun_whitelist: A list of SimRun addresses that are whitelisted, i.e. the tainted exit will be ignored if it is in those SimRuns. :returns: True/False """ if simrun_whitelist is None: simrun_whitelist = set() if type(simrun_whitelist) is not set: simrun_whitelist = set(simrun_whitelist) # Find the specific taint in our graph taint = None for n in self.taint_graph.nodes(): if n.type == taint_type and n.addr == simrun_addr and n.stmt_id == stmt_idx: taint = n break if taint is None: raise AngrBackwardSlicingError('The specific taint is not found') bfs_tree = networkx.bfs_tree(self.taint_graph, taint) # A node is tainting the IP if one of the following criteria holds: # - a descendant tmp variable is used as a default exit or a conditional exit of its corresponding SimRun # - a descendant register is the IP itself for descendant in bfs_tree.nodes(): if descendant.type == 'exit': if descendant.addr not in simrun_whitelist: return True elif descendant.type == 'reg' and descendant.reg == self.project.arch.ip_offset: return True return False
[ "def", "is_taint_related_to_ip", "(", "self", ",", "simrun_addr", ",", "stmt_idx", ",", "taint_type", ",", "simrun_whitelist", "=", "None", ")", ":", "if", "simrun_whitelist", "is", "None", ":", "simrun_whitelist", "=", "set", "(", ")", "if", "type", "(", "simrun_whitelist", ")", "is", "not", "set", ":", "simrun_whitelist", "=", "set", "(", "simrun_whitelist", ")", "# Find the specific taint in our graph", "taint", "=", "None", "for", "n", "in", "self", ".", "taint_graph", ".", "nodes", "(", ")", ":", "if", "n", ".", "type", "==", "taint_type", "and", "n", ".", "addr", "==", "simrun_addr", "and", "n", ".", "stmt_id", "==", "stmt_idx", ":", "taint", "=", "n", "break", "if", "taint", "is", "None", ":", "raise", "AngrBackwardSlicingError", "(", "'The specific taint is not found'", ")", "bfs_tree", "=", "networkx", ".", "bfs_tree", "(", "self", ".", "taint_graph", ",", "taint", ")", "# A node is tainting the IP if one of the following criteria holds:", "# - a descendant tmp variable is used as a default exit or a conditional exit of its corresponding SimRun", "# - a descendant register is the IP itself", "for", "descendant", "in", "bfs_tree", ".", "nodes", "(", ")", ":", "if", "descendant", ".", "type", "==", "'exit'", ":", "if", "descendant", ".", "addr", "not", "in", "simrun_whitelist", ":", "return", "True", "elif", "descendant", ".", "type", "==", "'reg'", "and", "descendant", ".", "reg", "==", "self", ".", "project", ".", "arch", ".", "ip_offset", ":", "return", "True", "return", "False" ]
44.285714
0.00526
def header(self): """Return a header for type 1 MIDI file.""" tracks = a2b_hex('%04x' % len([t for t in self.tracks if t.track_data != ''])) return 'MThd\x00\x00\x00\x06\x00\x01' + tracks + self.time_division
[ "def", "header", "(", "self", ")", ":", "tracks", "=", "a2b_hex", "(", "'%04x'", "%", "len", "(", "[", "t", "for", "t", "in", "self", ".", "tracks", "if", "t", ".", "track_data", "!=", "''", "]", ")", ")", "return", "'MThd\\x00\\x00\\x00\\x06\\x00\\x01'", "+", "tracks", "+", "self", ".", "time_division" ]
48
0.012295
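The 14-byte layout that header() assembles, spelled out with Python 3 bytes (the original concatenates str, which is Python 2 behaviour); two tracks and a division of 96 ticks per quarter note are made-up values:

from binascii import a2b_hex

ntracks = a2b_hex('%04x' % 2)    # b'\x00\x02'
division = a2b_hex('%04x' % 96)  # b'\x00\x60'

# 'MThd' + chunk length 6 + format type 1 + track count + time division
header = b'MThd\x00\x00\x00\x06\x00\x01' + ntracks + division
print(header.hex())  # 4d54686400000006000100020060
print(len(header))   # 14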
def parse_gpx(gpx_element, gpxns=None):
    """Parse a GPX file into a GpxModel.

    Args:
        gpx_element: The root <gpx> element of an XML document containing a
            version attribute. GPX versions 1.0 and 1.1 are supported.

        gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited
            by curly braces).

    Returns:
        A GpxModel representing the data from the supplied XML.

    Raises:
        ValueError: The supplied XML could not be parsed as GPX.
    """
    gpxns = gpxns if gpxns is not None else determine_gpx_namespace(gpx_element)

    if gpx_element.tag != gpxns + 'gpx':
        raise ValueError("No gpx root element")

    version = gpx_element.attrib['version']

    if version == '1.0':
        return parse_gpx_1_0(gpx_element, gpxns=gpxns)
    elif version == '1.1':
        return parse_gpx_1_1(gpx_element, gpxns=gpxns)
    else:
        raise ValueError("Cannot parse GPX version {0}".format(version))
[ "def", "parse_gpx", "(", "gpx_element", ",", "gpxns", "=", "None", ")", ":", "gpxns", "=", "gpxns", "if", "gpxns", "is", "not", "None", "else", "determine_gpx_namespace", "(", "gpx_element", ")", "if", "gpx_element", ".", "tag", "!=", "gpxns", "+", "'gpx'", ":", "raise", "ValueError", "(", "\"No gpx root element\"", ")", "version", "=", "gpx_element", ".", "attrib", "[", "'version'", "]", "if", "version", "==", "'1.0'", ":", "return", "parse_gpx_1_0", "(", "gpx_element", ",", "gpxns", "=", "gpxns", ")", "elif", "version", "==", "'1.1'", ":", "return", "parse_gpx_1_1", "(", "gpx_element", ",", "gpxns", "=", "gpxns", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot parse GPX version {0}\"", ".", "format", "(", "version", ")", ")" ]
32.482759
0.002062
def deprecated(function): # pylint: disable=invalid-name """Decorator to mark functions or methods as deprecated.""" def IssueDeprecationWarning(*args, **kwargs): """Issue a deprecation warning.""" warnings.simplefilter('default', DeprecationWarning) warnings.warn('Call to deprecated function: {0:s}.'.format( function.__name__), category=DeprecationWarning, stacklevel=2) return function(*args, **kwargs) IssueDeprecationWarning.__name__ = function.__name__ IssueDeprecationWarning.__doc__ = function.__doc__ IssueDeprecationWarning.__dict__.update(function.__dict__) return IssueDeprecationWarning
[ "def", "deprecated", "(", "function", ")", ":", "# pylint: disable=invalid-name", "def", "IssueDeprecationWarning", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Issue a deprecation warning.\"\"\"", "warnings", ".", "simplefilter", "(", "'default'", ",", "DeprecationWarning", ")", "warnings", ".", "warn", "(", "'Call to deprecated function: {0:s}.'", ".", "format", "(", "function", ".", "__name__", ")", ",", "category", "=", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "IssueDeprecationWarning", ".", "__name__", "=", "function", ".", "__name__", "IssueDeprecationWarning", ".", "__doc__", "=", "function", ".", "__doc__", "IssueDeprecationWarning", ".", "__dict__", ".", "update", "(", "function", ".", "__dict__", ")", "return", "IssueDeprecationWarning" ]
41.733333
0.010938
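A small usage sketch, assuming the deprecated decorator from the record above is in scope; old_api is a made-up function name:

@deprecated
def old_api():
    """Pretend legacy function."""
    return 42

value = old_api()  # stderr: DeprecationWarning: Call to deprecated function: old_api.
print(value)             # 42
print(old_api.__name__)  # 'old_api' -- metadata copied over by the decorator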
def dry_run_report(self): """ Returns text displaying the items that need to be uploaded or a message saying there are no files/folders to upload. :return: str: report text """ project_uploader = ProjectUploadDryRun() project_uploader.run(self.local_project) items = project_uploader.upload_items if not items: return "\n\nNo changes found. Nothing needs to be uploaded.\n\n" else: result = "\n\nFiles/Folders that need to be uploaded:\n" for item in items: result += "{}\n".format(item) result += "\n" return result
[ "def", "dry_run_report", "(", "self", ")", ":", "project_uploader", "=", "ProjectUploadDryRun", "(", ")", "project_uploader", ".", "run", "(", "self", ".", "local_project", ")", "items", "=", "project_uploader", ".", "upload_items", "if", "not", "items", ":", "return", "\"\\n\\nNo changes found. Nothing needs to be uploaded.\\n\\n\"", "else", ":", "result", "=", "\"\\n\\nFiles/Folders that need to be uploaded:\\n\"", "for", "item", "in", "items", ":", "result", "+=", "\"{}\\n\"", ".", "format", "(", "item", ")", "result", "+=", "\"\\n\"", "return", "result" ]
38.588235
0.004464
def generate_mesh(
    geo_object,
    verbose=True,
    dim=3,
    prune_vertices=True,
    prune_z_0=False,
    remove_faces=False,
    gmsh_path=None,
    extra_gmsh_arguments=None,
    # for debugging purposes:
    geo_filename=None,
    mesh_file_type="msh",
):
    """Return a meshio.Mesh, storing the mesh points, cells, and data,
    generated by Gmsh from the `geo_object`, written to a temporary file,
    and reread by `meshio`.

    Gmsh's native "msh" format is ill-suited to fast I/O. This can greatly
    reduce the performance of pygmsh. As alternatives, try `mesh_file_type=`:

    - "vtk"`, though Gmsh doesn't write the physical tags to VTK
      <https://gitlab.onelab.info/gmsh/gmsh/issues/389> or

    - `"mesh"`, though this only supports a few basic elements - "line",
      "triangle", "quad", "tetra", "hexahedron" - and doesn't preserve
      the `$PhysicalNames`, just the `int` tags.
    """
    if extra_gmsh_arguments is None:
        extra_gmsh_arguments = []

    # For format "mesh", ask Gmsh to save the physical tags
    # http://gmsh.info/doc/texinfo/gmsh.html#index-Mesh_002eSaveElementTagType
    if mesh_file_type == "mesh":
        extra_gmsh_arguments += ["-string", "Mesh.SaveElementTagType=2;"]

    preserve_geo = geo_filename is not None
    if geo_filename is None:
        with tempfile.NamedTemporaryFile(suffix=".geo") as f:
            geo_filename = f.name

    with open(geo_filename, "w") as f:
        f.write(geo_object.get_code())

    # As of Gmsh 4.1.3, the mesh format options are
    # ```
    # auto, msh1, msh2, msh3, msh4, msh, unv, vtk, wrl, mail, stl, p3d, mesh, bdf, cgns,
    # med, diff, ir3, inp, ply2, celum, su2, x3d, dat, neu, m, key
    # ```
    # Pick the correct filename suffix.
    filename_suffix = "msh" if mesh_file_type[:3] == "msh" else mesh_file_type

    with tempfile.NamedTemporaryFile(suffix="." + filename_suffix) as handle:
        msh_filename = handle.name

    gmsh_executable = gmsh_path if gmsh_path is not None else _get_gmsh_exe()

    args = [
        "-{}".format(dim),
        geo_filename,
        "-format",
        mesh_file_type,
        "-bin",
        "-o",
        msh_filename,
    ] + extra_gmsh_arguments

    # https://stackoverflow.com/a/803421/353337
    p = subprocess.Popen(
        [gmsh_executable] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    if verbose:
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print(line.decode("utf-8"), end="")

    p.communicate()
    assert p.returncode == 0, "Gmsh exited with error (return code {}).".format(
        p.returncode
    )

    mesh = meshio.read(msh_filename)

    if remove_faces:
        # Only keep the cells of highest topological dimension; discard faces
        # and such.
        two_d_cells = set(["triangle", "quad"])
        three_d_cells = set(
            ["tetra", "hexahedron", "wedge", "pyramid", "penta_prism", "hexa_prism"]
        )
        if any(k in mesh.cells for k in three_d_cells):
            keep_keys = three_d_cells.intersection(mesh.cells.keys())
        elif any(k in mesh.cells for k in two_d_cells):
            keep_keys = two_d_cells.intersection(mesh.cells.keys())
        else:
            keep_keys = mesh.cells.keys()
        mesh.cells = {key: mesh.cells[key] for key in keep_keys}
        mesh.cell_data = {key: mesh.cell_data[key] for key in keep_keys}

    if prune_vertices:
        # Make sure to include only those vertices which belong to a cell.
        ncells = numpy.concatenate([numpy.concatenate(c) for c in mesh.cells.values()])
        uvertices, uidx = numpy.unique(ncells, return_inverse=True)

        k = 0
        for key in mesh.cells.keys():
            n = numpy.prod(mesh.cells[key].shape)
            mesh.cells[key] = uidx[k : k + n].reshape(mesh.cells[key].shape)
            k += n

        mesh.points = mesh.points[uvertices]
        for key in mesh.point_data:
            mesh.point_data[key] = mesh.point_data[key][uvertices]

    # clean up
    os.remove(msh_filename)
    if preserve_geo:
        print("\ngeo file: {}".format(geo_filename))
    else:
        os.remove(geo_filename)

    if (
        prune_z_0
        and mesh.points.shape[1] == 3
        and numpy.all(numpy.abs(mesh.points[:, 2]) < 1.0e-13)
    ):
        mesh.points = mesh.points[:, :2]

    return mesh
[ "def", "generate_mesh", "(", "geo_object", ",", "verbose", "=", "True", ",", "dim", "=", "3", ",", "prune_vertices", "=", "True", ",", "prune_z_0", "=", "False", ",", "remove_faces", "=", "False", ",", "gmsh_path", "=", "None", ",", "extra_gmsh_arguments", "=", "None", ",", "# for debugging purposes:", "geo_filename", "=", "None", ",", "mesh_file_type", "=", "\"msh\"", ",", ")", ":", "if", "extra_gmsh_arguments", "is", "None", ":", "extra_gmsh_arguments", "=", "[", "]", "# For format \"mesh\", ask Gmsh to save the physical tags", "# http://gmsh.info/doc/texinfo/gmsh.html#index-Mesh_002eSaveElementTagType", "if", "mesh_file_type", "==", "\"mesh\"", ":", "extra_gmsh_arguments", "+=", "[", "\"-string\"", ",", "\"Mesh.SaveElementTagType=2;\"", "]", "preserve_geo", "=", "geo_filename", "is", "not", "None", "if", "geo_filename", "is", "None", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "\".geo\"", ")", "as", "f", ":", "geo_filename", "=", "f", ".", "name", "with", "open", "(", "geo_filename", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "geo_object", ".", "get_code", "(", ")", ")", "# As of Gmsh 4.1.3, the mesh format options are", "# ```", "# auto, msh1, msh2, msh3, msh4, msh, unv, vtk, wrl, mail, stl, p3d, mesh, bdf, cgns,", "# med, diff, ir3, inp, ply2, celum, su2, x3d, dat, neu, m, key", "# ```", "# Pick the correct filename suffix.", "filename_suffix", "=", "\"msh\"", "if", "mesh_file_type", "[", ":", "3", "]", "==", "\"msh\"", "else", "mesh_file_type", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "\".\"", "+", "filename_suffix", ")", "as", "handle", ":", "msh_filename", "=", "handle", ".", "name", "gmsh_executable", "=", "gmsh_path", "if", "gmsh_path", "is", "not", "None", "else", "_get_gmsh_exe", "(", ")", "args", "=", "[", "\"-{}\"", ".", "format", "(", "dim", ")", ",", "geo_filename", ",", "\"-format\"", ",", "mesh_file_type", ",", "\"-bin\"", ",", "\"-o\"", ",", "msh_filename", ",", "]", "+", "extra_gmsh_arguments", "# https://stackoverflow.com/a/803421/353337", "p", "=", "subprocess", ".", "Popen", "(", "[", "gmsh_executable", "]", "+", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "if", "verbose", ":", "while", "True", ":", "line", "=", "p", ".", "stdout", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "print", "(", "line", ".", "decode", "(", "\"utf-8\"", ")", ",", "end", "=", "\"\"", ")", "p", ".", "communicate", "(", ")", "assert", "p", ".", "returncode", "==", "0", ",", "\"Gmsh exited with error (return code {}).\"", ".", "format", "(", "p", ".", "returncode", ")", "mesh", "=", "meshio", ".", "read", "(", "msh_filename", ")", "if", "remove_faces", ":", "# Only keep the cells of highest topological dimension; discard faces", "# and such.", "two_d_cells", "=", "set", "(", "[", "\"triangle\"", ",", "\"quad\"", "]", ")", "three_d_cells", "=", "set", "(", "[", "\"tetra\"", ",", "\"hexahedron\"", ",", "\"wedge\"", ",", "\"pyramid\"", ",", "\"penta_prism\"", ",", "\"hexa_prism\"", "]", ")", "if", "any", "(", "k", "in", "mesh", ".", "cells", "for", "k", "in", "three_d_cells", ")", ":", "keep_keys", "=", "three_d_cells", ".", "intersection", "(", "mesh", ".", "cells", ".", "keys", "(", ")", ")", "elif", "any", "(", "k", "in", "mesh", ".", "cells", "for", "k", "in", "two_d_cells", ")", ":", "keep_keys", "=", "two_d_cells", ".", "intersection", "(", "mesh", ".", "cells", ".", "keys", "(", ")", ")", "else", ":", "keep_keys", "=", "mesh", ".", 
"cells", ".", "keys", "(", ")", "mesh", ".", "cells", "=", "{", "key", ":", "mesh", ".", "cells", "[", "key", "]", "for", "key", "in", "keep_keys", "}", "mesh", ".", "cell_data", "=", "{", "key", ":", "mesh", ".", "cell_data", "[", "key", "]", "for", "key", "in", "keep_keys", "}", "if", "prune_vertices", ":", "# Make sure to include only those vertices which belong to a cell.", "ncells", "=", "numpy", ".", "concatenate", "(", "[", "numpy", ".", "concatenate", "(", "c", ")", "for", "c", "in", "mesh", ".", "cells", ".", "values", "(", ")", "]", ")", "uvertices", ",", "uidx", "=", "numpy", ".", "unique", "(", "ncells", ",", "return_inverse", "=", "True", ")", "k", "=", "0", "for", "key", "in", "mesh", ".", "cells", ".", "keys", "(", ")", ":", "n", "=", "numpy", ".", "prod", "(", "mesh", ".", "cells", "[", "key", "]", ".", "shape", ")", "mesh", ".", "cells", "[", "key", "]", "=", "uidx", "[", "k", ":", "k", "+", "n", "]", ".", "reshape", "(", "mesh", ".", "cells", "[", "key", "]", ".", "shape", ")", "k", "+=", "n", "mesh", ".", "points", "=", "mesh", ".", "points", "[", "uvertices", "]", "for", "key", "in", "mesh", ".", "point_data", ":", "mesh", ".", "point_data", "[", "key", "]", "=", "mesh", ".", "point_data", "[", "key", "]", "[", "uvertices", "]", "# clean up", "os", ".", "remove", "(", "msh_filename", ")", "if", "preserve_geo", ":", "print", "(", "\"\\ngeo file: {}\"", ".", "format", "(", "geo_filename", ")", ")", "else", ":", "os", ".", "remove", "(", "geo_filename", ")", "if", "(", "prune_z_0", "and", "mesh", ".", "points", ".", "shape", "[", "1", "]", "==", "3", "and", "numpy", ".", "all", "(", "numpy", ".", "abs", "(", "mesh", ".", "points", "[", ":", ",", "2", "]", ")", "<", "1.0e-13", ")", ")", ":", "mesh", ".", "points", "=", "mesh", ".", "points", "[", ":", ",", ":", "2", "]", "return", "mesh" ]
32.165414
0.001587
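For context, a minimal usage sketch of the `generate_mesh` row above. It assumes the legacy (pre-7.0) pygmsh API that this helper ships with (`pygmsh.built_in.Geometry`, `add_polygon`, module-level `generate_mesh`) and a Gmsh binary on the PATH; the triangle geometry is invented for illustration.

import pygmsh

# Hypothetical 2D geometry: a unit triangle meshed at characteristic length 0.1.
geom = pygmsh.built_in.Geometry()
geom.add_polygon(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
    lcar=0.1,
)

# dim=2 meshes the surface only; prune_z_0=True drops the constant z column.
mesh = pygmsh.generate_mesh(geom, dim=2, prune_z_0=True, verbose=False)
print(mesh.points.shape, list(mesh.cells))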
def QA_indicator_MFI(DataFrame, N=14):
    """
    Money Flow Index (资金指标)

    TYP := (HIGH + LOW + CLOSE)/3;
    V1:=SUM(IF(TYP>REF(TYP,1),TYP*VOL,0),N)/SUM(IF(TYP<REF(TYP,1),TYP*VOL,0),N);
    MFI:100-(100/(1+V1));

    TYP: (high + low + close) / 3
    V1: the N-day running sum of TYP*volume (lots) where TYP is above the
        previous day's TYP (else 0), divided by the N-day running sum of
        TYP*volume (lots) where TYP is below the previous day's TYP (else 0)
    Output, the money flow index: 100 - (100 / (1 + V1))
    """
    C = DataFrame['close']
    H = DataFrame['high']
    L = DataFrame['low']
    VOL = DataFrame['volume']
    TYP = (C + H + L) / 3
    V1 = SUM(IF(TYP > REF(TYP, 1), TYP * VOL, 0), N) / \
        SUM(IF(TYP < REF(TYP, 1), TYP * VOL, 0), N)
    mfi = 100 - (100 / (1 + V1))
    DICT = {'MFI': mfi}

    return pd.DataFrame(DICT)
[ "def", "QA_indicator_MFI", "(", "DataFrame", ",", "N", "=", "14", ")", ":", "C", "=", "DataFrame", "[", "'close'", "]", "H", "=", "DataFrame", "[", "'high'", "]", "L", "=", "DataFrame", "[", "'low'", "]", "VOL", "=", "DataFrame", "[", "'volume'", "]", "TYP", "=", "(", "C", "+", "H", "+", "L", ")", "/", "3", "V1", "=", "SUM", "(", "IF", "(", "TYP", ">", "REF", "(", "TYP", ",", "1", ")", ",", "TYP", "*", "VOL", ",", "0", ")", ",", "N", ")", "/", "SUM", "(", "IF", "(", "TYP", "<", "REF", "(", "TYP", ",", "1", ")", ",", "TYP", "*", "VOL", ",", "0", ")", ",", "N", ")", "mfi", "=", "100", "-", "(", "100", "/", "(", "1", "+", "V1", ")", ")", "DICT", "=", "{", "'MFI'", ":", "mfi", "}", "return", "pd", ".", "DataFrame", "(", "DICT", ")" ]
31.380952
0.001473
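The MFI row above leans on QUANTAXIS's TDX-style helpers SUM, IF and REF. As a rough sketch, these are my own minimal pandas stand-ins (not the library's implementations), defined in the same namespace so the indicator can be exercised directly:

import numpy as np
import pandas as pd

def REF(series, n):     # value n rows back
    return series.shift(n)

def IF(cond, a, b):     # elementwise conditional
    return pd.Series(np.where(cond, a, b), index=cond.index)

def SUM(series, n):     # rolling n-period sum
    return series.rolling(n).sum()

df = pd.DataFrame({
    'close':  [10.0, 10.5, 10.2, 10.8, 11.0],
    'high':   [10.2, 10.7, 10.6, 11.0, 11.2],
    'low':    [ 9.8, 10.1, 10.0, 10.5, 10.7],
    'volume': [1000, 1200,  900, 1500, 1100],
})
print(QA_indicator_MFI(df, N=3))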
def set_world(self, grd, start_y_x, y_x):
    """
    tell the agent to move to location y,x
    Why is there another grd object in the agent? Because
    this is NOT the main grid, rather a copy for the agent
    to overwrite with planning routes, etc.
    The real grid is initialised in World.__init__() class
    """
    self.grd = grd
    self.start_y = start_y_x[0]
    self.start_x = start_y_x[1]
    self.current_y = start_y_x[0]
    self.current_x = start_y_x[1]
    self.target_y = y_x[0]
    self.target_x = y_x[1]
    self.backtrack = [0,0]  # set only if blocked and agent needs to go back
    self.prefer_x = 0       # set only if backtracked as preferred direction x
    self.prefer_y = 0
[ "def", "set_world", "(", "self", ",", "grd", ",", "start_y_x", ",", "y_x", ")", ":", "self", ".", "grd", "=", "grd", "self", ".", "start_y", "=", "start_y_x", "[", "0", "]", "self", ".", "start_x", "=", "start_y_x", "[", "1", "]", "self", ".", "current_y", "=", "start_y_x", "[", "0", "]", "self", ".", "current_x", "=", "start_y_x", "[", "1", "]", "self", ".", "target_y", "=", "y_x", "[", "0", "]", "self", ".", "target_x", "=", "y_x", "[", "1", "]", "self", ".", "backtrack", "=", "[", "0", ",", "0", "]", "# set only if blocked and agent needs to go back", "self", ".", "prefer_x", "=", "0", "# set only if backtracked as preferred direction x", "self", ".", "prefer_y", "=", "0" ]
42.111111
0.009032
def ungrab_server(self, onerror = None):
    """Release the server if it was previously grabbed by this client."""
    request.UngrabServer(display = self.display,
                         onerror = onerror)
[ "def", "ungrab_server", "(", "self", ",", "onerror", "=", "None", ")", ":", "request", ".", "UngrabServer", "(", "display", "=", "self", ".", "display", ",", "onerror", "=", "onerror", ")" ]
54
0.03653
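For context, `ungrab_server` is python-xlib's counterpart to `grab_server`; the usual pattern brackets a batch of requests so no other client can interleave. A sketch of that pairing (the try/finally structure is my own convention, not mandated by the library):

from Xlib import display

d = display.Display()
d.grab_server()           # freeze request processing for other clients
try:
    # ... issue a sequence of requests that must not be interleaved ...
    pass
finally:
    d.ungrab_server()     # the method shown above
    d.sync()              # flush so the ungrab actually reaches the server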
def show(*actors, **options
#    at=None,
#    shape=(1, 1),
#    N=None,
#    pos=(0, 0),
#    size="auto",
#    screensize="auto",
#    title="",
#    bg="blackboard",
#    bg2=None,
#    axes=4,
#    infinity=False,
#    verbose=True,
#    interactive=None,
#    offscreen=False,
#    resetcam=True,
#    zoom=None,
#    viewup="",
#    azimuth=0,
#    elevation=0,
#    roll=0,
#    interactorStyle=0,
#    newPlotter=False,
#    depthpeeling=False,
#    q=False,
):
    """
    Create on the fly an instance of class ``Plotter`` and show the object(s) provided.

    Allowed input objects are: ``filename``, ``vtkPolyData``, ``vtkActor``,
    ``vtkActor2D``, ``vtkImageActor``, ``vtkAssembly`` or ``vtkVolume``.

    If filename is given, its type is guessed based on its extension.
    Supported formats are:
    `vtu, vts, vtp, ply, obj, stl, 3ds, xml, neutral, gmsh, pcd, xyz, txt, byu,
    tif, slc, vti, mhd, png, jpg`.

    :param bool newPlotter: if set to `True`, a call to ``show`` will instantiate
        a new ``Plotter`` object (a new window) instead of reusing the first created.
        See e.g.: |readVolumeAsIsoSurface.py|_
    :return: the current ``Plotter`` class instance.

    .. note:: With multiple renderers, keyword ``at`` can become a `list`, e.g.

        .. code-block:: python

            from vtkplotter import *
            s = Sphere()
            c = Cube()
            p = Paraboloid()
            show(s, c, at=[0, 1], shape=(3,1))
            show(p, at=2, interactive=True)
            #
            # is equivalent to:
            vp = Plotter(shape=(3,1))
            s = Sphere()
            c = Cube()
            p = Paraboloid()
            vp.show(s, at=0)
            vp.show(p, at=1)
            vp.show(c, at=2, interactive=True)
    """
    at = options.pop("at", None)
    shape = options.pop("shape", (1, 1))
    N = options.pop("N", None)
    pos = options.pop("pos", (0, 0))
    size = options.pop("size", "auto")
    screensize = options.pop("screensize", "auto")
    title = options.pop("title", "")
    bg = options.pop("bg", "blackboard")
    bg2 = options.pop("bg2", None)
    axes = options.pop("axes", 4)
    infinity = options.pop("infinity", False)
    verbose = options.pop("verbose", True)
    interactive = options.pop("interactive", None)
    offscreen = options.pop("offscreen", False)
    resetcam = options.pop("resetcam", True)
    zoom = options.pop("zoom", None)
    viewup = options.pop("viewup", "")
    azimuth = options.pop("azimuth", 0)
    elevation = options.pop("elevation", 0)
    roll = options.pop("roll", 0)
    interactorStyle = options.pop("interactorStyle", 0)
    newPlotter = options.pop("newPlotter", False)
    depthpeeling = options.pop("depthpeeling", False)
    q = options.pop("q", False)

    if len(actors) == 0:
        actors = None
    elif len(actors) == 1:
        actors = actors[0]
    else:
        actors = utils.flatten(actors)

    if settings.plotter_instance and newPlotter == False:
        vp = settings.plotter_instance
    else:
        if utils.isSequence(at):
            if not utils.isSequence(actors):
                colors.printc("~times show() Error: input must be a list.", c=1)
                exit()
            if len(at) != len(actors):
                colors.printc("~times show() Error: lists 'input' and 'at', must have equal lengths.", c=1)
                exit()
            if len(at) > 1 and (shape == (1, 1) and N == None):
                N = max(at) + 1
        elif at is None and (N or shape != (1, 1)):
            if not utils.isSequence(actors):
                colors.printc('~times show() Error: N or shape is set, but input is not a sequence.', c=1)
                colors.printc(' you may need to specify e.g. at=0', c=1)
                exit()
            at = range(len(actors))

        vp = Plotter(
            shape=shape,
            N=N,
            pos=pos,
            size=size,
            screensize=screensize,
            title=title,
            bg=bg,
            bg2=bg2,
            axes=axes,
            infinity=infinity,
            depthpeeling=depthpeeling,
            verbose=verbose,
            interactive=interactive,
            offscreen=offscreen,
        )

    if utils.isSequence(at):
        for i, a in enumerate(actors):
            vp.show(
                a,
                at=i,
                zoom=zoom,
                resetcam=resetcam,
                viewup=viewup,
                azimuth=azimuth,
                elevation=elevation,
                roll=roll,
                interactive=interactive,
                interactorStyle=interactorStyle,
                q=q,
            )
        vp.interactor.Start()
    else:
        vp.show(
            actors,
            at=at,
            zoom=zoom,
            resetcam=resetcam,
            viewup=viewup,
            azimuth=azimuth,
            elevation=elevation,
            roll=roll,
            interactive=interactive,
            interactorStyle=interactorStyle,
            q=q,
        )

    return vp
[ "def", "show", "(", "*", "actors", ",", "*", "*", "options", "# at=None,", "# shape=(1, 1),", "# N=None,", "# pos=(0, 0),", "# size=\"auto\",", "# screensize=\"auto\",", "# title=\"\",", "# bg=\"blackboard\",", "# bg2=None,", "# axes=4,", "# infinity=False,", "# verbose=True,", "# interactive=None,", "# offscreen=False,", "# resetcam=True,", "# zoom=None,", "# viewup=\"\",", "# azimuth=0,", "# elevation=0,", "# roll=0,", "# interactorStyle=0,", "# newPlotter=False,", "# depthpeeling=False,", "# q=False,", ")", ":", "at", "=", "options", ".", "pop", "(", "\"at\"", ",", "None", ")", "shape", "=", "options", ".", "pop", "(", "\"shape\"", ",", "(", "1", ",", "1", ")", ")", "N", "=", "options", ".", "pop", "(", "\"N\"", ",", "None", ")", "pos", "=", "options", ".", "pop", "(", "\"pos\"", ",", "(", "0", ",", "0", ")", ")", "size", "=", "options", ".", "pop", "(", "\"size\"", ",", "\"auto\"", ")", "screensize", "=", "options", ".", "pop", "(", "\"screensize\"", ",", "\"auto\"", ")", "title", "=", "options", ".", "pop", "(", "\"title\"", ",", "\"\"", ")", "bg", "=", "options", ".", "pop", "(", "\"bg\"", ",", "\"blackboard\"", ")", "bg2", "=", "options", ".", "pop", "(", "\"bg2\"", ",", "None", ")", "axes", "=", "options", ".", "pop", "(", "\"axes\"", ",", "4", ")", "infinity", "=", "options", ".", "pop", "(", "\"infinity\"", ",", "False", ")", "verbose", "=", "options", ".", "pop", "(", "\"verbose\"", ",", "True", ")", "interactive", "=", "options", ".", "pop", "(", "\"interactive\"", ",", "None", ")", "offscreen", "=", "options", ".", "pop", "(", "\"offscreen\"", ",", "False", ")", "resetcam", "=", "options", ".", "pop", "(", "\"resetcam\"", ",", "True", ")", "zoom", "=", "options", ".", "pop", "(", "\"zoom\"", ",", "None", ")", "viewup", "=", "options", ".", "pop", "(", "\"viewup\"", ",", "\"\"", ")", "azimuth", "=", "options", ".", "pop", "(", "\"azimuth\"", ",", "0", ")", "elevation", "=", "options", ".", "pop", "(", "\"elevation\"", ",", "0", ")", "roll", "=", "options", ".", "pop", "(", "\"roll\"", ",", "0", ")", "interactorStyle", "=", "options", ".", "pop", "(", "\"interactorStyle\"", ",", "0", ")", "newPlotter", "=", "options", ".", "pop", "(", "\"newPlotter\"", ",", "False", ")", "depthpeeling", "=", "options", ".", "pop", "(", "\"depthpeeling\"", ",", "False", ")", "q", "=", "options", ".", "pop", "(", "\"q\"", ",", "False", ")", "if", "len", "(", "actors", ")", "==", "0", ":", "actors", "=", "None", "elif", "len", "(", "actors", ")", "==", "1", ":", "actors", "=", "actors", "[", "0", "]", "else", ":", "actors", "=", "utils", ".", "flatten", "(", "actors", ")", "if", "settings", ".", "plotter_instance", "and", "newPlotter", "==", "False", ":", "vp", "=", "settings", ".", "plotter_instance", "else", ":", "if", "utils", ".", "isSequence", "(", "at", ")", ":", "if", "not", "utils", ".", "isSequence", "(", "actors", ")", ":", "colors", ".", "printc", "(", "\"~times show() Error: input must be a list.\"", ",", "c", "=", "1", ")", "exit", "(", ")", "if", "len", "(", "at", ")", "!=", "len", "(", "actors", ")", ":", "colors", ".", "printc", "(", "\"~times show() Error: lists 'input' and 'at', must have equal lengths.\"", ",", "c", "=", "1", ")", "exit", "(", ")", "if", "len", "(", "at", ")", ">", "1", "and", "(", "shape", "==", "(", "1", ",", "1", ")", "and", "N", "==", "None", ")", ":", "N", "=", "max", "(", "at", ")", "+", "1", "elif", "at", "is", "None", "and", "(", "N", "or", "shape", "!=", "(", "1", ",", "1", ")", ")", ":", "if", "not", "utils", ".", "isSequence", "(", "actors", ")", 
":", "colors", ".", "printc", "(", "'~times show() Error: N or shape is set, but input is not a sequence.'", ",", "c", "=", "1", ")", "colors", ".", "printc", "(", "' you may need to specify e.g. at=0'", ",", "c", "=", "1", ")", "exit", "(", ")", "at", "=", "range", "(", "len", "(", "actors", ")", ")", "vp", "=", "Plotter", "(", "shape", "=", "shape", ",", "N", "=", "N", ",", "pos", "=", "pos", ",", "size", "=", "size", ",", "screensize", "=", "screensize", ",", "title", "=", "title", ",", "bg", "=", "bg", ",", "bg2", "=", "bg2", ",", "axes", "=", "axes", ",", "infinity", "=", "infinity", ",", "depthpeeling", "=", "depthpeeling", ",", "verbose", "=", "verbose", ",", "interactive", "=", "interactive", ",", "offscreen", "=", "offscreen", ",", ")", "if", "utils", ".", "isSequence", "(", "at", ")", ":", "for", "i", ",", "a", "in", "enumerate", "(", "actors", ")", ":", "vp", ".", "show", "(", "a", ",", "at", "=", "i", ",", "zoom", "=", "zoom", ",", "resetcam", "=", "resetcam", ",", "viewup", "=", "viewup", ",", "azimuth", "=", "azimuth", ",", "elevation", "=", "elevation", ",", "roll", "=", "roll", ",", "interactive", "=", "interactive", ",", "interactorStyle", "=", "interactorStyle", ",", "q", "=", "q", ",", ")", "vp", ".", "interactor", ".", "Start", "(", ")", "else", ":", "vp", ".", "show", "(", "actors", ",", "at", "=", "at", ",", "zoom", "=", "zoom", ",", "resetcam", "=", "resetcam", ",", "viewup", "=", "viewup", ",", "azimuth", "=", "azimuth", ",", "elevation", "=", "elevation", ",", "roll", "=", "roll", ",", "interactive", "=", "interactive", ",", "interactorStyle", "=", "interactorStyle", ",", "q", "=", "q", ",", ")", "return", "vp" ]
30.475309
0.007454
def pkginfo_to_dict(path, distribution=None):
    """
    Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.

    The description is included under the key ['description'] rather than
    being written to a separate file.

    path: path to PKG-INFO file
    distribution: optional distutils Distribution()
    """

    metadata = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
    metadata["generator"] = "bdist_wheel (" + wheel.__version__ + ")"
    try:
        unicode
        pkg_info = read_pkg_info(path)
    except NameError:
        pkg_info = email.parser.Parser().parsestr(open(path, 'rb').read().decode('utf-8'))
    description = None

    if pkg_info['Summary']:
        metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
        del pkg_info['Summary']

    if pkg_info['Description']:
        description = dedent_description(pkg_info)
        del pkg_info['Description']
    else:
        payload = pkg_info.get_payload()
        if isinstance(payload, bytes):
            # Avoid a Python 2 Unicode error.
            # We still suffer ? glyphs on Python 3.
            payload = payload.decode('utf-8')
        if payload:
            description = payload

    if description:
        pkg_info['description'] = description

    for key in unique(k.lower() for k in pkg_info.keys()):
        low_key = key.replace('-', '_')

        if low_key in SKIP_FIELDS:
            continue

        if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
            continue

        if low_key in PLURAL_FIELDS:
            metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)

        elif low_key == "requires_dist":
            handle_requires(metadata, pkg_info, key)

        elif low_key == 'provides_extra':
            if not 'extras' in metadata:
                metadata['extras'] = []
            metadata['extras'].extend(pkg_info.get_all(key))

        elif low_key == 'home_page':
            metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]}

        elif low_key == 'keywords':
            metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])

        else:
            metadata[low_key] = pkg_info[key]

    metadata['metadata_version'] = METADATA_VERSION

    if 'extras' in metadata:
        metadata['extras'] = sorted(set(metadata['extras']))

    # include more information if distribution is available
    if distribution:
        for requires, attr in (('test_requires', 'tests_require'),):
            try:
                requirements = getattr(distribution, attr)
                if isinstance(requirements, list):
                    new_requirements = list(convert_requirements(requirements))
                    metadata[requires] = [{'requires':new_requirements}]
            except AttributeError:
                pass

    # handle contacts
    contacts = []
    for contact_type, role in CONTACT_FIELDS:
        contact = {}
        for key in contact_type:
            if contact_type[key] in metadata:
                contact[key] = metadata.pop(contact_type[key])
        if contact:
            contact['role'] = role
            contacts.append(contact)
    if contacts:
        metadata['extensions']['python.details']['contacts'] = contacts

    # convert entry points to exports
    try:
        with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
            ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
        exports = {}
        for group, items in ep_map.items():
            exports[group] = {}
            for item in items.values():
                name, export = str(item).split(' = ', 1)
                exports[group][name] = export
        if exports:
            metadata['extensions']['python.exports'] = exports
    except IOError:
        pass

    # copy console_scripts entry points to commands
    if 'python.exports' in metadata['extensions']:
        for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
                                         ('gui_scripts', 'wrap_gui')):
            if ep_script in metadata['extensions']['python.exports']:
                metadata['extensions']['python.commands'][wrap_script] = \
                    metadata['extensions']['python.exports'][ep_script]

    return metadata
[ "def", "pkginfo_to_dict", "(", "path", ",", "distribution", "=", "None", ")", ":", "metadata", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "dict", ")", ")", ")", "metadata", "[", "\"generator\"", "]", "=", "\"bdist_wheel (\"", "+", "wheel", ".", "__version__", "+", "\")\"", "try", ":", "unicode", "pkg_info", "=", "read_pkg_info", "(", "path", ")", "except", "NameError", ":", "pkg_info", "=", "email", ".", "parser", ".", "Parser", "(", ")", ".", "parsestr", "(", "open", "(", "path", ",", "'rb'", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "description", "=", "None", "if", "pkg_info", "[", "'Summary'", "]", ":", "metadata", "[", "'summary'", "]", "=", "pkginfo_unicode", "(", "pkg_info", ",", "'Summary'", ")", "del", "pkg_info", "[", "'Summary'", "]", "if", "pkg_info", "[", "'Description'", "]", ":", "description", "=", "dedent_description", "(", "pkg_info", ")", "del", "pkg_info", "[", "'Description'", "]", "else", ":", "payload", "=", "pkg_info", ".", "get_payload", "(", ")", "if", "isinstance", "(", "payload", ",", "bytes", ")", ":", "# Avoid a Python 2 Unicode error.", "# We still suffer ? glyphs on Python 3.", "payload", "=", "payload", ".", "decode", "(", "'utf-8'", ")", "if", "payload", ":", "description", "=", "payload", "if", "description", ":", "pkg_info", "[", "'description'", "]", "=", "description", "for", "key", "in", "unique", "(", "k", ".", "lower", "(", ")", "for", "k", "in", "pkg_info", ".", "keys", "(", ")", ")", ":", "low_key", "=", "key", ".", "replace", "(", "'-'", ",", "'_'", ")", "if", "low_key", "in", "SKIP_FIELDS", ":", "continue", "if", "low_key", "in", "UNKNOWN_FIELDS", "and", "pkg_info", ".", "get", "(", "key", ")", "==", "'UNKNOWN'", ":", "continue", "if", "low_key", "in", "PLURAL_FIELDS", ":", "metadata", "[", "PLURAL_FIELDS", "[", "low_key", "]", "]", "=", "pkg_info", ".", "get_all", "(", "key", ")", "elif", "low_key", "==", "\"requires_dist\"", ":", "handle_requires", "(", "metadata", ",", "pkg_info", ",", "key", ")", "elif", "low_key", "==", "'provides_extra'", ":", "if", "not", "'extras'", "in", "metadata", ":", "metadata", "[", "'extras'", "]", "=", "[", "]", "metadata", "[", "'extras'", "]", ".", "extend", "(", "pkg_info", ".", "get_all", "(", "key", ")", ")", "elif", "low_key", "==", "'home_page'", ":", "metadata", "[", "'extensions'", "]", "[", "'python.details'", "]", "[", "'project_urls'", "]", "=", "{", "'Home'", ":", "pkg_info", "[", "key", "]", "}", "elif", "low_key", "==", "'keywords'", ":", "metadata", "[", "'keywords'", "]", "=", "KEYWORDS_RE", ".", "split", "(", "pkg_info", "[", "key", "]", ")", "else", ":", "metadata", "[", "low_key", "]", "=", "pkg_info", "[", "key", "]", "metadata", "[", "'metadata_version'", "]", "=", "METADATA_VERSION", "if", "'extras'", "in", "metadata", ":", "metadata", "[", "'extras'", "]", "=", "sorted", "(", "set", "(", "metadata", "[", "'extras'", "]", ")", ")", "# include more information if distribution is available", "if", "distribution", ":", "for", "requires", ",", "attr", "in", "(", "(", "'test_requires'", ",", "'tests_require'", ")", ",", ")", ":", "try", ":", "requirements", "=", "getattr", "(", "distribution", ",", "attr", ")", "if", "isinstance", "(", "requirements", ",", "list", ")", ":", "new_requirements", "=", "list", "(", "convert_requirements", "(", "requirements", ")", ")", "metadata", "[", "requires", "]", "=", "[", "{", "'requires'", ":", "new_requirements", "}", "]", "except", "AttributeError", ":", "pass", "# handle 
contacts", "contacts", "=", "[", "]", "for", "contact_type", ",", "role", "in", "CONTACT_FIELDS", ":", "contact", "=", "{", "}", "for", "key", "in", "contact_type", ":", "if", "contact_type", "[", "key", "]", "in", "metadata", ":", "contact", "[", "key", "]", "=", "metadata", ".", "pop", "(", "contact_type", "[", "key", "]", ")", "if", "contact", ":", "contact", "[", "'role'", "]", "=", "role", "contacts", ".", "append", "(", "contact", ")", "if", "contacts", ":", "metadata", "[", "'extensions'", "]", "[", "'python.details'", "]", "[", "'contacts'", "]", "=", "contacts", "# convert entry points to exports", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "\"entry_points.txt\"", ")", ",", "\"r\"", ")", "as", "ep_file", ":", "ep_map", "=", "pkg_resources", ".", "EntryPoint", ".", "parse_map", "(", "ep_file", ".", "read", "(", ")", ")", "exports", "=", "{", "}", "for", "group", ",", "items", "in", "ep_map", ".", "items", "(", ")", ":", "exports", "[", "group", "]", "=", "{", "}", "for", "item", "in", "items", ".", "values", "(", ")", ":", "name", ",", "export", "=", "str", "(", "item", ")", ".", "split", "(", "' = '", ",", "1", ")", "exports", "[", "group", "]", "[", "name", "]", "=", "export", "if", "exports", ":", "metadata", "[", "'extensions'", "]", "[", "'python.exports'", "]", "=", "exports", "except", "IOError", ":", "pass", "# copy console_scripts entry points to commands", "if", "'python.exports'", "in", "metadata", "[", "'extensions'", "]", ":", "for", "(", "ep_script", ",", "wrap_script", ")", "in", "(", "(", "'console_scripts'", ",", "'wrap_console'", ")", ",", "(", "'gui_scripts'", ",", "'wrap_gui'", ")", ")", ":", "if", "ep_script", "in", "metadata", "[", "'extensions'", "]", "[", "'python.exports'", "]", ":", "metadata", "[", "'extensions'", "]", "[", "'python.commands'", "]", "[", "wrap_script", "]", "=", "metadata", "[", "'extensions'", "]", "[", "'python.exports'", "]", "[", "ep_script", "]", "return", "metadata" ]
34.801653
0.001616
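A quick usage sketch for the row above. The PKG-INFO path here is hypothetical, and the function relies on its module's own helpers (read_pkg_info, CONTACT_FIELDS, etc.) being in scope:

import json

# Hypothetical path to a PKG-INFO produced by setup.py / bdist_wheel.
meta = pkginfo_to_dict('mypkg.egg-info/PKG-INFO')
print(json.dumps(meta, indent=2, default=str))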
def libvlc_media_player_set_agl(p_mi, drawable):
    '''Set the agl handler where the media player should render its video output.
    @param p_mi: the Media Player.
    @param drawable: the agl handler.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_agl', None) or \
        _Cfunction('libvlc_media_player_set_agl', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint32)
    return f(p_mi, drawable)
[ "def", "libvlc_media_player_set_agl", "(", "p_mi", ",", "drawable", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_player_set_agl'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_player_set_agl'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "ctypes", ".", "c_uint32", ")", "return", "f", "(", "p_mi", ",", "drawable", ")" ]
47.222222
0.006928
def pluck(record, *keys, **kwargs):
    """Return the record with the selected keys

    :param record: a dictionary
    :param keys: some keys from the record
    :param kwargs: keywords determining how to deal with the keys

    >>> d = {'name': 'Lancelot', 'actor': 'John Cleese', 'color': 'blue'}
    >>> pluck(d, 'name', 'color')
    {'color': 'blue', 'name': 'Lancelot'}

    The keyword 'default' allows you to supply a fallback for missing keys::

        >>> d = {'year': 2014, 'movie': 'Bilbo'}
        >>> pluck(d, 'year', 'movie', 'nb_aliens', default=0)
        {'movie': 'Bilbo', 'nb_aliens': 0, 'year': 2014}
    """
    default = kwargs.get('default', None)
    return reduce(lambda a, x: assoc(a, x, record.get(x, default)), keys, {})
[ "def", "pluck", "(", "record", ",", "*", "keys", ",", "*", "*", "kwargs", ")", ":", "default", "=", "kwargs", ".", "get", "(", "'default'", ",", "None", ")", "return", "reduce", "(", "lambda", "a", ",", "x", ":", "assoc", "(", "a", ",", "x", ",", "record", ".", "get", "(", "x", ",", "default", ")", ")", ",", "keys", ",", "{", "}", ")" ]
36.45
0.001337
def _SetHeader(self, values):
    """Set the row's header from a list."""
    if self._values and len(values) != len(self._values):
        raise ValueError('Header values not equal to existing data width.')
    if not self._values:
        for _ in range(len(values)):
            self._values.append(None)
    self._keys = list(values)
    self._BuildIndex()
[ "def", "_SetHeader", "(", "self", ",", "values", ")", ":", "if", "self", ".", "_values", "and", "len", "(", "values", ")", "!=", "len", "(", "self", ".", "_values", ")", ":", "raise", "ValueError", "(", "'Header values not equal to existing data width.'", ")", "if", "not", "self", ".", "_values", ":", "for", "_", "in", "range", "(", "len", "(", "values", ")", ")", ":", "self", ".", "_values", ".", "append", "(", "None", ")", "self", ".", "_keys", "=", "list", "(", "values", ")", "self", ".", "_BuildIndex", "(", ")" ]
38.222222
0.008523
def get_contact(self, contact_id):
    """ Get single contact """
    url = self.CONTACTS_ID_URL % contact_id

    connection = Connection(self.token)
    connection.set_url(self.production, url)

    return connection.get_request()
[ "def", "get_contact", "(", "self", ",", "contact_id", ")", ":", "url", "=", "self", ".", "CONTACTS_ID_URL", "%", "contact_id", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
25.9
0.007463
def store_rlzs_by_grp(dstore):
    """
    Save in the datastore a composite array with fields (grp_id, gsim_id, rlzs)
    """
    lst = []
    assoc = dstore['csm_info'].get_rlzs_assoc()
    for grp, arr in assoc.by_grp().items():
        for gsim_id, rlzs in enumerate(arr):
            lst.append((int(grp[4:]), gsim_id, rlzs))
    dstore['csm_info/rlzs_by_grp'] = numpy.array(lst, rlzs_by_grp_dt)
[ "def", "store_rlzs_by_grp", "(", "dstore", ")", ":", "lst", "=", "[", "]", "assoc", "=", "dstore", "[", "'csm_info'", "]", ".", "get_rlzs_assoc", "(", ")", "for", "grp", ",", "arr", "in", "assoc", ".", "by_grp", "(", ")", ".", "items", "(", ")", ":", "for", "gsim_id", ",", "rlzs", "in", "enumerate", "(", "arr", ")", ":", "lst", ".", "append", "(", "(", "int", "(", "grp", "[", "4", ":", "]", ")", ",", "gsim_id", ",", "rlzs", ")", ")", "dstore", "[", "'csm_info/rlzs_by_grp'", "]", "=", "numpy", ".", "array", "(", "lst", ",", "rlzs_by_grp_dt", ")" ]
39.1
0.0025
def chk_fraction_digits_arg(s):
    """Checks if the string `s` is a valid fraction-digits argument.
    Return True or False."""
    try:
        v = int(s)
        if v >= 1 and v <= 18:
            return True
        else:
            return False
    except ValueError:
        return False
[ "def", "chk_fraction_digits_arg", "(", "s", ")", ":", "try", ":", "v", "=", "int", "(", "s", ")", "if", "v", ">=", "1", "and", "v", "<=", "18", ":", "return", "True", "else", ":", "return", "False", "except", "ValueError", ":", "return", "False" ]
23.75
0.003378
def _sense_packet_to_data(packet):
    """Remove the escape pad bytes from a sense packet (\2\0 -> \2)."""
    data = bytearray(0)
    last = 0
    i = 1
    while (i < len(packet)):
        if not (last == 2 and packet[i] == 0):
            data.append(packet[i])
        last = packet[i]
        i += 1
    return data
[ "def", "_sense_packet_to_data", "(", "packet", ")", ":", "data", "=", "bytearray", "(", "0", ")", "last", "=", "0", "i", "=", "1", "while", "(", "i", "<", "len", "(", "packet", ")", ")", ":", "if", "not", "(", "last", "==", "2", "and", "packet", "[", "i", "]", "==", "0", ")", ":", "data", ".", "append", "(", "packet", "[", "i", "]", ")", "last", "=", "packet", "[", "i", "]", "i", "+=", "1", "return", "data" ]
29.166667
0.00554
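A worked example for the unescaping row above, on a hypothetical packet. Byte 0 is treated as a header and skipped (the loop starts at i = 1), and every 0x00 that directly follows a 0x02 is dropped:

pkt = bytearray(b'\x10\x02\x00\x41\x02\x00\x42')
data = _sense_packet_to_data(pkt)
assert data == bytearray(b'\x02\x41\x02\x42')   # 0x10 dropped, both pad bytes removed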
def to_new(self, data, perplexities=None, return_distances=False):
    """Compute the affinities of new samples to the initial samples.

    This is necessary for embedding new data points into an existing
    embedding.

    Please see the :ref:`parameter-guide` for more information.

    Parameters
    ----------
    data: np.ndarray
        The data points to be added to the existing embedding.

    perplexities: List[float]
        A list of perplexity values, which will be used in the multiscale
        Gaussian kernel. Perplexity can be thought of as the continuous
        :math:`k` number of nearest neighbors, for which t-SNE will attempt
        to preserve distances.

    return_distances: bool
        If needed, the function can return the indices of the nearest
        neighbors and their corresponding distances.

    Returns
    -------
    P: array_like
        An :math:`N \\times M` affinity matrix expressing interactions
        between :math:`N` new data points and the initial :math:`M` data
        samples.

    indices: np.ndarray
        Returned if ``return_distances=True``. The indices of the :math:`k`
        nearest neighbors in the existing embedding for every new data
        point.

    distances: np.ndarray
        Returned if ``return_distances=True``. The distances to the
        :math:`k` nearest neighbors in the existing embedding for every new
        data point.

    """
    perplexities = perplexities if perplexities is not None else self.perplexities
    perplexities = self.check_perplexities(perplexities)
    max_perplexity = np.max(perplexities)
    k_neighbors = min(self.n_samples - 1, int(3 * max_perplexity))

    neighbors, distances = self.knn_index.query(data, k_neighbors)

    P = self._calculate_P(
        neighbors,
        distances,
        perplexities,
        symmetrize=False,
        normalization="point-wise",
        n_reference_samples=self.n_samples,
        n_jobs=self.n_jobs,
    )

    if return_distances:
        return P, neighbors, distances

    return P
[ "def", "to_new", "(", "self", ",", "data", ",", "perplexities", "=", "None", ",", "return_distances", "=", "False", ")", ":", "perplexities", "=", "perplexities", "if", "perplexities", "is", "not", "None", "else", "self", ".", "perplexities", "perplexities", "=", "self", ".", "check_perplexities", "(", "perplexities", ")", "max_perplexity", "=", "np", ".", "max", "(", "perplexities", ")", "k_neighbors", "=", "min", "(", "self", ".", "n_samples", "-", "1", ",", "int", "(", "3", "*", "max_perplexity", ")", ")", "neighbors", ",", "distances", "=", "self", ".", "knn_index", ".", "query", "(", "data", ",", "k_neighbors", ")", "P", "=", "self", ".", "_calculate_P", "(", "neighbors", ",", "distances", ",", "perplexities", ",", "symmetrize", "=", "False", ",", "normalization", "=", "\"point-wise\"", ",", "n_reference_samples", "=", "self", ".", "n_samples", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", ")", "if", "return_distances", ":", "return", "P", ",", "neighbors", ",", "distances", "return", "P" ]
34.555556
0.00134
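The `to_new` signature above matches the multiscale affinity class in openTSNE; assuming that context, out-of-sample affinities can be computed as below (the data is random, for illustration only):

import numpy as np
from openTSNE import affinity

X_train = np.random.randn(500, 10)
X_new = np.random.randn(20, 10)

aff = affinity.Multiscale(X_train, perplexities=[20, 50])
P, neighbors, distances = aff.to_new(X_new, return_distances=True)
print(P.shape)   # (20, 500): one row of affinities per new sample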
def script_args(f):
    """single decorator for adding script args"""
    args = [
        magic_arguments.argument(
            '--out', type=str,
            help="""The variable in which to store stdout from the script.

            If the script is backgrounded, this will be the stdout *pipe*,
            instead of the stderr text itself.
            """
        ),
        magic_arguments.argument(
            '--err', type=str,
            help="""The variable in which to store stderr from the script.

            If the script is backgrounded, this will be the stderr *pipe*,
            instead of the stderr text itself.
            """
        ),
        magic_arguments.argument(
            '--bg', action="store_true",
            help="""Whether to run the script in the background.

            If given, the only way to see the output of the command is
            with --out/err.
            """
        ),
        magic_arguments.argument(
            '--proc', type=str,
            help="""The variable in which to store Popen instance.
            This is used only when --bg option is given.
            """
        ),
    ]
    for arg in args:
        f = arg(f)
    return f
[ "def", "script_args", "(", "f", ")", ":", "args", "=", "[", "magic_arguments", ".", "argument", "(", "'--out'", ",", "type", "=", "str", ",", "help", "=", "\"\"\"The variable in which to store stdout from the script.\n If the script is backgrounded, this will be the stdout *pipe*,\n instead of the stderr text itself.\n \"\"\"", ")", ",", "magic_arguments", ".", "argument", "(", "'--err'", ",", "type", "=", "str", ",", "help", "=", "\"\"\"The variable in which to store stderr from the script.\n If the script is backgrounded, this will be the stderr *pipe*,\n instead of the stderr text itself.\n \"\"\"", ")", ",", "magic_arguments", ".", "argument", "(", "'--bg'", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"\"\"Whether to run the script in the background.\n If given, the only way to see the output of the command is\n with --out/err.\n \"\"\"", ")", ",", "magic_arguments", ".", "argument", "(", "'--proc'", ",", "type", "=", "str", ",", "help", "=", "\"\"\"The variable in which to store Popen instance.\n This is used only when --bg option is given.\n \"\"\"", ")", ",", "]", "for", "arg", "in", "args", ":", "f", "=", "arg", "(", "f", ")", "return", "f" ]
34.382353
0.000832
def generate_export_pipeline_code(pipeline_tree, operators):
    """Generate code specific to the construction of the sklearn Pipeline for export_pipeline.

    Parameters
    ----------
    pipeline_tree: list
        List of operators in the current optimized pipeline

    Returns
    -------
    Source code for the sklearn pipeline

    """
    steps = _process_operator(pipeline_tree, operators)
    # number of steps in a pipeline
    num_step = len(steps)
    if num_step > 1:
        pipeline_text = "make_pipeline(\n{STEPS}\n)".format(STEPS=_indent(",\n".join(steps), 4))
    # only one operator (root = True)
    else:
        pipeline_text = "{STEPS}".format(STEPS=_indent(",\n".join(steps), 0))

    return pipeline_text
[ "def", "generate_export_pipeline_code", "(", "pipeline_tree", ",", "operators", ")", ":", "steps", "=", "_process_operator", "(", "pipeline_tree", ",", "operators", ")", "# number of steps in a pipeline", "num_step", "=", "len", "(", "steps", ")", "if", "num_step", ">", "1", ":", "pipeline_text", "=", "\"make_pipeline(\\n{STEPS}\\n)\"", ".", "format", "(", "STEPS", "=", "_indent", "(", "\",\\n\"", ".", "join", "(", "steps", ")", ",", "4", ")", ")", "# only one operator (root = True)", "else", ":", "pipeline_text", "=", "\"{STEPS}\"", ".", "format", "(", "STEPS", "=", "_indent", "(", "\",\\n\"", ".", "join", "(", "steps", ")", ",", "0", ")", ")", "return", "pipeline_text" ]
30.913043
0.004093
def extent_mode_content():
    """Helper method that returns just the content in extent mode.

    This method was added so that the text could be reused in the wizard.

    :returns: A message object without brand element.
    :rtype: safe.messaging.message.Message
    """
    message = m.Message()
    header = m.Heading(tr(
        'Use intersection of hazard and exposure layers'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(tr(
        'The largest area that can be analysed is the intersection of the '
        'hazard and exposure layers you have added. To choose this option, '
        'click \'Use intersection of hazard and exposure layers\'. '))
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'Sometimes it is more useful to analyse a smaller area. This could be '
        'to reduce processing time (smaller areas with process faster) or '
        'because information is only needed in a certain area (e.g. if a '
        'district only wants information for their district, not for the '
        'entire city). If you want to analyse a smaller area, there are a few '
        'different ways to do this.'))
    message.add(paragraph)
    header = m.Heading(tr(
        'Use intersection of hazard, exposure and current view extent'),
        **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(tr(
        'If you wish to conduct the analysis on the area currently shown in '
        'the window, you can set the analysis area to \'Use intersection of '
        'hazard, exposure and current view extent\'. If the extents of the '
        'datasets are smaller than the view extent, the analysis area will be '
        'reduced to the extents of the datasets.'))
    message.add(paragraph)
    header = m.Heading(tr(
        'Use intersection of hazard, exposure and this bookmark'),
        **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(tr(
        'You can also use one of your QGIS bookmarks to set the analysis '
        'area.'),
        m.ImportantText(tr(
            'This option will be greyed out if you have no bookmarks.')))
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'To create a bookmark, zoom to the area you want to create a bookmark '
        'for. When you are happy with the extent, click the \'New bookmark\' '
        'button in the QGIS toolbar.'))
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'The drop down menu in the InaSAFE Analysis Area window should now be '
        'activated. When you choose a bookmark from the drop down menu it '
        'will zoom to the analysis area selected by the bookmark.'))
    message.add(paragraph)
    header = m.Heading(tr(
        'Use intersection of hazard, exposure and this bounding box'),
        **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(tr(
        'You can also choose the analysis area interactively by clicking '
        '\'Use intersection of hazard, exposure and this bounding box\'. This '
        'will allow you to click \'Drag on map\' which will temporarily hide '
        'this window and allow you to drag a rectangle on the map. After you '
        'have finished dragging the rectangle, this window will reappear with '
        'values in the North, South, East and West boxes. If the extents of '
        'the datasets are smaller than the user defined analysis area, the '
        'analysis area will be reduced to the extents of the datasets.'))
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'Alternatively, you can enter the coordinates directly into the '
        'N/S/E/W boxes once the \'Use intersection of hazard, exposure and '
        'this bounding box\' option is selected (using the same coordinate '
        'reference system, or CRS, as the map is currently set).'))
    message.add(paragraph)
    return message
[ "def", "extent_mode_content", "(", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "header", "=", "m", ".", "Heading", "(", "tr", "(", "'Use intersection of hazard and exposure layers'", ")", ",", "*", "*", "INFO_STYLE", ")", "message", ".", "add", "(", "header", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'The largest area that can be analysed is the intersection of the '", "'hazard and exposure layers you have added. To choose this option, '", "'click \\'Use intersection of hazard and exposure layers\\'. '", ")", ")", "message", ".", "add", "(", "paragraph", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'Sometimes it is more useful to analyse a smaller area. This could be '", "'to reduce processing time (smaller areas with process faster) or '", "'because information is only needed in a certain area (e.g. if a '", "'district only wants information for their district, not for the '", "'entire city). If you want to analyse a smaller area, there are a few '", "'different ways to do this.'", ")", ")", "message", ".", "add", "(", "paragraph", ")", "header", "=", "m", ".", "Heading", "(", "tr", "(", "'Use intersection of hazard, exposure and current view extent'", ")", ",", "*", "*", "INFO_STYLE", ")", "message", ".", "add", "(", "header", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'If you wish to conduct the analysis on the area currently shown in '", "'the window, you can set the analysis area to \\'Use intersection of '", "'hazard, exposure and current view extent\\'. If the extents of the '", "'datasets are smaller than the view extent, the analysis area will be '", "'reduced to the extents of the datasets.'", ")", ")", "message", ".", "add", "(", "paragraph", ")", "header", "=", "m", ".", "Heading", "(", "tr", "(", "'Use intersection of hazard, exposure and this bookmark'", ")", ",", "*", "*", "INFO_STYLE", ")", "message", ".", "add", "(", "header", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'You can also use one of your QGIS bookmarks to set the analysis '", "'area.'", ")", ",", "m", ".", "ImportantText", "(", "tr", "(", "'This option will be greyed out if you have no bookmarks.'", ")", ")", ")", "message", ".", "add", "(", "paragraph", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'To create a bookmark, zoom to the area you want to create a bookmark '", "'for. When you are happy with the extent, click the \\'New bookmark\\' '", "'button in the QGIS toolbar.'", ")", ")", "message", ".", "add", "(", "paragraph", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'The drop down menu in the InaSAFE Analysis Area window should now be '", "'activated. When you choose a bookmark from the drop down menu it '", "'will zoom to the analysis area selected by the bookmark.'", ")", ")", "message", ".", "add", "(", "paragraph", ")", "header", "=", "m", ".", "Heading", "(", "tr", "(", "'Use intersection of hazard, exposure and this bounding box'", ")", ",", "*", "*", "INFO_STYLE", ")", "message", ".", "add", "(", "header", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'You can also choose the analysis area interactively by clicking '", "'\\'Use intersection of hazard, exposure and this bounding box\\'. This '", "'will allow you to click \\'Drag on map\\' which will temporarily hide '", "'this window and allow you to drag a rectangle on the map. 
After you '", "'have finished dragging the rectangle, this window will reappear with '", "'values in the North, South, East and West boxes. If the extents of '", "'the datasets are smaller than the user defined analysis area, the '", "'analysis area will be reduced to the extents of the datasets.'", ")", ")", "message", ".", "add", "(", "paragraph", ")", "paragraph", "=", "m", ".", "Paragraph", "(", "tr", "(", "'Alternatively, you can enter the coordinates directly into the '", "'N/S/E/W boxes once the \\'Use intersection of hazard, exposure and '", "'this bounding box\\' option is selected (using the same coordinate '", "'reference system, or CRS, as the map is currently set).'", ")", ")", "message", ".", "add", "(", "paragraph", ")", "return", "message" ]
48.392405
0.000256
def launchQueryForMode(self, query=None, mode=None):
    """
    Method that launches an i3Browser to collect data.

    Args:
    -----
        query: The query to be performed
        mode: The mode to be used to build the query.

    Return:
    -------
        A string containing the recovered data or None.
    """
    # Creating the query URL for that mode
    qURL = self.createURL(word=query, mode=mode)

    i3Browser = browser.Browser()

    try:
        # Check if it needs creds
        if self.needsCredentials[mode]:
            self._getAuthenticated(i3Browser, qURL)
            data = i3Browser.recoverURL(qURL)
        else:
            # Accessing the resources
            data = i3Browser.recoverURL(qURL)
        return data
    except KeyError:
        print(general.error("[*] '{}' is not a valid mode for this wrapper ({}).".format(mode, self.__class__.__name__)))
        return None
[ "def", "launchQueryForMode", "(", "self", ",", "query", "=", "None", ",", "mode", "=", "None", ")", ":", "# Creating the query URL for that mode", "qURL", "=", "self", ".", "createURL", "(", "word", "=", "query", ",", "mode", "=", "mode", ")", "i3Browser", "=", "browser", ".", "Browser", "(", ")", "try", ":", "# Check if it needs creds", "if", "self", ".", "needsCredentials", "[", "mode", "]", ":", "self", ".", "_getAuthenticated", "(", "i3Browser", ",", "qURL", ")", "data", "=", "i3Browser", ".", "recoverURL", "(", "qURL", ")", "else", ":", "# Accessing the resources", "data", "=", "i3Browser", ".", "recoverURL", "(", "qURL", ")", "return", "data", "except", "KeyError", ":", "print", "(", "general", ".", "error", "(", "\"[*] '{}' is not a valid mode for this wrapper ({}).\"", ".", "format", "(", "mode", ",", "self", ".", "__class__", ".", "__name__", ")", ")", ")", "return", "None" ]
32.566667
0.002982
def load_gmt(self, gene_list, gmt):
    """load gene set dict"""

    if isinstance(gmt, dict):
        genesets_dict = gmt
    elif isinstance(gmt, str):
        genesets_dict = self.parse_gmt(gmt)
    else:
        raise Exception("Error parsing gmt parameter for gene sets")

    subsets = list(genesets_dict.keys())
    self.n_genesets = len(subsets)
    for subset in subsets:
        subset_list = genesets_dict.get(subset)
        if isinstance(subset_list, set):
            subset_list = list(subset_list)
            genesets_dict[subset] = subset_list
        tag_indicator = np.in1d(gene_list, subset_list, assume_unique=True)
        tag_len = tag_indicator.sum()
        if self.min_size <= tag_len <= self.max_size:
            continue
        del genesets_dict[subset]

    filsets_num = len(subsets) - len(genesets_dict)
    self._logger.info("%04d gene_sets have been filtered out when max_size=%s and min_size=%s"%(filsets_num, self.max_size, self.min_size))

    if filsets_num == len(subsets):
        self._logger.error("No gene sets passed through filtering condition!!!, try new parameters again!\n" +\
                           "Note: check gene name, gmt file format, or filtering size." )
        sys.exit(0)

    self._gmtdct=genesets_dict
    return genesets_dict
[ "def", "load_gmt", "(", "self", ",", "gene_list", ",", "gmt", ")", ":", "if", "isinstance", "(", "gmt", ",", "dict", ")", ":", "genesets_dict", "=", "gmt", "elif", "isinstance", "(", "gmt", ",", "str", ")", ":", "genesets_dict", "=", "self", ".", "parse_gmt", "(", "gmt", ")", "else", ":", "raise", "Exception", "(", "\"Error parsing gmt parameter for gene sets\"", ")", "subsets", "=", "list", "(", "genesets_dict", ".", "keys", "(", ")", ")", "self", ".", "n_genesets", "=", "len", "(", "subsets", ")", "for", "subset", "in", "subsets", ":", "subset_list", "=", "genesets_dict", ".", "get", "(", "subset", ")", "if", "isinstance", "(", "subset_list", ",", "set", ")", ":", "subset_list", "=", "list", "(", "subset_list", ")", "genesets_dict", "[", "subset", "]", "=", "subset_list", "tag_indicator", "=", "np", ".", "in1d", "(", "gene_list", ",", "subset_list", ",", "assume_unique", "=", "True", ")", "tag_len", "=", "tag_indicator", ".", "sum", "(", ")", "if", "self", ".", "min_size", "<=", "tag_len", "<=", "self", ".", "max_size", ":", "continue", "del", "genesets_dict", "[", "subset", "]", "filsets_num", "=", "len", "(", "subsets", ")", "-", "len", "(", "genesets_dict", ")", "self", ".", "_logger", ".", "info", "(", "\"%04d gene_sets have been filtered out when max_size=%s and min_size=%s\"", "%", "(", "filsets_num", ",", "self", ".", "max_size", ",", "self", ".", "min_size", ")", ")", "if", "filsets_num", "==", "len", "(", "subsets", ")", ":", "self", ".", "_logger", ".", "error", "(", "\"No gene sets passed through filtering condition!!!, try new parameters again!\\n\"", "+", "\"Note: check gene name, gmt file format, or filtering size.\"", ")", "sys", ".", "exit", "(", "0", ")", "self", ".", "_gmtdct", "=", "genesets_dict", "return", "genesets_dict" ]
42.625
0.008602
def add_experiment(self, id, port, time, file_name, platform):
    '''set {key:value} pairs to self.experiments'''
    self.experiments[id] = {}
    self.experiments[id]['port'] = port
    self.experiments[id]['startTime'] = time
    self.experiments[id]['endTime'] = 'N/A'
    self.experiments[id]['status'] = 'INITIALIZED'
    self.experiments[id]['fileName'] = file_name
    self.experiments[id]['platform'] = platform
    self.write_file()
[ "def", "add_experiment", "(", "self", ",", "id", ",", "port", ",", "time", ",", "file_name", ",", "platform", ")", ":", "self", ".", "experiments", "[", "id", "]", "=", "{", "}", "self", ".", "experiments", "[", "id", "]", "[", "'port'", "]", "=", "port", "self", ".", "experiments", "[", "id", "]", "[", "'startTime'", "]", "=", "time", "self", ".", "experiments", "[", "id", "]", "[", "'endTime'", "]", "=", "'N/A'", "self", ".", "experiments", "[", "id", "]", "[", "'status'", "]", "=", "'INITIALIZED'", "self", ".", "experiments", "[", "id", "]", "[", "'fileName'", "]", "=", "file_name", "self", ".", "experiments", "[", "id", "]", "[", "'platform'", "]", "=", "platform", "self", ".", "write_file", "(", ")" ]
46.9
0.004184
def create(cls, name, domain_settings_ref=None, external_distance=110,
           inter_distance=110, intra_distance=110,
           redistribution_entry=None, default_metric=None, comment=None):
    """
    Create an OSPF Profile.

    If providing a list of redistribution entries, provide in the
    following dict format:

    {'enabled': boolean, 'metric_type': 'external_1' or 'external_2',
     'metric': 2, 'type': 'kernel'}

    Valid types for redistribution entries are: kernel, static, connected,
    bgp, and default_originate.

    You can also provide a 'filter' key with either an IPAccessList or
    RouteMap element to use for further access control on the redistributed
    route type. If metric_type is not provided, external_1 (E1) will be used.

    An example of a redistribution_entry would be::

        {u'enabled': True,
         u'metric': 123,
         u'metric_type': u'external_2',
         u'filter': RouteMap('myroutemap'),
         u'type': u'static'}

    :param str name: name of profile
    :param str,OSPFDomainSetting domain_settings_ref: OSPFDomainSetting
        element or href
    :param int external_distance: route metric (E1-E2)
    :param int inter_distance: routes learned from different areas (O IA)
    :param int intra_distance: routes learned from same area (O)
    :param list redistribution_entry: how to redistribute the OSPF routes.
    :raises CreateElementFailed: create failed with reason
    :rtype: OSPFProfile
    """
    json = {'name': name,
            'external_distance': external_distance,
            'inter_distance': inter_distance,
            'intra_distance': intra_distance,
            'default_metric': default_metric,
            'comment': comment}

    if redistribution_entry:
        json.update(redistribution_entry=
            _format_redist_entry(redistribution_entry))

    domain_settings_ref = element_resolver(domain_settings_ref) or \
        OSPFDomainSetting('Default OSPFv2 Domain Settings').href
    json.update(domain_settings_ref=domain_settings_ref)

    return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "domain_settings_ref", "=", "None", ",", "external_distance", "=", "110", ",", "inter_distance", "=", "110", ",", "intra_distance", "=", "110", ",", "redistribution_entry", "=", "None", ",", "default_metric", "=", "None", ",", "comment", "=", "None", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'external_distance'", ":", "external_distance", ",", "'inter_distance'", ":", "inter_distance", ",", "'intra_distance'", ":", "intra_distance", ",", "'default_metric'", ":", "default_metric", ",", "'comment'", ":", "comment", "}", "if", "redistribution_entry", ":", "json", ".", "update", "(", "redistribution_entry", "=", "_format_redist_entry", "(", "redistribution_entry", ")", ")", "domain_settings_ref", "=", "element_resolver", "(", "domain_settings_ref", ")", "or", "OSPFDomainSetting", "(", "'Default OSPFv2 Domain Settings'", ")", ".", "href", "json", ".", "update", "(", "domain_settings_ref", "=", "domain_settings_ref", ")", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
41.851852
0.008647
def clusters(points, radius):
    """
    Find clusters of points which have neighbours closer than radius

    Parameters
    ---------
    points : (n, d) float
        Points of dimension d
    radius : float
        Max distance between points in a cluster

    Returns
    ----------
    groups : (m,) sequence of int
        Indices of points in a cluster
    """
    from . import graph
    tree = cKDTree(points)

    # some versions return pairs as a set of tuples
    pairs = tree.query_pairs(r=radius, output_type='ndarray')

    # group connected components
    groups = graph.connected_components(pairs)

    return groups
[ "def", "clusters", "(", "points", ",", "radius", ")", ":", "from", ".", "import", "graph", "tree", "=", "cKDTree", "(", "points", ")", "# some versions return pairs as a set of tuples", "pairs", "=", "tree", ".", "query_pairs", "(", "r", "=", "radius", ",", "output_type", "=", "'ndarray'", ")", "# group connected components", "groups", "=", "graph", ".", "connected_components", "(", "pairs", ")", "return", "groups" ]
23.461538
0.001575
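A usage sketch for the `clusters` row above (it lives in trimesh, whose graph.connected_components does the grouping). Two well-separated random blobs should come back as two index groups; note that a point with no neighbour within radius never appears in any pair, so it is absent from the output:

import numpy as np

rng = np.random.default_rng(0)
pts = np.vstack([
    rng.normal(0.0, 0.01, (50, 3)),   # tight blob at the origin
    rng.normal(5.0, 0.01, (50, 3)),   # tight blob far away
])
groups = clusters(pts, radius=0.1)
print(len(groups))                     # expected: 2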
def prepend_name_prefix(func):
    """
    Decorator that wraps instance methods to prepend the instance's filename
    prefix to the beginning of the referenced filename. Must only be used on
    instance methods where the first parameter after `self` is `name` or a
    comparable parameter of a different name.
    """
    @wraps(func)
    def prepend_prefix(self, name, *args, **kwargs):
        name = self.name_prefix + name
        return func(self, name, *args, **kwargs)
    return prepend_prefix
[ "def", "prepend_name_prefix", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "prepend_prefix", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "name", "=", "self", ".", "name_prefix", "+", "name", "return", "func", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "prepend_prefix" ]
41.166667
0.00198
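A self-contained sketch of the decorator row above, applied to a made-up storage class:

class PrefixedStorage:
    name_prefix = 'uploads/'

    @prepend_name_prefix
    def url(self, name):
        return '/media/' + name   # stand-in for real storage logic

assert PrefixedStorage().url('report.pdf') == '/media/uploads/report.pdf'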
def relayIndextoCoord(self, i):
    """
    Map 1D cell index to a 2D coordinate

    :param i: integer 1D cell index
    :return: (x, y), a 2D coordinate
    """
    x = i % self.relayWidth
    y = i // self.relayWidth
    return x, y
[ "def", "relayIndextoCoord", "(", "self", ",", "i", ")", ":", "x", "=", "i", "%", "self", ".", "relayWidth", "y", "=", "i", "/", "self", ".", "relayWidth", "return", "x", ",", "y" ]
20.454545
0.004255
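The mapping is plain row-major indexing; a standalone restatement with a worked value (relay_width=4 is an arbitrary choice):

def relay_index_to_coord(i, relay_width):
    # column first, then row, for a row-major 1D layout
    return i % relay_width, i // relay_width

assert relay_index_to_coord(6, 4) == (2, 1)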
def get_joined_filters(self, filters):
        """
            Creates a new Filters instance with the active filters
            of both joined
        """
        retfilters = Filters(self.filter_converter, self.datamodel)
        retfilters.filters = self.filters + filters.filters
        retfilters.values = self.values + filters.values
        return retfilters
[ "def", "get_joined_filters", "(", "self", ",", "filters", ")", ":", "retfilters", "=", "Filters", "(", "self", ".", "filter_converter", ",", "self", ".", "datamodel", ")", "retfilters", ".", "filters", "=", "self", ".", "filters", "+", "filters", ".", "filters", "retfilters", ".", "values", "=", "self", ".", "values", "+", "filters", ".", "values", "return", "retfilters" ]
41.625
0.005882
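A hedged sketch of the call's effect; filters_a and filters_b stand for two already-populated Filters objects:

combined = filters_a.get_joined_filters(filters_b)
# combined holds both filter lists back to back:
# combined.filters == filters_a.filters + filters_b.filters
# combined.values  == filters_a.values  + filters_b.values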
def expand_config(d, dirs):
    """
    Expand configuration XDG variables, environment variables, and tildes.

    Parameters
    ----------
    d : dict
        config information
    dirs : appdirs.AppDirs
        XDG application mapping

    Notes
    -----
    *Environment variables* are expanded via :py:func:`os.path.expandvars`.
    So ``${PWD}`` would be replaced by the current PWD in the shell,
    ``${USER}`` would be the user running the app.

    *XDG variables* are expanded via :py:meth:`str.format`. These do not have
    a dollar sign. They are:

    - ``{user_cache_dir}``
    - ``{user_config_dir}``
    - ``{user_data_dir}``
    - ``{user_log_dir}``
    - ``{site_config_dir}``
    - ``{site_data_dir}``

    See Also
    --------
    os.path.expanduser, os.path.expandvars : Standard library functions for
        expanding variables. Same concept, used inside.
    """
    context = {
        'user_cache_dir': dirs.user_cache_dir,
        'user_config_dir': dirs.user_config_dir,
        'user_data_dir': dirs.user_data_dir,
        'user_log_dir': dirs.user_log_dir,
        'site_config_dir': dirs.site_config_dir,
        'site_data_dir': dirs.site_data_dir,
    }

    for k, v in d.items():
        if isinstance(v, dict):
            expand_config(v, dirs)
        if isinstance(v, string_types):
            d[k] = os.path.expanduser(os.path.expandvars(d[k]))
            d[k] = d[k].format(**context)
[ "def", "expand_config", "(", "d", ",", "dirs", ")", ":", "context", "=", "{", "'user_cache_dir'", ":", "dirs", ".", "user_cache_dir", ",", "'user_config_dir'", ":", "dirs", ".", "user_config_dir", ",", "'user_data_dir'", ":", "dirs", ".", "user_data_dir", ",", "'user_log_dir'", ":", "dirs", ".", "user_log_dir", ",", "'site_config_dir'", ":", "dirs", ".", "site_config_dir", ",", "'site_data_dir'", ":", "dirs", ".", "site_data_dir", ",", "}", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "expand_config", "(", "v", ",", "dirs", ")", "if", "isinstance", "(", "v", ",", "string_types", ")", ":", "d", "[", "k", "]", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "d", "[", "k", "]", ")", ")", "d", "[", "k", "]", "=", "d", "[", "k", "]", ".", "format", "(", "*", "*", "context", ")" ]
29.659574
0.001389
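A sketch of the expansion, assuming the appdirs package is installed and expand_config above is importable together with its os/string_types dependencies; the config keys are illustrative:

import appdirs

config = {'log_dir': '{user_log_dir}/sessions',
          'start_dir': '${HOME}/projects'}
expand_config(config, appdirs.AppDirs('myapp'))
# '${HOME}' comes from the environment, '{user_log_dir}' from the XDG map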
def spline(x, knots, p, i=0.0):
    """Evaluates the ith spline basis given by knots on points in x"""
    assert p + 1 < len(knots)
    return np.array([N(float(u), float(i), float(p), knots) for u in x])
[ "def", "spline", "(", "x", ",", "knots", ",", "p", ",", "i", "=", "0.0", ")", ":", "assert", "(", "p", "+", "1", "<", "len", "(", "knots", ")", ")", "return", "np", ".", "array", "(", "[", "N", "(", "float", "(", "u", ")", ",", "float", "(", "i", ")", ",", "float", "(", "p", ")", ",", "knots", ")", "for", "u", "in", "x", "]", ")" ]
48.25
0.040816
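A sketch of evaluating one basis function on a clamped knot vector; it assumes the recursive Cox-de Boor basis N(u, i, p, knots) referenced above is defined alongside, and it stays away from the right boundary where naive recursions divide by zero:

import numpy as np

knots = [0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0]
x = np.linspace(0.0, 2.5, 6)
y = spline(x, knots, p=2, i=0.0)  # values of the 0th degree-2 basis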
def get_properties_by_type(self, type, recursive=True, parent_path=""):
        """
        Returns a sorted list of fields that match the type.

        :param type: the type of the field "string","integer" or a list of types
        :param recursive: recurse into sub-objects
        :returns: a sorted list of fields that match the type

        """
        if parent_path:
            parent_path += "."

        if isinstance(type, str):
            if type == "*":
                type = set(MAPPING_NAME_TYPE.keys()) - set(["nested", "multi_field", "multifield"])
            else:
                type = [type]
        properties = []
        for prop in list(self.properties.values()):
            if prop.type in type:
                properties.append((parent_path + prop.name, prop))
                continue
            elif prop.type == "multi_field" and prop.name in prop.fields and prop.fields[prop.name].type in type:
                properties.append((parent_path + prop.name, prop))
                continue

            if not recursive:
                continue

            if prop.type in ["nested", "object"]:
                properties.extend(
                    prop.get_properties_by_type(type, recursive=recursive, parent_path=parent_path + prop.name))
        return sorted(properties)
[ "def", "get_properties_by_type", "(", "self", ",", "type", ",", "recursive", "=", "True", ",", "parent_path", "=", "\"\"", ")", ":", "if", "parent_path", ":", "parent_path", "+=", "\".\"", "if", "isinstance", "(", "type", ",", "str", ")", ":", "if", "type", "==", "\"*\"", ":", "type", "=", "set", "(", "MAPPING_NAME_TYPE", ".", "keys", "(", ")", ")", "-", "set", "(", "[", "\"nested\"", ",", "\"multi_field\"", ",", "\"multifield\"", "]", ")", "else", ":", "type", "=", "[", "type", "]", "properties", "=", "[", "]", "for", "prop", "in", "list", "(", "self", ".", "properties", ".", "values", "(", ")", ")", ":", "if", "prop", ".", "type", "in", "type", ":", "properties", ".", "append", "(", "(", "parent_path", "+", "prop", ".", "name", ",", "prop", ")", ")", "continue", "elif", "prop", ".", "type", "==", "\"multi_field\"", "and", "prop", ".", "name", "in", "prop", ".", "fields", "and", "prop", ".", "fields", "[", "prop", ".", "name", "]", ".", "type", "in", "type", ":", "properties", ".", "append", "(", "(", "parent_path", "+", "prop", ".", "name", ",", "prop", ")", ")", "continue", "if", "not", "recursive", ":", "continue", "if", "prop", ".", "type", "in", "[", "\"nested\"", ",", "\"object\"", "]", ":", "properties", ".", "extend", "(", "prop", ".", "get_properties_by_type", "(", "type", ",", "recursive", "=", "recursive", ",", "parent_path", "=", "parent_path", "+", "prop", ".", "name", ")", ")", "return", "sorted", "(", "properties", ")" ]
39.875
0.003826
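A hedged usage sketch; mapping stands for an already-built mapping object and the field types are illustrative:

# all string and integer fields, including those nested under objects
for path, prop in mapping.get_properties_by_type(['string', 'integer']):
    print(path, prop.type)

# '*' expands to every known type except the nested/multi_field wrappers
top_level = mapping.get_properties_by_type('*', recursive=False)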
def set_title(self, msg): """ Set first header line text """ self.s.move(0, 0) self.overwrite_line(msg, curses.A_REVERSE)
[ "def", "set_title", "(", "self", ",", "msg", ")", ":", "self", ".", "s", ".", "move", "(", "0", ",", "0", ")", "self", ".", "overwrite_line", "(", "msg", ",", "curses", ".", "A_REVERSE", ")" ]
35.5
0.013793
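A minimal curses sketch of the same move-and-overwrite pattern; it needs a real terminal, and overwrite_line is approximated with addstr plus padding:

import curses

def demo(stdscr):
    stdscr.move(0, 0)
    stdscr.addstr('my title'.ljust(curses.COLS - 1), curses.A_REVERSE)
    stdscr.refresh()
    stdscr.getkey()

curses.wrapper(demo)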
def greenlet_manage(self):
    """ This greenlet always runs in the background to update the current
        status in MongoDB every N seconds.
    """

    while True:
        try:
            self.manage()
        except Exception as e:  # pylint: disable=broad-except
            log.error("When reporting: %s" % e)
        finally:
            time.sleep(self.config["report_interval"])
[ "def", "greenlet_manage", "(", "self", ")", ":", "while", "True", ":", "try", ":", "self", ".", "manage", "(", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "log", ".", "error", "(", "\"When reporting: %s\"", "%", "e", ")", "finally", ":", "time", ".", "sleep", "(", "self", ".", "config", "[", "\"report_interval\"", "]", ")" ]
34.333333
0.004728
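The loop is a generic keep-alive/report pattern; a library-free restatement with report and interval as placeholders:

import logging
import time

def report_forever(report, interval=10.0):
    while True:
        try:
            report()
        except Exception as e:  # keep reporting even after failures
            logging.error("When reporting: %s", e)
        finally:
            time.sleep(interval)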
def _query_from_term(self, term, field_name, filter_type, is_not):
        """
        Uses arguments to construct a list of xapian.Query's.
        """
        if field_name != 'content' and field_name not in self.backend.column:
            raise InvalidIndexError('field "%s" not indexed' % field_name)

        # If it is an AutoQuery, it has no filters
        # or others, thus we short-circuit the procedure.
        if isinstance(term, AutoQuery):
            if field_name != 'content':
                query = '%s:%s' % (field_name, term.prepare(self))
            else:
                query = term.prepare(self)
            return [self.backend.parse_query(query)]
        query_list = []

        # Handle `ValuesListQuerySet`.
        if hasattr(term, 'values_list'):
            term = list(term)

        if field_name == 'content':
            # content is the generic search:
            # force no field_name search
            # and the field_type to be 'text'.
            field_name = None
            field_type = 'text'

            # we don't know what is the type(term), so we parse it.
            # Ideally this would not be required, but
            # some filters currently depend on the term to make decisions.
            term = _to_xapian_term(term)

            query_list.append(self._filter_contains(term, field_name, field_type, is_not))
            # when filter has no filter_type, haystack uses
            # filter_type = 'content'. Here we remove it
            # since the above query is already doing this
            if filter_type == 'content':
                filter_type = None
        else:
            # get the field_type from the backend
            field_type = self.backend.schema[self.backend.column[field_name]]['type']

        # private fields don't accept 'contains' or 'startswith'
        # since they have no meaning.
        if filter_type in ('contains', 'startswith') and field_name in (ID, DJANGO_ID, DJANGO_CT):
            filter_type = 'exact'

        if field_type == 'text':
            # we don't know what type "term" is, but we know we are searching as text
            # so we parse it like that.
            # Ideally this would not be required since _term_query does it, but
            # some filters currently depend on the term to make decisions.
            if isinstance(term, list):
                term = [_to_xapian_term(term) for term in term]
            else:
                term = _to_xapian_term(term)

        # todo: we should check that the filter is valid for this field_type or raise InvalidIndexError
        if filter_type == 'contains':
            query_list.append(self._filter_contains(term, field_name, field_type, is_not))
        elif filter_type in ('content', 'exact'):
            query_list.append(self._filter_exact(term, field_name, field_type, is_not))
        elif filter_type == 'in':
            query_list.append(self._filter_in(term, field_name, field_type, is_not))
        elif filter_type == 'startswith':
            query_list.append(self._filter_startswith(term, field_name, field_type, is_not))
        elif filter_type == 'endswith':
            raise NotImplementedError("The Xapian search backend doesn't support endswith queries.")
        elif filter_type == 'gt':
            query_list.append(self._filter_gt(term, field_name, field_type, is_not))
        elif filter_type == 'gte':
            query_list.append(self._filter_gte(term, field_name, field_type, is_not))
        elif filter_type == 'lt':
            query_list.append(self._filter_lt(term, field_name, field_type, is_not))
        elif filter_type == 'lte':
            query_list.append(self._filter_lte(term, field_name, field_type, is_not))
        elif filter_type == 'range':
            query_list.append(self._filter_range(term, field_name, field_type, is_not))
        return query_list
[ "def", "_query_from_term", "(", "self", ",", "term", ",", "field_name", ",", "filter_type", ",", "is_not", ")", ":", "if", "field_name", "!=", "'content'", "and", "field_name", "not", "in", "self", ".", "backend", ".", "column", ":", "raise", "InvalidIndexError", "(", "'field \"%s\" not indexed'", "%", "field_name", ")", "# It it is an AutoQuery, it has no filters", "# or others, thus we short-circuit the procedure.", "if", "isinstance", "(", "term", ",", "AutoQuery", ")", ":", "if", "field_name", "!=", "'content'", ":", "query", "=", "'%s:%s'", "%", "(", "field_name", ",", "term", ".", "prepare", "(", "self", ")", ")", "else", ":", "query", "=", "term", ".", "prepare", "(", "self", ")", "return", "[", "self", ".", "backend", ".", "parse_query", "(", "query", ")", "]", "query_list", "=", "[", "]", "# Handle `ValuesListQuerySet`.", "if", "hasattr", "(", "term", ",", "'values_list'", ")", ":", "term", "=", "list", "(", "term", ")", "if", "field_name", "==", "'content'", ":", "# content is the generic search:", "# force no field_name search", "# and the field_type to be 'text'.", "field_name", "=", "None", "field_type", "=", "'text'", "# we don't know what is the type(term), so we parse it.", "# Ideally this would not be required, but", "# some filters currently depend on the term to make decisions.", "term", "=", "_to_xapian_term", "(", "term", ")", "query_list", ".", "append", "(", "self", ".", "_filter_contains", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "# when filter has no filter_type, haystack uses", "# filter_type = 'content'. Here we remove it", "# since the above query is already doing this", "if", "filter_type", "==", "'content'", ":", "filter_type", "=", "None", "else", ":", "# get the field_type from the backend", "field_type", "=", "self", ".", "backend", ".", "schema", "[", "self", ".", "backend", ".", "column", "[", "field_name", "]", "]", "[", "'type'", "]", "# private fields don't accept 'contains' or 'startswith'", "# since they have no meaning.", "if", "filter_type", "in", "(", "'contains'", ",", "'startswith'", ")", "and", "field_name", "in", "(", "ID", ",", "DJANGO_ID", ",", "DJANGO_CT", ")", ":", "filter_type", "=", "'exact'", "if", "field_type", "==", "'text'", ":", "# we don't know what type \"term\" is, but we know we are searching as text", "# so we parse it like that.", "# Ideally this would not be required since _term_query does it, but", "# some filters currently depend on the term to make decisions.", "if", "isinstance", "(", "term", ",", "list", ")", ":", "term", "=", "[", "_to_xapian_term", "(", "term", ")", "for", "term", "in", "term", "]", "else", ":", "term", "=", "_to_xapian_term", "(", "term", ")", "# todo: we should check that the filter is valid for this field_type or raise InvalidIndexError", "if", "filter_type", "==", "'contains'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_contains", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "in", "(", "'content'", ",", "'exact'", ")", ":", "query_list", ".", "append", "(", "self", ".", "_filter_exact", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'in'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_in", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'startswith'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_startswith", "(", "term", ",", "field_name", ",", 
"field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'endswith'", ":", "raise", "NotImplementedError", "(", "\"The Xapian search backend doesn't support endswith queries.\"", ")", "elif", "filter_type", "==", "'gt'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_gt", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'gte'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_gte", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'lt'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_lt", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'lte'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_lte", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "elif", "filter_type", "==", "'range'", ":", "query_list", ".", "append", "(", "self", ".", "_filter_range", "(", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ")", "return", "query_list" ]
47.65
0.004369
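The long elif chain above is a straight dispatch on filter_type; a sketch of the same mapping as a table (handler names mirror the private methods; this is a refactoring idea, not the shipped code):

FILTER_DISPATCH = {
    'contains': '_filter_contains',
    'content': '_filter_exact',
    'exact': '_filter_exact',
    'in': '_filter_in',
    'startswith': '_filter_startswith',
    'gt': '_filter_gt',
    'gte': '_filter_gte',
    'lt': '_filter_lt',
    'lte': '_filter_lte',
    'range': '_filter_range',
}
# 'endswith' stays a special case: it raises NotImplementedError.

# inside the method, after `term` has been normalised:
#     handler = getattr(self, FILTER_DISPATCH[filter_type])
#     query_list.append(handler(term, field_name, field_type, is_not))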