Dataset schema (each row below lists the four columns in order: text, code_tokens, avg_line_len, score):

  column         dtype     observed range
  text           string    lengths 75 to 104k
  code_tokens    sequence  --
  avg_line_len   float64   7.91 to 980
  score          float64   0 to 0.18
def get_fastq_dir(fc_dir):
    """Retrieve the fastq directory within Solexa flowcell output.
    """
    full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*"))
    bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*"))
    machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    if os.path.exists(machine_bc):
        return os.path.join(machine_bc, "fastq")
    elif len(full_goat_bc) > 0:
        return os.path.join(full_goat_bc[0], "fastq")
    elif len(bustard_bc) > 0:
        return os.path.join(bustard_bc[0], "fastq")
    # otherwise assume we are in the fastq directory
    # XXX What other cases can we end up with here?
    else:
        return fc_dir
[ "def", "get_fastq_dir", "(", "fc_dir", ")", ":", "full_goat_bc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"*Firecrest*\"", ",", "\"Bustard*\"", ")", ")", "bustard_bc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"*Bustard*\"", ")", ")", "machine_bc", "=", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"BaseCalls\"", ")", "if", "os", ".", "path", ".", "exists", "(", "machine_bc", ")", ":", "return", "os", ".", "path", ".", "join", "(", "machine_bc", ",", "\"fastq\"", ")", "elif", "len", "(", "full_goat_bc", ")", ">", "0", ":", "return", "os", ".", "path", ".", "join", "(", "full_goat_bc", "[", "0", "]", ",", "\"fastq\"", ")", "elif", "len", "(", "bustard_bc", ")", ">", "0", ":", "return", "os", ".", "path", ".", "join", "(", "bustard_bc", "[", "0", "]", ",", "\"fastq\"", ")", "# otherwise assume we are in the fastq directory", "# XXX What other cases can we end up with here?", "else", ":", "return", "fc_dir" ]
45
0.004082
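A quick usage sketch for the row above, with a hypothetical flowcell layout built on the fly; it assumes the function and its `glob`/`os` imports are in scope:

import os, tempfile

fc_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(fc_dir, "Data", "Intensities", "BaseCalls"))
# The newest layout wins: Data/Intensities/BaseCalls is checked first.
print(get_fastq_dir(fc_dir))
# -> <fc_dir>/Data/Intensities/BaseCalls/fastq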
def _NewMatchSection(self, val):
    """Create a new configuration section for each match clause.

    Each match clause is added to the main config, and the criterion
    that will trigger the match is recorded, as is the configuration.

    Args:
      val: The value following the 'match' keyword.
    """
    section = {"criterion": val, "config": {}}
    self.matches.append(section)
    # Now add configuration items to config section of the match block.
    self.section = section["config"]
    # Switch to a match-specific processor on a new match_block.
    self.processor = self._ParseMatchGrp
[ "def", "_NewMatchSection", "(", "self", ",", "val", ")", ":", "section", "=", "{", "\"criterion\"", ":", "val", ",", "\"config\"", ":", "{", "}", "}", "self", ".", "matches", ".", "append", "(", "section", ")", "# Now add configuration items to config section of the match block.", "self", ".", "section", "=", "section", "[", "\"config\"", "]", "# Switch to a match-specific processor on a new match_block.", "self", ".", "processor", "=", "self", ".", "_ParseMatchGrp" ]
39.266667
0.001658
def _inline_activate_venv():
    """Built-in venv doesn't have activate_this.py, but doesn't need it anyway.

    As long as we find the correct executable, built-in venv sets up the
    environment automatically.

    See: https://bugs.python.org/issue21496#msg218455
    """
    components = []
    for name in ("bin", "Scripts"):
        bindir = os.path.join(project.virtualenv_location, name)
        if os.path.exists(bindir):
            components.append(bindir)
    if "PATH" in os.environ:
        components.append(os.environ["PATH"])
    os.environ["PATH"] = os.pathsep.join(components)
[ "def", "_inline_activate_venv", "(", ")", ":", "components", "=", "[", "]", "for", "name", "in", "(", "\"bin\"", ",", "\"Scripts\"", ")", ":", "bindir", "=", "os", ".", "path", ".", "join", "(", "project", ".", "virtualenv_location", ",", "name", ")", "if", "os", ".", "path", ".", "exists", "(", "bindir", ")", ":", "components", ".", "append", "(", "bindir", ")", "if", "\"PATH\"", "in", "os", ".", "environ", ":", "components", ".", "append", "(", "os", ".", "environ", "[", "\"PATH\"", "]", ")", "os", ".", "environ", "[", "\"PATH\"", "]", "=", "os", ".", "pathsep", ".", "join", "(", "components", ")" ]
36.4375
0.001672
def setupQuery(self, query, op, editor):
    """
    Returns the value from the editor.

    :param      op     | <str>
                editor | <QWidget> || None

    :return     <bool> | success
    """
    try:
        registry = self._operatorMap[nativestring(op)]
    except KeyError:
        return False

    op = registry.op
    value = registry.defaultValue

    if op is None:
        return False

    if editor is not None:
        value = self.editorValue(editor)

    query.setOperatorType(op)
    query.setValue(value)
    return True
[ "def", "setupQuery", "(", "self", ",", "query", ",", "op", ",", "editor", ")", ":", "try", ":", "registry", "=", "self", ".", "_operatorMap", "[", "nativestring", "(", "op", ")", "]", "except", "KeyError", ":", "return", "False", "op", "=", "registry", ".", "op", "value", "=", "registry", ".", "defaultValue", "if", "op", "is", "None", ":", "return", "False", "if", "editor", "is", "not", "None", ":", "value", "=", "self", ".", "editorValue", "(", "editor", ")", "query", ".", "setOperatorType", "(", "op", ")", "query", ".", "setValue", "(", "value", ")", "return", "True" ]
26.038462
0.011396
def update_subnet(subnet, name, profile=None):
    '''
    Updates a subnet

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.update_subnet subnet-name new-subnet-name

    :param subnet: ID or name of subnet to update
    :param name: Name of this subnet
    :param profile: Profile to build on (Optional)
    :return: Value of updated subnet information
    '''
    conn = _auth(profile)
    return conn.update_subnet(subnet, name)
[ "def", "update_subnet", "(", "subnet", ",", "name", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "update_subnet", "(", "subnet", ",", "name", ")" ]
25.705882
0.002208
def create_transcripts_xml(video_id, video_el, resource_fs, static_dir):
    """
    Creates xml for transcripts.
    For each transcript element, an associated transcript file is also created in course OLX.

    Arguments:
        video_id (str): Video id of the video.
        video_el (Element): lxml Element object
        static_dir (str): The Directory to store transcript file.
        resource_fs (SubFS): The file system to store transcripts.

    Returns:
        lxml Element object with transcripts information
    """
    video_transcripts = VideoTranscript.objects.filter(video__edx_video_id=video_id).order_by('language_code')
    # create transcripts node only when we have transcripts for a video
    if video_transcripts.exists():
        transcripts_el = SubElement(video_el, 'transcripts')

    transcript_files_map = {}
    for video_transcript in video_transcripts:
        language_code = video_transcript.language_code
        file_format = video_transcript.file_format

        try:
            transcript_filename = create_transcript_file(
                video_id=video_id,
                language_code=language_code,
                file_format=file_format,
                resource_fs=resource_fs.delegate_fs(),
                static_dir=combine(u'course', static_dir)  # File system should not start from /draft directory.
            )
            transcript_files_map[language_code] = transcript_filename
        except TranscriptsGenerationException:
            # we don't want to halt export in this case, just log and move to the next transcript.
            logger.exception('[VAL] Error while generating "%s" transcript for video["%s"].', language_code, video_id)
            continue

        SubElement(
            transcripts_el,
            'transcript',
            {
                'language_code': language_code,
                'file_format': Transcript.SRT,
                'provider': video_transcript.provider,
            }
        )

    return dict(xml=video_el, transcripts=transcript_files_map)
[ "def", "create_transcripts_xml", "(", "video_id", ",", "video_el", ",", "resource_fs", ",", "static_dir", ")", ":", "video_transcripts", "=", "VideoTranscript", ".", "objects", ".", "filter", "(", "video__edx_video_id", "=", "video_id", ")", ".", "order_by", "(", "'language_code'", ")", "# create transcripts node only when we have transcripts for a video", "if", "video_transcripts", ".", "exists", "(", ")", ":", "transcripts_el", "=", "SubElement", "(", "video_el", ",", "'transcripts'", ")", "transcript_files_map", "=", "{", "}", "for", "video_transcript", "in", "video_transcripts", ":", "language_code", "=", "video_transcript", ".", "language_code", "file_format", "=", "video_transcript", ".", "file_format", "try", ":", "transcript_filename", "=", "create_transcript_file", "(", "video_id", "=", "video_id", ",", "language_code", "=", "language_code", ",", "file_format", "=", "file_format", ",", "resource_fs", "=", "resource_fs", ".", "delegate_fs", "(", ")", ",", "static_dir", "=", "combine", "(", "u'course'", ",", "static_dir", ")", "# File system should not start from /draft directory.", ")", "transcript_files_map", "[", "language_code", "]", "=", "transcript_filename", "except", "TranscriptsGenerationException", ":", "# we don't want to halt export in this case, just log and move to the next transcript.", "logger", ".", "exception", "(", "'[VAL] Error while generating \"%s\" transcript for video[\"%s\"].'", ",", "language_code", ",", "video_id", ")", "continue", "SubElement", "(", "transcripts_el", ",", "'transcript'", ",", "{", "'language_code'", ":", "language_code", ",", "'file_format'", ":", "Transcript", ".", "SRT", ",", "'provider'", ":", "video_transcript", ".", "provider", ",", "}", ")", "return", "dict", "(", "xml", "=", "video_el", ",", "transcripts", "=", "transcript_files_map", ")" ]
40.897959
0.002924
def doGetAttrib(self, attribId):
    """get an attribute

    attribId: Identifier for the attribute to get

    return: response are the attribute byte array
    """
    CardConnection.doGetAttrib(self, attribId)
    hresult, response = SCardGetAttrib(self.hcard, attribId)
    if hresult != 0:
        raise SmartcardException(
            'Failed to getAttrib ' + SCardGetErrorMessage(hresult))
    return response
[ "def", "doGetAttrib", "(", "self", ",", "attribId", ")", ":", "CardConnection", ".", "doGetAttrib", "(", "self", ",", "attribId", ")", "hresult", ",", "response", "=", "SCardGetAttrib", "(", "self", ".", "hcard", ",", "attribId", ")", "if", "hresult", "!=", "0", ":", "raise", "SmartcardException", "(", "'Failed to getAttrib '", "+", "SCardGetErrorMessage", "(", "hresult", ")", ")", "return", "response" ]
34.384615
0.004357
def isIdentity(M, tol=1e-06):
    """Check if vtkMatrix4x4 is Identity."""
    for i in [0, 1, 2, 3]:
        for j in [0, 1, 2, 3]:
            e = M.GetElement(i, j)
            if i == j:
                if np.abs(e - 1) > tol:
                    return False
            elif np.abs(e) > tol:
                return False
    return True
[ "def", "isIdentity", "(", "M", ",", "tol", "=", "1e-06", ")", ":", "for", "i", "in", "[", "0", ",", "1", ",", "2", ",", "3", "]", ":", "for", "j", "in", "[", "0", ",", "1", ",", "2", ",", "3", "]", ":", "e", "=", "M", ".", "GetElement", "(", "i", ",", "j", ")", "if", "i", "==", "j", ":", "if", "np", ".", "abs", "(", "e", "-", "1", ")", ">", "tol", ":", "return", "False", "elif", "np", ".", "abs", "(", "e", ")", ">", "tol", ":", "return", "False", "return", "True" ]
30.181818
0.002924
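A minimal check of the function above using a stub in place of a real `vtkMatrix4x4`; it relies only on the object exposing `GetElement(i, j)`, so a hypothetical NumPy-backed stand-in suffices:

import numpy as np

class StubMat4:  # hypothetical stand-in for vtkMatrix4x4
    def __init__(self, arr):
        self._a = np.asarray(arr)
    def GetElement(self, i, j):
        return self._a[i, j]

print(isIdentity(StubMat4(np.eye(4))))        # True
print(isIdentity(StubMat4(2.0 * np.eye(4))))  # False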
def _init_journal(self, permissive=True):
    """Add the initialization lines to the journal.

    By default adds JrnObj variable and timestamp to the journal contents.

    Args:
        permissive (bool): if True most errors in journal will not
                           cause Revit to stop journal execution.
                           Some still do.
    """
    nowstamp = datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")[:-3]
    self._add_entry(templates.INIT
                    .format(time_stamp=nowstamp))
    if permissive:
        self._add_entry(templates.INIT_DEBUG)
[ "def", "_init_journal", "(", "self", ",", "permissive", "=", "True", ")", ":", "nowstamp", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%d-%b-%Y %H:%M:%S.%f\"", ")", "[", ":", "-", "3", "]", "self", ".", "_add_entry", "(", "templates", ".", "INIT", ".", "format", "(", "time_stamp", "=", "nowstamp", ")", ")", "if", "permissive", ":", "self", ".", "_add_entry", "(", "templates", ".", "INIT_DEBUG", ")" ]
41.6
0.003135
def _configure_buffer_sizes():
    """Set up module globals controlling buffer sizes"""
    global PIPE_BUF_BYTES
    global OS_PIPE_SZ

    PIPE_BUF_BYTES = 65536
    OS_PIPE_SZ = None

    # Teach the 'fcntl' module about 'F_SETPIPE_SZ', which is a Linux-ism,
    # but a good one that can drastically reduce the number of syscalls
    # when dealing with high-throughput pipes.
    if not hasattr(fcntl, 'F_SETPIPE_SZ'):
        import platform
        if platform.system() == 'Linux':
            fcntl.F_SETPIPE_SZ = 1031

    # If Linux procfs (or something that looks like it) exposes its
    # maximum F_SETPIPE_SZ, adjust the default buffer sizes.
    try:
        with open('/proc/sys/fs/pipe-max-size', 'r') as f:
            # Figure out OS pipe size, but in case it is unusually large
            # or small restrain it to sensible values.
            OS_PIPE_SZ = min(int(f.read()), 1024 * 1024)
            PIPE_BUF_BYTES = max(OS_PIPE_SZ, PIPE_BUF_BYTES)
    except Exception:
        pass
[ "def", "_configure_buffer_sizes", "(", ")", ":", "global", "PIPE_BUF_BYTES", "global", "OS_PIPE_SZ", "PIPE_BUF_BYTES", "=", "65536", "OS_PIPE_SZ", "=", "None", "# Teach the 'fcntl' module about 'F_SETPIPE_SZ', which is a Linux-ism,", "# but a good one that can drastically reduce the number of syscalls", "# when dealing with high-throughput pipes.", "if", "not", "hasattr", "(", "fcntl", ",", "'F_SETPIPE_SZ'", ")", ":", "import", "platform", "if", "platform", ".", "system", "(", ")", "==", "'Linux'", ":", "fcntl", ".", "F_SETPIPE_SZ", "=", "1031", "# If Linux procfs (or something that looks like it) exposes its", "# maximum F_SETPIPE_SZ, adjust the default buffer sizes.", "try", ":", "with", "open", "(", "'/proc/sys/fs/pipe-max-size'", ",", "'r'", ")", "as", "f", ":", "# Figure out OS pipe size, but in case it is unusually large", "# or small restrain it to sensible values.", "OS_PIPE_SZ", "=", "min", "(", "int", "(", "f", ".", "read", "(", ")", ")", ",", "1024", "*", "1024", ")", "PIPE_BUF_BYTES", "=", "max", "(", "OS_PIPE_SZ", ",", "PIPE_BUF_BYTES", ")", "except", "Exception", ":", "pass" ]
36.296296
0.000994
def sdiv(a, b):
    """Safe division: if a == b == 0, sdiv(a, b) == 1"""
    if len(a) != len(b):
        raise ValueError('Argument a and b does not have the same length')
    idx = 0
    ret = matrix(0, (len(a), 1), 'd')
    for m, n in zip(a, b):
        try:
            ret[idx] = m / n
        except ZeroDivisionError:
            ret[idx] = 1
        finally:
            idx += 1
    return ret
[ "def", "sdiv", "(", "a", ",", "b", ")", ":", "if", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ":", "raise", "ValueError", "(", "'Argument a and b does not have the same length'", ")", "idx", "=", "0", "ret", "=", "matrix", "(", "0", ",", "(", "len", "(", "a", ")", ",", "1", ")", ",", "'d'", ")", "for", "m", ",", "n", "in", "zip", "(", "a", ",", "b", ")", ":", "try", ":", "ret", "[", "idx", "]", "=", "m", "/", "n", "except", "ZeroDivisionError", ":", "ret", "[", "idx", "]", "=", "1", "finally", ":", "idx", "+=", "1", "return", "ret" ]
27.857143
0.002481
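The body above implies `matrix` is `cvxopt.matrix`; under that assumption, a short run showing that 0/0 maps to 1 as the docstring promises:

from cvxopt import matrix

a = matrix([1.0, 0.0, 6.0])
b = matrix([2.0, 0.0, 3.0])
print(list(sdiv(a, b)))  # [0.5, 1.0, 2.0]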
def _is_potential_multi_index(columns):
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.

    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex

    Returns
    -------
    boolean : Whether or not columns could become a MultiIndex
    """
    return (len(columns) and not isinstance(columns, MultiIndex) and
            all(isinstance(c, tuple) for c in columns))
[ "def", "_is_potential_multi_index", "(", "columns", ")", ":", "return", "(", "len", "(", "columns", ")", "and", "not", "isinstance", "(", "columns", ",", "MultiIndex", ")", "and", "all", "(", "isinstance", "(", "c", ",", "tuple", ")", "for", "c", "in", "columns", ")", ")" ]
29.3125
0.002066
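A quick illustration, assuming the function and the pandas import share a namespace (the function expects the name `MultiIndex` to be in scope):

from pandas import MultiIndex

print(bool(_is_potential_multi_index([('a', 1), ('b', 2)])))  # True
print(bool(_is_potential_multi_index(['a', 'b'])))            # False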
def feed(self, data):
    """
    Add new incoming data to buffer and try to process
    """
    self.buffer += data
    while len(self.buffer) >= 6:
        self.next_packet()
[ "def", "feed", "(", "self", ",", "data", ")", ":", "self", ".", "buffer", "+=", "data", "while", "len", "(", "self", ".", "buffer", ")", ">=", "6", ":", "self", ".", "next_packet", "(", ")" ]
27.714286
0.01
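A tiny harness showing the 6-byte framing contract, with a hypothetical class standing in for whatever protocol object owns this method (assumes `feed` above was pasted as a module-level function):

class Reader:  # hypothetical host class for the method above
    def __init__(self):
        self.buffer = b''
        self.packets = []
    def next_packet(self):
        # consume exactly one 6-byte frame from the buffer
        self.packets.append(self.buffer[:6])
        self.buffer = self.buffer[6:]
    feed = feed  # reuse the row's function as a method

r = Reader()
r.feed(b'abcdefgh')
print(r.packets, r.buffer)  # [b'abcdef'] b'gh'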
def get_activity_ids_by_objective_bank(self, objective_bank_id):
    """Gets the list of ``Activity`` ``Ids`` associated with an ``ObjectiveBank``.

    arg:    objective_bank_id (osid.id.Id): ``Id`` of the ``ObjectiveBank``
    return: (osid.id.IdList) - list of related activity ``Ids``
    raise:  NotFound - ``objective_bank_id`` is not found
    raise:  NullArgument - ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    id_list = []
    for activity in self.get_activities_by_objective_bank(objective_bank_id):
        id_list.append(activity.get_id())
    return IdList(id_list)
[ "def", "get_activity_ids_by_objective_bank", "(", "self", ",", "objective_bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_resource_ids_by_bin", "id_list", "=", "[", "]", "for", "activity", "in", "self", ".", "get_activities_by_objective_bank", "(", "objective_bank_id", ")", ":", "id_list", ".", "append", "(", "activity", ".", "get_id", "(", ")", ")", "return", "IdList", "(", "id_list", ")" ]
48
0.004301
def removeLastRow(self):
    """Removes the last track"""
    lastrow = self._segments.pop(len(self._segments)-1)
    if len(lastrow) > 0:
        raise Exception("Attempt to remove non-empty stimulus track")
[ "def", "removeLastRow", "(", "self", ")", ":", "lastrow", "=", "self", ".", "_segments", ".", "pop", "(", "len", "(", "self", ".", "_segments", ")", "-", "1", ")", "if", "len", "(", "lastrow", ")", ">", "0", ":", "raise", "Exception", "(", "\"Attempt to remove non-empty stimulus track\"", ")" ]
44
0.008929
def runOnExecutor(self, *commands, oper=ACCEPT, defer_shell_expansion=False):
    """
    This runs in the executor of the current scope. You cannot
    magically back out since there are no guarantees that ssh keys
    will be in place (they shouldn't be).
    """
    return self.makeOutput(EXE, commands, oper=oper,
                           defer_shell_expansion=defer_shell_expansion)
[ "def", "runOnExecutor", "(", "self", ",", "*", "commands", ",", "oper", "=", "ACCEPT", ",", "defer_shell_expansion", "=", "False", ")", ":", "return", "self", ".", "makeOutput", "(", "EXE", ",", "commands", ",", "oper", "=", "oper", ",", "defer_shell_expansion", "=", "defer_shell_expansion", ")" ]
74.8
0.007937
def saveAsPickleFile(self, path, batchSize=10):
    """
    Save this RDD as a SequenceFile of serialized objects. The serializer
    used is L{pyspark.serializers.PickleSerializer}, default batch size
    is 10.

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
    >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
    ['1', '2', 'rdd', 'spark']
    """
    if batchSize == 0:
        ser = AutoBatchedSerializer(PickleSerializer())
    else:
        ser = BatchedSerializer(PickleSerializer(), batchSize)
    self._reserialize(ser)._jrdd.saveAsObjectFile(path)
[ "def", "saveAsPickleFile", "(", "self", ",", "path", ",", "batchSize", "=", "10", ")", ":", "if", "batchSize", "==", "0", ":", "ser", "=", "AutoBatchedSerializer", "(", "PickleSerializer", "(", ")", ")", "else", ":", "ser", "=", "BatchedSerializer", "(", "PickleSerializer", "(", ")", ",", "batchSize", ")", "self", ".", "_reserialize", "(", "ser", ")", ".", "_jrdd", ".", "saveAsObjectFile", "(", "path", ")" ]
42.647059
0.004049
def main(args=None):
    """The entry point of the application."""
    if args is None:
        args = sys.argv[1:]

    # Parse command-line
    args = docopt(__doc__, argv=args)

    # Parse arguments
    path, address = resolve(args['<path>'], args['<address>'])
    host, port = split_address(address)

    # Validate arguments
    if address and not (host or port):
        print 'Error: Invalid address', repr(address)
        return

    # Default values
    if path is None:
        path = '.'
    if host is None:
        host = 'localhost'
    if port is None:
        port = 5000

    # Run server
    print ' * Serving %s on http://%s:%s/' % (path, host, port)
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "# Parse command-line", "args", "=", "docopt", "(", "__doc__", ",", "argv", "=", "args", ")", "# Parse arguments", "path", ",", "address", "=", "resolve", "(", "args", "[", "'<path>'", "]", ",", "args", "[", "'<address>'", "]", ")", "host", ",", "port", "=", "split_address", "(", "address", ")", "# Validate arguments", "if", "address", "and", "not", "(", "host", "or", "port", ")", ":", "print", "'Error: Invalid address'", ",", "repr", "(", "address", ")", "return", "# Default values", "if", "path", "is", "None", ":", "path", "=", "'.'", "if", "host", "is", "None", ":", "host", "=", "'localhost'", "if", "port", "is", "None", ":", "port", "=", "5000", "# Run server", "print", "' * Serving %s on http://%s:%s/'", "%", "(", "path", ",", "host", ",", "port", ")" ]
23.925926
0.001488
def register_all_shape_checker(shape_checker_function, arg_types, exclude=(), ignore_existing=False):
    """Register a shape checker for all combinations of given types.

    This is a convenience shorthand for calling register_shape_checker when
    registering shape checkers for multiple types that can be interchanged
    for the purpose of shape checking.

    Args:
        shape_checker_function: A shape checker, see register_shape_checker.
        arg_types: List of Python type objects. The shape checker will be
            registered for all pairs of these types.
        exclude: Optional list of type tuples to exclude.
        ignore_existing: Boolean. Whether to silently skip argument pairs
            that were already registered.
    """
    for t1 in arg_types:
        for t2 in arg_types:
            if (t1, t2) in exclude:
                continue
            if ignore_existing and (t1, t2) in shape_checkers:
                continue
            register_shape_checker(t1, t2, shape_checker_function)
[ "def", "register_all_shape_checker", "(", "shape_checker_function", ",", "arg_types", ",", "exclude", "=", "(", ")", ",", "ignore_existing", "=", "False", ")", ":", "for", "t1", "in", "arg_types", ":", "for", "t2", "in", "arg_types", ":", "if", "(", "t1", ",", "t2", ")", "in", "exclude", ":", "continue", "if", "ignore_existing", "and", "(", "t1", ",", "t2", ")", "in", "shape_checkers", ":", "continue", "register_shape_checker", "(", "t1", ",", "t2", ",", "shape_checker_function", ")" ]
40.32
0.006783
def generate_kmers(seq, k=4):
    """Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string

    Not efficient for large k and long strings.
    Doesn't form substrings that are shorter than k, only exactly k-mers

    Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing.

    jellyfish is a C implementation of k-mer counting

    If seq is a string generate a sequence of k-mer string
    If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings
    If seq is a sequence of sequences of strings generate a sequence of sequence of generators ...

    Default k = 4 because that's the length of a gene base-pair?

    >>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC'))
    'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC'
    """
    if isinstance(seq, basestring):
        for i in range(len(seq) - k + 1):
            yield seq[i:i + k]
    elif isinstance(seq, (int, float, Decimal)):
        for s in generate_kmers(str(seq)):
            yield s
    else:
        for s in seq:
            yield generate_kmers(s, k)
[ "def", "generate_kmers", "(", "seq", ",", "k", "=", "4", ")", ":", "if", "isinstance", "(", "seq", ",", "basestring", ")", ":", "for", "i", "in", "range", "(", "len", "(", "seq", ")", "-", "k", "+", "1", ")", ":", "yield", "seq", "[", "i", ":", "i", "+", "k", "]", "elif", "isinstance", "(", "seq", ",", "(", "int", ",", "float", ",", "Decimal", ")", ")", ":", "for", "s", "in", "generate_kmers", "(", "str", "(", "seq", ")", ")", ":", "yield", "s", "else", ":", "for", "s", "in", "seq", ":", "yield", "generate_kmers", "(", "s", ",", "k", ")" ]
41.714286
0.005021
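The `basestring` check marks the row above as Python 2 code; under Python 3 a one-line shim makes it runnable, assuming the generator is defined in the same module as the shim:

basestring = str  # Python 3 shim for the Python 2 name
print(' '.join(generate_kmers('GATTACA', k=3)))  # GAT ATT TTA TAC ACA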
def make_exception_message(exc):
    """
    An exception is passed in and this function
    returns the proper string depending on the result
    so it is readable enough.
    """
    if str(exc):
        return '%s: %s\n' % (exc.__class__.__name__, exc)
    else:
        return '%s\n' % (exc.__class__.__name__)
[ "def", "make_exception_message", "(", "exc", ")", ":", "if", "str", "(", "exc", ")", ":", "return", "'%s: %s\\n'", "%", "(", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "else", ":", "return", "'%s\\n'", "%", "(", "exc", ".", "__class__", ".", "__name__", ")" ]
30.5
0.003185
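A sanity check of the formatter above with a real exception instance (pure stdlib, assuming the function is in scope):

try:
    int('not a number')
except ValueError as exc:
    print(make_exception_message(exc))
# ValueError: invalid literal for int() with base 10: 'not a number'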
def _observers_for_notification(self, ntype, sender):
    """Find all registered observers that should receive notification"""
    keys = (
        (ntype, sender),
        (ntype, None),
        (None, sender),
        (None, None)
    )
    obs = set()
    for k in keys:
        obs.update(self.observers.get(k, set()))
    return obs
[ "def", "_observers_for_notification", "(", "self", ",", "ntype", ",", "sender", ")", ":", "keys", "=", "(", "(", "ntype", ",", "sender", ")", ",", "(", "ntype", ",", "None", ")", ",", "(", "None", ",", "sender", ")", ",", "(", "None", ",", "None", ")", ")", "obs", "=", "set", "(", ")", "for", "k", "in", "keys", ":", "obs", ".", "update", "(", "self", ".", "observers", ".", "get", "(", "k", ",", "set", "(", ")", ")", ")", "return", "obs" ]
26.866667
0.009592
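A small harness (hypothetical notification center) exercising the four lookup keys, from most to least specific; it assumes the function above was defined at module level:

class Center:  # hypothetical host object with an `observers` dict
    def __init__(self):
        self.observers = {
            ('ping', 'svc'): {'exact'},
            ('ping', None): {'any_sender'},
            (None, None): {'wildcard'},
        }
    _observers_for_notification = _observers_for_notification

print(sorted(Center()._observers_for_notification('ping', 'svc')))
# ['any_sender', 'exact', 'wildcard']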
def clip_datetime(dt, tz=DEFAULT_TZ, is_dst=None):
    """Limit a datetime to a valid range for datetime, datetime64, and Timestamp objects

    >>> from datetime import timedelta
    >>> clip_datetime(MAX_DATETIME + timedelta(100)) == pd.Timestamp(MAX_DATETIME, tz='utc') == MAX_TIMESTAMP
    True
    >>> MAX_TIMESTAMP
    Timestamp('2262-04-11 23:47:16.854775+0000', tz='UTC')
    """
    if isinstance(dt, datetime.datetime):
        # TODO: this gives up a day of datetime range due to assumptions about timezone
        #       make MIN/MAX naive and replace dt.replace(tz=None) before comparison
        #       set it back when done
        dt = make_tz_aware(dt, tz=tz, is_dst=is_dst)
        try:
            return pd.Timestamp(dt)
        except (ValueError, AttributeError):
            pass
        if dt > MAX_DATETIME:
            return MAX_TIMESTAMP
        elif dt < MIN_DATETIME:
            return MIN_TIMESTAMP
        return NAT
    return dt
[ "def", "clip_datetime", "(", "dt", ",", "tz", "=", "DEFAULT_TZ", ",", "is_dst", "=", "None", ")", ":", "if", "isinstance", "(", "dt", ",", "datetime", ".", "datetime", ")", ":", "# TODO: this gives up a day of datetime range due to assumptions about timezone", "# make MIN/MAX naive and replace dt.replace(tz=None) before comparison", "# set it back when done", "dt", "=", "make_tz_aware", "(", "dt", ",", "tz", "=", "tz", ",", "is_dst", "=", "is_dst", ")", "try", ":", "return", "pd", ".", "Timestamp", "(", "dt", ")", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "pass", "if", "dt", ">", "MAX_DATETIME", ":", "return", "MAX_TIMESTAMP", "elif", "dt", "<", "MIN_DATETIME", ":", "return", "MIN_TIMESTAMP", "return", "NAT", "return", "dt" ]
40.956522
0.005187
def default():
    """Default configuration for PPO."""
    # General
    algorithm = algorithms.PPO
    num_agents = 30
    eval_episodes = 30
    use_gpu = False
    # Environment
    normalize_ranges = True
    # Network
    network = networks.feed_forward_gaussian
    weight_summaries = dict(
        all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
    policy_layers = 200, 100
    value_layers = 200, 100
    init_output_factor = 0.1
    init_std = 0.35
    # Optimization
    update_every = 30
    update_epochs = 25
    optimizer = tf.train.AdamOptimizer
    learning_rate = 1e-4
    # Losses
    discount = 0.995
    kl_target = 1e-2
    kl_cutoff_factor = 2
    kl_cutoff_coef = 1000
    kl_init_penalty = 1
    return locals()
[ "def", "default", "(", ")", ":", "# General", "algorithm", "=", "algorithms", ".", "PPO", "num_agents", "=", "30", "eval_episodes", "=", "30", "use_gpu", "=", "False", "# Environment", "normalize_ranges", "=", "True", "# Network", "network", "=", "networks", ".", "feed_forward_gaussian", "weight_summaries", "=", "dict", "(", "all", "=", "r'.*'", ",", "policy", "=", "r'.*/policy/.*'", ",", "value", "=", "r'.*/value/.*'", ")", "policy_layers", "=", "200", ",", "100", "value_layers", "=", "200", ",", "100", "init_output_factor", "=", "0.1", "init_std", "=", "0.35", "# Optimization", "update_every", "=", "30", "update_epochs", "=", "25", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "learning_rate", "=", "1e-4", "# Losses", "discount", "=", "0.995", "kl_target", "=", "1e-2", "kl_cutoff_factor", "=", "2", "kl_cutoff_coef", "=", "1000", "kl_init_penalty", "=", "1", "return", "locals", "(", ")" ]
22.827586
0.04058
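The `return locals()` idiom above turns every local assignment into a config dict entry; the pattern in isolation, stand-alone and without the TF/agents imports the real function needs:

def tiny_config():
    learning_rate = 1e-4  # each local becomes a dict key
    batch_size = 128
    return locals()

print(tiny_config())  # {'learning_rate': 0.0001, 'batch_size': 128}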
def inject_scheme(self, b16_scheme):
    """Inject string $b16_scheme into self.content."""
    # correctly formatted start and end of block should have already been
    # ascertained by _get_temp
    content_lines = self.content.splitlines()
    b16_scheme_lines = b16_scheme.splitlines()
    start_line = None
    for num, line in enumerate(content_lines):
        if not start_line:
            match = TEMP_NEEDLE.match(line)
            if match:
                start_line = num + 1
        else:
            match = TEMP_END_NEEDLE.match(line)
            if match:
                end_line = num

    # put lines back together
    new_content_lines = (content_lines[0:start_line]
                         + b16_scheme_lines
                         + content_lines[end_line:])
    self.content = '\n'.join(new_content_lines)
[ "def", "inject_scheme", "(", "self", ",", "b16_scheme", ")", ":", "# correctly formatted start and end of block should have already been", "# ascertained by _get_temp", "content_lines", "=", "self", ".", "content", ".", "splitlines", "(", ")", "b16_scheme_lines", "=", "b16_scheme", ".", "splitlines", "(", ")", "start_line", "=", "None", "for", "num", ",", "line", "in", "enumerate", "(", "content_lines", ")", ":", "if", "not", "start_line", ":", "match", "=", "TEMP_NEEDLE", ".", "match", "(", "line", ")", "if", "match", ":", "start_line", "=", "num", "+", "1", "else", ":", "match", "=", "TEMP_END_NEEDLE", ".", "match", "(", "line", ")", "if", "match", ":", "end_line", "=", "num", "# put lines back together", "new_content_lines", "=", "(", "content_lines", "[", "0", ":", "start_line", "]", "+", "b16_scheme_lines", "+", "content_lines", "[", "end_line", ":", "]", ")", "self", ".", "content", "=", "'\\n'", ".", "join", "(", "new_content_lines", ")" ]
40.5
0.002193
def as_representer(resource, content_type):
    """
    Adapts the given resource and content type to a representer.

    :param resource: resource to adapt.
    :param str content_type: content (MIME) type to obtain a representer for.
    """
    reg = get_current_registry()
    rpr_reg = reg.queryUtility(IRepresenterRegistry)
    return rpr_reg.create(type(resource), content_type)
[ "def", "as_representer", "(", "resource", ",", "content_type", ")", ":", "reg", "=", "get_current_registry", "(", ")", "rpr_reg", "=", "reg", ".", "queryUtility", "(", "IRepresenterRegistry", ")", "return", "rpr_reg", ".", "create", "(", "type", "(", "resource", ")", ",", "content_type", ")" ]
37.6
0.002597
def decimate_mean(self, a, maxpoints, **kwargs):
    """Return data *a* mean-decimated on *maxpoints*.

    Histograms each column into *maxpoints* bins and calculates the
    weighted average in each bin as the decimated data, using
    :func:`numkit.timeseries.mean_histogrammed_function`. The coarse
    grained time in the first column contains the centers of the
    histogram time.

    If *a* contains <= *maxpoints* then *a* is simply returned;
    otherwise a new array of the same dimensions but with a reduced
    number of *maxpoints* points is returned.

    .. Note:: Assumes that the first column is time.
    """
    return self._decimate(numkit.timeseries.mean_histogrammed_function,
                          a, maxpoints, **kwargs)
[ "def", "decimate_mean", "(", "self", ",", "a", ",", "maxpoints", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_decimate", "(", "numkit", ".", "timeseries", ".", "mean_histogrammed_function", ",", "a", ",", "maxpoints", ",", "*", "*", "kwargs", ")" ]
40.421053
0.005089
def _btc_script_serialize_unit(unit):
    """
    Encode one item of a BTC script
    Return the encoded item (as a string)

    Based on code from pybitcointools (https://github.com/vbuterin/pybitcointools)
    by Vitalik Buterin
    """
    if isinstance(unit, int):
        # cannot be less than -1, since btc_script_deserialize() never returns such numbers
        if unit < -1:
            raise ValueError('Invalid integer: {}'.format(unit))

        if unit < 16:
            if unit == 0:
                # OP_RESERVED
                return encoding.from_int_to_byte(OPCODE_VALUES['OP_RESERVED'])
            else:
                # OP_1 thru OP_16, or OP_1NEGATE
                return encoding.from_int_to_byte(unit + 80)
        else:
            # pass as numeric literal or raw opcode
            return encoding.from_int_to_byte(unit)
    elif unit is None:
        # None means OP_0
        return b'\x00'
    else:
        if len(unit) <= 75:
            # length + payload
            return encoding.from_int_to_byte(len(unit)) + unit
        elif len(unit) < 256:
            # OP_PUSHDATA1 + length (1 byte) + payload
            return encoding.from_int_to_byte(OPCODE_VALUES['OP_PUSHDATA1']) + encoding.from_int_to_byte(len(unit)) + unit
        elif len(unit) < 65536:
            # OP_PUSHDATA2 + length (2 bytes, little-endian after the [::-1] reversal) + payload
            return encoding.from_int_to_byte(OPCODE_VALUES['OP_PUSHDATA2']) + encoding.encode(len(unit), 256, 2)[::-1] + unit
        else:
            # OP_PUSHDATA4 + length (4 bytes, little-endian after the [::-1] reversal) + payload
            return encoding.from_int_to_byte(OPCODE_VALUES['OP_PUSHDATA4']) + encoding.encode(len(unit), 256, 4)[::-1] + unit
[ "def", "_btc_script_serialize_unit", "(", "unit", ")", ":", "if", "isinstance", "(", "unit", ",", "int", ")", ":", "# cannot be less than -1, since btc_script_deserialize() never returns such numbers", "if", "unit", "<", "-", "1", ":", "raise", "ValueError", "(", "'Invalid integer: {}'", ".", "format", "(", "unit", ")", ")", "if", "unit", "<", "16", ":", "if", "unit", "==", "0", ":", "# OP_RESERVED", "return", "encoding", ".", "from_int_to_byte", "(", "OPCODE_VALUES", "[", "'OP_RESERVED'", "]", ")", "else", ":", "# OP_1 thru OP_16, or OP_1NEGATE", "return", "encoding", ".", "from_int_to_byte", "(", "unit", "+", "80", ")", "else", ":", "# pass as numeric literal or raw opcode", "return", "encoding", ".", "from_int_to_byte", "(", "unit", ")", "elif", "unit", "is", "None", ":", "# None means OP_0", "return", "b'\\x00'", "else", ":", "if", "len", "(", "unit", ")", "<=", "75", ":", "# length + payload", "return", "encoding", ".", "from_int_to_byte", "(", "len", "(", "unit", ")", ")", "+", "unit", "elif", "len", "(", "unit", ")", "<", "256", ":", "# OP_PUSHDATA1 + length (1 byte) + payload", "return", "encoding", ".", "from_int_to_byte", "(", "OPCODE_VALUES", "[", "'OP_PUSHDATA1'", "]", ")", "+", "encoding", ".", "from_int_to_byte", "(", "len", "(", "unit", ")", ")", "+", "unit", "elif", "len", "(", "unit", ")", "<", "65536", ":", "# OP_PUSHDATA2 + length (2 bytes, big-endian) + payload", "return", "encoding", ".", "from_int_to_byte", "(", "OPCODE_VALUES", "[", "'OP_PUSHDATA2'", "]", ")", "+", "encoding", ".", "encode", "(", "len", "(", "unit", ")", ",", "256", ",", "2", ")", "[", ":", ":", "-", "1", "]", "+", "unit", "else", ":", "# OP_PUSHDATA4 + length (4 bytes, big-endian) + payload", "return", "encoding", ".", "from_int_to_byte", "(", "OPCODE_VALUES", "[", "'OP_PUSHDATA4'", "]", ")", "+", "encoding", ".", "encode", "(", "len", "(", "unit", ")", ",", "256", ",", "4", ")", "[", ":", ":", "-", "1", "]", "+", "unit" ]
37.727273
0.00411
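The push-data branch above can be reproduced without the project's `encoding` helpers; a stand-alone sketch of the same length thresholds, using stdlib byte packing (the little-endian lengths match what the row's `[::-1]` reversal produces):

def push_data(payload: bytes) -> bytes:
    # same thresholds as the row above
    if len(payload) <= 75:
        return bytes([len(payload)]) + payload
    elif len(payload) < 256:
        return b'\x4c' + bytes([len(payload)]) + payload               # OP_PUSHDATA1
    elif len(payload) < 65536:
        return b'\x4d' + len(payload).to_bytes(2, 'little') + payload  # OP_PUSHDATA2
    else:
        return b'\x4e' + len(payload).to_bytes(4, 'little') + payload  # OP_PUSHDATA4

print(push_data(b'\x00' * 80)[:2].hex())  # 4c50 -> OP_PUSHDATA1, length 0x50 = 80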
def _fill_naked_singles(self):
    """Look for naked singles, i.e. cells with only one possible value.

    :return: If any Naked Single has been found.
    :rtype: bool
    """
    simple_found = False
    for i in utils.range_(self.side):
        for j in utils.range_(self.side):
            if self[i][j] > 0:
                continue
            p = self._possibles[i][j]
            if len(p) == 1:
                self.set_cell(i, j, list(p)[0])
                self.solution_steps.append(self._format_step("NAKED", (i, j), self[i][j]))
                simple_found = True
            elif len(p) == 0:
                raise SudokuHasNoSolutionError("Error made! No possible value for ({0},{1})!".format(i + 1, j + 1))
    return simple_found
[ "def", "_fill_naked_singles", "(", "self", ")", ":", "simple_found", "=", "False", "for", "i", "in", "utils", ".", "range_", "(", "self", ".", "side", ")", ":", "for", "j", "in", "utils", ".", "range_", "(", "self", ".", "side", ")", ":", "if", "self", "[", "i", "]", "[", "j", "]", ">", "0", ":", "continue", "p", "=", "self", ".", "_possibles", "[", "i", "]", "[", "j", "]", "if", "len", "(", "p", ")", "==", "1", ":", "self", ".", "set_cell", "(", "i", ",", "j", ",", "list", "(", "p", ")", "[", "0", "]", ")", "self", ".", "solution_steps", ".", "append", "(", "self", ".", "_format_step", "(", "\"NAKED\"", ",", "(", "i", ",", "j", ")", ",", "self", "[", "i", "]", "[", "j", "]", ")", ")", "simple_found", "=", "True", "elif", "len", "(", "p", ")", "==", "0", ":", "raise", "SudokuHasNoSolutionError", "(", "\"Error made! No possible value for ({0},{1})!\"", ".", "format", "(", "i", "+", "1", ",", "j", "+", "1", ")", ")", "return", "simple_found" ]
38
0.00489
def detect(self):
    """Detect all currently known devices.

    Returns the root device."""
    root = self._actions.detect()
    prune_empty_node(root, set())
    return root
[ "def", "detect", "(", "self", ")", ":", "root", "=", "self", ".", "_actions", ".", "detect", "(", ")", "prune_empty_node", "(", "root", ",", "set", "(", ")", ")", "return", "root" ]
36.8
0.010638
def export_diagram_plane_elements(root, diagram_attributes, plane_attributes):
    """
    Creates 'diagram' and 'plane' elements for exported BPMN XML file.
    Returns a tuple (diagram, plane).

    :param root: object of Element class, representing a BPMN XML root element ('definitions'),
    :param diagram_attributes: dictionary that holds attribute values for imported 'BPMNDiagram' element,
    :param plane_attributes: dictionary that holds attribute values for imported 'BPMNPlane' element.
    """
    diagram = eTree.SubElement(root, BpmnDiagramGraphExport.bpmndi_namespace + "BPMNDiagram")
    diagram.set(consts.Consts.id, diagram_attributes[consts.Consts.id])
    diagram.set(consts.Consts.name, diagram_attributes[consts.Consts.name])

    plane = eTree.SubElement(diagram, BpmnDiagramGraphExport.bpmndi_namespace + "BPMNPlane")
    plane.set(consts.Consts.id, plane_attributes[consts.Consts.id])
    plane.set(consts.Consts.bpmn_element, plane_attributes[consts.Consts.bpmn_element])

    return diagram, plane
[ "def", "export_diagram_plane_elements", "(", "root", ",", "diagram_attributes", ",", "plane_attributes", ")", ":", "diagram", "=", "eTree", ".", "SubElement", "(", "root", ",", "BpmnDiagramGraphExport", ".", "bpmndi_namespace", "+", "\"BPMNDiagram\"", ")", "diagram", ".", "set", "(", "consts", ".", "Consts", ".", "id", ",", "diagram_attributes", "[", "consts", ".", "Consts", ".", "id", "]", ")", "diagram", ".", "set", "(", "consts", ".", "Consts", ".", "name", ",", "diagram_attributes", "[", "consts", ".", "Consts", ".", "name", "]", ")", "plane", "=", "eTree", ".", "SubElement", "(", "diagram", ",", "BpmnDiagramGraphExport", ".", "bpmndi_namespace", "+", "\"BPMNPlane\"", ")", "plane", ".", "set", "(", "consts", ".", "Consts", ".", "id", ",", "plane_attributes", "[", "consts", ".", "Consts", ".", "id", "]", ")", "plane", ".", "set", "(", "consts", ".", "Consts", ".", "bpmn_element", ",", "plane_attributes", "[", "consts", ".", "Consts", ".", "bpmn_element", "]", ")", "return", "diagram", ",", "plane" ]
59.222222
0.007387
def random_draw(self, size=None):
    """Draw random samples of the hyperparameters.

    Parameters
    ----------
    size : None, int or array-like, optional
        The number/shape of samples to draw. If None, only one sample is
        returned. Default is None.
    """
    return scipy.asarray([scipy.stats.gamma.rvs(a, loc=0, scale=1.0 / b, size=size)
                          for a, b in zip(self.a, self.b)])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "gamma", ".", "rvs", "(", "a", ",", "loc", "=", "0", ",", "scale", "=", "1.0", "/", "b", ",", "size", "=", "size", ")", "for", "a", ",", "b", "in", "zip", "(", "self", ".", "a", ",", "self", ".", "b", ")", "]", ")" ]
42.5
0.009217
def orientation(self):
    """
    The angle in radians between the ``x`` axis and the major axis of
    the 2D Gaussian function that has the same second-order moments as
    the source. The angle increases in the counter-clockwise direction.
    """
    a, b, b, c = self.covariance.flat
    if a < 0 or c < 0:  # negative variance
        return np.nan * u.rad  # pragma: no cover
    return 0.5 * np.arctan2(2. * b, (a - c))
[ "def", "orientation", "(", "self", ")", ":", "a", ",", "b", ",", "b", ",", "c", "=", "self", ".", "covariance", ".", "flat", "if", "a", "<", "0", "or", "c", "<", "0", ":", "# negative variance", "return", "np", ".", "nan", "*", "u", ".", "rad", "# pragma: no cover", "return", "0.5", "*", "np", ".", "arctan2", "(", "2.", "*", "b", ",", "(", "a", "-", "c", ")", ")" ]
38.75
0.004202
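A stand-alone check of the angle formula on a known 2x2 covariance, with plain floats in place of the source's `u.rad` units:

import numpy as np

a, b, c = 2.0, 1.0, 2.0  # symmetric covariance [[a, b], [b, c]]
print(0.5 * np.arctan2(2. * b, a - c))  # 0.7853981... = pi/4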
def _ewp_iccarm_set(self, ewp_dic, project_dic):
    """ C/C++ options (ICCARM) """
    index_iccarm = self._get_option(ewp_dic, 'ICCARM')
    index_option = self._get_option(ewp_dic[index_iccarm]['data']['option'], 'CCDefines')
    self._set_multiple_option(ewp_dic[index_iccarm]['data']['option'][index_option], project_dic['macros'])
    index_option = self._get_option(ewp_dic[index_iccarm]['data']['option'], 'CCIncludePath2')
    self._set_multiple_option(ewp_dic[index_iccarm]['data']['option'][index_option], project_dic['include_paths'])
    iccarm_dic = ewp_dic[index_iccarm]['data']['option']
    self._ewp_flags_set(iccarm_dic, project_dic, 'cxx_flags', self.FLAG_TO_IAR['cxx_flags'])
    self._ewp_flags_set(iccarm_dic, project_dic, 'c_flags', self.FLAG_TO_IAR['c_flags'])
[ "def", "_ewp_iccarm_set", "(", "self", ",", "ewp_dic", ",", "project_dic", ")", ":", "index_iccarm", "=", "self", ".", "_get_option", "(", "ewp_dic", ",", "'ICCARM'", ")", "index_option", "=", "self", ".", "_get_option", "(", "ewp_dic", "[", "index_iccarm", "]", "[", "'data'", "]", "[", "'option'", "]", ",", "'CCDefines'", ")", "self", ".", "_set_multiple_option", "(", "ewp_dic", "[", "index_iccarm", "]", "[", "'data'", "]", "[", "'option'", "]", "[", "index_option", "]", ",", "project_dic", "[", "'macros'", "]", ")", "index_option", "=", "self", ".", "_get_option", "(", "ewp_dic", "[", "index_iccarm", "]", "[", "'data'", "]", "[", "'option'", "]", ",", "'CCIncludePath2'", ")", "self", ".", "_set_multiple_option", "(", "ewp_dic", "[", "index_iccarm", "]", "[", "'data'", "]", "[", "'option'", "]", "[", "index_option", "]", ",", "project_dic", "[", "'include_paths'", "]", ")", "iccarm_dic", "=", "ewp_dic", "[", "index_iccarm", "]", "[", "'data'", "]", "[", "'option'", "]", "self", ".", "_ewp_flags_set", "(", "iccarm_dic", ",", "project_dic", ",", "'cxx_flags'", ",", "self", ".", "FLAG_TO_IAR", "[", "'cxx_flags'", "]", ")", "self", ".", "_ewp_flags_set", "(", "iccarm_dic", ",", "project_dic", ",", "'c_flags'", ",", "self", ".", "FLAG_TO_IAR", "[", "'c_flags'", "]", ")" ]
73.818182
0.009732
def set(self, key, value):
    """Set a value in the `Bison` configuration.

    Args:
        key (str): The configuration key to set a new value for.
        value: The value to set.
    """
    # the configuration changes, so we invalidate the cached config
    self._full_config = None
    self._override[key] = value
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "# the configuration changes, so we invalidate the cached config", "self", ".", "_full_config", "=", "None", "self", ".", "_override", "[", "key", "]", "=", "value" ]
34.4
0.005666
def check_dict(self, opt, value):
    '''Take json as dictionary parameter'''
    try:
        return json.loads(value)
    except:
        raise optparse.OptionValueError("Option %s: invalid dict value: %r" % (opt, value))
[ "def", "check_dict", "(", "self", ",", "opt", ",", "value", ")", ":", "try", ":", "return", "json", ".", "loads", "(", "value", ")", "except", ":", "raise", "optparse", ".", "OptionValueError", "(", "\"Option %s: invalid dict value: %r\"", "%", "(", "opt", ",", "value", ")", ")" ]
35.666667
0.022831
def run(module_name, args=None, env_vars=None, wait=True, capture_error=False):
    # type: (str, list, dict, bool, bool) -> subprocess.Popen
    """Run Python module as a script.

    Search sys.path for the named module and execute its contents as the __main__ module.

    Since the argument is a module name, you must not give a file extension (.py). The module name
    should be a valid absolute Python module name, but the implementation may not always enforce
    this (e.g. it may allow you to use a name that includes a hyphen).

    Package names (including namespace packages) are also permitted. When a package name is
    supplied instead of a normal module, the interpreter will execute <pkg>.__main__ as the main
    module. This behaviour is deliberately similar to the handling of directories and zipfiles
    that are passed to the interpreter as the script argument.

    Note This option cannot be used with built-in modules and extension modules written in C,
    since they do not have Python module files. However, it can still be used for precompiled
    modules, even if the original source file is not available.

    If this option is given, the first element of sys.argv will be the full path to the module
    file (while the module file is being located, the first element will be set to "-m"). As with
    the -c option, the current directory will be added to the start of sys.path.

    You can find more information at https://docs.python.org/3/using/cmdline.html#cmdoption-m

    Example:

        >>> import sagemaker_containers
        >>> from sagemaker_containers.beta.framework import mapping, modules

        >>> env = sagemaker_containers.training_env()
        {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}

        >>> hyperparameters = env.hyperparameters
        {'batch-size': 128, 'model_dir': '/opt/ml/model'}

        >>> args = mapping.to_cmd_args(hyperparameters)
        ['--batch-size', '128', '--model_dir', '/opt/ml/model']

        >>> env_vars = mapping.to_env_vars()
        ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
         'MODEL_DIR':'/opt/ml/model', ...}

        >>> modules.run('user_script', args, env_vars)
        SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
        SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model

    Args:
        module_name (str): module name in the same format required by python -m <module-name> cli command.
        args (list): A list of program arguments.
        env_vars (dict): A map containing the environment variables to be written.
        capture_error (bool): Default false. If True, the running process captures the
            stderr, and appends it to the returned Exception message in case of errors.
    """
    args = args or []
    env_vars = env_vars or {}

    cmd = [_process.python_executable(), '-m', module_name] + args

    _logging.log_script_invocation(cmd, env_vars)

    if wait:
        return _process.check_error(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
    else:
        return _process.create(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
[ "def", "run", "(", "module_name", ",", "args", "=", "None", ",", "env_vars", "=", "None", ",", "wait", "=", "True", ",", "capture_error", "=", "False", ")", ":", "# type: (str, list, dict, bool, bool) -> subprocess.Popen", "args", "=", "args", "or", "[", "]", "env_vars", "=", "env_vars", "or", "{", "}", "cmd", "=", "[", "_process", ".", "python_executable", "(", ")", ",", "'-m'", ",", "module_name", "]", "+", "args", "_logging", ".", "log_script_invocation", "(", "cmd", ",", "env_vars", ")", "if", "wait", ":", "return", "_process", ".", "check_error", "(", "cmd", ",", "_errors", ".", "ExecuteUserScriptError", ",", "capture_error", "=", "capture_error", ")", "else", ":", "return", "_process", ".", "create", "(", "cmd", ",", "_errors", ".", "ExecuteUserScriptError", ",", "capture_error", "=", "capture_error", ")" ]
50.328125
0.006699
def crown(self, depth=2):
    """ Returns a list of leaves, nodes connected to leaves, etc.
    """
    nodes = []
    for node in self.leaves:
        nodes += node.flatten(depth-1)
    return cluster.unique(nodes)
[ "def", "crown", "(", "self", ",", "depth", "=", "2", ")", ":", "nodes", "=", "[", "]", "for", "node", "in", "self", ".", "leaves", ":", "nodes", "+=", "node", ".", "flatten", "(", "depth", "-", "1", ")", "return", "cluster", ".", "unique", "(", "nodes", ")" ]
37
0.013216
def format(self, indent_level, indent_size=4):
    """Format this verifier

    Returns:
        string: A formatted string
    """
    desc = self.format_name('String')
    return self.wrap_lines(desc, indent_level, indent_size=indent_size)
[ "def", "format", "(", "self", ",", "indent_level", ",", "indent_size", "=", "4", ")", ":", "desc", "=", "self", ".", "format_name", "(", "'String'", ")", "return", "self", ".", "wrap_lines", "(", "desc", ",", "indent_level", ",", "indent_size", "=", "indent_size", ")" ]
28.666667
0.007519
def in_book_search(request):
    """Full text, in-book search."""
    results = {}
    args = request.matchdict
    ident_hash = args['ident_hash']
    args['search_term'] = request.params.get('q', '')
    query_type = request.params.get('query_type', '')
    combiner = ''
    if query_type:
        if query_type.lower() == 'or':
            combiner = '_or'

    id, version = split_ident_hash(ident_hash)
    args['uuid'] = id
    args['version'] = version

    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            cursor.execute(SQL['get-collated-state'], args)
            res = cursor.fetchall()
            if res and res[0][0]:
                statement = SQL['get-in-collated-book-search']
            else:
                statement = SQL['get-in-book-search']
            cursor.execute(statement.format(combiner=combiner), args)
            res = cursor.fetchall()

    results['results'] = {'query': [], 'total': len(res), 'items': []}
    results['results']['query'] = {
        'id': ident_hash,
        'search_term': args['search_term'],
    }
    for uuid, version, title, snippet, matches, rank in res:
        results['results']['items'].append({
            'rank': '{}'.format(rank),
            'id': '{}@{}'.format(uuid, version),
            'title': '{}'.format(title),
            'snippet': '{}'.format(snippet),
            'matches': '{}'.format(matches),
        })

    resp = request.response
    resp.status = '200 OK'
    resp.content_type = 'application/json'
    resp.body = json.dumps(results)
    return resp
[ "def", "in_book_search", "(", "request", ")", ":", "results", "=", "{", "}", "args", "=", "request", ".", "matchdict", "ident_hash", "=", "args", "[", "'ident_hash'", "]", "args", "[", "'search_term'", "]", "=", "request", ".", "params", ".", "get", "(", "'q'", ",", "''", ")", "query_type", "=", "request", ".", "params", ".", "get", "(", "'query_type'", ",", "''", ")", "combiner", "=", "''", "if", "query_type", ":", "if", "query_type", ".", "lower", "(", ")", "==", "'or'", ":", "combiner", "=", "'_or'", "id", ",", "version", "=", "split_ident_hash", "(", "ident_hash", ")", "args", "[", "'uuid'", "]", "=", "id", "args", "[", "'version'", "]", "=", "version", "with", "db_connect", "(", ")", "as", "db_connection", ":", "with", "db_connection", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "SQL", "[", "'get-collated-state'", "]", ",", "args", ")", "res", "=", "cursor", ".", "fetchall", "(", ")", "if", "res", "and", "res", "[", "0", "]", "[", "0", "]", ":", "statement", "=", "SQL", "[", "'get-in-collated-book-search'", "]", "else", ":", "statement", "=", "SQL", "[", "'get-in-book-search'", "]", "cursor", ".", "execute", "(", "statement", ".", "format", "(", "combiner", "=", "combiner", ")", ",", "args", ")", "res", "=", "cursor", ".", "fetchall", "(", ")", "results", "[", "'results'", "]", "=", "{", "'query'", ":", "[", "]", ",", "'total'", ":", "len", "(", "res", ")", ",", "'items'", ":", "[", "]", "}", "results", "[", "'results'", "]", "[", "'query'", "]", "=", "{", "'id'", ":", "ident_hash", ",", "'search_term'", ":", "args", "[", "'search_term'", "]", ",", "}", "for", "uuid", ",", "version", ",", "title", ",", "snippet", ",", "matches", ",", "rank", "in", "res", ":", "results", "[", "'results'", "]", "[", "'items'", "]", ".", "append", "(", "{", "'rank'", ":", "'{}'", ".", "format", "(", "rank", ")", ",", "'id'", ":", "'{}@{}'", ".", "format", "(", "uuid", ",", "version", ")", ",", "'title'", ":", "'{}'", ".", "format", "(", "title", ")", ",", "'snippet'", ":", "'{}'", ".", "format", "(", "snippet", ")", ",", "'matches'", ":", "'{}'", ".", "format", "(", "matches", ")", ",", "}", ")", "resp", "=", "request", ".", "response", "resp", ".", "status", "=", "'200 OK'", "resp", ".", "content_type", "=", "'application/json'", "resp", ".", "body", "=", "json", ".", "dumps", "(", "results", ")", "return", "resp" ]
33.627451
0.000567
def assign_highest_value(exposure, hazard):
    """Assign the highest hazard value to an indivisible feature.

    For indivisible polygon exposure layers such as buildings, we need to
    assign the greatest hazard that each polygon touches and use that as
    the effective hazard class.

    Issue https://github.com/inasafe/inasafe/issues/3192

    We follow the concept here that any part of the exposure dataset that
    touches the hazard is affected, and the greatest hazard is the
    effective hazard.

    :param exposure: The building vector layer.
    :type exposure: QgsVectorLayer

    :param hazard: The vector layer to use for hazard.
    :type hazard: QgsVectorLayer

    :return: The new impact layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = assign_highest_value_steps['output_layer_name']

    hazard_inasafe_fields = hazard.keywords['inasafe_fields']
    if not hazard.keywords.get('classification'):
        raise InvalidKeywordsForProcessingAlgorithm
    if not hazard_inasafe_fields.get(hazard_class_field['key']):
        raise InvalidKeywordsForProcessingAlgorithm

    indices = []
    exposure.startEditing()
    for field in hazard.fields():
        exposure.addAttribute(field)
        indices.append(exposure.fields().lookupField(field.name()))
    exposure.commitChanges()

    provider = exposure.dataProvider()
    spatial_index = create_spatial_index(exposure)

    # cache features from exposure layer for faster retrieval
    exposure_features = {}
    for f in exposure.getFeatures():
        exposure_features[f.id()] = f

    # Todo callback
    # total = 100.0 / len(selectionA)

    hazard_field = hazard_inasafe_fields[hazard_class_field['key']]

    layer_classification = None
    for classification in hazard_classification['types']:
        if classification['key'] == hazard.keywords['classification']:
            layer_classification = classification
            break

    # Get a ordered list of classes like ['high', 'medium', 'low']
    levels = [key['key'] for key in layer_classification['classes']]
    levels.append(not_exposed_class['key'])

    # Let's loop over the hazard layer, from high to low hazard zone.
    for hazard_value in levels:
        expression = '"%s" = \'%s\'' % (hazard_field, hazard_value)
        hazard_request = QgsFeatureRequest().setFilterExpression(expression)
        update_map = {}
        for area in hazard.getFeatures(hazard_request):
            geometry = area.geometry().constGet()
            intersects = spatial_index.intersects(geometry.boundingBox())
            # use prepared geometry: makes multiple intersection tests faster
            geometry_prepared = QgsGeometry.createGeometryEngine(
                geometry)
            geometry_prepared.prepareGeometry()

            # We need to loop over each intersections exposure / hazard.
            for i in intersects:
                building = exposure_features[i]
                building_geometry = building.geometry()
                if geometry_prepared.intersects(building_geometry.constGet()):
                    update_map[building.id()] = {}
                    for index, value in zip(indices, area.attributes()):
                        update_map[building.id()][index] = value

                    # We don't want this building again, let's remove it from
                    # the index.
                    spatial_index.deleteFeature(building)

        provider.changeAttributeValues(update_map)

    exposure.updateExtents()
    exposure.updateFields()

    exposure.keywords['inasafe_fields'].update(
        hazard.keywords['inasafe_fields'])
    exposure.keywords['layer_purpose'] = layer_purpose_exposure_summary['key']
    exposure.keywords['exposure_keywords'] = exposure.keywords.copy()
    exposure.keywords['aggregation_keywords'] = (
        hazard.keywords['aggregation_keywords'].copy())
    exposure.keywords['hazard_keywords'] = (
        hazard.keywords['hazard_keywords'].copy())
    exposure.keywords['title'] = output_layer_name

    check_layer(exposure)
    return exposure
[ "def", "assign_highest_value", "(", "exposure", ",", "hazard", ")", ":", "output_layer_name", "=", "assign_highest_value_steps", "[", "'output_layer_name'", "]", "hazard_inasafe_fields", "=", "hazard", ".", "keywords", "[", "'inasafe_fields'", "]", "if", "not", "hazard", ".", "keywords", ".", "get", "(", "'classification'", ")", ":", "raise", "InvalidKeywordsForProcessingAlgorithm", "if", "not", "hazard_inasafe_fields", ".", "get", "(", "hazard_class_field", "[", "'key'", "]", ")", ":", "raise", "InvalidKeywordsForProcessingAlgorithm", "indices", "=", "[", "]", "exposure", ".", "startEditing", "(", ")", "for", "field", "in", "hazard", ".", "fields", "(", ")", ":", "exposure", ".", "addAttribute", "(", "field", ")", "indices", ".", "append", "(", "exposure", ".", "fields", "(", ")", ".", "lookupField", "(", "field", ".", "name", "(", ")", ")", ")", "exposure", ".", "commitChanges", "(", ")", "provider", "=", "exposure", ".", "dataProvider", "(", ")", "spatial_index", "=", "create_spatial_index", "(", "exposure", ")", "# cache features from exposure layer for faster retrieval", "exposure_features", "=", "{", "}", "for", "f", "in", "exposure", ".", "getFeatures", "(", ")", ":", "exposure_features", "[", "f", ".", "id", "(", ")", "]", "=", "f", "# Todo callback", "# total = 100.0 / len(selectionA)", "hazard_field", "=", "hazard_inasafe_fields", "[", "hazard_class_field", "[", "'key'", "]", "]", "layer_classification", "=", "None", "for", "classification", "in", "hazard_classification", "[", "'types'", "]", ":", "if", "classification", "[", "'key'", "]", "==", "hazard", ".", "keywords", "[", "'classification'", "]", ":", "layer_classification", "=", "classification", "break", "# Get a ordered list of classes like ['high', 'medium', 'low']", "levels", "=", "[", "key", "[", "'key'", "]", "for", "key", "in", "layer_classification", "[", "'classes'", "]", "]", "levels", ".", "append", "(", "not_exposed_class", "[", "'key'", "]", ")", "# Let's loop over the hazard layer, from high to low hazard zone.", "for", "hazard_value", "in", "levels", ":", "expression", "=", "'\"%s\" = \\'%s\\''", "%", "(", "hazard_field", ",", "hazard_value", ")", "hazard_request", "=", "QgsFeatureRequest", "(", ")", ".", "setFilterExpression", "(", "expression", ")", "update_map", "=", "{", "}", "for", "area", "in", "hazard", ".", "getFeatures", "(", "hazard_request", ")", ":", "geometry", "=", "area", ".", "geometry", "(", ")", ".", "constGet", "(", ")", "intersects", "=", "spatial_index", ".", "intersects", "(", "geometry", ".", "boundingBox", "(", ")", ")", "# use prepared geometry: makes multiple intersection tests faster", "geometry_prepared", "=", "QgsGeometry", ".", "createGeometryEngine", "(", "geometry", ")", "geometry_prepared", ".", "prepareGeometry", "(", ")", "# We need to loop over each intersections exposure / hazard.", "for", "i", "in", "intersects", ":", "building", "=", "exposure_features", "[", "i", "]", "building_geometry", "=", "building", ".", "geometry", "(", ")", "if", "geometry_prepared", ".", "intersects", "(", "building_geometry", ".", "constGet", "(", ")", ")", ":", "update_map", "[", "building", ".", "id", "(", ")", "]", "=", "{", "}", "for", "index", ",", "value", "in", "zip", "(", "indices", ",", "area", ".", "attributes", "(", ")", ")", ":", "update_map", "[", "building", ".", "id", "(", ")", "]", "[", "index", "]", "=", "value", "# We don't want this building again, let's remove it from", "# the index.", "spatial_index", ".", "deleteFeature", "(", "building", ")", "provider", ".", 
"changeAttributeValues", "(", "update_map", ")", "exposure", ".", "updateExtents", "(", ")", "exposure", ".", "updateFields", "(", ")", "exposure", ".", "keywords", "[", "'inasafe_fields'", "]", ".", "update", "(", "hazard", ".", "keywords", "[", "'inasafe_fields'", "]", ")", "exposure", ".", "keywords", "[", "'layer_purpose'", "]", "=", "layer_purpose_exposure_summary", "[", "'key'", "]", "exposure", ".", "keywords", "[", "'exposure_keywords'", "]", "=", "exposure", ".", "keywords", ".", "copy", "(", ")", "exposure", ".", "keywords", "[", "'aggregation_keywords'", "]", "=", "(", "hazard", ".", "keywords", "[", "'aggregation_keywords'", "]", ".", "copy", "(", ")", ")", "exposure", ".", "keywords", "[", "'hazard_keywords'", "]", "=", "(", "hazard", ".", "keywords", "[", "'hazard_keywords'", "]", ".", "copy", "(", ")", ")", "exposure", ".", "keywords", "[", "'title'", "]", "=", "output_layer_name", "check_layer", "(", "exposure", ")", "return", "exposure" ]
36.481818
0.000243
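The prepared-geometry step above is what keeps the nested loop affordable. A minimal standalone sketch of that pattern, assuming the QGIS 3 Python API; hazard_geom and candidates are hypothetical placeholders:

from qgis.core import QgsGeometry

def filter_intersecting(hazard_geom, candidates):
    # Prepare the hazard geometry once, then run many cheap intersects() tests.
    engine = QgsGeometry.createGeometryEngine(hazard_geom.constGet())
    engine.prepareGeometry()
    return [f for f in candidates
            if engine.intersects(f.geometry().constGet())]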
def __threshold(self, ymx_i): """ Calculates the difference threshold for a given difference local maximum. Parameters ----------- ymx_i : float The normalized y value of a local maximum. """ return ymx_i - (self.S * np.diff(self.xsn).mean())
[ "def", "__threshold", "(", "self", ",", "ymx_i", ")", ":", "return", "ymx_i", "-", "(", "self", ".", "S", "*", "np", ".", "diff", "(", "self", ".", "xsn", ")", ".", "mean", "(", ")", ")" ]
28.090909
0.009404
def load_gifti(filename, to='auto'):
    '''
    load_gifti(filename) yields the nibabel gifti data structure loaded by nibabel from the given
    filename. Currently, this load method is not particularly sophisticated and simply returns this
    data.

    The optional argument to may be used to coerce the resulting data to a particular format; the
    following arguments are understood:
      * 'auto' currently returns the nibabel data structure.
      * 'mesh' returns the data as a mesh, assuming that there are two darray elements stored in
        the gifti file, the first of which must be a coordinate matrix and the second a triangle
        topology.
      * 'coordinates' returns the data as a coordinate matrix.
      * 'tesselation' returns the data as a tesselation object.
      * 'raw' returns the entire gifti image object (None will also yield this result).
    '''
    dat = nib.load(filename)
    to = 'raw' if to is None else to.lower()
    if to in ['raw', 'image', 'gifti', 'all', 'full']:
        return dat
    if to in ['auto', 'automatic']:
        # is this a mesh gifti?
        pset = dat.get_arrays_from_intent('pointset')
        tris = dat.get_arrays_from_intent('triangle')
        if len(pset) == 1 and len(tris) == 1:
            (cor, tri) = (pset[0].data, tris[0].data)
            # okay, try making it:
            try:
                return Mesh(tri, cor)
            except Exception:
                pass
        elif len(pset) == 1 and len(tris) == 0:
            # just a pointset
            return pset[0].data
        elif len(tris) == 1 and len(pset) == 0:
            # Just a topology...
            return Tesselation(tris[0].data)
        # Maybe it's a stat? If so, we want to return the data array...
        # see the nifti1 header for these numbers, but stats are intent 2-24
        stats = [v for k in range(2,25) for v in dat.get_arrays_from_intent(k)]
        if len(stats) == 1:
            return np.squeeze(stats[0].data)
        # most other possibilities are also basic arrays, so if there's only one of them, we can
        # just yield that array
        if len(dat.darrays) == 1:
            return np.squeeze(dat.darrays[0].data)
        # We don't know what it is; return the whole thing:
        return dat
    elif to in ['coords', 'coordinates', 'xyz']:
        cor = dat.darrays[0].data
        if pimms.is_matrix(cor, np.inexact):
            return cor
        else:
            raise ValueError('given gifti file did not contain coordinates')
    elif to in ['tess', 'tesselation', 'triangles', 'tri', 'triangulation']:
        cor = dat.darrays[0].data
        if pimms.is_matrix(cor, 'int'):
            return Tesselation(cor)
        else:
            raise ValueError('given gifti file did not contain tesselation')
    elif to in ['mesh']:
        if len(dat.darrays) == 2:
            (cor, tri) = dat.darrays
        else:
            (cor, _, tri) = dat.darrays
        cor = cor.data
        tri = tri.data
        # possible that these were given in the wrong order:
        if pimms.is_matrix(tri, np.inexact) and pimms.is_matrix(cor, 'int'):
            (cor,tri) = (tri,cor)
        # okay, try making it:
        return Mesh(tri, cor)
    else:
        raise ValueError('option "to" given to load_gifti could not be understood')
[ "def", "load_gifti", "(", "filename", ",", "to", "=", "'auto'", ")", ":", "dat", "=", "nib", ".", "load", "(", "filename", ")", "to", "=", "'raw'", "if", "to", "is", "None", "else", "to", ".", "lower", "(", ")", "if", "to", "in", "[", "'raw'", ",", "'image'", ",", "'gifti'", ",", "'all'", ",", "'full'", "]", ":", "return", "dat", "if", "to", "in", "[", "'auto'", ",", "'automatic'", "]", ":", "# is this a mesh gifti?", "pset", "=", "dat", ".", "get_arrays_from_intent", "(", "'pointset'", ")", "tris", "=", "dat", ".", "get_arrays_from_intent", "(", "'triangle'", ")", "if", "len", "(", "pset", ")", "==", "1", "and", "len", "(", "tris", ")", "==", "1", ":", "(", "cor", ",", "tri", ")", "=", "(", "pset", "[", "0", "]", ".", "data", ",", "tris", "[", "0", "]", ".", "data", ")", "# okay, try making it:", "try", ":", "return", "Mesh", "(", "tri", ",", "cor", ")", "except", "Exception", ":", "pass", "elif", "len", "(", "pset", ")", "==", "1", "and", "len", "(", "tris", ")", "==", "0", ":", "# just a pointset", "return", "pset", "[", "0", "]", ".", "data", "elif", "len", "(", "tris", ")", "==", "1", "and", "len", "(", "pset", ")", "==", "0", ":", "# Just a topology...", "return", "Tesselation", "(", "tris", "[", "0", "]", ".", "data", ")", "# Maybe it's a stat? If so, we want to return the data array...", "# see the nifti1 header for these numbers, but stats are intent 2-24", "stats", "=", "[", "v", "for", "k", "in", "range", "(", "2", ",", "25", ")", "for", "v", "in", "dat", ".", "get_arrays_from_intent", "(", "k", ")", "]", "if", "len", "(", "stats", ")", "==", "1", ":", "return", "np", ".", "squeeze", "(", "stats", "[", "0", "]", ".", "data", ")", "# most other possibilities are also basic arrays, so if there's only one of them, we can", "# just yield that array", "if", "len", "(", "dat", ".", "darrays", ")", "==", "1", ":", "return", "np", ".", "squeeze", "(", "dat", ".", "darrays", "[", "0", "]", ".", "data", ")", "# We don't know what it is; return the whole thing:", "return", "dat", "elif", "to", "in", "[", "'coords'", ",", "'coordinates'", ",", "'xyz'", "]", ":", "cor", "=", "dat", ".", "darrays", "[", "0", "]", ".", "data", "if", "pimms", ".", "is_matrix", "(", "cor", ",", "np", ".", "inexact", ")", ":", "return", "cor", "else", ":", "raise", "ValueError", "(", "'give gifti file did not contain coordinates'", ")", "elif", "to", "in", "[", "'tess'", ",", "'tesselation'", ",", "'triangles'", ",", "'tri'", ",", "'triangulation'", "]", ":", "cor", "=", "dat", ".", "darrays", "[", "0", "]", ".", "data", "if", "pimms", ".", "is_matrix", "(", "cor", ",", "'int'", ")", ":", "return", "Tesselation", "(", "cor", ")", "else", ":", "raise", "ValueError", "(", "'give gifti file did not contain tesselation'", ")", "elif", "to", "in", "[", "'mesh'", "]", ":", "if", "len", "(", "dat", ".", "darrays", ")", "==", "2", ":", "(", "cor", ",", "tri", ")", "=", "dat", ".", "darrays", "else", ":", "(", "cor", ",", "_", ",", "tri", ")", "=", "dat", ".", "darrays", "cor", "=", "cor", ".", "data", "tri", "=", "tri", ".", "data", "# possible that these were given in the wrong order:", "if", "pimms", ".", "is_matrix", "(", "tri", ",", "np", ".", "inexact", ")", "and", "pimms", ".", "is_matrix", "(", "cor", ",", "'int'", ")", ":", "(", "cor", ",", "tri", ")", "=", "(", "tri", ",", "cor", ")", "# okay, try making it:", "return", "Mesh", "(", "tri", ",", "cor", ")", "else", ":", "raise", "ValueError", "(", "'option \"to\" given to load_gift could not be understood'", ")" ]
50.596774
0.007505
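A hedged usage sketch for load_gifti; the file names are made up, and what 'auto' returns depends on the darrays actually present in the file:

mesh = load_gifti('lh.white.surf.gii', to='mesh')         # coordinates + topology
xyz = load_gifti('lh.white.surf.gii', to='coordinates')   # just the point matrix
data = load_gifti('lh.curv.shape.gii')                    # 'auto': best-effort guess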
def ge(self, value): """Construct a greater than or equal to (``>=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field """ self.op = '>=' self.negate_op = '<' self.value = self._value(value) return self
[ "def", "ge", "(", "self", ",", "value", ")", ":", "self", ".", "op", "=", "'>='", "self", ".", "negate_op", "=", "'<'", "self", ".", "value", "=", "self", ".", "_value", "(", "value", ")", "return", "self" ]
29.818182
0.005917
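A small usage sketch of the ge filter builder; Field and the query it feeds into are assumed from context rather than confirmed against the library:

f = Field('age').ge(21)
# f.op == '>=', f.negate_op == '<', and f.value holds the converted 21,
# ready for the query serializer to render as "age >= 21".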
def reduce(self, mapped_props, aggregated, value_type, visitor):
    """This reduction is called to combine the mapped slot and collection
    item values into a single value for return.

    The default implementation tries to behave naturally; you'll almost
    always get a dict back when mapping over a record, and list or some
    other collection when mapping over collections.

    If the collection has additional properties which are not ignored
    (eg, not extraneous, not filtered), then the result will be a
    dictionary with the results of mapping the properties, and a
    'values' key will be added with the result of mapping the items in
    the collection.

    args:

        ``mapped_props=``\ *generator*
            Iterating over this generator will yield K, V pairs, where K
            is **the Property object** and V is the mapped value.

        ``aggregated=``\ *object*
            This contains whatever ``aggregate`` returned, normally a
            list.

        ``value_type=``\ *RecordType*
            This is the type which is currently being reduced. A
            :py:class:`normalize.record.Record` subclass

        ``visitor=``\ *Visitor*
            Context/options object.
    """
    reduced = None
    if mapped_props:
        reduced = dict((k.name, v) for k, v in mapped_props)

    if issubclass(value_type, Collection) and aggregated is not None:
        if all(visitor.is_filtered(prop) for prop in
               value_type.properties.values()):
            reduced = aggregated
        else:
            if reduced.get("values", False):
                raise exc.VisitorTooSimple(
                    fs=visitor.field_selector,
                    value_type_name=value_type.__name__,
                    visitor=type(self).__name__,
                )
            else:
                reduced['values'] = aggregated

    return reduced
[ "def", "reduce", "(", "self", ",", "mapped_props", ",", "aggregated", ",", "value_type", ",", "visitor", ")", ":", "reduced", "=", "None", "if", "mapped_props", ":", "reduced", "=", "dict", "(", "(", "k", ".", "name", ",", "v", ")", "for", "k", ",", "v", "in", "mapped_props", ")", "if", "issubclass", "(", "value_type", ",", "Collection", ")", "and", "aggregated", "is", "not", "None", ":", "if", "all", "(", "visitor", ".", "is_filtered", "(", "prop", ")", "for", "prop", "in", "value_type", ".", "properties", ".", "values", "(", ")", ")", ":", "reduced", "=", "aggregated", "else", ":", "if", "reduced", ".", "get", "(", "\"values\"", ",", "False", ")", ":", "raise", "exc", ".", "VisitorTooSimple", "(", "fs", "=", "visitor", ".", "field_selector", ",", "value_type_name", "=", "value_type", ".", "__name__", ",", "visitor", "=", "type", "(", "self", ")", ".", "__name__", ",", ")", "else", ":", "reduced", "[", "'values'", "]", "=", "aggregated", "return", "reduced" ]
41.291667
0.002957
def sum(self, axis=None, dtype=None, out=None, keepdims=False): """Return the sum of ``self``. See Also -------- numpy.sum prod """ return self.elem.__array_ufunc__( np.add, 'reduce', self.elem, axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
[ "def", "sum", "(", "self", ",", "axis", "=", "None", ",", "dtype", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "False", ")", ":", "return", "self", ".", "elem", ".", "__array_ufunc__", "(", "np", ".", "add", ",", "'reduce'", ",", "self", ".", "elem", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ",", "out", "=", "(", "out", ",", ")", ",", "keepdims", "=", "keepdims", ")" ]
29.090909
0.006061
def google_analytics(parser, token): """ Google Analytics tracking template tag. Renders Javascript code to track page visits. You must supply your website property ID (as a string) in the ``GOOGLE_ANALYTICS_PROPERTY_ID`` setting. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return GoogleAnalyticsNode()
[ "def", "google_analytics", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes no arguments\"", "%", "bits", "[", "0", "]", ")", "return", "GoogleAnalyticsNode", "(", ")" ]
34.083333
0.002381
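Hypothetical wiring for the template tag; the tag-library name "analytics" is an assumption, only the setting name comes from the docstring:

# settings.py
GOOGLE_ANALYTICS_PROPERTY_ID = 'UA-XXXXXXXX-1'

# in a template:  {% load analytics %}  followed by  {% google_analytics %}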
def minValue(self, value):
        """gets/sets the min value"""
        if isinstance(value, (int, float, long)):
            self._rangeMin = value
[ "def", "minValue", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "[", "int", ",", "float", ",", "long", "]", ")", ":", "self", ".", "_rangeMin", "=", "value" ]
36.5
0.013423
def nearest_roads(client, points): """Find the closest road segments for each point Takes up to 100 independent coordinates, and returns the closest road segment for each point. The points passed do not need to be part of a continuous path. :param points: The points for which the nearest road segments are to be located. :type points: a single location, or a list of locations, where a location is a string, dict, list, or tuple :rtype: A list of snapped points. """ params = {"points": convert.location_list(points)} return client._request("/v1/nearestRoads", params, base_url=_ROADS_BASE_URL, accepts_clientid=False, extract_body=_roads_extract).get("snappedPoints", [])
[ "def", "nearest_roads", "(", "client", ",", "points", ")", ":", "params", "=", "{", "\"points\"", ":", "convert", ".", "location_list", "(", "points", ")", "}", "return", "client", ".", "_request", "(", "\"/v1/nearestRoads\"", ",", "params", ",", "base_url", "=", "_ROADS_BASE_URL", ",", "accepts_clientid", "=", "False", ",", "extract_body", "=", "_roads_extract", ")", ".", "get", "(", "\"snappedPoints\"", ",", "[", "]", ")" ]
37.285714
0.004981
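Usage sketch against the googlemaps Python client, which attaches this function as a Client method; the API key is a placeholder:

import googlemaps

gmaps = googlemaps.Client(key='YOUR-API-KEY')
snapped = gmaps.nearest_roads([(40.714728, -73.998672),
                               (40.714828, -73.998772)])
# each snapped point pairs an originalIndex with the closest road's placeId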
def has_previous_assessment_section(self, assessment_section_id):
        """Tests if there is a previous assessment section in the assessment
        preceding the given assessment section ``Id``.

        arg:    assessment_section_id (osid.id.Id): ``Id`` of the
                ``AssessmentSection``
        return: (boolean) - ``true`` if there is a previous assessment
                section, ``false`` otherwise
        raise:  IllegalState - ``has_assessment_begun()`` is ``false``
        raise:  NotFound - ``assessment_section_id`` is not found
        raise:  NullArgument - ``assessment_section_id`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        *compliance: mandatory -- This method must be implemented.*

        """
        try:
            self.get_previous_assessment_section(assessment_section_id)
        except errors.IllegalState:
            return False
        else:
            return True
[ "def", "has_previous_assessment_section", "(", "self", ",", "assessment_section_id", ")", ":", "try", ":", "self", ".", "get_previous_assessment_section", "(", "assessment_section_id", ")", "except", "errors", ".", "IllegalState", ":", "return", "False", "else", ":", "return", "True" ]
47.047619
0.002976
def daemon(self):
        """
        Return whether process is a daemon

        :return: ``True`` if the process is daemonic, ``False`` otherwise
        """
        if self._process:
            return self._process.daemon
        else:
            return self._pargs.get('daemonic', False)
[ "def", "daemon", "(", "self", ")", ":", "if", "self", ".", "_process", ":", "return", "self", ".", "_process", ".", "daemon", "else", ":", "return", "self", ".", "_pargs", ".", "get", "(", "'daemonic'", ",", "False", ")" ]
25.222222
0.008511
def unselect(self, rows, status=True, progress=True): "Unselect given rows. Don't show progress if progress=False; don't show status if status=False." before = len(self._selectedRows) for r in (Progress(rows, 'unselecting') if progress else rows): self.unselectRow(r) if status: vd().status('unselected %s/%s %s' % (before-len(self._selectedRows), before, self.rowtype))
[ "def", "unselect", "(", "self", ",", "rows", ",", "status", "=", "True", ",", "progress", "=", "True", ")", ":", "before", "=", "len", "(", "self", ".", "_selectedRows", ")", "for", "r", "in", "(", "Progress", "(", "rows", ",", "'unselecting'", ")", "if", "progress", "else", "rows", ")", ":", "self", ".", "unselectRow", "(", "r", ")", "if", "status", ":", "vd", "(", ")", ".", "status", "(", "'unselected %s/%s %s'", "%", "(", "before", "-", "len", "(", "self", ".", "_selectedRows", ")", ",", "before", ",", "self", ".", "rowtype", ")", ")" ]
60
0.00939
def accept_default_labels(self, other): """Applies labels for default meta labels from other onto self. Parameters ---------- other : Meta Meta object to take default labels from Returns ------- Meta """ self.units_label = other.units_label self.name_label = other.name_label self.notes_label = other.notes_label self.desc_label = other.desc_label self.plot_label = other.plot_label self.axis_label = other.axis_label self.scale_label = other.scale_label self.min_label = other.min_label self.max_label = other.max_label self.fill_label = other.fill_label return
[ "def", "accept_default_labels", "(", "self", ",", "other", ")", ":", "self", ".", "units_label", "=", "other", ".", "units_label", "self", ".", "name_label", "=", "other", ".", "name_label", "self", ".", "notes_label", "=", "other", ".", "notes_label", "self", ".", "desc_label", "=", "other", ".", "desc_label", "self", ".", "plot_label", "=", "other", ".", "plot_label", "self", ".", "axis_label", "=", "other", ".", "axis_label", "self", ".", "scale_label", "=", "other", ".", "scale_label", "self", ".", "min_label", "=", "other", ".", "min_label", "self", ".", "max_label", "=", "other", ".", "max_label", "self", ".", "fill_label", "=", "other", ".", "fill_label", "return" ]
29.2
0.006631
def get_html(self): """Method to convert the repository list to a search results page.""" here = path.abspath(path.dirname(__file__)) env = Environment(loader=FileSystemLoader(path.join(here, "res/"))) suggest = env.get_template("suggest.htm.j2") return suggest.render( logo=path.join(here, "res/logo.png"), user_login=self.user, repos=self.repos, )
[ "def", "get_html", "(", "self", ")", ":", "here", "=", "path", ".", "abspath", "(", "path", ".", "dirname", "(", "__file__", ")", ")", "env", "=", "Environment", "(", "loader", "=", "FileSystemLoader", "(", "path", ".", "join", "(", "here", ",", "\"res/\"", ")", ")", ")", "suggest", "=", "env", ".", "get_template", "(", "\"suggest.htm.j2\"", ")", "return", "suggest", ".", "render", "(", "logo", "=", "path", ".", "join", "(", "here", ",", "\"res/logo.png\"", ")", ",", "user_login", "=", "self", ".", "user", ",", "repos", "=", "self", ".", "repos", ",", ")" ]
35.333333
0.004598
def laplacian_pca(self, coordinates, num_dims=None, beta=0.5): '''Graph-Laplacian PCA (CVPR 2013). coordinates : (n,d) array-like, assumed to be mean-centered. beta : float in [0,1], scales how much PCA/LapEig contributes. Returns an approximation of input coordinates, ala PCA.''' X = np.atleast_2d(coordinates) L = self.laplacian(normed=True) kernel = X.dot(X.T) kernel /= eigsh(kernel, k=1, which='LM', return_eigenvectors=False) L /= eigsh(L, k=1, which='LM', return_eigenvectors=False) W = (1-beta)*(np.identity(kernel.shape[0]) - kernel) + beta*L if num_dims is None: vals, vecs = np.linalg.eigh(W) else: vals, vecs = eigh(W, eigvals=(0, num_dims-1), overwrite_a=True) return X.T.dot(vecs).dot(vecs.T).T
[ "def", "laplacian_pca", "(", "self", ",", "coordinates", ",", "num_dims", "=", "None", ",", "beta", "=", "0.5", ")", ":", "X", "=", "np", ".", "atleast_2d", "(", "coordinates", ")", "L", "=", "self", ".", "laplacian", "(", "normed", "=", "True", ")", "kernel", "=", "X", ".", "dot", "(", "X", ".", "T", ")", "kernel", "/=", "eigsh", "(", "kernel", ",", "k", "=", "1", ",", "which", "=", "'LM'", ",", "return_eigenvectors", "=", "False", ")", "L", "/=", "eigsh", "(", "L", ",", "k", "=", "1", ",", "which", "=", "'LM'", ",", "return_eigenvectors", "=", "False", ")", "W", "=", "(", "1", "-", "beta", ")", "*", "(", "np", ".", "identity", "(", "kernel", ".", "shape", "[", "0", "]", ")", "-", "kernel", ")", "+", "beta", "*", "L", "if", "num_dims", "is", "None", ":", "vals", ",", "vecs", "=", "np", ".", "linalg", ".", "eigh", "(", "W", ")", "else", ":", "vals", ",", "vecs", "=", "eigh", "(", "W", ",", "eigvals", "=", "(", "0", ",", "num_dims", "-", "1", ")", ",", "overwrite_a", "=", "True", ")", "return", "X", ".", "T", ".", "dot", "(", "vecs", ")", ".", "dot", "(", "vecs", ".", "T", ")", ".", "T" ]
47.375
0.003881
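A minimal usage sketch, assuming g is a graph object exposing the method and X is an (n, d) feature matrix aligned with the graph's n nodes:

import numpy as np

X = X - X.mean(axis=0)                        # the method expects mean-centered input
approx = g.laplacian_pca(X, num_dims=2, beta=0.5)
# approx is a smoothed low-rank reconstruction of X; beta near 0 behaves like
# plain PCA, beta near 1 leans fully on the graph Laplacian.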
def query_walkers(): """Return query walker instances.""" return [ import_string(walker)() if isinstance(walker, six.string_types) else walker() for walker in current_app.config[ 'COLLECTIONS_QUERY_WALKERS'] ]
[ "def", "query_walkers", "(", ")", ":", "return", "[", "import_string", "(", "walker", ")", "(", ")", "if", "isinstance", "(", "walker", ",", "six", ".", "string_types", ")", "else", "walker", "(", ")", "for", "walker", "in", "current_app", ".", "config", "[", "'COLLECTIONS_QUERY_WALKERS'", "]", "]" ]
34.714286
0.004016
def do_raw(self, subcmd, opts, message): """${cmd_name}: dump the complete raw message ${cmd_usage} """ client = MdClient(self.maildir) client.getraw(message, self.stdout)
[ "def", "do_raw", "(", "self", ",", "subcmd", ",", "opts", ",", "message", ")", ":", "client", "=", "MdClient", "(", "self", ".", "maildir", ")", "client", ".", "getraw", "(", "message", ",", "self", ".", "stdout", ")" ]
29.428571
0.009434
def adjacent(self, other):
        """
        Returns True if ranges are directly next to each other but do not
        overlap.

            >>> intrange(1, 5).adjacent(intrange(5, 10))
            True
            >>> intrange(1, 5).adjacent(intrange(10, 15))
            False

        The empty set is not adjacent to any set.

        This is the same as the ``-|-`` operator for two ranges in PostgreSQL.

        :param other: Range to test against.
        :return: ``True`` if this range is adjacent to `other`, otherwise
                 ``False``.
        :raises TypeError: If given argument is of invalid type
        """
        if not self.is_valid_range(other):
            raise TypeError(
                "Unsupported type to test for inclusion '{0.__class__.__name__}'".format(
                    other))
        # Must return False if either is an empty set
        elif not self or not other:
            return False

        return (
            (self.lower == other.upper and self.lower_inc != other.upper_inc) or
            (self.upper == other.lower and self.upper_inc != other.lower_inc))
[ "def", "adjacent", "(", "self", ",", "other", ")", ":", "if", "not", "self", ".", "is_valid_range", "(", "other", ")", ":", "raise", "TypeError", "(", "\"Unsupported type to test for inclusion '{0.__class__.__name__}'\"", ".", "format", "(", "other", ")", ")", "# Must return False if either is an empty set", "elif", "not", "self", "or", "not", "other", ":", "return", "False", "return", "(", "(", "self", ".", "lower", "==", "other", ".", "upper", "and", "self", ".", "lower_inc", "!=", "other", ".", "upper_inc", ")", "or", "(", "self", ".", "upper", "==", "other", ".", "lower", "and", "self", ".", "upper_inc", "!=", "other", ".", "lower_inc", ")", ")" ]
36.433333
0.003565
def get_queryset(self, request): ''' Restrict the listed assignments for the current user.''' qs = super(AssignmentAdmin, self).get_queryset(request) if not request.user.is_superuser: qs = qs.filter(course__active=True).filter(Q(course__tutors__pk=request.user.pk) | Q(course__owner=request.user)).distinct() return qs.order_by('title')
[ "def", "get_queryset", "(", "self", ",", "request", ")", ":", "qs", "=", "super", "(", "AssignmentAdmin", ",", "self", ")", ".", "get_queryset", "(", "request", ")", "if", "not", "request", ".", "user", ".", "is_superuser", ":", "qs", "=", "qs", ".", "filter", "(", "course__active", "=", "True", ")", ".", "filter", "(", "Q", "(", "course__tutors__pk", "=", "request", ".", "user", ".", "pk", ")", "|", "Q", "(", "course__owner", "=", "request", ".", "user", ")", ")", ".", "distinct", "(", ")", "return", "qs", ".", "order_by", "(", "'title'", ")" ]
62.5
0.007895
def convert_units(self, desired, guess=False): """ Convert the units of the mesh into a specified unit. Parameters ---------- desired : string Units to convert to (eg 'inches') guess : boolean If self.units are not defined should we guess the current units of the document and then convert? """ units._convert_units(self, desired, guess) return self
[ "def", "convert_units", "(", "self", ",", "desired", ",", "guess", "=", "False", ")", ":", "units", ".", "_convert_units", "(", "self", ",", "desired", ",", "guess", ")", "return", "self" ]
31.357143
0.004425
def organize_models(self, outdir, force_rerun=False): """Organize and rename SWISS-MODEL models to a single folder with a name containing template information. Args: outdir (str): New directory to copy renamed models to force_rerun (bool): If models should be copied again even if they already exist Returns: dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values """ uniprot_to_swissmodel = defaultdict(list) for u, models in self.all_models.items(): for m in models: original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id']) file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:], 'swissmodel', '{}.pdb'.format(original_filename)) if op.exists(file_path): new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4]) shutil.copy(file_path, op.join(outdir, new_filename)) uniprot_to_swissmodel[u].append(new_filename) else: log.warning('{}: no file {} found for model'.format(u, file_path)) return uniprot_to_swissmodel
[ "def", "organize_models", "(", "self", ",", "outdir", ",", "force_rerun", "=", "False", ")", ":", "uniprot_to_swissmodel", "=", "defaultdict", "(", "list", ")", "for", "u", ",", "models", "in", "self", ".", "all_models", ".", "items", "(", ")", ":", "for", "m", "in", "models", ":", "original_filename", "=", "'{}_{}_{}_{}'", ".", "format", "(", "m", "[", "'from'", "]", ",", "m", "[", "'to'", "]", ",", "m", "[", "'template'", "]", ",", "m", "[", "'coordinate_id'", "]", ")", "file_path", "=", "op", ".", "join", "(", "self", ".", "metadata_dir", ",", "u", "[", ":", "2", "]", ",", "u", "[", "2", ":", "4", "]", ",", "u", "[", "4", ":", "]", ",", "'swissmodel'", ",", "'{}.pdb'", ".", "format", "(", "original_filename", ")", ")", "if", "op", ".", "exists", "(", "file_path", ")", ":", "new_filename", "=", "'{}_{}_{}_{}.pdb'", ".", "format", "(", "u", ",", "m", "[", "'from'", "]", ",", "m", "[", "'to'", "]", ",", "m", "[", "'template'", "]", "[", ":", "4", "]", ")", "shutil", ".", "copy", "(", "file_path", ",", "op", ".", "join", "(", "outdir", ",", "new_filename", ")", ")", "uniprot_to_swissmodel", "[", "u", "]", ".", "append", "(", "new_filename", ")", "else", ":", "log", ".", "warning", "(", "'{}: no file {} found for model'", ".", "format", "(", "u", ",", "file_path", ")", ")", "return", "uniprot_to_swissmodel" ]
50.461538
0.005984
def editfile(fpath):
    """ Runs gvim. Can also accept a module / class / function """
    if not isinstance(fpath, six.string_types):
        import types
        print('Rectify to module fpath = %r' % (fpath,))
        if isinstance(fpath, types.ModuleType):
            fpath = fpath.__file__
        else:
            fpath = sys.modules[fpath.__module__].__file__
        fpath_py = fpath.replace('.pyc', '.py')
        if exists(fpath_py):
            fpath = fpath_py

    print('[cplat] startfile(%r)' % fpath)
    if not exists(fpath):
        raise Exception('Cannot start nonexistent file: %r' % fpath)
    if LINUX:
        out, err, ret = cmd(geteditor(), fpath, detatch=True)
        if not ret:
            raise Exception(out + ' -- ' + err)
    elif DARWIN:
        out, err, ret = cmd(geteditor(), fpath, detatch=True)
        if not ret:
            raise Exception(out + ' -- ' + err)
    else:
        out, err, ret = cmd(geteditor(), fpath, detatch=True)
        if not ret:
            raise Exception(out + ' -- ' + err)
        #os.startfile(fpath)
    pass
[ "def", "editfile", "(", "fpath", ")", ":", "if", "not", "isinstance", "(", "fpath", ",", "six", ".", "string_types", ")", ":", "from", "six", "import", "types", "print", "(", "'Rectify to module fpath = %r'", "%", "(", "fpath", ",", ")", ")", "if", "isinstance", "(", "fpath", ",", "types", ".", "ModuleType", ")", ":", "fpath", "=", "fpath", ".", "__file__", "else", ":", "fpath", "=", "sys", ".", "modules", "[", "fpath", ".", "__module__", "]", ".", "__file__", "fpath_py", "=", "fpath", ".", "replace", "(", "'.pyc'", ",", "'.py'", ")", "if", "exists", "(", "fpath_py", ")", ":", "fpath", "=", "fpath_py", "print", "(", "'[cplat] startfile(%r)'", "%", "fpath", ")", "if", "not", "exists", "(", "fpath", ")", ":", "raise", "Exception", "(", "'Cannot start nonexistant file: %r'", "%", "fpath", ")", "if", "LINUX", ":", "out", ",", "err", ",", "ret", "=", "cmd", "(", "geteditor", "(", ")", ",", "fpath", ",", "detatch", "=", "True", ")", "if", "not", "ret", ":", "raise", "Exception", "(", "out", "+", "' -- '", "+", "err", ")", "elif", "DARWIN", ":", "out", ",", "err", ",", "ret", "=", "cmd", "(", "geteditor", "(", ")", ",", "fpath", ",", "detatch", "=", "True", ")", "if", "not", "ret", ":", "raise", "Exception", "(", "out", "+", "' -- '", "+", "err", ")", "else", ":", "out", ",", "err", ",", "ret", "=", "cmd", "(", "geteditor", "(", ")", ",", "fpath", ",", "detatch", "=", "True", ")", "if", "not", "ret", ":", "raise", "Exception", "(", "out", "+", "' -- '", "+", "err", ")", "#os.startfile(fpath)", "pass" ]
35.466667
0.002745
def register(linter): """required method to auto register this checker""" linter.register_checker(EncodingChecker(linter)) linter.register_checker(ByIdManagedMessagesChecker(linter))
[ "def", "register", "(", "linter", ")", ":", "linter", ".", "register_checker", "(", "EncodingChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "ByIdManagedMessagesChecker", "(", "linter", ")", ")" ]
47.75
0.005155
def get_service_by_name(self, name): """ Implementation of :meth:`twitcher.api.IRegistry.get_service_by_name`. """ try: service = self.store.fetch_by_name(name=name) except Exception: LOGGER.error('Could not get service with name %s', name) return {} else: return service.params
[ "def", "get_service_by_name", "(", "self", ",", "name", ")", ":", "try", ":", "service", "=", "self", ".", "store", ".", "fetch_by_name", "(", "name", "=", "name", ")", "except", "Exception", ":", "LOGGER", ".", "error", "(", "'Could not get service with name %s'", ",", "name", ")", "return", "{", "}", "else", ":", "return", "service", ".", "params" ]
33.090909
0.005348
def neg(self): """ Unary operation: neg :return: 0 - self """ si = StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0).sub(self) si.uninitialized = self.uninitialized return si
[ "def", "neg", "(", "self", ")", ":", "si", "=", "StridedInterval", "(", "bits", "=", "self", ".", "bits", ",", "stride", "=", "0", ",", "lower_bound", "=", "0", ",", "upper_bound", "=", "0", ")", ".", "sub", "(", "self", ")", "si", ".", "uninitialized", "=", "self", ".", "uninitialized", "return", "si" ]
24.5
0.011811
def loadMsbwt(self, dirName, logger):
        '''
        This function loads a BWT file and constructs total counts, indexes start positions, and constructs an FM index in memory
        @param dirName - the directory to load, inside should be '<DIR>/comp_msbwt.npy' or it will fail
        '''
        #open the file with our BWT in it
        self.dirName = dirName
        self.bwt = np.load(self.dirName+'/comp_msbwt.npy', 'r')

        #build auxiliary structures
        self.constructTotalCounts(logger)
        self.constructIndexing()
        self.constructFMIndex(logger)
[ "def", "loadMsbwt", "(", "self", ",", "dirName", ",", "logger", ")", ":", "#open the file with our BWT in it", "self", ".", "dirName", "=", "dirName", "self", ".", "bwt", "=", "np", ".", "load", "(", "self", ".", "dirName", "+", "'/comp_msbwt.npy'", ",", "'r'", ")", "#build auxiliary structures", "self", ".", "constructTotalCounts", "(", "logger", ")", "self", ".", "constructIndexing", "(", ")", "self", ".", "constructFMIndex", "(", "logger", ")" ]
44.538462
0.011844
def blobs(shape: List[int], porosity: float = 0.5, blobiness: int = 1): """ Generates an image containing amorphous blobs Parameters ---------- shape : list The size of the image to generate in [Nx, Ny, Nz] where N is the number of voxels porosity : float If specified, this will threshold the image to the specified value prior to returning. If ``None`` is specified, then the scalar noise field is converted to a uniform distribution and returned without thresholding. blobiness : int or list of ints(default = 1) Controls the morphology of the blobs. A higher number results in a larger number of small blobs. If a list is supplied then the blobs are anisotropic. Returns ------- image : ND-array A boolean array with ``True`` values denoting the pore space See Also -------- norm_to_uniform """ blobiness = sp.array(blobiness) shape = sp.array(shape) if sp.size(shape) == 1: shape = sp.full((3, ), int(shape)) sigma = sp.mean(shape)/(40*blobiness) im = sp.random.random(shape) im = spim.gaussian_filter(im, sigma=sigma) im = norm_to_uniform(im, scale=[0, 1]) if porosity: im = im < porosity return im
[ "def", "blobs", "(", "shape", ":", "List", "[", "int", "]", ",", "porosity", ":", "float", "=", "0.5", ",", "blobiness", ":", "int", "=", "1", ")", ":", "blobiness", "=", "sp", ".", "array", "(", "blobiness", ")", "shape", "=", "sp", ".", "array", "(", "shape", ")", "if", "sp", ".", "size", "(", "shape", ")", "==", "1", ":", "shape", "=", "sp", ".", "full", "(", "(", "3", ",", ")", ",", "int", "(", "shape", ")", ")", "sigma", "=", "sp", ".", "mean", "(", "shape", ")", "/", "(", "40", "*", "blobiness", ")", "im", "=", "sp", ".", "random", ".", "random", "(", "shape", ")", "im", "=", "spim", ".", "gaussian_filter", "(", "im", ",", "sigma", "=", "sigma", ")", "im", "=", "norm_to_uniform", "(", "im", ",", "scale", "=", "[", "0", ",", "1", "]", ")", "if", "porosity", ":", "im", "=", "im", "<", "porosity", "return", "im" ]
30
0.000769
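Usage sketch for the blob generator; because thresholding a uniform-distributed field at porosity keeps roughly that fraction of voxels, the check below should land near 0.4:

im = blobs(shape=[200, 200, 200], porosity=0.4, blobiness=2)
print(im.sum() / im.size)   # fraction of True (pore) voxels, close to 0.4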
def auto_slug_after_insert(mapper, connection, target): """Generate a slug from entity_type and id, unless slug is already set.""" if target.slug is None: target.slug = "{name}{sep}{id}".format( name=target.entity_class.lower(), sep=target.SLUG_SEPARATOR, id=target.id )
[ "def", "auto_slug_after_insert", "(", "mapper", ",", "connection", ",", "target", ")", ":", "if", "target", ".", "slug", "is", "None", ":", "target", ".", "slug", "=", "\"{name}{sep}{id}\"", ".", "format", "(", "name", "=", "target", ".", "entity_class", ".", "lower", "(", ")", ",", "sep", "=", "target", ".", "SLUG_SEPARATOR", ",", "id", "=", "target", ".", "id", ")" ]
50.166667
0.006536
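How such a hook is typically attached with SQLAlchemy's event system; Entity is a hypothetical mapped class carrying slug, entity_class and SLUG_SEPARATOR:

from sqlalchemy import event

# SQLAlchemy invokes 'after_insert' listeners as (mapper, connection, target),
# which matches the signature above.
event.listen(Entity, 'after_insert', auto_slug_after_insert)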
def append(self, obj):
        """Append an object to the end. If the object is a string, appends a
        :class:`Word <Word>` object.
        """
        if isinstance(obj, basestring):
            return self._collection.append(Word(obj))
        else:
            return self._collection.append(obj)
[ "def", "append", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "basestring", ")", ":", "return", "self", ".", "_collection", ".", "append", "(", "Word", "(", "obj", ")", ")", "else", ":", "return", "self", ".", "_collection", ".", "append", "(", "obj", ")" ]
33.125
0.003676
def _prt_line_detail(self, prt, values, lnum=""): """Print header and field values in a readable format.""" #### data = zip(self.req_str, self.ntgafobj._fields, values) data = zip(self.req_str, self.flds, values) txt = ["{:2}) {:3} {:20} {}".format(i, req, hdr, val) for i, (req, hdr, val) in enumerate(data)] prt.write("{LNUM}\n{TXT}\n".format(LNUM=lnum, TXT="\n".join(txt)))
[ "def", "_prt_line_detail", "(", "self", ",", "prt", ",", "values", ",", "lnum", "=", "\"\"", ")", ":", "#### data = zip(self.req_str, self.ntgafobj._fields, values)", "data", "=", "zip", "(", "self", ".", "req_str", ",", "self", ".", "flds", ",", "values", ")", "txt", "=", "[", "\"{:2}) {:3} {:20} {}\"", ".", "format", "(", "i", ",", "req", ",", "hdr", ",", "val", ")", "for", "i", ",", "(", "req", ",", "hdr", ",", "val", ")", "in", "enumerate", "(", "data", ")", "]", "prt", ".", "write", "(", "\"{LNUM}\\n{TXT}\\n\"", ".", "format", "(", "LNUM", "=", "lnum", ",", "TXT", "=", "\"\\n\"", ".", "join", "(", "txt", ")", ")", ")" ]
68.5
0.009615
def first(self) -> Signature:
        """ Retrieve the first Signature in mangling sort order """
        k = sorted(self._hsig.keys())
        return self._hsig[k[0]]
[ "def", "first", "(", "self", ")", "->", "Signature", ":", "k", "=", "sorted", "(", "self", ".", "_hsig", ".", "keys", "(", ")", ")", "return", "self", ".", "_hsig", "[", "k", "[", "0", "]", "]" ]
43
0.011429
def main(): """Program entry point""" parser = argparse.ArgumentParser() parser.add_argument("path", help="Path to the CAPTCHA image file") parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr') parser.add_argument("--epoch", help="Checkpoint epoch [Default 100]", type=int, default=100) args = parser.parse_args() init_state_names, init_state_arrays = lstm_init_states(batch_size=1) img = read_img(args.path) sample = SimpleBatch( data_names=['data'] + init_state_names, data=[mx.nd.array(img)] + init_state_arrays) mod = load_module(args.prefix, args.epoch, sample.data_names, sample.provide_data) mod.forward(sample) prob = mod.get_outputs()[0].asnumpy() prediction = CtcMetrics.ctc_label(np.argmax(prob, axis=-1).tolist()) # Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit) prediction = [p - 1 for p in prediction] print("Digits:", prediction)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"path\"", ",", "help", "=", "\"Path to the CAPTCHA image file\"", ")", "parser", ".", "add_argument", "(", "\"--prefix\"", ",", "help", "=", "\"Checkpoint prefix [Default 'ocr']\"", ",", "default", "=", "'ocr'", ")", "parser", ".", "add_argument", "(", "\"--epoch\"", ",", "help", "=", "\"Checkpoint epoch [Default 100]\"", ",", "type", "=", "int", ",", "default", "=", "100", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "init_state_names", ",", "init_state_arrays", "=", "lstm_init_states", "(", "batch_size", "=", "1", ")", "img", "=", "read_img", "(", "args", ".", "path", ")", "sample", "=", "SimpleBatch", "(", "data_names", "=", "[", "'data'", "]", "+", "init_state_names", ",", "data", "=", "[", "mx", ".", "nd", ".", "array", "(", "img", ")", "]", "+", "init_state_arrays", ")", "mod", "=", "load_module", "(", "args", ".", "prefix", ",", "args", ".", "epoch", ",", "sample", ".", "data_names", ",", "sample", ".", "provide_data", ")", "mod", ".", "forward", "(", "sample", ")", "prob", "=", "mod", ".", "get_outputs", "(", ")", "[", "0", "]", ".", "asnumpy", "(", ")", "prediction", "=", "CtcMetrics", ".", "ctc_label", "(", "np", ".", "argmax", "(", "prob", ",", "axis", "=", "-", "1", ")", ".", "tolist", "(", ")", ")", "# Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)", "prediction", "=", "[", "p", "-", "1", "for", "p", "in", "prediction", "]", "print", "(", "\"Digits:\"", ",", "prediction", ")" ]
40.791667
0.00499
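CtcMetrics.ctc_label above performs greedy CTC decoding. An illustrative stand-in for the rule it applies (collapse repeats, then drop blanks, class 0 here), not the project's exact implementation:

def ctc_collapse(seq, blank=0):
    out, prev = [], None
    for s in seq:
        if s != prev and s != blank:   # keep a symbol only when it changes
            out.append(s)
        prev = s
    return out

assert ctc_collapse([0, 3, 3, 0, 3, 5, 5]) == [3, 3, 5]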
async def read(self): """ Read from the box in a blocking manner. :returns: An item from the box. """ result = await self._queue.get() self._can_write.set() if self._queue.empty(): self._can_read.clear() return result
[ "async", "def", "read", "(", "self", ")", ":", "result", "=", "await", "self", ".", "_queue", ".", "get", "(", ")", "self", ".", "_can_write", ".", "set", "(", ")", "if", "self", ".", "_queue", ".", "empty", "(", ")", ":", "self", ".", "_can_read", ".", "clear", "(", ")", "return", "result" ]
20.285714
0.006734
def run_wfunc(self): ''' Execute a wrapper function Returns tuple of (json_data, '') ''' # Ensure that opts/grains are up to date # Execute routine data_cache = False data = None cdir = os.path.join(self.opts['cachedir'], 'minions', self.id) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'ssh_data.p') refresh = False if not os.path.isfile(datap): refresh = True else: passed_time = (time.time() - os.stat(datap).st_mtime) / 60 if passed_time > self.opts.get('cache_life', 60): refresh = True if self.opts.get('refresh_cache'): refresh = True conf_grains = {} # Save conf file grains before they get clobbered if 'ssh_grains' in self.opts: conf_grains = self.opts['ssh_grains'] if not data_cache: refresh = True if refresh: # Make the datap # TODO: Auto expire the datap pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper( self.opts, self.id, fsclient=self.fsclient, minion_opts=self.minion_opts, **self.target) opts_pkg = pre_wrapper['test.opts_pkg']() # pylint: disable=E1102 if '_error' in opts_pkg: #Refresh failed retcode = opts_pkg['retcode'] ret = salt.utils.json.dumps({'local': opts_pkg}) return ret, retcode opts_pkg['file_roots'] = self.opts['file_roots'] opts_pkg['pillar_roots'] = self.opts['pillar_roots'] opts_pkg['ext_pillar'] = self.opts['ext_pillar'] opts_pkg['extension_modules'] = self.opts['extension_modules'] opts_pkg['module_dirs'] = self.opts['module_dirs'] opts_pkg['_ssh_version'] = self.opts['_ssh_version'] opts_pkg['__master_opts__'] = self.context['master_opts'] if 'known_hosts_file' in self.opts: opts_pkg['known_hosts_file'] = self.opts['known_hosts_file'] if '_caller_cachedir' in self.opts: opts_pkg['_caller_cachedir'] = self.opts['_caller_cachedir'] else: opts_pkg['_caller_cachedir'] = self.opts['cachedir'] # Use the ID defined in the roster file opts_pkg['id'] = self.id retcode = 0 # Restore master grains for grain in conf_grains: opts_pkg['grains'][grain] = conf_grains[grain] # Enable roster grains support if 'grains' in self.target: for grain in self.target['grains']: opts_pkg['grains'][grain] = self.target['grains'][grain] popts = {} popts.update(opts_pkg) # Master centric operations such as mine.get must have master option loaded. # The pillar must then be compiled by passing master opts found in opts_pkg['__master_opts__'] # which causes the pillar renderer to loose track of salt master options # # Depending on popts merge order, it will overwrite some options found in opts_pkg['__master_opts__'] master_centric_funcs = [ "pillar.items", "mine.get" ] # Pillar compilation is a master centric operation. 
# Master options take precedence during Pillar compilation popts.update(opts_pkg['__master_opts__']) pillar = salt.pillar.Pillar( popts, opts_pkg['grains'], opts_pkg['id'], opts_pkg.get('saltenv', 'base') ) pillar_data = pillar.compile_pillar() # Once pillar has been compiled, restore priority of minion opts if self.fun not in master_centric_funcs: log.debug('%s is a minion function', self.fun) popts.update(opts_pkg) else: log.debug('%s is a master function', self.fun) # TODO: cache minion opts in datap in master.py data = {'opts': opts_pkg, 'grains': opts_pkg['grains'], 'pillar': pillar_data} if data_cache: with salt.utils.files.fopen(datap, 'w+b') as fp_: fp_.write( self.serial.dumps(data) ) if not data and data_cache: with salt.utils.files.fopen(datap, 'rb') as fp_: data = self.serial.load(fp_) opts = data.get('opts', {}) opts['grains'] = data.get('grains') # Restore master grains for grain in conf_grains: opts['grains'][grain] = conf_grains[grain] # Enable roster grains support if 'grains' in self.target: for grain in self.target['grains']: opts['grains'][grain] = self.target['grains'][grain] opts['pillar'] = data.get('pillar') wrapper = salt.client.ssh.wrapper.FunctionWrapper( opts, self.id, fsclient=self.fsclient, minion_opts=self.minion_opts, **self.target) self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context) wrapper.wfuncs = self.wfuncs # We're running in the mine, need to fetch the arguments from the # roster, pillar, master config (in that order) if self.mine: mine_args = None mine_fun_data = None mine_fun = self.fun if self.mine_functions and self.fun in self.mine_functions: mine_fun_data = self.mine_functions[self.fun] elif opts['pillar'] and self.fun in opts['pillar'].get('mine_functions', {}): mine_fun_data = opts['pillar']['mine_functions'][self.fun] elif self.fun in self.context['master_opts'].get('mine_functions', {}): mine_fun_data = self.context['master_opts']['mine_functions'][self.fun] if isinstance(mine_fun_data, dict): mine_fun = mine_fun_data.pop('mine_function', mine_fun) mine_args = mine_fun_data elif isinstance(mine_fun_data, list): for item in mine_fun_data[:]: if isinstance(item, dict) and 'mine_function' in item: mine_fun = item['mine_function'] mine_fun_data.pop(mine_fun_data.index(item)) mine_args = mine_fun_data else: mine_args = mine_fun_data # If we found mine_args, replace our command's args if isinstance(mine_args, dict): self.args = [] self.kwargs = mine_args elif isinstance(mine_args, list): self.args = mine_args self.kwargs = {} try: if self.mine: result = wrapper[mine_fun](*self.args, **self.kwargs) else: result = self.wfuncs[self.fun](*self.args, **self.kwargs) except TypeError as exc: result = 'TypeError encountered executing {0}: {1}'.format(self.fun, exc) log.error(result, exc_info_on_loglevel=logging.DEBUG) retcode = 1 except Exception as exc: result = 'An Exception occurred while executing {0}: {1}'.format(self.fun, exc) log.error(result, exc_info_on_loglevel=logging.DEBUG) retcode = 1 # Mimic the json data-structure that "salt-call --local" will # emit (as seen in ssh_py_shim.py) if isinstance(result, dict) and 'local' in result: ret = salt.utils.json.dumps({'local': result['local']}) else: ret = salt.utils.json.dumps({'local': {'return': result}}) return ret, retcode
[ "def", "run_wfunc", "(", "self", ")", ":", "# Ensure that opts/grains are up to date", "# Execute routine", "data_cache", "=", "False", "data", "=", "None", "cdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'cachedir'", "]", ",", "'minions'", ",", "self", ".", "id", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "cdir", ")", ":", "os", ".", "makedirs", "(", "cdir", ")", "datap", "=", "os", ".", "path", ".", "join", "(", "cdir", ",", "'ssh_data.p'", ")", "refresh", "=", "False", "if", "not", "os", ".", "path", ".", "isfile", "(", "datap", ")", ":", "refresh", "=", "True", "else", ":", "passed_time", "=", "(", "time", ".", "time", "(", ")", "-", "os", ".", "stat", "(", "datap", ")", ".", "st_mtime", ")", "/", "60", "if", "passed_time", ">", "self", ".", "opts", ".", "get", "(", "'cache_life'", ",", "60", ")", ":", "refresh", "=", "True", "if", "self", ".", "opts", ".", "get", "(", "'refresh_cache'", ")", ":", "refresh", "=", "True", "conf_grains", "=", "{", "}", "# Save conf file grains before they get clobbered", "if", "'ssh_grains'", "in", "self", ".", "opts", ":", "conf_grains", "=", "self", ".", "opts", "[", "'ssh_grains'", "]", "if", "not", "data_cache", ":", "refresh", "=", "True", "if", "refresh", ":", "# Make the datap", "# TODO: Auto expire the datap", "pre_wrapper", "=", "salt", ".", "client", ".", "ssh", ".", "wrapper", ".", "FunctionWrapper", "(", "self", ".", "opts", ",", "self", ".", "id", ",", "fsclient", "=", "self", ".", "fsclient", ",", "minion_opts", "=", "self", ".", "minion_opts", ",", "*", "*", "self", ".", "target", ")", "opts_pkg", "=", "pre_wrapper", "[", "'test.opts_pkg'", "]", "(", ")", "# pylint: disable=E1102", "if", "'_error'", "in", "opts_pkg", ":", "#Refresh failed", "retcode", "=", "opts_pkg", "[", "'retcode'", "]", "ret", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "{", "'local'", ":", "opts_pkg", "}", ")", "return", "ret", ",", "retcode", "opts_pkg", "[", "'file_roots'", "]", "=", "self", ".", "opts", "[", "'file_roots'", "]", "opts_pkg", "[", "'pillar_roots'", "]", "=", "self", ".", "opts", "[", "'pillar_roots'", "]", "opts_pkg", "[", "'ext_pillar'", "]", "=", "self", ".", "opts", "[", "'ext_pillar'", "]", "opts_pkg", "[", "'extension_modules'", "]", "=", "self", ".", "opts", "[", "'extension_modules'", "]", "opts_pkg", "[", "'module_dirs'", "]", "=", "self", ".", "opts", "[", "'module_dirs'", "]", "opts_pkg", "[", "'_ssh_version'", "]", "=", "self", ".", "opts", "[", "'_ssh_version'", "]", "opts_pkg", "[", "'__master_opts__'", "]", "=", "self", ".", "context", "[", "'master_opts'", "]", "if", "'known_hosts_file'", "in", "self", ".", "opts", ":", "opts_pkg", "[", "'known_hosts_file'", "]", "=", "self", ".", "opts", "[", "'known_hosts_file'", "]", "if", "'_caller_cachedir'", "in", "self", ".", "opts", ":", "opts_pkg", "[", "'_caller_cachedir'", "]", "=", "self", ".", "opts", "[", "'_caller_cachedir'", "]", "else", ":", "opts_pkg", "[", "'_caller_cachedir'", "]", "=", "self", ".", "opts", "[", "'cachedir'", "]", "# Use the ID defined in the roster file", "opts_pkg", "[", "'id'", "]", "=", "self", ".", "id", "retcode", "=", "0", "# Restore master grains", "for", "grain", "in", "conf_grains", ":", "opts_pkg", "[", "'grains'", "]", "[", "grain", "]", "=", "conf_grains", "[", "grain", "]", "# Enable roster grains support", "if", "'grains'", "in", "self", ".", "target", ":", "for", "grain", "in", "self", ".", "target", "[", "'grains'", "]", ":", "opts_pkg", "[", "'grains'", "]", "[", "grain", "]", "=", "self", 
".", "target", "[", "'grains'", "]", "[", "grain", "]", "popts", "=", "{", "}", "popts", ".", "update", "(", "opts_pkg", ")", "# Master centric operations such as mine.get must have master option loaded.", "# The pillar must then be compiled by passing master opts found in opts_pkg['__master_opts__']", "# which causes the pillar renderer to loose track of salt master options", "#", "# Depending on popts merge order, it will overwrite some options found in opts_pkg['__master_opts__']", "master_centric_funcs", "=", "[", "\"pillar.items\"", ",", "\"mine.get\"", "]", "# Pillar compilation is a master centric operation.", "# Master options take precedence during Pillar compilation", "popts", ".", "update", "(", "opts_pkg", "[", "'__master_opts__'", "]", ")", "pillar", "=", "salt", ".", "pillar", ".", "Pillar", "(", "popts", ",", "opts_pkg", "[", "'grains'", "]", ",", "opts_pkg", "[", "'id'", "]", ",", "opts_pkg", ".", "get", "(", "'saltenv'", ",", "'base'", ")", ")", "pillar_data", "=", "pillar", ".", "compile_pillar", "(", ")", "# Once pillar has been compiled, restore priority of minion opts", "if", "self", ".", "fun", "not", "in", "master_centric_funcs", ":", "log", ".", "debug", "(", "'%s is a minion function'", ",", "self", ".", "fun", ")", "popts", ".", "update", "(", "opts_pkg", ")", "else", ":", "log", ".", "debug", "(", "'%s is a master function'", ",", "self", ".", "fun", ")", "# TODO: cache minion opts in datap in master.py", "data", "=", "{", "'opts'", ":", "opts_pkg", ",", "'grains'", ":", "opts_pkg", "[", "'grains'", "]", ",", "'pillar'", ":", "pillar_data", "}", "if", "data_cache", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "datap", ",", "'w+b'", ")", "as", "fp_", ":", "fp_", ".", "write", "(", "self", ".", "serial", ".", "dumps", "(", "data", ")", ")", "if", "not", "data", "and", "data_cache", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "datap", ",", "'rb'", ")", "as", "fp_", ":", "data", "=", "self", ".", "serial", ".", "load", "(", "fp_", ")", "opts", "=", "data", ".", "get", "(", "'opts'", ",", "{", "}", ")", "opts", "[", "'grains'", "]", "=", "data", ".", "get", "(", "'grains'", ")", "# Restore master grains", "for", "grain", "in", "conf_grains", ":", "opts", "[", "'grains'", "]", "[", "grain", "]", "=", "conf_grains", "[", "grain", "]", "# Enable roster grains support", "if", "'grains'", "in", "self", ".", "target", ":", "for", "grain", "in", "self", ".", "target", "[", "'grains'", "]", ":", "opts", "[", "'grains'", "]", "[", "grain", "]", "=", "self", ".", "target", "[", "'grains'", "]", "[", "grain", "]", "opts", "[", "'pillar'", "]", "=", "data", ".", "get", "(", "'pillar'", ")", "wrapper", "=", "salt", ".", "client", ".", "ssh", ".", "wrapper", ".", "FunctionWrapper", "(", "opts", ",", "self", ".", "id", ",", "fsclient", "=", "self", ".", "fsclient", ",", "minion_opts", "=", "self", ".", "minion_opts", ",", "*", "*", "self", ".", "target", ")", "self", ".", "wfuncs", "=", "salt", ".", "loader", ".", "ssh_wrapper", "(", "opts", ",", "wrapper", ",", "self", ".", "context", ")", "wrapper", ".", "wfuncs", "=", "self", ".", "wfuncs", "# We're running in the mine, need to fetch the arguments from the", "# roster, pillar, master config (in that order)", "if", "self", ".", "mine", ":", "mine_args", "=", "None", "mine_fun_data", "=", "None", "mine_fun", "=", "self", ".", "fun", "if", "self", ".", "mine_functions", "and", "self", ".", "fun", "in", "self", ".", "mine_functions", ":", "mine_fun_data", "=", "self", ".", 
"mine_functions", "[", "self", ".", "fun", "]", "elif", "opts", "[", "'pillar'", "]", "and", "self", ".", "fun", "in", "opts", "[", "'pillar'", "]", ".", "get", "(", "'mine_functions'", ",", "{", "}", ")", ":", "mine_fun_data", "=", "opts", "[", "'pillar'", "]", "[", "'mine_functions'", "]", "[", "self", ".", "fun", "]", "elif", "self", ".", "fun", "in", "self", ".", "context", "[", "'master_opts'", "]", ".", "get", "(", "'mine_functions'", ",", "{", "}", ")", ":", "mine_fun_data", "=", "self", ".", "context", "[", "'master_opts'", "]", "[", "'mine_functions'", "]", "[", "self", ".", "fun", "]", "if", "isinstance", "(", "mine_fun_data", ",", "dict", ")", ":", "mine_fun", "=", "mine_fun_data", ".", "pop", "(", "'mine_function'", ",", "mine_fun", ")", "mine_args", "=", "mine_fun_data", "elif", "isinstance", "(", "mine_fun_data", ",", "list", ")", ":", "for", "item", "in", "mine_fun_data", "[", ":", "]", ":", "if", "isinstance", "(", "item", ",", "dict", ")", "and", "'mine_function'", "in", "item", ":", "mine_fun", "=", "item", "[", "'mine_function'", "]", "mine_fun_data", ".", "pop", "(", "mine_fun_data", ".", "index", "(", "item", ")", ")", "mine_args", "=", "mine_fun_data", "else", ":", "mine_args", "=", "mine_fun_data", "# If we found mine_args, replace our command's args", "if", "isinstance", "(", "mine_args", ",", "dict", ")", ":", "self", ".", "args", "=", "[", "]", "self", ".", "kwargs", "=", "mine_args", "elif", "isinstance", "(", "mine_args", ",", "list", ")", ":", "self", ".", "args", "=", "mine_args", "self", ".", "kwargs", "=", "{", "}", "try", ":", "if", "self", ".", "mine", ":", "result", "=", "wrapper", "[", "mine_fun", "]", "(", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")", "else", ":", "result", "=", "self", ".", "wfuncs", "[", "self", ".", "fun", "]", "(", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")", "except", "TypeError", "as", "exc", ":", "result", "=", "'TypeError encountered executing {0}: {1}'", ".", "format", "(", "self", ".", "fun", ",", "exc", ")", "log", ".", "error", "(", "result", ",", "exc_info_on_loglevel", "=", "logging", ".", "DEBUG", ")", "retcode", "=", "1", "except", "Exception", "as", "exc", ":", "result", "=", "'An Exception occurred while executing {0}: {1}'", ".", "format", "(", "self", ".", "fun", ",", "exc", ")", "log", ".", "error", "(", "result", ",", "exc_info_on_loglevel", "=", "logging", ".", "DEBUG", ")", "retcode", "=", "1", "# Mimic the json data-structure that \"salt-call --local\" will", "# emit (as seen in ssh_py_shim.py)", "if", "isinstance", "(", "result", ",", "dict", ")", "and", "'local'", "in", "result", ":", "ret", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "{", "'local'", ":", "result", "[", "'local'", "]", "}", ")", "else", ":", "ret", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "{", "'local'", ":", "{", "'return'", ":", "result", "}", "}", ")", "return", "ret", ",", "retcode" ]
41.282723
0.001486
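The subtle part of run_wfunc is the two merge orders for option dictionaries. A simplified sketch with illustrative stand-in dicts:

minion_opts = {'timeout': 5, 'cachedir': '/var/cache/salt/minion'}
master_opts = {'timeout': 60, 'ext_pillar': []}

popts = {}
popts.update(minion_opts)
popts.update(master_opts)   # master-centric funcs (pillar.items, mine.get): master wins
assert popts['timeout'] == 60

popts.update(minion_opts)   # ordinary minion funcs: restore minion precedence
assert popts['timeout'] == 5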
def process_kwargs(self, kwargs, prefix='default_', delete=True):
        """
        set self attributes based on kwargs, optionally deleting kwargs that are processed
        """
        processed = []
        for k in kwargs:
            if hasattr(self, prefix + k):
                processed += [k]
                setattr(self, prefix + k, kwargs[k])
        if delete:
            for k in processed:
                del(kwargs[k])
        return kwargs
[ "def", "process_kwargs", "(", "self", ",", "kwargs", ",", "prefix", "=", "'default_'", ",", "delete", "=", "True", ")", ":", "processed", "=", "[", "]", "for", "k", "in", "kwargs", ":", "if", "hasattr", "(", "self", ",", "prefix", "+", "k", ")", ":", "processed", "+=", "[", "k", "]", "setattr", "(", "self", ",", "prefix", "+", "k", ",", "kwargs", "[", "k", "]", ")", "for", "k", "in", "processed", ":", "del", "(", "kwargs", "[", "k", "]", ")", "return", "kwargs" ]
35.166667
0.006928
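A self-contained illustration of the prefix-matching behaviour; Widget is a throwaway class restating the method so the example runs on its own:

class Widget(object):
    default_color = 'black'

    def process_kwargs(self, kwargs, prefix='default_', delete=True):
        # collect matches first so deletion doesn't mutate the dict mid-iteration
        processed = [k for k in kwargs if hasattr(self, prefix + k)]
        for k in processed:
            setattr(self, prefix + k, kwargs[k])
            if delete:
                del kwargs[k]
        return kwargs

w = Widget()
rest = w.process_kwargs({'color': 'red', 'size': 3})
assert w.default_color == 'red' and rest == {'size': 3}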
def _make_names_unique(animations): """ Given a list of animations, some of which might have duplicate names, rename the first one to be <duplicate>_0, the second <duplicate>_1, <duplicate>_2, etc.""" counts = {} for a in animations: c = counts.get(a['name'], 0) + 1 counts[a['name']] = c if c > 1: a['name'] += '_' + str(c - 1) dupes = set(k for k, v in counts.items() if v > 1) for a in animations: if a['name'] in dupes: a['name'] += '_0'
[ "def", "_make_names_unique", "(", "animations", ")", ":", "counts", "=", "{", "}", "for", "a", "in", "animations", ":", "c", "=", "counts", ".", "get", "(", "a", "[", "'name'", "]", ",", "0", ")", "+", "1", "counts", "[", "a", "[", "'name'", "]", "]", "=", "c", "if", "c", ">", "1", ":", "a", "[", "'name'", "]", "+=", "'_'", "+", "str", "(", "c", "-", "1", ")", "dupes", "=", "set", "(", "k", "for", "k", ",", "v", "in", "counts", ".", "items", "(", ")", "if", "v", ">", "1", ")", "for", "a", "in", "animations", ":", "if", "a", "[", "'name'", "]", "in", "dupes", ":", "a", "[", "'name'", "]", "+=", "'_0'" ]
32.1875
0.003774
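A worked example of the renaming rule, assuming the same list-of-dicts shape the function expects:

anims = [{'name': 'walk'}, {'name': 'walk'}, {'name': 'run'}]
_make_names_unique(anims)
assert [a['name'] for a in anims] == ['walk_0', 'walk_1', 'run']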
def _create_intermediate_target(self, address, suffix): """ :param string address: A target address. :param string suffix: A string used as a suffix of the intermediate target name. :returns: The address of a synthetic intermediary target. """ if not isinstance(address, string_types): raise self.ExpectedAddressError("Expected string address argument, got type {type}" .format(type=type(address))) address = Address.parse(address, self._parse_context.rel_path) # NB(gmalmquist): Ideally there should be a way to indicate that these targets are synthetic # and shouldn't show up in `./pants list` etc, because we really don't want people to write # handwritten dependencies on them. For now just give them names containing "-unstable-" as a # hint. hash_str = hash_target(str(address), suffix) name = '{name}-unstable-{suffix}-{index}'.format( name=address.target_name, suffix=suffix.replace(' ', '.'), index=hash_str, ) self._parse_context.create_object_if_not_exists( 'target', name=name, dependencies=[address.spec], **self.extra_target_arguments ) return ':{}'.format(name)
[ "def", "_create_intermediate_target", "(", "self", ",", "address", ",", "suffix", ")", ":", "if", "not", "isinstance", "(", "address", ",", "string_types", ")", ":", "raise", "self", ".", "ExpectedAddressError", "(", "\"Expected string address argument, got type {type}\"", ".", "format", "(", "type", "=", "type", "(", "address", ")", ")", ")", "address", "=", "Address", ".", "parse", "(", "address", ",", "self", ".", "_parse_context", ".", "rel_path", ")", "# NB(gmalmquist): Ideally there should be a way to indicate that these targets are synthetic", "# and shouldn't show up in `./pants list` etc, because we really don't want people to write", "# handwritten dependencies on them. For now just give them names containing \"-unstable-\" as a", "# hint.", "hash_str", "=", "hash_target", "(", "str", "(", "address", ")", ",", "suffix", ")", "name", "=", "'{name}-unstable-{suffix}-{index}'", ".", "format", "(", "name", "=", "address", ".", "target_name", ",", "suffix", "=", "suffix", ".", "replace", "(", "' '", ",", "'.'", ")", ",", "index", "=", "hash_str", ",", ")", "self", ".", "_parse_context", ".", "create_object_if_not_exists", "(", "'target'", ",", "name", "=", "name", ",", "dependencies", "=", "[", "address", ".", "spec", "]", ",", "*", "*", "self", ".", "extra_target_arguments", ")", "return", "':{}'", ".", "format", "(", "name", ")" ]
40.166667
0.005673
def mdprint(*values, plain=None, **options): """ Convert Markdown to VTML and then print it. Follows same semantics as vtmlprint. """ print(*[mdrender(x, plain=plain) for x in values], **options)
[ "def", "mdprint", "(", "*", "values", ",", "plain", "=", "None", ",", "*", "*", "options", ")", ":", "print", "(", "*", "[", "mdrender", "(", "x", ",", "plain", "=", "plain", ")", "for", "x", "in", "values", "]", ",", "*", "*", "options", ")" ]
50
0.004926
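A usage sketch; that mdrender renders Markdown source and that plain=True strips styling are assumptions inferred from the names:

import sys

mdprint('# Status', 'All systems *nominal*')           # styled output
mdprint('fallback text', plain=True, file=sys.stderr)  # extra options pass to print()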
def update_assembly(data): """ Create a new Assembly() and convert as many of our old params to the new version as we can. Also report out any parameters that are removed and what their values are. """ print("##############################################################") print("Updating assembly to current version") ## New assembly object to update from. new_assembly = ip.Assembly("update", quiet=True) ## Hackersonly dict gets automatically overwritten ## Always use the current version for params in this dict. data._hackersonly = deepcopy(new_assembly._hackersonly) new_params = set(new_assembly.paramsdict.keys()) my_params = set(data.paramsdict.keys()) ## Find all params in loaded assembly that aren't in the new assembly. ## Make a new dict that doesn't include anything in removed_params removed_params = my_params.difference(new_params) for i in removed_params: print("Removing parameter: {} = {}".format(i, data.paramsdict[i])) ## Find all params that are in the new paramsdict and not in the old one. ## If the set isn't empty then we create a new dictionary based on the new ## assembly parameters and populated with currently loaded assembly values. ## Conditioning on not including any removed params. Magic. added_params = new_params.difference(my_params) for i in added_params: print("Adding parameter: {} = {}".format(i, new_assembly.paramsdict[i])) print("\nPlease take note of these changes. Every effort is made to\n"\ +"ensure compatibility across versions of ipyrad. See online\n"\ +"documentation for further details about new parameters.") time.sleep(5) print("##############################################################") if added_params: for i in data.paramsdict: if i not in removed_params: new_assembly.paramsdict[i] = data.paramsdict[i] data.paramsdict = deepcopy(new_assembly.paramsdict) data.save() return data
[ "def", "update_assembly", "(", "data", ")", ":", "print", "(", "\"##############################################################\"", ")", "print", "(", "\"Updating assembly to current version\"", ")", "## New assembly object to update pdate from.", "new_assembly", "=", "ip", ".", "Assembly", "(", "\"update\"", ",", "quiet", "=", "True", ")", "## Hackersonly dict gets automatically overwritten", "## Always use the current version for params in this dict.", "data", ".", "_hackersonly", "=", "deepcopy", "(", "new_assembly", ".", "_hackersonly", ")", "new_params", "=", "set", "(", "new_assembly", ".", "paramsdict", ".", "keys", "(", ")", ")", "my_params", "=", "set", "(", "data", ".", "paramsdict", ".", "keys", "(", ")", ")", "## Find all params in loaded assembly that aren't in the new assembly.", "## Make a new dict that doesn't include anything in removed_params", "removed_params", "=", "my_params", ".", "difference", "(", "new_params", ")", "for", "i", "in", "removed_params", ":", "print", "(", "\"Removing parameter: {} = {}\"", ".", "format", "(", "i", ",", "data", ".", "paramsdict", "[", "i", "]", ")", ")", "## Find all params that are in the new paramsdict and not in the old one.", "## If the set isn't emtpy then we create a new dictionary based on the new", "## assembly parameters and populated with currently loaded assembly values.", "## Conditioning on not including any removed params. Magic.", "added_params", "=", "new_params", ".", "difference", "(", "my_params", ")", "for", "i", "in", "added_params", ":", "print", "(", "\"Adding parameter: {} = {}\"", ".", "format", "(", "i", ",", "new_assembly", ".", "paramsdict", "[", "i", "]", ")", ")", "print", "(", "\"\\nPlease take note of these changes. Every effort is made to\\n\"", "+", "\"ensure compatibility across versions of ipyrad. See online\\n\"", "+", "\"documentation for further details about new parameters.\"", ")", "time", ".", "sleep", "(", "5", ")", "print", "(", "\"##############################################################\"", ")", "if", "added_params", ":", "for", "i", "in", "data", ".", "paramsdict", ":", "if", "i", "not", "in", "removed_params", ":", "new_assembly", ".", "paramsdict", "[", "i", "]", "=", "data", ".", "paramsdict", "[", "i", "]", "data", ".", "paramsdict", "=", "deepcopy", "(", "new_assembly", ".", "paramsdict", ")", "data", ".", "save", "(", ")", "return", "data" ]
42.395833
0.009606
def _set_params(self, p): """ Change parameters in an OrderedDict to a list, with or without uncertainties. :param p: parameters in OrderedDict :return: parameters in list :note: internal function """ if self.force_norm: params = [value.n for key, value in p.items()] else: params = [value for key, value in p.items()] return params
[ "def", "_set_params", "(", "self", ",", "p", ")", ":", "if", "self", ".", "force_norm", ":", "params", "=", "[", "value", ".", "n", "for", "key", ",", "value", "in", "p", ".", "items", "(", ")", "]", "else", ":", "params", "=", "[", "value", "for", "key", ",", "value", "in", "p", ".", "items", "(", ")", "]", "return", "params" ]
31.538462
0.004739
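A sketch of the two branches; that the values come from the uncertainties package is an assumption inferred from the .n attribute access:

from collections import OrderedDict
from uncertainties import ufloat  # assumed dependency; .n is the nominal value

p = OrderedDict([('a', ufloat(1.0, 0.1)), ('b', ufloat(2.0, 0.2))])

# force_norm set: strip uncertainties down to nominal values
[value.n for key, value in p.items()]   # -> [1.0, 2.0]
# force_norm unset: the ufloat objects pass through unchanged
[value for key, value in p.items()]     # -> [1.0+/-0.1, 2.0+/-0.2]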
def tas53(msg): """Aircraft true airspeed, BDS 5,3 message Args: msg (String): 28 bytes hexadecimal message Returns: float: true airspeed in knots, or None if the field is not available """ d = hex2bin(data(msg)) if d[33] == '0': return None tas = bin2int(d[34:46]) * 0.5 # kts return round(tas, 1)
[ "def", "tas53", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "if", "d", "[", "33", "]", "==", "'0'", ":", "return", "None", "tas", "=", "bin2int", "(", "d", "[", "34", ":", "46", "]", ")", "*", "0.5", "# kts", "return", "round", "(", "tas", ",", "1", ")" ]
19.125
0.003115
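The decoding arithmetic in isolation, on a synthetic bit string rather than a real BDS 5,3 message:

d = '0' * 33 + '1' + format(900, '012b') + '0' * 10   # status bit 33 set
tas = int(d[34:46], 2) * 0.5   # 12-bit field, 0.5 kt resolution
# tas == 450.0; the field saturates at 4095 * 0.5 = 2047.5 kt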
def codec_desc(self): """string or None""" info = self.decSpecificInfo desc = None if info is not None: desc = info.description return desc
[ "def", "codec_desc", "(", "self", ")", ":", "info", "=", "self", ".", "decSpecificInfo", "desc", "=", "None", "if", "info", "is", "not", "None", ":", "desc", "=", "info", ".", "description", "return", "desc" ]
23.125
0.010417
def cal_frame_according_boundaries(left, right, top, bottom, parent_size, gaphas_editor=True, group=True): """ Generate margin, relative position and size from the handed boundary parameters and parent size """ # print("parent_size ->", parent_size) margin = cal_margin(parent_size) # Add margin and ensure that the upper left corner is within the state if group: # frame of grouped state rel_pos = max(left - margin, 0), max(top - margin, 0) # Add margin and ensure that the lower right corner is within the state size = (min(right - left + 2 * margin, parent_size[0] - rel_pos[0]), min(bottom - top + 2 * margin, parent_size[1] - rel_pos[1])) else: # frame inside of state # rel_pos = max(margin, 0), max(margin, 0) rel_pos = left, top size = right - left, bottom - top return margin, rel_pos, size
[ "def", "cal_frame_according_boundaries", "(", "left", ",", "right", ",", "top", ",", "bottom", ",", "parent_size", ",", "gaphas_editor", "=", "True", ",", "group", "=", "True", ")", ":", "# print(\"parent_size ->\", parent_size)", "margin", "=", "cal_margin", "(", "parent_size", ")", "# Add margin and ensure that the upper left corner is within the state", "if", "group", ":", "# frame of grouped state", "rel_pos", "=", "max", "(", "left", "-", "margin", ",", "0", ")", ",", "max", "(", "top", "-", "margin", ",", "0", ")", "# Add margin and ensure that the lower right corner is within the state", "size", "=", "(", "min", "(", "right", "-", "left", "+", "2", "*", "margin", ",", "parent_size", "[", "0", "]", "-", "rel_pos", "[", "0", "]", ")", ",", "min", "(", "bottom", "-", "top", "+", "2", "*", "margin", ",", "parent_size", "[", "1", "]", "-", "rel_pos", "[", "1", "]", ")", ")", "else", ":", "# frame inside of state", "# rel_pos = max(margin, 0), max(margin, 0)", "rel_pos", "=", "left", ",", "top", "size", "=", "right", "-", "left", ",", "bottom", "-", "top", "return", "margin", ",", "rel_pos", ",", "size" ]
49.166667
0.003326
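A worked example of the group branch, calling the function above; cal_margin is not shown in the record, so it is stubbed here with an assumed 5% rule:

def cal_margin(parent_size):      # stub: assume 5% of the smaller side
    return min(parent_size) * 0.05

margin, rel_pos, size = cal_frame_according_boundaries(
    left=10.0, right=50.0, top=20.0, bottom=60.0, parent_size=(100.0, 80.0))
# margin == 4.0, rel_pos == (6.0, 16.0), size == (48.0, 48.0):
# the frame grows by a margin on every side but is clamped so it
# never extends past the parent state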
def _chi_squared(self, proportions, margin, observed): """return ndarray of chi-squared measures for proportions' columns. *proportions* (ndarray): The basis of chi-squared calculations *margin* (ndarray): Column margin for proportions (See `def _margin`) *observed* (ndarray): Row margin proportions (See `def _observed`) """ n = self._element_count chi_squared = np.zeros([n, n]) for i in xrange(1, n): for j in xrange(0, n - 1): denominator = 1 / margin[i] + 1 / margin[j] chi_squared[i, j] = chi_squared[j, i] = ( np.sum(np.square(proportions[:, i] - proportions[:, j]) / observed) / denominator ) return chi_squared
[ "def", "_chi_squared", "(", "self", ",", "proportions", ",", "margin", ",", "observed", ")", ":", "n", "=", "self", ".", "_element_count", "chi_squared", "=", "np", ".", "zeros", "(", "[", "n", ",", "n", "]", ")", "for", "i", "in", "xrange", "(", "1", ",", "n", ")", ":", "for", "j", "in", "xrange", "(", "0", ",", "n", "-", "1", ")", ":", "denominator", "=", "1", "/", "margin", "[", "i", "]", "+", "1", "/", "margin", "[", "j", "]", "chi_squared", "[", "i", ",", "j", "]", "=", "chi_squared", "[", "j", ",", "i", "]", "=", "(", "np", ".", "sum", "(", "np", ".", "square", "(", "proportions", "[", ":", ",", "i", "]", "-", "proportions", "[", ":", ",", "j", "]", ")", "/", "observed", ")", "/", "denominator", ")", "return", "chi_squared" ]
45.764706
0.003778
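A standalone numpy sketch of the same pairwise computation; the input arrays are made-up stand-ins for the method's proportions and margins:

import numpy as np

proportions = np.array([[0.2, 0.3, 0.5],
                        [0.8, 0.7, 0.5]])   # rows: categories, cols: elements
margin = np.array([40.0, 60.0, 100.0])      # column margins
observed = np.array([0.4, 0.6])             # row margin proportions

n = proportions.shape[1]
chi_squared = np.zeros([n, n])
for i in range(1, n):
    for j in range(0, n - 1):
        denominator = 1 / margin[i] + 1 / margin[j]
        # symmetric fill: each off-diagonal pair is set once in both cells
        chi_squared[i, j] = chi_squared[j, i] = (
            np.sum(np.square(proportions[:, i] - proportions[:, j])
                   / observed) / denominator
        )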
def root_and_children_to_graph(self,root): """Take a root node and its children and make them into a graph""" g = Graph() g.add_node(root) edges = [] edges += self.get_node_edges(root,"outgoing") for c in self.get_children(root): g.add_node(c) edges += self.get_node_edges(c,"outgoing") for e in edges: g.add_edge(e) return g
[ "def", "root_and_children_to_graph", "(", "self", ",", "root", ")", ":", "g", "=", "Graph", "(", ")", "g", ".", "add_node", "(", "root", ")", "edges", "=", "[", "]", "edges", "+=", "self", ".", "get_node_edges", "(", "root", ",", "\"outgoing\"", ")", "for", "c", "in", "self", ".", "get_children", "(", "root", ")", ":", "g", ".", "add_node", "(", "c", ")", "edges", "+=", "self", ".", "get_node_edges", "(", "c", ",", "\"outgoing\"", ")", "for", "e", "in", "edges", ":", "g", ".", "add_edge", "(", "e", ")", "return", "g" ]
34.181818
0.044041
def sys_writev(self, fd, iov, count): """ Works just like C{sys_write} except that multiple buffers are written out. :rtype: int :param fd: the file descriptor of the file to write. :param iov: the buffer where the bytes to write are taken. :param count: amount of C{iov} buffers to write into the file. :return: the amount of bytes written in total. """ cpu = self.current ptrsize = cpu.address_bit_size sizeof_iovec = 2 * (ptrsize // 8) total = 0 try: write_fd = self._get_fd(fd) except FdError as e: logger.error(f"writev: Not a valid file descriptor ({fd})") return -e.err for i in range(0, count): buf = cpu.read_int(iov + i * sizeof_iovec, ptrsize) size = cpu.read_int(iov + i * sizeof_iovec + (sizeof_iovec // 2), ptrsize) data = [Operators.CHR(cpu.read_int(buf + i, 8)) for i in range(size)] data = self._transform_write_data(data) write_fd.write(data) self.syscall_trace.append(("_write", fd, data)) total += size return total
[ "def", "sys_writev", "(", "self", ",", "fd", ",", "iov", ",", "count", ")", ":", "cpu", "=", "self", ".", "current", "ptrsize", "=", "cpu", ".", "address_bit_size", "sizeof_iovec", "=", "2", "*", "(", "ptrsize", "//", "8", ")", "total", "=", "0", "try", ":", "write_fd", "=", "self", ".", "_get_fd", "(", "fd", ")", "except", "FdError", "as", "e", ":", "logger", ".", "error", "(", "f\"writev: Not a valid file descriptor ({fd})\"", ")", "return", "-", "e", ".", "err", "for", "i", "in", "range", "(", "0", ",", "count", ")", ":", "buf", "=", "cpu", ".", "read_int", "(", "iov", "+", "i", "*", "sizeof_iovec", ",", "ptrsize", ")", "size", "=", "cpu", ".", "read_int", "(", "iov", "+", "i", "*", "sizeof_iovec", "+", "(", "sizeof_iovec", "//", "2", ")", ",", "ptrsize", ")", "data", "=", "[", "Operators", ".", "CHR", "(", "cpu", ".", "read_int", "(", "buf", "+", "i", ",", "8", ")", ")", "for", "i", "in", "range", "(", "size", ")", "]", "data", "=", "self", ".", "_transform_write_data", "(", "data", ")", "write_fd", ".", "write", "(", "data", ")", "self", ".", "syscall_trace", ".", "append", "(", "(", "\"_write\"", ",", "fd", ",", "data", ")", ")", "total", "+=", "size", "return", "total" ]
38.7
0.004202
def _set_service(self, v, load=False): """ Setter method for service, mapped from YANG variable /service (container) If this variable is read-only (config: false) in the source YANG file, then _set_service is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_service() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=service.service, is_container='container', presence=False, yang_name="service", rest_name="service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Perform services', u'sort-priority': u'18'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """service must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=service.service, is_container='container', presence=False, yang_name="service", rest_name="service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Perform services', u'sort-priority': u'18'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""", }) self.__service = t if hasattr(self, '_set'): self._set()
[ "def", "_set_service", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "service", ".", "service", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"service\"", ",", "rest_name", "=", "\"service\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Perform services'", ",", "u'sort-priority'", ":", "u'18'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-aaa'", ",", "defining_module", "=", "'brocade-aaa'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"service must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=service.service, is_container='container', presence=False, yang_name=\"service\", rest_name=\"service\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Perform services', u'sort-priority': u'18'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__service", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
70.863636
0.005696
def cli(sequencepath, report, refseq_database): """ Pass command line arguments to, and run the feature extraction functions """ main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
[ "def", "cli", "(", "sequencepath", ",", "report", ",", "refseq_database", ")", ":", "main", "(", "sequencepath", ",", "report", ",", "refseq_database", ",", "num_threads", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")" ]
45
0.008734
def changes_since(self, domain, date_or_datetime): """ Gets the changes for a domain since the specified date/datetime. The date can be one of: - a Python datetime object - a Python date object - a string in the format 'YYYY-MM-DD HH:MM:SS' - a string in the format 'YYYY-MM-DD' It returns a list of dicts, whose keys depend on the specific change that was made. A simple example of such a change dict: {u'accountId': 000000, u'action': u'update', u'changeDetails': [{u'field': u'serial_number', u'newValue': u'1354038941', u'originalValue': u'1354038940'}, {u'field': u'updated_at', u'newValue': u'Tue Nov 27 17:55:41 UTC 2012', u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}], u'domain': u'example.com', u'targetId': 00000000, u'targetType': u'Domain'} """ domain_id = utils.get_id(domain) dt = utils.iso_time_string(date_or_datetime, show_tzinfo=True) uri = "/domains/%s/changes?since=%s" % (domain_id, dt) resp, body = self._retry_get(uri) return body.get("changes", [])
[ "def", "changes_since", "(", "self", ",", "domain", ",", "date_or_datetime", ")", ":", "domain_id", "=", "utils", ".", "get_id", "(", "domain", ")", "dt", "=", "utils", ".", "iso_time_string", "(", "date_or_datetime", ",", "show_tzinfo", "=", "True", ")", "uri", "=", "\"/domains/%s/changes?since=%s\"", "%", "(", "domain_id", ",", "dt", ")", "resp", ",", "body", "=", "self", ".", "_retry_get", "(", "uri", ")", "return", "body", ".", "get", "(", "\"changes\"", ",", "[", "]", ")" ]
42.724138
0.001579
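A usage sketch; `dns` stands in for whatever manager object exposes this method, and the client setup is not part of the record above:

import datetime

since = datetime.date(2012, 11, 27)   # date, datetime or string all accepted
for change in dns.changes_since('example.com', since):
    print(change['action'], change['domain'])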
def pretty_dump(fn): """ Decorator used to output prettified JSON. ``response.content_type`` is set to ``application/json; charset=utf-8``. Args: fn (fn pointer): Function returning any basic python data structure. Returns: str: Data converted to prettified JSON. """ @wraps(fn) def pretty_dump_wrapper(*args, **kwargs): response.content_type = "application/json; charset=utf-8" return json.dumps( fn(*args, **kwargs), # sort_keys=True, indent=4, separators=(',', ': ') ) return pretty_dump_wrapper
[ "def", "pretty_dump", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "pretty_dump_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", ".", "content_type", "=", "\"application/json; charset=utf-8\"", "return", "json", ".", "dumps", "(", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "# sort_keys=True,", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "return", "pretty_dump_wrapper" ]
24.16
0.001592
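Applying the decorator to a bottle route; the route path and payload are illustrative:

from bottle import route

@route('/api/status')
@pretty_dump
def status():
    return {'status': 'ok', 'version': 1}

# GET /api/status now returns indented JSON with content type
# 'application/json; charset=utf-8'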
def parse_conf(self, keys=[]): """Parse configuration values from the database. The extension must have been previously initialized. If a key is not found in the database, it will be created with the default value specified. Arguments: keys (list[str]): list of keys to parse. If the list is empty, then all the keys known to the application will be used. Returns: dict of the parsed config values. """ confs = self.app.config.get('WAFFLE_CONFS', {}) if not keys: keys = confs.keys() result = {} for key in keys: # Some things cannot be changed... if key.startswith('WAFFLE_'): continue # No arbitrary keys if key not in confs.keys(): continue stored_conf = self.configstore.get(key) if not stored_conf: # Store new record in database value = confs[key].get('default', '') stored_conf = self.configstore.put(key, util.serialize(value)) self.configstore.commit() else: # Get stored value value = util.deserialize(stored_conf.get_value()) result[stored_conf.get_key()] = value return result
[ "def", "parse_conf", "(", "self", ",", "keys", "=", "[", "]", ")", ":", "confs", "=", "self", ".", "app", ".", "config", ".", "get", "(", "'WAFFLE_CONFS'", ",", "{", "}", ")", "if", "not", "keys", ":", "keys", "=", "confs", ".", "keys", "(", ")", "result", "=", "{", "}", "for", "key", "in", "keys", ":", "# Some things cannot be changed...", "if", "key", ".", "startswith", "(", "'WAFFLE_'", ")", ":", "continue", "# No arbitrary keys", "if", "key", "not", "in", "confs", ".", "keys", "(", ")", ":", "continue", "stored_conf", "=", "self", ".", "configstore", ".", "get", "(", "key", ")", "if", "not", "stored_conf", ":", "# Store new record in database", "value", "=", "confs", "[", "key", "]", ".", "get", "(", "'default'", ",", "''", ")", "stored_conf", "=", "self", ".", "configstore", ".", "put", "(", "key", ",", "util", ".", "serialize", "(", "value", ")", ")", "self", ".", "configstore", ".", "commit", "(", ")", "else", ":", "# Get stored value", "value", "=", "util", ".", "deserialize", "(", "stored_conf", ".", "get_value", "(", ")", ")", "result", "[", "stored_conf", ".", "get_key", "(", ")", "]", "=", "value", "return", "result" ]
29.511111
0.001458
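A sketch of the expected WAFFLE_CONFS shape; only the 'default' key is read by the code above, and the wiring (`app`, the initialized `waffle` extension, its configstore) is assumed:

app.config['WAFFLE_CONFS'] = {
    'SITE_NAME':  {'default': 'My Site'},
    'MAX_UPLOAD': {'default': 1024},
}

values = waffle.parse_conf()   # empty keys list -> parse every known key
# first run: defaults are serialized into the store and returned,
# e.g. {'SITE_NAME': 'My Site', 'MAX_UPLOAD': 1024}
# later runs: stored (possibly edited) values win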
def to_file(file_): """Serializes file to id string :param file_: object to serialize :return: string id """ from sevenbridges.models.file import File if not file_: raise SbgError('File is required!') elif isinstance(file_, File): return file_.id elif isinstance(file_, six.string_types): return file_ else: raise SbgError('Invalid file parameter!')
[ "def", "to_file", "(", "file_", ")", ":", "from", "sevenbridges", ".", "models", ".", "file", "import", "File", "if", "not", "file_", ":", "raise", "SbgError", "(", "'File is required!'", ")", "elif", "isinstance", "(", "file_", ",", "File", ")", ":", "return", "file_", ".", "id", "elif", "isinstance", "(", "file_", ",", "six", ".", "string_types", ")", ":", "return", "file_", "else", ":", "raise", "SbgError", "(", "'Invalid file parameter!'", ")" ]
32.571429
0.004264
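The three accepted inputs, sketched; file_instance stands for any sevenbridges File object and the id string is made up:

to_file('568cf5bce4b0307bc0462060')   # id string -> returned as-is
to_file(file_instance)                # File object -> its .id
to_file(None)                         # -> raises SbgError('File is required!')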
def inasafe_field_header(field, feature, parent): """Retrieve the header name of a field from definitions. For instance: inasafe_field_header('minimum_needs__clean_water') -> 'Clean water' """ _ = feature, parent # NOQA age_fields = [under_5_displaced_count_field, over_60_displaced_count_field] symbol_mapping = { 'over': '>', 'under': '<' } field_definition = definition(field, 'field_name') if field_definition: if field_definition in age_fields: header_format = tr('{symbol} {age} y.o') field_name = field_definition.get('field_name') if field_name: symbol, age = field_name.split('_')[:2] if symbol.lower() in list(symbol_mapping.keys()): header_name = header_format.format( symbol=symbol_mapping[symbol.lower()], age=age ) return header_name header_name = field_definition.get('header_name') name = field_definition.get('name') if header_name: return header_name.capitalize() else: return name.capitalize() return None
[ "def", "inasafe_field_header", "(", "field", ",", "feature", ",", "parent", ")", ":", "_", "=", "feature", ",", "parent", "# NOQA", "age_fields", "=", "[", "under_5_displaced_count_field", ",", "over_60_displaced_count_field", "]", "symbol_mapping", "=", "{", "'over'", ":", "'>'", ",", "'under'", ":", "'<'", "}", "field_definition", "=", "definition", "(", "field", ",", "'field_name'", ")", "if", "field_definition", ":", "if", "field_definition", "in", "age_fields", ":", "header_format", "=", "tr", "(", "'{symbol} {age} y.o'", ")", "field_name", "=", "field_definition", ".", "get", "(", "'field_name'", ")", "if", "field_name", ":", "symbol", ",", "age", "=", "field_name", ".", "split", "(", "'_'", ")", "[", ":", "2", "]", "if", "symbol", ".", "lower", "(", ")", "in", "list", "(", "symbol_mapping", ".", "keys", "(", ")", ")", ":", "header_name", "=", "header_format", ".", "format", "(", "symbol", "=", "symbol_mapping", "[", "symbol", ".", "lower", "(", ")", "]", ",", "age", "=", "age", ")", "return", "header_name", "header_name", "=", "field_definition", ".", "get", "(", "'header_name'", ")", "name", "=", "field_definition", ".", "get", "(", "'name'", ")", "if", "header_name", ":", "return", "header_name", ".", "capitalize", "(", ")", "else", ":", "return", "name", ".", "capitalize", "(", ")", "return", "None" ]
35.294118
0.000811
def create_customer_request(self, fields=None, prefetch=True, **fieldargs): """Create a new customer request and return an issue Resource for it. Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments will be ignored. By default, the client will immediately reload the issue Resource created by this method in order to return a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument. JIRA projects may contain many different issue types. Some issue screens have different requirements for fields in a new issue. This information is available through the 'createmeta' method. Further examples are available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue :param fields: a dict containing field names and the values to use. If present, all other keyword arguments will be ignored :type fields: Dict[str, Any] :param prefetch: whether to reload the created issue Resource so that all of its data is present in the value returned from this method :type prefetch: bool :rtype: Issue """ data = fields p = data['serviceDeskId'] service_desk = None if isinstance(p, string_types) or isinstance(p, integer_types): service_desk = self.service_desk(p) elif isinstance(p, ServiceDesk): service_desk = p data['serviceDeskId'] = service_desk.id p = data['requestTypeId'] if isinstance(p, integer_types): data['requestTypeId'] = p elif isinstance(p, string_types): data['requestTypeId'] = self.request_type_by_name( service_desk, p).id url = self._options['server'] + '/rest/servicedeskapi/request' headers = {'X-ExperimentalApi': 'opt-in'} r = self._session.post(url, headers=headers, data=json.dumps(data)) raw_issue_json = json_loads(r) if 'issueKey' not in raw_issue_json: raise JIRAError(r.status_code, request=r) if prefetch: return self.issue(raw_issue_json['issueKey']) else: return Issue(self._options, self._session, raw=raw_issue_json)
[ "def", "create_customer_request", "(", "self", ",", "fields", "=", "None", ",", "prefetch", "=", "True", ",", "*", "*", "fieldargs", ")", ":", "data", "=", "fields", "p", "=", "data", "[", "'serviceDeskId'", "]", "service_desk", "=", "None", "if", "isinstance", "(", "p", ",", "string_types", ")", "or", "isinstance", "(", "p", ",", "integer_types", ")", ":", "service_desk", "=", "self", ".", "service_desk", "(", "p", ")", "elif", "isinstance", "(", "p", ",", "ServiceDesk", ")", ":", "service_desk", "=", "p", "data", "[", "'serviceDeskId'", "]", "=", "service_desk", ".", "id", "p", "=", "data", "[", "'requestTypeId'", "]", "if", "isinstance", "(", "p", ",", "integer_types", ")", ":", "data", "[", "'requestTypeId'", "]", "=", "p", "elif", "isinstance", "(", "p", ",", "string_types", ")", ":", "data", "[", "'requestTypeId'", "]", "=", "self", ".", "request_type_by_name", "(", "service_desk", ",", "p", ")", ".", "id", "url", "=", "self", ".", "_options", "[", "'server'", "]", "+", "'/rest/servicedeskapi/request'", "headers", "=", "{", "'X-ExperimentalApi'", ":", "'opt-in'", "}", "r", "=", "self", ".", "_session", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "raw_issue_json", "=", "json_loads", "(", "r", ")", "if", "'issueKey'", "not", "in", "raw_issue_json", ":", "raise", "JIRAError", "(", "r", ".", "status_code", ",", "request", "=", "r", ")", "if", "prefetch", ":", "return", "self", ".", "issue", "(", "raw_issue_json", "[", "'issueKey'", "]", ")", "else", ":", "return", "Issue", "(", "self", ".", "_options", ",", "self", ".", "_session", ",", "raw", "=", "raw_issue_json", ")" ]
46.442308
0.004461
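A usage sketch against a connected JIRA client; the desk key, the request type name, and the requestFieldValues payload shape are assumptions:

request = jira.create_customer_request(fields={
    'serviceDeskId': 'ITDESK',        # key string, numeric id, or ServiceDesk
    'requestTypeId': 'Get IT help',   # request type name or numeric id
    'requestFieldValues': {           # assumed Service Desk payload shape
        'summary': 'Printer on floor 3 is jammed',
        'description': 'Paper jam in tray 2.',
    },
})
print(request.key)   # prefetch=True reloads the created issue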
def info(self, **kwargs): """ Get the primary information about a TV season by its season number. Args: language: (optional) ISO 639 code. append_to_response: (optional) Comma separated, any TV series method. Returns: A dict representation of the JSON returned from the API. """ path = self._get_series_id_season_number_path('info') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_series_id_season_number_path", "(", "'info'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
32.176471
0.003552
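A usage sketch; the TV_Seasons wrapper class, its constructor arguments, and the ids are all assumptions about the surrounding library:

season = TV_Seasons(1399, 1)   # hypothetical: series id, season number
data = season.info(language='en-US', append_to_response='credits')
print(data['air_date'], len(data['episodes']))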
def section_end_info(template, tag_key, state, index): """ Given the tag key of an opening section tag, find the corresponding closing tag (if it exists) and return information about that match. """ state.section.push(tag_key) match = None matchinfo = None search_index = index while state.section: match = state.tag_re.search(template, search_index) if not match: raise Exception("Open section %s never closed" % tag_key) matchinfo = get_match_info(template, match, state) # If we find a new section tag, add it to the stack and keep going if matchinfo['tag_type'] in ('#', '^'): state.section.push(matchinfo['tag_key']) # If we find a closing tag for the current section, 'close' it by # popping the stack elif matchinfo['tag_type'] == '/': if matchinfo['tag_key'] == state.section(): state.section.pop() else: raise Exception( 'Unexpected section end: received %s, expected {{/%s}}' % ( repr(match.group(0)), tag_key)) search_index = matchinfo['tag_end'] return matchinfo
[ "def", "section_end_info", "(", "template", ",", "tag_key", ",", "state", ",", "index", ")", ":", "state", ".", "section", ".", "push", "(", "tag_key", ")", "match", "=", "None", "matchinfo", "=", "None", "search_index", "=", "index", "while", "state", ".", "section", ":", "match", "=", "state", ".", "tag_re", ".", "search", "(", "template", ",", "search_index", ")", "if", "not", "match", ":", "raise", "Exception", "(", "\"Open section %s never closed\"", "%", "tag_key", ")", "matchinfo", "=", "get_match_info", "(", "template", ",", "match", ",", "state", ")", "# If we find a new section tag, add it to the stack and keep going", "if", "matchinfo", "[", "'tag_type'", "]", "in", "(", "'#'", ",", "'^'", ")", ":", "state", ".", "section", ".", "push", "(", "matchinfo", "[", "'tag_key'", "]", ")", "# If we find a closing tag for the current section, 'close' it by", "# popping the stack", "elif", "matchinfo", "[", "'tag_type'", "]", "==", "'/'", ":", "if", "matchinfo", "[", "'tag_key'", "]", "==", "state", ".", "section", "(", ")", ":", "state", ".", "section", ".", "pop", "(", ")", "else", ":", "raise", "Exception", "(", "'Unexpected section end: received %s, expected {{/%s}}'", "%", "(", "repr", "(", "match", ".", "group", "(", "0", ")", ")", ",", "tag_key", ")", ")", "search_index", "=", "matchinfo", "[", "'tag_end'", "]", "return", "matchinfo" ]
35.848485
0.000823
def _on_stream_disconnect(self, stream): """ Respond to disconnection of a local stream by propagating DEL_ROUTE for any contexts we know were attached to it. """ # During a stream crash it is possible for disconnect signal to fire # twice, in which case ignore the second instance. routes = self._routes_by_stream.pop(stream, None) if routes is None: return LOG.debug('%r: %r is gone; propagating DEL_ROUTE for %r', self, stream, routes) for target_id in routes: self.router.del_route(target_id) self._propagate_up(mitogen.core.DEL_ROUTE, target_id) self._propagate_down(mitogen.core.DEL_ROUTE, target_id) context = self.router.context_by_id(target_id, create=False) if context: mitogen.core.fire(context, 'disconnect')
[ "def", "_on_stream_disconnect", "(", "self", ",", "stream", ")", ":", "# During a stream crash it is possible for disconnect signal to fire", "# twice, in which case ignore the second instance.", "routes", "=", "self", ".", "_routes_by_stream", ".", "pop", "(", "stream", ",", "None", ")", "if", "routes", "is", "None", ":", "return", "LOG", ".", "debug", "(", "'%r: %r is gone; propagating DEL_ROUTE for %r'", ",", "self", ",", "stream", ",", "routes", ")", "for", "target_id", "in", "routes", ":", "self", ".", "router", ".", "del_route", "(", "target_id", ")", "self", ".", "_propagate_up", "(", "mitogen", ".", "core", ".", "DEL_ROUTE", ",", "target_id", ")", "self", ".", "_propagate_down", "(", "mitogen", ".", "core", ".", "DEL_ROUTE", ",", "target_id", ")", "context", "=", "self", ".", "router", ".", "context_by_id", "(", "target_id", ",", "create", "=", "False", ")", "if", "context", ":", "mitogen", ".", "core", ".", "fire", "(", "context", ",", "'disconnect'", ")" ]
42.285714
0.002203
def axes(self, axes): '''Set the angular axis of rotation for this joint. Parameters ---------- axes : list containing one 3-tuple of floats A list of the axes for this joint. For a hinge joint, which has one degree of freedom, this must contain one 3-tuple specifying the X, Y, and Z axis for the joint. ''' self.amotor.axes = [axes[0]] self.ode_obj.setAxis(tuple(axes[0]))
[ "def", "axes", "(", "self", ",", "axes", ")", ":", "self", ".", "amotor", ".", "axes", "=", "[", "axes", "[", "0", "]", "]", "self", ".", "ode_obj", ".", "setAxis", "(", "tuple", "(", "axes", "[", "0", "]", ")", ")" ]
38
0.004283
def cancel(self, mark_completed_as_cancelled=False): """ Cancel the future. If the future has not been started yet, it will never start running. If the future is already running, it will run until the worker function exits. The worker function can check if the future has been cancelled using the :meth:`cancelled` method. If the future has already been completed, it will not be marked as cancelled unless you set *mark_completed_as_cancelled* to :const:`True`. :param mark_completed_as_cancelled: If this is :const:`True` and the future has already completed, it will be marked as cancelled anyway. """ with self._lock: if not self._completed or mark_completed_as_cancelled: self._cancelled = True callbacks = self._prepare_done_callbacks() callbacks()
[ "def", "cancel", "(", "self", ",", "mark_completed_as_cancelled", "=", "False", ")", ":", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_completed", "or", "mark_completed_as_cancelled", ":", "self", ".", "_cancelled", "=", "True", "callbacks", "=", "self", ".", "_prepare_done_callbacks", "(", ")", "callbacks", "(", ")" ]
42.631579
0.003623
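A sketch of the cancellation flow; the executor wiring and how the worker receives the future are assumptions, not part of the record above:

f = executor.submit(long_task)      # Future as defined above, assumed API
f.cancel()                          # before start: the task never runs
assert f.cancelled()

# an already-running worker can cooperate by polling:
def long_task(future):
    while not future.cancelled():
        step()                      # one unit of work, assumed helper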
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. Using this service avoids extra loops over child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ item = super(ReferenceSamplesView, self).folderitem(obj, item, index) # ensure we have an object and not a brain obj = api.get_object(obj) url = api.get_url(obj) title = api.get_title(obj) item["Title"] = title item["replace"]["Title"] = get_link(url, value=title) item["allow_edit"] = self.get_editable_columns() # Supported Services supported_services_choices = self.make_supported_services_choices(obj) item["choices"]["SupportedServices"] = supported_services_choices # Position item["Position"] = "new" item["choices"]["Position"] = self.make_position_choices() return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "item", "=", "super", "(", "ReferenceSamplesView", ",", "self", ")", ".", "folderitem", "(", "obj", ",", "item", ",", "index", ")", "# ensure we have an object and not a brain", "obj", "=", "api", ".", "get_object", "(", "obj", ")", "url", "=", "api", ".", "get_url", "(", "obj", ")", "title", "=", "api", ".", "get_title", "(", "obj", ")", "item", "[", "\"Title\"", "]", "=", "title", "item", "[", "\"replace\"", "]", "[", "\"Title\"", "]", "=", "get_link", "(", "url", ",", "value", "=", "title", ")", "item", "[", "\"allow_edit\"", "]", "=", "self", ".", "get_editable_columns", "(", ")", "# Supported Services", "supported_services_choices", "=", "self", ".", "make_supported_services_choices", "(", "obj", ")", "item", "[", "\"choices\"", "]", "[", "\"SupportedServices\"", "]", "=", "supported_services_choices", "# Position", "item", "[", "\"Position\"", "]", "=", "\"new\"", "item", "[", "\"choices\"", "]", "[", "\"Position\"", "]", "=", "self", ".", "make_position_choices", "(", ")", "return", "item" ]
35.8
0.001813