Columns:

    column  type    length range
    src     string  75 to 47.4k
    cfg     string  32 to 2.5k
    ast     string  78 to 54.9k
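Each record below is one (src, cfg, ast) triple: src is the source of a single Python function, cfg is a compact control-flow-graph encoding, and ast is a flattened pre-order listing of the function's AST node types. Comparing the samples against their encodings, the cfg string appears to follow the layout [arg count][SEP1]<one label per CFG block>[SEP2]<successor list per block>[SEP3]<statement count per block>; this reading, and the parse_cfg helper below, are an editorial assumption inferred from the rows, not a documented format. A minimal parsing sketch in Python:

import re

def parse_cfg(cfg):
    """Split a cfg string such as '[2][SEP1][Return][SEP2][][SEP3][5]'
    into its four apparent sections (assumed layout, not a documented format)."""
    head, labels, edges, counts = re.split(r'\[SEP[123]\]', cfg)

    def _fields(s):
        # Every section is a run of bracketed fields; empty brackets are allowed
        return re.findall(r'\[([^\]]*)\]', s)

    return {
        'arg_count': int(_fields(head)[0]),   # assumed: the function's argument count
        'block_labels': _fields(labels),      # one label per CFG block, e.g. 'If', 'Return'
        'successors': [[int(x) for x in f.split(',')] if f else []
                       for f in _fields(edges)],
        'stmt_counts': [int(f) for f in _fields(counts)],
    }

# Example on the first record's cfg:
# parse_cfg('[2][SEP1][Return][SEP2][][SEP3][5]')
# -> {'arg_count': 2, 'block_labels': ['Return'], 'successors': [[]], 'stmt_counts': [5]}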
def findArgs(args, prefixes):
    """
    Extracts the list of arguments that start with any of the specified prefix values
    """
    return list([
        arg for arg in args
        if len([p for p in prefixes if arg.lower().startswith(p.lower())]) > 0
    ])
[2][SEP1][Return][SEP2][][SEP3][5]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_RETURN_CALL_NAME_LOAD_LISTCOMP_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_COMPARE_CALL_NAME_LOAD_LISTCOMP_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_GT_CONSTANT
def buildDescriptor(self, dir=os.getcwd(), configuration='Development', args=[], suppressOutput=False):
    """
    Builds the editor modules for the Unreal project or plugin in the specified directory,
    using the specified build configuration
    """

    # Verify that an Unreal project or plugin exists in the specified directory
    descriptor = self.getDescriptor(dir)
    descriptorType = 'project' if self.isProject(descriptor) else 'plugin'

    # If the project or plugin is Blueprint-only, there is no C++ code to build
    if os.path.exists(os.path.join(dir, 'Source')) == False:
        Utility.printStderr('Pure Blueprint {}, nothing to build.'.format(descriptorType))
        return

    # Verify that the specified build configuration is valid
    if configuration not in self.validBuildConfigurations():
        raise UnrealManagerException('invalid build configuration "' + configuration + '"')

    # Generate the arguments to pass to UBT
    target = self.getDescriptorName(descriptor) + 'Editor' if self.isProject(descriptor) else 'UE4Editor'
    baseArgs = ['-{}='.format(descriptorType) + descriptor]

    # Perform the build
    self._runUnrealBuildTool(target, self.getPlatformIdentifier(), configuration, baseArgs + args, capture=suppressOutput)
[5][SEP1][If][Return][If][None][None][SEP2][1,2][][3,4][][][SEP3][4][2][1][1][5]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LIST_LOAD_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_IFEXP_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_CONSTANT_IF_COMPARE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_CONSTANT_EQ_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_RETURN_IF_COMPARE_NAME_LOAD_NOTIN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RAISE_CALL_NAME_LOAD_BINOP_BINOP_CONSTANT_ADD_NAME_LOAD_ADD_CONSTANT_ASSIGN_NAME_STORE_IFEXP_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_BINOP_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ADD_CONSTANT_CONSTANT_ASSIGN_NAME_STORE_LIST_BINOP_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_ADD_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_BINOP_NAME_LOAD_ADD_NAME_LOAD_KEYWORD_NAME_LOAD
def _getThirdPartyLibs(self, platformIdentifier, configuration):
    """
    Runs UnrealBuildTool in JSON export mode and extracts the list of third-party libraries
    """

    # If we have previously cached the library list for the current engine version, use the cached data
    cachedList = CachedDataManager.getCachedDataKey(self.engineVersionHash, 'ThirdPartyLibraries')
    if cachedList != None:
        return cachedList

    # Create a temp directory to hold the JSON file
    tempDir = tempfile.mkdtemp()
    jsonFile = os.path.join(tempDir, 'ubt_output.json')

    # Installed Builds of the Engine only contain a small handful of third-party libraries, rather than the full set
    # included in a source build of the Engine. However, if the ThirdParty directory from a source build is copied
    # into an Installed Build and the `InstalledBuild.txt` sentinel file is temporarily renamed, we can get the best
    # of both worlds and utilise the full set of third-party libraries. Enable this sentinel renaming behaviour only
    # if you have copied the ThirdParty directory from a source build into your Installed Build, or else the UBT
    # command will fail trying to rebuild UnrealHeaderTool.
    sentinelFile = os.path.join(self.engineRoot, 'Engine', 'Build', 'InstalledBuild.txt')
    sentinelBackup = sentinelFile + '.bak'
    renameSentinel = os.path.exists(sentinelFile) and os.environ.get('UE4CLI_SENTINEL_RENAME', '0') == '1'
    if renameSentinel == True:
        shutil.move(sentinelFile, sentinelBackup)

    # Invoke UnrealBuildTool in JSON export mode (make sure we specify gathering mode, since this is a prerequisite of JSON export)
    # (Ensure we always perform sentinel file cleanup even when errors occur)
    try:
        args = ['-Mode=JsonExport', '-OutputFile=' + jsonFile] if self.engineVersion['MinorVersion'] >= 22 else ['-gather', '-jsonexport=' + jsonFile, '-SkipBuild']
        self.runUBTFunc('UE4Editor', platformIdentifier, configuration, args)
    finally:
        if renameSentinel == True:
            shutil.move(sentinelBackup, sentinelFile)

    # Parse the JSON output
    result = json.loads(Utility.readFile(jsonFile))

    # Extract the list of third-party library modules
    # (Note that since UE4.21, modules no longer have a "Type" field, so we must
    # rely on the "Directory" field filter below to identify third-party libraries)
    modules = [result['Modules'][key] for key in result['Modules']]

    # Filter out any modules from outside the Engine/Source/ThirdParty directory
    thirdPartyRoot = os.path.join(self.engineRoot, 'Engine', 'Source', 'ThirdParty')
    thirdparty = list([m for m in modules if thirdPartyRoot in m['Directory']])

    # Remove the temp directory
    try:
        shutil.rmtree(tempDir)
    except:
        pass

    # Cache the list of libraries for use by subsequent runs
    CachedDataManager.setCachedDataKey(self.engineVersionHash, 'ThirdPartyLibraries', thirdparty)

    return thirdparty
[3][SEP1][If][Return][If][None][Try][None][Try][None][None][Return][SEP2][1,2][][3,4][4][5][6][7,8][9][9][][SEP3][1][0][5][1][1][1][4][1][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_COMPARE_NAME_LOAD_NOTEQ_CONSTANT_RETURN_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_CONSTANT_ASSIGN_NAME_STORE_BINOP_NAME_LOAD_ADD_CONSTANT_ASSIGN_NAME_STORE_BOOLOP_AND_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_COMPARE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_CONSTANT_EQ_CONSTANT_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_TRY_ASSIGN_NAME_STORE_IFEXP_COMPARE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_GTE_CONSTANT_LIST_CONSTANT_BINOP_CONSTANT_ADD_NAME_LOAD_LOAD_LIST_CONSTANT_BINOP_CONSTANT_ADD_NAME_LOAD_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_LISTCOMP_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_NAME_LOAD_LOAD_COMPREHENSION_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_LISTCOMP_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_COMPARE_NAME_LOAD_IN_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_TRY_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXCEPTHANDLER_PASS_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_RETURN_NAME_LOAD
def set_command_attributes(self, name, attributes):
    """ Sets the xml attributes of a specified command. """
    if self.command_exists(name):
        command = self.commands.get(name)
        command['attributes'] = attributes
[3][SEP1][If][None][SEP2][1][][SEP3][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_IF_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD
def start_scan(self, scan_id, targets, parallel=1):
    """ Handle N parallel scans if 'parallel' is greater than 1. """
    os.setsid()
    multiscan_proc = []
    logger.info("%s: Scan started.", scan_id)
    target_list = targets
    if target_list is None or not target_list:
        raise OSPDError('Erroneous targets list', 'start_scan')
    for index, target in enumerate(target_list):
        while len(multiscan_proc) >= parallel:
            progress = self.calculate_progress(scan_id)
            self.set_scan_progress(scan_id, progress)
            multiscan_proc = self.check_pending_target(scan_id, multiscan_proc)
            time.sleep(1)

        # If the scan status is stopped, do not launch any more target scans
        if self.get_scan_status(scan_id) == ScanStatus.STOPPED:
            return

        logger.info("%s: Host scan started on ports %s.", target[0], target[1])
        scan_process = multiprocessing.Process(target=self.parallel_scan,
                                               args=(scan_id, target[0]))
        multiscan_proc.append((scan_process, target[0]))
        scan_process.start()
        self.set_scan_status(scan_id, ScanStatus.RUNNING)

    # Wait until all single targets were scanned
    while multiscan_proc:
        multiscan_proc = self.check_pending_target(scan_id, multiscan_proc)
        if multiscan_proc:
            progress = self.calculate_progress(scan_id)
            self.set_scan_progress(scan_id, progress)
        time.sleep(1)

    # Only set the scan as finished if the scan was not stopped.
    if self.get_scan_status(scan_id) != ScanStatus.STOPPED:
        self.finish_scan(scan_id)
[4][SEP1][If][None][For][While][While][None][If][If][If][Return][None][None][None][None][SEP2][1,2][][3,4][5,6][7,8][3][9,10][11,12][13][][2][12][4][][SEP3][2][1][7][1][1][4][1][1][1][0][5][2][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_CONSTANT_EXPR_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_ASSIGN_NAME_STORE_NAME_LOAD_IF_BOOLOP_OR_COMPARE_NAME_LOAD_IS_CONSTANT_UNARYOP_NOT_NAME_LOAD_RAISE_CALL_NAME_LOAD_CONSTANT_CONSTANT_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_NAME_LOAD_NAME_LOAD_WHILE_COMPARE_CALL_NAME_LOAD_NAME_LOAD_GTE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_COMPARE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EQ_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_TUPLE_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_TUPLE_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_WHILE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_IF_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_COMPARE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NOTEQ_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def create_scan(self, scan_id, targets, options, vts):
    """ Creates a new scan.

    @target: Target to scan.
    @options: Miscellaneous scan options.

    @return: New scan's ID.
    """
    if self.scan_exists(scan_id):
        logger.info("Scan %s exists. Resuming scan.", scan_id)
    return self.scan_collection.create_scan(scan_id, targets, options, vts)
[5][SEP1][If][None][Return][SEP2][1,2][2][][SEP3][1][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_EXPR_CONSTANT_IF_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_RETURN_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD
def target_to_list(target):
    """ Attempt to return a list of single hosts from a target string. """
    # Is it an IPv4 address ?
    new_list = target_to_ipv4(target)

    # Is it an IPv6 address ?
    if not new_list:
        new_list = target_to_ipv6(target)

    # Is it an IPv4 CIDR ?
    if not new_list:
        new_list = target_to_ipv4_cidr(target)

    # Is it an IPv6 CIDR ?
    if not new_list:
        new_list = target_to_ipv6_cidr(target)

    # Is it an IPv4 short-range ?
    if not new_list:
        new_list = target_to_ipv4_short(target)

    # Is it an IPv4 long-range ?
    if not new_list:
        new_list = target_to_ipv4_long(target)

    # Is it an IPv6 short-range ?
    if not new_list:
        new_list = target_to_ipv6_short(target)

    # Is it an IPv6 long-range ?
    if not new_list:
        new_list = target_to_ipv6_long(target)

    # Is it a hostname ?
    if not new_list:
        new_list = target_to_hostname(target)

    return new_list
[1][SEP1][If][None][If][None][If][None][If][None][If][None][If][None][If][None][If][None][Return][SEP2][1,2][2][3,4][4][5,6][6][7,8][8][9,10][10][11,12][12][13,14][14][15,16][16][][SEP3][1][1][0][1][0][1][0][1][0][1][0][1][0][1][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_RETURN_NAME_LOAD
def create_scan(self, scan_id='', targets='', options=None, vts=''):
    """ Creates a new scan with provided scan information. """
    if self.data_manager is None:
        self.data_manager = multiprocessing.Manager()

    # Check if it is possible to resume a task. To prevent resuming, the
    # scan must be deleted from the scans_table.
    if scan_id and self.id_exists(scan_id) and (
            self.get_status(scan_id) == ScanStatus.STOPPED):
        return self.resume_scan(scan_id, options)

    if not options:
        options = dict()
    scan_info = self.data_manager.dict()
    scan_info['results'] = list()
    scan_info['finished_hosts'] = dict(
        [[target, []] for target, _, _ in targets])
    scan_info['progress'] = 0
    scan_info['target_progress'] = dict(
        [[target, {}] for target, _, _ in targets])
    scan_info['targets'] = targets
    scan_info['vts'] = vts
    scan_info['options'] = options
    scan_info['start_time'] = int(time.time())
    scan_info['end_time'] = "0"
    scan_info['status'] = ScanStatus.INIT
    if scan_id is None or scan_id == '':
        scan_id = str(uuid.uuid4())
    scan_info['scan_id'] = scan_id
    self.scans_table[scan_id] = scan_info
    return scan_id
[5][SEP1][If][None][If][Return][If][None][If][None][Return][SEP2][1,2][2][3,4][][5,6][6][7,8][8][][SEP3][0][1][2][1][0][1][6][2][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_CONSTANT_CONSTANT_EXPR_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_IS_CONSTANT_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_BOOLOP_AND_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_COMPARE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EQ_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_LISTCOMP_LIST_NAME_LOAD_LIST_LOAD_LOAD_COMPREHENSION_TUPLE_NAME_STORE_NAME_STORE_NAME_STORE_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_LISTCOMP_LIST_NAME_LOAD_DICT_LOAD_COMPREHENSION_TUPLE_NAME_STORE_NAME_STORE_NAME_STORE_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_BOOLOP_OR_COMPARE_NAME_LOAD_IS_CONSTANT_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_STORE_NAME_LOAD_RETURN_NAME_LOAD
def write_gene_recs(self, db, gene_id):
    """
    NOTE: The goal of this function is to have a canonical ordering when
    outputting a gene and all of its records to a file. The order is
    intended to be:

    gene
    # mRNAs sorted by length, with longest mRNA first
    mRNA_1
      # Exons of mRNA, sorted by start position (ascending)
      exon_1
        # Children of exon, sorted by start position
        exon_child_1
        exon_child_2
      exon_2
      ...
      # Non-exonic children here
      ...
    mRNA_2
    ...
    # Non-mRNA children here
    ...

    Output records of a gene to a file, given a GFF database and a gene_id.
    Outputs records in canonical order: gene record first, then longest
    mRNA, followed by longest mRNA exons, followed by rest, followed by
    next longest mRNA, and so on.

    Includes the gene record itself in the output.

    TODO: This probably doesn't handle deep GFF hierarchies.
    """
    gene_rec = db[gene_id]
    # Output gene record
    self.write_rec(gene_rec)
    # Get each mRNA's lengths
    mRNA_lens = {}
    c = list(db.children(gene_id, featuretype="mRNA"))
    for mRNA in db.children(gene_id, featuretype="mRNA"):
        mRNA_lens[mRNA.id] = \
            sum(len(exon) for exon in db.children(mRNA, featuretype="exon"))
    # Sort mRNAs by length
    sorted_mRNAs = \
        sorted(mRNA_lens.items(), key=lambda x: x[1], reverse=True)
    for curr_mRNA in sorted_mRNAs:
        mRNA_id = curr_mRNA[0]
        mRNA_rec = db[mRNA_id]
        # Write mRNA record to file
        self.write_rec(mRNA_rec)
        # Write mRNA's children records to file
        self.write_mRNA_children(db, mRNA_id)
    # Write non-mRNA children of gene (only level1)
    for gene_child in db.children(gene_id, level=1):
        if gene_child.featuretype != "mRNA":
            self.write_rec(gene_child)
[3][SEP1][None][For][None][None][For][None][For][If][None][SEP2][1][2,3][1][4][5,6][4][7][8,6][6][SEP3][3][1][3][2][0][2][1][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_DICT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_CONSTANT_FOR_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_STORE_CALL_NAME_LOAD_GENERATOREXP_CALL_NAME_LOAD_NAME_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_LAMBDA_ARGUMENTS_ARG_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_KEYWORD_CONSTANT_FOR_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_FOR_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_NOTEQ_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def sequence(self, fasta, use_strand=True):
    """
    Retrieves the sequence of this feature as a string.

    Uses the pyfaidx package.

    Parameters
    ----------
    fasta : str
        If str, then it's a FASTA-format filename; otherwise assume it's a
        pyfaidx.Fasta object.

    use_strand : bool
        If True (default), the sequence returned will be
        reverse-complemented for minus-strand features.

    Returns
    -------
    string
    """
    if isinstance(fasta, six.string_types):
        fasta = Fasta(fasta, as_raw=False)

    # recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation
    # and is therefore 0-based half-open.
    seq = fasta[self.chrom][self.start-1:self.stop]
    if use_strand and self.strand == '-':
        seq = seq.reverse.complement
    return seq.seq
[3][SEP1][If][None][If][None][Return][SEP2][1,2][2][3,4][4][][SEP3][1][1][0][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_CONSTANT_EXPR_CONSTANT_IF_CALL_NAME_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_KEYWORD_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_SLICE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_SUB_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_IF_BOOLOP_AND_NAME_LOAD_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_RETURN_ATTRIBUTE_NAME_LOAD_LOAD
def _replace(self, feature, cursor):
    """
    Insert a feature into the database.
    """
    try:
        cursor.execute(
            constants._UPDATE,
            list(feature.astuple()) + [feature.id])
    except sqlite3.ProgrammingError:
        cursor.execute(
            constants._INSERT,
            list(feature.astuple(self.default_encoding)) + [feature.id])
[3][SEP1][Try][None][None][SEP2][1,2][][][SEP3][0][3][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_TRY_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ADD_LIST_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXCEPTHANDLER_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ADD_LIST_ATTRIBUTE_NAME_LOAD_LOAD_LOAD
def _proxy_kwargs(browser_name, proxy, browser_kwargs={}):  # pylint: disable=dangerous-default-value
    """
    Determines the kwargs needed to set up a proxy based on the
    browser type.

    Returns: a dictionary of arguments needed to pass when
        instantiating the WebDriver instance.
    """
    proxy_dict = {
        "httpProxy": proxy.proxy,
        "proxyType": 'manual',
    }

    if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs:
        # This one works for firefox locally
        wd_proxy = webdriver.common.proxy.Proxy(proxy_dict)
        browser_kwargs['proxy'] = wd_proxy
    else:
        # This one works with chrome, both locally and remote
        # This one works with firefox remote, but not locally
        if 'desired_capabilities' not in browser_kwargs:
            browser_kwargs['desired_capabilities'] = {}

        browser_kwargs['desired_capabilities']['proxy'] = proxy_dict

    return browser_kwargs
[3][SEP1][If][None][If][Return][None][None][SEP2][1,2][3][4,5][][5][3][SEP3][0][1][0][0][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_DICT_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_BOOLOP_AND_COMPARE_NAME_LOAD_EQ_CONSTANT_COMPARE_CONSTANT_NOTIN_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_IF_COMPARE_CONSTANT_NOTIN_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_DICT_ASSIGN_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_STORE_NAME_LOAD_RETURN_NAME_LOAD
def _parse_options(self, options):
    """Copy needed options to self"""
    attributes = ('host', 'wapi_version', 'username', 'password',
                  'ssl_verify', 'http_request_timeout', 'max_retries',
                  'http_pool_connections', 'http_pool_maxsize',
                  'silent_ssl_warnings', 'log_api_calls_as_info',
                  'max_results', 'paging')
    for attr in attributes:
        if isinstance(options, dict) and attr in options:
            setattr(self, attr, options[attr])
        elif hasattr(options, attr):
            value = getattr(options, attr)
            setattr(self, attr, value)
        elif attr in self.DEFAULT_OPTIONS:
            setattr(self, attr, self.DEFAULT_OPTIONS[attr])
        else:
            msg = "WAPI config error. Option %s is not defined" % attr
            raise ib_ex.InfobloxConfigException(msg=msg)

    for attr in ('host', 'username', 'password'):
        if not getattr(self, attr):
            msg = "WAPI config error. Option %s can not be blank" % attr
            raise ib_ex.InfobloxConfigException(msg=msg)

    self.wapi_url = "https://%s/wapi/v%s/" % (self.host, self.wapi_version)
    self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
[2][SEP1][None][For][If][For][None][If][If][None][None][If][None][None][None][SEP2][1][2,3][4,5][6,7][1][8,9][10,3][][1][11,12][][1][][SEP3][0][0][1][0][1][1][1][1][2][0][1][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_TUPLE_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_LOAD_FOR_NAME_STORE_NAME_LOAD_IF_BOOLOP_AND_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_COMPARE_NAME_LOAD_IN_NAME_LOAD_EXPR_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_IF_COMPARE_NAME_LOAD_IN_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_BINOP_CONSTANT_MOD_NAME_LOAD_RAISE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_FOR_NAME_STORE_TUPLE_CONSTANT_CONSTANT_CONSTANT_LOAD_IF_UNARYOP_NOT_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_BINOP_CONSTANT_MOD_NAME_LOAD_RAISE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_BINOP_CONSTANT_MOD_TUPLE_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD
def _sincedb_update_position(self, lines=0, force_update=False):
    """Retrieves the starting position from the sincedb sql db for a given file
    Returns a boolean representing whether or not it updated the record
    """
    if not self._sincedb_path:
        return False

    self._line_count = self._line_count + lines
    old_count = self._line_count_sincedb
    lines = self._line_count

    current_time = int(time.time())
    if not force_update:
        if self._last_sincedb_write and current_time - self._last_sincedb_write <= self._sincedb_write_interval:
            return False
        if old_count == lines:
            return False

    self._sincedb_init()

    self._last_sincedb_write = current_time
    self._log_debug('updating sincedb to {0}'.format(lines))

    conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
    cursor = conn.cursor()

    query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);'
    cursor.execute(query, {
        'fid': self._fid,
        'filename': self._filename
    })

    query = 'update sincedb set position = :position where fid = :fid and filename = :filename'
    cursor.execute(query, {
        'fid': self._fid,
        'filename': self._filename,
        'position': lines,
    })

    conn.close()
    self._line_count_sincedb = lines
    return True
[3][SEP1][If][Return][If][If][Return][Return][If][Return][SEP2][1,2][][3,4][5,6][][][7,4][][SEP3][0][0][2][0][8][0][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_CONSTANT_CONSTANT_EXPR_CONSTANT_IF_UNARYOP_NOT_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CONSTANT_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_ADD_NAME_LOAD_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_UNARYOP_NOT_NAME_LOAD_IF_BOOLOP_AND_ATTRIBUTE_NAME_LOAD_LOAD_COMPARE_BINOP_NAME_LOAD_SUB_ATTRIBUTE_NAME_LOAD_LOAD_LTE_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CONSTANT_IF_COMPARE_NAME_LOAD_EQ_NAME_LOAD_RETURN_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_DICT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_DICT_CONSTANT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_NAME_LOAD_RETURN_CONSTANT
def read(file_path):
    """ Read a gmt file at the path specified by file_path.

    Args:
        file_path (string): path to gmt file

    Returns:
        gmt (GMT object): list of dicts, where each dict corresponds to one
            line of the GMT file

    """
    # Read in file
    actual_file_path = os.path.expanduser(file_path)
    with open(actual_file_path, 'r') as f:
        lines = f.readlines()

    # Create GMT object
    gmt = []

    # Iterate over each line
    for line_num, line in enumerate(lines):
        # Separate along tabs
        fields = line.split('\t')

        assert len(fields) > 2, (
            "Each line must have at least 3 tab-delimited items. " +
            "line_num: {}, fields: {}").format(line_num, fields)

        # Get rid of trailing whitespace
        fields[-1] = fields[-1].rstrip()

        # Collect entries
        entries = fields[2:]

        # Remove empty entries
        entries = [x for x in entries if x]

        assert len(set(entries)) == len(entries), (
            "There should not be duplicate entries for the same set. " +
            "line_num: {}, entries: {}").format(line_num, entries)

        # Store this line as a dictionary
        line_dict = {SET_IDENTIFIER_FIELD: fields[0],
                     SET_DESC_FIELD: fields[1],
                     SET_MEMBERS_FIELD: entries}
        gmt.append(line_dict)

    verify_gmt_integrity(gmt)

    return gmt
[1][SEP1][None][None][None][For][None][Return][None][None][SEP2][1][2][3][4,5][6][][7][3][SEP3][2][1][0][7][3][1][5][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_WITH_WITHITEM_CALL_NAME_LOAD_NAME_LOAD_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSERT_COMPARE_CALL_NAME_LOAD_NAME_LOAD_GT_CONSTANT_CALL_ATTRIBUTE_BINOP_CONSTANT_ADD_CONSTANT_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_UNARYOP_USUB_CONSTANT_STORE_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_UNARYOP_USUB_CONSTANT_LOAD_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_SLICE_CONSTANT_LOAD_ASSIGN_NAME_STORE_LISTCOMP_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_NAME_LOAD_ASSERT_COMPARE_CALL_NAME_LOAD_CALL_NAME_LOAD_NAME_LOAD_EQ_CALL_NAME_LOAD_NAME_LOAD_CALL_ATTRIBUTE_BINOP_CONSTANT_ADD_CONSTANT_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_DICT_NAME_LOAD_NAME_LOAD_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_NAME_LOAD_NAME_LOAD_RETURN_NAME_LOAD
def write_version_and_dims(version, dims, f):
    """Write first two lines of gct file.

    Args:
        version (string): 1.3 by default
        dims (list of strings): length = 4
        f (file handle): handle of output file

    Returns:
        nothing
    """
    f.write(("#" + version + "\n"))
    f.write((dims[0] + "\t" + dims[1] + "\t" + dims[2] + "\t" + dims[3] + "\n"))
[3][SEP1][None][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_BINOP_CONSTANT_ADD_NAME_LOAD_ADD_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_BINOP_BINOP_BINOP_BINOP_BINOP_BINOP_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ADD_CONSTANT_ADD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ADD_CONSTANT_ADD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ADD_CONSTANT_ADD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ADD_CONSTANT
def read(in_path):
    """ Read a grp file at the path specified by in_path.

    Args:
        in_path (string): path to GRP file

    Returns:
        grp (list)

    """
    assert os.path.exists(in_path), "The following GRP file can't be found. in_path: {}".format(in_path)

    with open(in_path, "r") as f:
        lines = f.readlines()
        # need the second conditional to ignore comment lines
        grp = [line.strip() for line in lines if line and not re.match("^#", line)]

    return grp
[1][SEP1][None][None][None][Return][SEP2][1][2][3][][SEP3][2][1][3][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSERT_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_WITH_WITHITEM_CALL_NAME_LOAD_NAME_LOAD_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_LISTCOMP_CALL_ATTRIBUTE_NAME_LOAD_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_BOOLOP_AND_NAME_LOAD_UNARYOP_NOT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_RETURN_NAME_LOAD
def timesince(when):
    """Returns string representing "time since" or "time until".

    Examples:
      3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now.
    """
    if not when:
        return ''

    now = datetime.datetime.utcnow()
    if now > when:
        diff = now - when
        suffix = 'ago'
    else:
        diff = when - now
        suffix = 'from now'

    periods = (
        (diff.days / 365, 'year', 'years'),
        (diff.days / 30, 'month', 'months'),
        (diff.days / 7, 'week', 'weeks'),
        (diff.days, 'day', 'days'),
        (diff.seconds / 3600, 'hour', 'hours'),
        (diff.seconds / 60, 'minute', 'minutes'),
        (diff.seconds, 'second', 'seconds'),
    )

    for period, singular, plural in periods:
        if period:
            return '%d %s %s' % (
                period, singular if period == 1 else plural, suffix)

    return 'now'
[1][SEP1][If][Return][If][None][None][None][For][If][Return][Return][SEP2][1,2][][3,4][5][5][6][7,8][9,6][][][SEP3][0][0][1][0][0][0][0][0][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_IF_UNARYOP_NOT_NAME_LOAD_RETURN_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_IF_COMPARE_NAME_LOAD_GT_NAME_LOAD_ASSIGN_NAME_STORE_BINOP_NAME_LOAD_SUB_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_BINOP_NAME_LOAD_SUB_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_TUPLE_TUPLE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_DIV_CONSTANT_CONSTANT_CONSTANT_LOAD_TUPLE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_DIV_CONSTANT_CONSTANT_CONSTANT_LOAD_TUPLE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_DIV_CONSTANT_CONSTANT_CONSTANT_LOAD_TUPLE_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_TUPLE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_DIV_CONSTANT_CONSTANT_CONSTANT_LOAD_TUPLE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_DIV_CONSTANT_CONSTANT_CONSTANT_LOAD_TUPLE_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_NAME_STORE_STORE_NAME_LOAD_IF_NAME_LOAD_RETURN_BINOP_CONSTANT_MOD_TUPLE_NAME_LOAD_IFEXP_COMPARE_NAME_LOAD_EQ_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD_LOAD_RETURN_CONSTANT
def _get_artifact_context(run, file_type):
    """Gets the artifact details for the given run and file_type."""
    sha1sum = None
    image_file = False
    log_file = False
    config_file = False

    if request.path == '/image':
        image_file = True
        if file_type == 'before':
            sha1sum = run.ref_image
        elif file_type == 'diff':
            sha1sum = run.diff_image
        elif file_type == 'after':
            sha1sum = run.image
        else:
            abort(400)
    elif request.path == '/log':
        log_file = True
        if file_type == 'before':
            sha1sum = run.ref_log
        elif file_type == 'diff':
            sha1sum = run.diff_log
        elif file_type == 'after':
            sha1sum = run.log
        else:
            abort(400)
    elif request.path == '/config':
        config_file = True
        if file_type == 'before':
            sha1sum = run.ref_config
        elif file_type == 'after':
            sha1sum = run.config
        else:
            abort(400)

    return image_file, log_file, config_file, sha1sum
[2][SEP1][If][If][If][None][If][If][If][Return][None][If][None][If][If][None][None][None][If][None][If][None][None][None][None][SEP2][1,2][3,4][5,6][7][8,9][10,11][12,7][][7][13,14][7][15,16][17,18][7][7][7][19,20][7][21,22][7][7][7][7][SEP3][0][0][0][0][0][0][0][0][0][0][0][0][0][0][1][0][0][0][0][0][1][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_NAME_LOAD_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_NAME_LOAD_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_NAME_LOAD_CONSTANT_RETURN_TUPLE_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_LOAD
def verify_binary(flag_name, process_args=None):
    """Exits the program if the binary from the given flag doesn't run.

    Args:
        flag_name: Name of the flag that should be the path to the binary.
        process_args: Args to pass to the binary to do nothing but verify
            that it's working correctly (something like "--version") is
            good. Optional. Defaults to no args.

    Raises:
        SystemExit with error if the process did not work.
    """
    if process_args is None:
        process_args = []

    path = getattr(FLAGS, flag_name)
    if not path:
        logging.error('Flag %r not set' % flag_name)
        sys.exit(1)

    with open(os.devnull, 'w') as dev_null:
        try:
            subprocess.check_call(
                [path] + process_args,
                stdout=dev_null,
                stderr=subprocess.STDOUT)
        except:
            logging.exception('--%s binary at path %r does not work',
                              flag_name, path)
            sys.exit(1)
[2][SEP1][If][None][If][None][None][Try][None][None][SEP2][1,2][2][3,4][4][5][6,7][][][SEP3][0][0][1][2][1][0][1][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_CONSTANT_EXPR_CONSTANT_IF_COMPARE_NAME_LOAD_IS_CONSTANT_ASSIGN_NAME_STORE_LIST_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_WITH_WITHITEM_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_STORE_TRY_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_LIST_NAME_LOAD_LOAD_ADD_NAME_LOAD_KEYWORD_NAME_LOAD_KEYWORD_ATTRIBUTE_NAME_LOAD_LOAD_EXCEPTHANDLER_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT
def list(self, detail=False):
    """
    A generator that yields information about each file in the upload area

    :param detail: return detailed file information (slower)
    :return: a list of dicts containing at least 'name', or more if detail was requested
    """
    creds_provider = CredentialsManager(upload_area=self)
    s3agent = S3Agent(credentials_provider=creds_provider)
    key_prefix = self.uuid + "/"
    key_prefix_length = len(key_prefix)
    for page in s3agent.list_bucket_by_page(bucket_name=self.uri.bucket_name, key_prefix=key_prefix):
        file_list = [key[key_prefix_length:] for key in page]  # cut off upload-area-id/
        if detail:
            files_info = self.upload_service.api_client.files_info(self.uuid, file_list)
        else:
            files_info = [{'name': filename} for filename in file_list]
        for file_info in files_info:
            yield file_info
[2][SEP1][None][For][If][None][None][For][None][SEP2][1][2][3,4][5][5][6,1][5][SEP3][3][1][0][1][0][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_ADD_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_FOR_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_LISTCOMP_SUBSCRIPT_NAME_LOAD_SLICE_NAME_LOAD_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_IF_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_LISTCOMP_DICT_CONSTANT_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_EXPR_YIELD_NAME_LOAD
def select_area(self, area_uuid):
    """
    Update the "current area" to be the area with this UUID.

    :param str area_uuid: The RFC4122-compliant UUID of the Upload Area.
    """
    self._config.upload.current_area = area_uuid
    self.save()
[2][SEP1][None][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_ATTRIBUTE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_STORE_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD
def safe_read_file(file_path: Path) -> str:
    """Read a text file. Several text encodings are tried until
    the file content is correctly decoded.

    :raise GuesslangError: when the file encoding is not supported

    :param file_path: path to the input file
    :return: text file content
    """
    for encoding in FILE_ENCODINGS:
        try:
            return file_path.read_text(encoding=encoding)
        except UnicodeError:
            pass  # Ignore encoding error

    raise GuesslangError('Encoding not supported for {!s}'.format(file_path))
[1][SEP1][None][For][Try][None][Return][None][SEP2][1][2,3][4,5][][][1][SEP3][0][0][0][2][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_NAME_LOAD_EXPR_CONSTANT_FOR_NAME_STORE_NAME_LOAD_TRY_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_PASS_RAISE_CALL_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_NAME_LOAD
def make_relative(cls, course_locator, block_type, block_id):
    """
    Return a new instance which has the given block_id in the given course
    :param course_locator: may be a BlockUsageLocator in the same snapshot
    """
    if hasattr(course_locator, 'course_key'):
        course_locator = course_locator.course_key
    return course_locator.make_usage_key(
        block_type=block_type,
        block_id=block_id
    )
[4][SEP1][If][None][Return][SEP2][1,2][2][][SEP3][1][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_EXPR_CONSTANT_IF_CALL_NAME_LOAD_NAME_LOAD_CONSTANT_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_KEYWORD_NAME_LOAD
def _clean(cls, value, invalid):
    """Deprecated. See BlockUsageLocator._clean"""
    cls._deprecation_warning()
    return BlockUsageLocator._clean(value, invalid)
[3][SEP1][Return][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD
def is_mutating(status):
    """Determines if the statement is mutating based on the status."""
    if not status:
        return False

    mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop',
                    'replace', 'truncate', 'load'])
    return status.split(None, 1)[0].lower() in mutating
[1][SEP1][If][Return][Return][SEP2][1,2][][][SEP3][0][0][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_IF_UNARYOP_NOT_NAME_LOAD_RETURN_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_LIST_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_LOAD_RETURN_COMPARE_CALL_ATTRIBUTE_SUBSCRIPT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_CONSTANT_LOAD_LOAD_IN_NAME_LOAD
def _get_vi_mode(cli):
    """Get the current vi mode for display."""
    return {
        InputMode.INSERT: 'I',
        InputMode.NAVIGATION: 'N',
        InputMode.REPLACE: 'R',
        InputMode.INSERT_MULTIPLE: 'M'
    }[cli.vi_state.input_mode]
[1][SEP1][Return][SEP2][][SEP3][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_RETURN_SUBSCRIPT_DICT_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_CONSTANT_CONSTANT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD
def _find_relevant_nodes(query_nodes, relevance_network, relevance_node_lim):
    """Return a list of nodes that are relevant for the query.

    Parameters
    ----------
    query_nodes : list[str]
        A list of node names to query for.
    relevance_network : str
        The UUID of the NDEx network to query relevance in.
    relevance_node_lim : int
        The number of top relevant nodes to return.

    Returns
    -------
    nodes : list[str]
        A list of node names that are relevant for the query.
    """
    all_nodes = relevance_client.get_relevant_nodes(relevance_network,
                                                    query_nodes)
    nodes = [n[0] for n in all_nodes[:relevance_node_lim]]
    return nodes
[3][SEP1][Return][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_LISTCOMP_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_COMPREHENSION_NAME_STORE_SUBSCRIPT_NAME_LOAD_SLICE_NAME_LOAD_LOAD_RETURN_NAME_LOAD
def extract_statements(self):
    """Extracts INDRA statements from the RDF graph via SPARQL queries."""
    # Look for events that have an AGENT and an AFFECTED, and get the
    # start and ending text indices for each.
    query = prefixes + """
        SELECT ?agent_start ?agent_end ?affected_start ?affected_end
        WHERE {
            ?rel role:AGENT ?agent .
            ?rel role:AFFECTED ?affected .
            ?agent lf:start ?agent_start .
            ?agent lf:end ?agent_end .
            ?affected lf:start ?affected_start .
            ?affected lf:end ?affected_end .
            }
        """
    results = self.graph.query(query)
    for res in results:
        # Make a statement for each query match
        self.extract_statement_from_query_result(res)

    # Look for events that have an AGENT and a RESULT, and get the start
    # and ending text indices for each.
    query = query.replace('role:AFFECTED', 'role:RESULT')
    results = self.graph.query(query)
    for res in results:
        # Make a statement for each query match
        self.extract_statement_from_query_result(res)
[1][SEP1][None][For][None][None][For][None][SEP2][1][2,3][1][4][5][4][SEP3][1][0][1][2][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_BINOP_NAME_LOAD_ADD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def ungrounded_texts(stmts):
    """Return a list of all ungrounded entities ordered by number of mentions

    Parameters
    ----------
    stmts : list of :py:class:`indra.statements.Statement`

    Returns
    -------
    ungroundc : list of tuple
        list of tuples of the form (text: str, count: int) sorted in
        descending order by count.
    """
    ungrounded = [ag.db_refs['TEXT']
                  for s in stmts
                  for ag in s.agent_list()
                  if ag is not None and list(ag.db_refs.keys()) == ['TEXT']]
    ungroundc = Counter(ungrounded)
    ungroundc = ungroundc.items()
    ungroundc = sorted(ungroundc, key=lambda x: x[1], reverse=True)
    return ungroundc
[1][SEP1][Return][SEP2][][SEP3][6]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_LISTCOMP_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BOOLOP_AND_COMPARE_NAME_LOAD_ISNOT_CONSTANT_COMPARE_CALL_NAME_LOAD_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EQ_LIST_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_KEYWORD_LAMBDA_ARGUMENTS_ARG_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_KEYWORD_CONSTANT_RETURN_NAME_LOAD
def _is_physical_entity(pe):
    """Return True if the element is a physical entity"""
    val = isinstance(pe, _bp('PhysicalEntity')) or \
          isinstance(pe, _bpimpl('PhysicalEntity'))
    return val
[1][SEP1][Return][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_BOOLOP_OR_CALL_NAME_LOAD_NAME_LOAD_CALL_NAME_LOAD_CONSTANT_CALL_NAME_LOAD_NAME_LOAD_CALL_NAME_LOAD_CONSTANT_RETURN_NAME_LOAD
def _get_db_refs(term):
    """Extract database references for a TERM."""
    db_refs = {}
    # Here we extract the text name of the Agent
    # There are two relevant tags to consider here.
    # The <text> tag typically contains a larger phrase surrounding the
    # term but it contains the term in a raw, non-canonicalized form.
    # The <name> tag only contains the name of the entity but it is
    # canonicalized. For instance, MAP2K1 appears as MAP-2-K-1.
    agent_text_tag = term.find('name')
    if agent_text_tag is not None:
        db_refs['TEXT'] = agent_text_tag.text
        # If we have some drum-terms, the matched-name of the first
        # drum-term (e.g. "MAP2K1") is a better value for TEXT than
        # the name of the TERM (e.g. "MAP-2-K-1") so we put that in there
        drum_terms = term.findall('drum-terms/drum-term')
        if drum_terms:
            matched_name = drum_terms[0].attrib.get('matched-name')
            if matched_name:
                db_refs['TEXT'] = matched_name

    # We make a list of scored grounding terms from the DRUM terms
    grounding_terms = _get_grounding_terms(term)
    if not grounding_terms:
        # This is for backwards compatibility with EKBs without drum-term
        # scored entries. It is important to keep for Bioagents
        # compatibility.
        dbid = term.attrib.get('dbid')
        if dbid:
            dbids = dbid.split('|')
            for dbname, dbid in [d.split(':') for d in dbids]:
                if not db_refs.get(dbname):
                    db_refs[dbname] = dbid
        return db_refs, None, []

    # This is the INDRA prioritization of grounding name spaces. Lower score
    # takes precedence.
    ns_priority = {
        'HGNC': 1,
        'UP': 1,
        'FPLX': 2,
        'CHEBI': 3,
        'PC': 3,
        'GO': 4,
        'FA': 5,
        'XFAM': 5,
        'NCIT': 5
    }
    # We get the top priority entry from each score group
    score_groups = itertools.groupby(grounding_terms, lambda x: x['score'])
    top_per_score_group = []
    ambiguities = []
    for score, group in score_groups:
        entries = list(group)
        for entry in entries:
            priority = 100
            for ref_ns, ref_id in entry['refs'].items():
                # Skip etc UP entries
                if ref_ns == 'UP' and ref_id == 'etc':
                    continue
                try:
                    priority = min(priority, ns_priority[ref_ns])
                except KeyError:
                    pass
                if ref_ns == 'UP':
                    if not up_client.is_human(ref_id):
                        priority = 4
            entry['priority'] = priority
        if len(entries) > 1:
            top_entry = entries[0]
            top_idx = 0
            for i, entry in enumerate(entries):
                # We take the lowest priority entry within the score group
                # as the top entry
                if entry['priority'] < top_entry['priority']:
                    # This is a corner case in which a protein family
                    # should be prioritized over a specific protein,
                    # specifically when HGNC was mapped from NCIT but
                    # FPLX was not mapped from NCIT, the HGNC shouldn't
                    # take precedence.
                    if entry.get('comment') == 'HGNC_FROM_NCIT' and \
                            'FPLX' in top_entry['refs'] and \
                            top_entry.get('comment') != 'FPLX_FROM_NCIT':
                        continue
                    top_entry = entry
                    top_idx = i
            for i, entry in enumerate(entries):
                if i == top_idx:
                    continue
                if (entry['priority'] - top_entry['priority']) <= 1:
                    ambiguities.append((top_entry, entry))
        else:
            top_entry = entries[0]
        top_per_score_group.append(top_entry)
    # Get the top priority for each score group
    priorities = [entry['priority'] for entry in top_per_score_group]

    # By default, we choose the top priority entry from the highest score group
    top_grounding = top_per_score_group[0]
    # Sometimes the top grounding has much lower priority and not much higher
    # score than the second grounding. Typically 1.0 vs 0.82857 and 5 vs 2.
    # In this case we take the second entry. A special case is handled where
    # a FPLX entry was mapped from FA, in which case a priority difference of
    # < 2 is also accepted.
    if len(top_per_score_group) > 1:
        score_diff = top_per_score_group[0]['score'] - \
            top_per_score_group[1]['score']
        priority_diff = top_per_score_group[0]['priority'] - \
            top_per_score_group[1]['priority']
        if score_diff < 0.2 and (priority_diff >= 2 or \
                top_per_score_group[0].get('comment') == 'FPLX_FROM_FA'):
            top_grounding = top_per_score_group[1]
    relevant_ambiguities = []
    for amb in ambiguities:
        if top_grounding not in amb:
            continue
        if top_grounding == amb[0]:
            relevant_ambiguities.append({'preferred': amb[0],
                                         'alternative': amb[1]})
        else:
            relevant_ambiguities.append({'preferred': amb[1],
                                         'alternative': amb[0]})

    for k, v in top_grounding['refs'].items():
        db_refs[k] = v

    # Now standardize db_refs to the INDRA standards
    # We need to add a prefix for CHEBI
    chebi_id = db_refs.get('CHEBI')
    if chebi_id and not chebi_id.startswith('CHEBI:'):
        db_refs['CHEBI'] = 'CHEBI:%s' % chebi_id
    # We need to strip the trailing version number for XFAM and rename to PF
    pfam_id = db_refs.get('XFAM')
    if pfam_id:
        pfam_id = pfam_id.split('.')[0]
        db_refs.pop('XFAM', None)
        db_refs['PF'] = pfam_id
    # We need to add GO prefix if it is missing
    go_id = db_refs.get('GO')
    if go_id:
        if not go_id.startswith('GO:'):
            db_refs['GO'] = 'GO:%s' % go_id
    # We need to deal with Nextprot families
    nxp_id = db_refs.get('FA')
    if nxp_id:
        db_refs.pop('FA', None)
        db_refs['NXPFA'] = nxp_id
    # We need to rename PC to PUBCHEM
    pc_id = db_refs.get('PC')
    if pc_id:
        db_refs.pop('PC', None)
        db_refs['PUBCHEM'] = pc_id

    # Here we also get and return the type, which is a TRIPS
    # ontology type. This is to be used in the context of
    # Bioagents.
    ont_type = top_grounding['type']
    return db_refs, ont_type, relevant_ambiguities
[1][SEP1][If][If][If][If][If][None][None][None][Return][For][For][None][If][If][For][If][None][None][None][If][None][For][For][None][None][If][For][If][None][For][None][If][None][If][Try][If][For][None][None][None][If][None][None][If][If][None][If][If][None][If][If][If][If][None][None][None][If][None][None][Return][SEP2][1,2][3,2][4,5][6,2][7,8][9][2][10][][11,12][13,8][14][15,16][17,10][18,19][20,16][21][10][22][23,24][16][25,26][27,28][29][30][31,21][32,33][34,22][14][35,36][9][37,38][26][39,40][41,42][43,29][44,30][21][21][40][45,46][47][47][48,29][49,36][46][50,51][52,22][29][53,36][54,51][55,56][57,22][36][51][56][58,59][22][59][][SEP3][1][1][1][1][3][1][0][1][0][10][1][1][1][1][3][1][0][0][0][6][0][2][3][0][0][0][1][0][0][1][1][0][0][2][0][0][2][1][1][0][1][1][0][2][0][2][1][0][0][0][1][1][1][1][0][1][1][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_COMPARE_NAME_LOAD_ISNOT_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_LISTCOMP_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_COMPREHENSION_NAME_STORE_NAME_LOAD_IF_UNARYOP_NOT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_NAME_LOAD_RETURN_TUPLE_NAME_LOAD_CONSTANT_LIST_LOAD_LOAD_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LAMBDA_ARGUMENTS_ARG_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_IF_BOOLOP_AND_COMPARE_NAME_LOAD_EQ_CONSTANT_COMPARE_NAME_LOAD_EQ_CONSTANT_CONTINUE_TRY_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_EXCEPTHANDLER_NAME_LOAD_PASS_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_IF_UNARYOP_NOT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_IF_COMPARE_CALL_NAME_LOAD_NAME_LOAD_GT_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CONSTANT_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_COMPARE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_IF_BOOLOP_AND_COMPARE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_EQ_CONSTANT_COMPARE_CONSTANT_IN_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_COMPARE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NOTEQ_CONSTANT_CONTINUE_ASSIGN_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_NAME_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_COMPARE_NAME_LOAD_EQ_NAME_LOAD_CONTINUE_IF_COMPARE_BINOP_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUB_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LTE_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_TUPLE_NAME_LOAD_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_LISTCOMP_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_IF_COMPARE_CALL_NAME_LOAD_NAME_LOAD_GT_CONSTANT_ASSIGN_NAME_STORE_BINOP_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_LOAD_SUB_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_BINOP_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_LOAD_SUB_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_LOAD_IF_BOOLOP_AND_COMPARE_NAME_LOAD_LT_CONSTANT_BOOLOP_OR_COMPARE_NAME_LOAD_GTE_CONSTANT_COMPARE_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_CONSTANT_EQ_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_FOR_NAME_STORE_NAME_LOAD_IF_COMPARE_NAME_LOAD_NOTIN_NAME_LOAD_CONTINUE_IF_COMPARE_NAME_LOAD_EQ_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_DICT_CONSTANT_CONSTANT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_DICT_CONSTANT_CONSTANT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_BOOLOP_AND_NAME_LOAD_UNARYOP_NOT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_BINOP_CONSTANT_MOD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_IF_UNARYOP_NOT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_BINOP_CONSTANT_MOD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_RETURN_TUPLE_NAME_LOAD_NAME_LOAD_NAME_LOAD_LOAD
def _assemble_transphosphorylation(self, stmt): """Example: complex(p(HGNC:EGFR)) => p(HGNC:EGFR, pmod(Ph, Tyr, 1173))""" # Check our assumptions about the bound condition of the enzyme assert len(stmt.enz.bound_conditions) == 1 assert stmt.enz.bound_conditions[0].is_bound # Create a modified protein node for the bound target sub_agent = deepcopy(stmt.enz.bound_conditions[0].agent) sub_agent.mods.append(stmt._get_mod_condition()) self._add_nodes_edges(stmt.enz, sub_agent, pc.DIRECTLY_INCREASES, stmt.evidence)
[2][SEP1][None][None][None][SEP2][1][2][][SEP3][1][0][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSERT_COMPARE_CALL_NAME_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EQ_CONSTANT_ASSERT_ATTRIBUTE_SUBSCRIPT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_ATTRIBUTE_SUBSCRIPT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD
def submit_curation():
    """Submit curations for a given corpus.

    The submitted curations are used to update the probability model but
    there is no return value here. The update_belief function can be called
    separately to calculate updated belief scores.

    Parameters
    ----------
    corpus_id : str
        The ID of the corpus for which the curation is submitted.
    curations : dict
        A set of curations where each key is a Statement UUID in the given
        corpus and each value is 0 or 1 with 0 corresponding to incorrect and
        1 corresponding to correct.
    """
    if request.json is None:
        abort(Response('Missing application/json header.', 415))
    # Get input parameters
    corpus_id = request.json.get('corpus_id')
    curations = request.json.get('curations', {})
    try:
        curator.submit_curation(corpus_id, curations)
    except InvalidCorpusError:
        abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
        return
    return jsonify({})
[0][SEP1][If][None][Try][None][Return][Return][SEP2][1,2][2][3,4][5][][][SEP3][0][2][2][1][2][1]
MODULE_FUNCTIONDEF_ARGUMENTS_EXPR_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_IS_CONSTANT_EXPR_CALL_NAME_LOAD_CALL_NAME_LOAD_CONSTANT_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_DICT_TRY_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_NAME_LOAD_CALL_NAME_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_CONSTANT_RETURN_RETURN_CALL_NAME_LOAD_DICT
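A minimal client-side sketch for exercising this endpoint. The host, port, and route name are assumptions (a hypothetical deployment at localhost:8001), as are the corpus ID and Statement UUID:

import requests

# Curations map Statement UUIDs to 0 (incorrect) or 1 (correct).
payload = {
    'corpus_id': 'world_50_curated',  # hypothetical corpus ID
    'curations': {'6f2c2a8a-0c52-4a4a-9b4e-9c7b8f3d2a11': 1}  # hypothetical UUID
}
resp = requests.post('http://localhost:8001/submit_curation', json=payload)
resp.raise_for_status()  # the endpoint returns an empty JSON object on success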
def extract_statements(self): """Extract the statements from the json.""" for p_info in self._json: para = RlimspParagraph(p_info, self.doc_id_type) self.statements.extend(para.get_statements()) return
[1][SEP1][None][For][None][Return][SEP2][1][2,3][1][][SEP3][0][0][3][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RETURN
def process_nxml(nxml_filename, pmid=None, extra_annotations=None, cleanup=True, add_grounding=True):
    """Process an NXML file using the ISI reader

    First converts NXML to plain text and preprocesses it, then runs the
    ISI reader, and processes the output to extract INDRA Statements.

    Parameters
    ----------
    nxml_filename : str
        nxml file to process
    pmid : Optional[str]
        pmid of this nxml file, to be added to the Evidence object of the
        extracted INDRA statements
    extra_annotations : Optional[dict]
        Additional annotations to add to the Evidence object of all extracted
        INDRA statements. Extra annotations called 'interaction' are ignored
        since this is used by the processor to store the corresponding
        raw ISI output.
    cleanup : Optional[bool]
        If True, the temporary folders created for preprocessed reading input
        and output are removed. Default: True
    add_grounding : Optional[bool]
        If True, the extracted Statements' grounding is mapped.
        Default: True

    Returns
    -------
    ip : indra.sources.isi.processor.IsiProcessor
        A processor containing extracted Statements
    """
    if extra_annotations is None:
        extra_annotations = {}

    # Create a temporary directory to store the preprocessed input
    pp_dir = tempfile.mkdtemp('indra_isi_pp_output')

    pp = IsiPreprocessor(pp_dir)
    pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)

    # Run the ISI reader and extract statements
    ip = process_preprocessed(pp)
    if add_grounding:
        ip.add_grounding()

    if cleanup:
        # Remove temporary directory with processed input
        shutil.rmtree(pp_dir)
    else:
        logger.info('Not cleaning up %s' % pp_dir)

    return ip
[5][SEP1][If][None][If][None][If][None][None][Return][SEP2][1,2][2][3,4][4][5,6][7][7][][SEP3][0][0][4][1][0][1][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_CONSTANT_CONSTANT_EXPR_CONSTANT_IF_COMPARE_NAME_LOAD_IS_CONSTANT_ASSIGN_NAME_STORE_DICT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_DICT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_RETURN_NAME_LOAD
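A usage sketch, assuming process_nxml is importable from the ISI reader's API module; the import path, file name and PMID are assumptions:

from indra.sources.isi.api import process_nxml  # assumed import path

# Read and preprocess an NXML paper, run the ISI reader, collect Statements.
ip = process_nxml('paper.nxml', pmid='12345678', cleanup=True)  # hypothetical inputs
for stmt in ip.statements:
    print(stmt)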
def _get_node_key(self, node_dict_item): """Return a tuple of sorted sources and targets given a node dict.""" s = tuple(sorted(node_dict_item['sources'])) t = tuple(sorted(node_dict_item['targets'])) return (s, t)
[2][SEP1][Return][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_RETURN_TUPLE_NAME_LOAD_NAME_LOAD_LOAD
def process_pybel_graph(graph): """Return a PybelProcessor by processing a PyBEL graph. Parameters ---------- graph : pybel.struct.BELGraph A PyBEL graph to process Returns ------- bp : PybelProcessor A PybelProcessor object which contains INDRA Statements in bp.statements. """ bp = PybelProcessor(graph) bp.get_statements() if bp.annot_manager.failures: logger.warning('missing %d annotation pairs', sum(len(v) for v in bp.annot_manager.failures.values())) return bp
[1][SEP1][If][None][Return][SEP2][1,2][2][][SEP3][2][4][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_NAME_LOAD_GENERATOREXP_CALL_NAME_LOAD_NAME_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD_RETURN_NAME_LOAD
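A usage sketch, assuming a pybel.BELGraph is already in hand; the pickle path is hypothetical and the loader name varies across pybel versions:

import pybel

graph = pybel.from_pickle('my_network.gpickle')  # hypothetical file; loader name version-dependent
bp = process_pybel_graph(graph)
print('%d INDRA Statements extracted' % len(bp.statements))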
def isa_or_partof(self, ns1, id1, ns2, id2):
    """Return True if two entities are in an "isa" or "partof" relationship

    Parameters
    ----------
    ns1 : str
        Namespace code for an entity.
    id1 : str
        URI for an entity.
    ns2 : str
        Namespace code for an entity.
    id2 : str
        URI for an entity.

    Returns
    -------
    bool
        True if the first entity has an "isa" or "partof" relationship
        with the second, either directly or through a series of
        intermediates; False otherwise.
    """
    rel_fun = lambda node, graph: self.isa_or_partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.isa_or_partof_closure,
                                               rel_fun)
[5][SEP1][Return][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_LAMBDA_ARGUMENTS_ARG_ARG_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def separate_reach_logs(log_str): """Get the list of reach logs from the overall logs.""" log_lines = log_str.splitlines() reach_logs = [] reach_lines = [] adding_reach_lines = False for l in log_lines[:]: if not adding_reach_lines and 'Beginning reach' in l: adding_reach_lines = True elif adding_reach_lines and 'Reach finished' in l: adding_reach_lines = False reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines))) reach_lines = [] elif adding_reach_lines: reach_lines.append(l.split('readers - ')[1]) log_lines.remove(l) if adding_reach_lines: reach_logs.append(('FAILURE', '\n'.join(reach_lines))) return '\n'.join(log_lines), reach_logs
[1][SEP1][None][For][If][If][None][If][None][Return][None][If][None][SEP2][1][2,3][4,5][6,7][1][8,9][7][][1][10,1][1][SEP3][1][0][0][0][0][0][2][1][2][0][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_ASSIGN_NAME_STORE_CONSTANT_FOR_NAME_STORE_SUBSCRIPT_NAME_LOAD_SLICE_LOAD_IF_BOOLOP_AND_UNARYOP_NOT_NAME_LOAD_COMPARE_CONSTANT_IN_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_IF_BOOLOP_AND_NAME_LOAD_COMPARE_CONSTANT_IN_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_TUPLE_CONSTANT_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_TUPLE_CONSTANT_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_LOAD_RETURN_TUPLE_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_NAME_LOAD_LOAD
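A self-contained sketch with a synthetic log; the marker strings ('Beginning reach', 'readers - ', 'Reach finished') are taken from the function body, the surrounding lines are invented:

log = '\n'.join([
    'INFO: job started',
    'Beginning reach',
    'readers - processing sentence 1',
    'readers - processing sentence 2',
    'Reach finished',
    'INFO: job done',
])
remaining, reach_logs = separate_reach_logs(log)
# reach_logs -> [('SUCCEEDED', 'processing sentence 1\nprocessing sentence 2')]
# remaining keeps the non-content lines, including the two marker lines.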
def _link_to_action_mentions(self, actionmention_filename): """Add action mentions""" parser = GenewaysActionMentionParser(actionmention_filename) self.action_mentions = parser.action_mentions for action_mention in self.action_mentions: hiid = action_mention.hiid if hiid not in self.hiid_to_action_index: m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception((m1 + m2) % hiid) else: idx = self.hiid_to_action_index[hiid] self.actions[idx].action_mentions.append(action_mention)
[2][SEP1][None][For][If][None][None][SEP2][1][2][3,4][][1][SEP3][1][0][0][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_ATTRIBUTE_NAME_LOAD_LOAD_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_NOTIN_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_RAISE_CALL_NAME_LOAD_BINOP_BINOP_NAME_LOAD_ADD_NAME_LOAD_MOD_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_LOAD_LOAD_NAME_LOAD
def get_uncond_agent(agent): """Construct the unconditional state of an Agent. The unconditional Agent is a copy of the original agent but without any bound conditions and modification conditions. Mutation conditions, however, are preserved since they are static. """ agent_uncond = ist.Agent(_n(agent.name), mutations=agent.mutations) return agent_uncond
[1][SEP1][Return][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_NAME_LOAD
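A sketch of the before/after behavior, assuming INDRA's statement classes (imported here under the ist alias the function itself uses); the agent and its conditions are illustrative:

import indra.statements as ist

braf = ist.Agent('BRAF',
                 mods=[ist.ModCondition('phosphorylation')],
                 mutations=[ist.MutCondition('600', 'V', 'E')])
braf_uncond = get_uncond_agent(braf)
assert not braf_uncond.mods      # modification conditions are dropped
assert braf_uncond.mutations     # mutation conditions are preserved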
def get_statements(self): """Convert network edges into Statements. Returns ------- list of Statements Converted INDRA Statements. """ edges = _get_dict_from_list('edges', self.cx) for edge in edges: edge_type = edge.get('i') if not edge_type: continue stmt_type = _stmt_map.get(edge_type) if stmt_type: id = edge['@id'] source_agent = self._node_agents.get(edge['s']) target_agent = self._node_agents.get(edge['t']) if not source_agent or not target_agent: logger.info("Skipping edge %s->%s: %s" % (self._node_names[edge['s']], self._node_names[edge['t']], edge)) continue ev = self._create_evidence(id) if stmt_type == Complex: stmt = stmt_type([source_agent, target_agent], evidence=ev) else: stmt = stmt_type(source_agent, target_agent, evidence=ev) self.statements.append(stmt) return self.statements
[1][SEP1][None][For][If][Return][If][If][None][If][None][None][None][SEP2][1][2,3][4,1][][5,1][6,7][1][8,9][10][10][1][SEP3][1][8][1][0][5][2][1][1][1][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_FOR_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_UNARYOP_NOT_NAME_LOAD_CONTINUE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_IF_BOOLOP_OR_UNARYOP_NOT_NAME_LOAD_UNARYOP_NOT_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_TUPLE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_NAME_LOAD_LOAD_CONTINUE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_COMPARE_NAME_LOAD_EQ_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_LIST_NAME_LOAD_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_KEYWORD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_RETURN_ATTRIBUTE_NAME_LOAD_LOAD
def process_from_web(): """Return a TrrustProcessor based on the online interaction table. Returns ------- TrrustProcessor A TrrustProcessor object that has a list of INDRA Statements in its statements attribute. """ logger.info('Downloading table from %s' % trrust_human_url) res = requests.get(trrust_human_url) res.raise_for_status() df = pandas.read_table(io.StringIO(res.text)) tp = TrrustProcessor(df) tp.extract_statements() return tp
[0][SEP1][Return][SEP2][][SEP3][7]
MODULE_FUNCTIONDEF_ARGUMENTS_EXPR_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_NAME_LOAD
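A usage sketch; this performs a live download of the TRRUST interaction table, so network access is required:

tp = process_from_web()
print('%d regulation Statements extracted' % len(tp.statements))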
def _sanitize(text): """Return sanitized Eidos text field for human readability.""" d = {'-LRB-': '(', '-RRB-': ')'} return re.sub('|'.join(d.keys()), lambda m: d[m.group(0)], text)
[1][SEP1][Return][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LAMBDA_ARGUMENTS_ARG_SUBSCRIPT_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_NAME_LOAD
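A deterministic example of the bracket replacement:

print(_sanitize('the cell line -LRB-HeLa-RRB- was treated'))
# -> 'the cell line (HeLa) was treated'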
def get_activating_mods(self):
    """Extract INDRA ActiveForm Statements with a single mod from BEL.

    The SPARQL pattern used for extraction from BEL looks for a
    ModifiedProteinAbundance as subject and an Activity of a
    ProteinAbundance as object.

    Examples:

        proteinAbundance(HGNC:INSR,proteinModification(P,Y))
        directlyIncreases
        kinaseActivity(proteinAbundance(HGNC:INSR))
    """
    q_mods = prefixes + """
        SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt ?species
        WHERE {
            ?stmt a belvoc:Statement .
            ?stmt belvoc:hasRelationship ?rel .
            ?stmt belvoc:hasSubject ?subject .
            ?stmt belvoc:hasObject ?object .
            ?object belvoc:hasActivityType ?actType .
            ?object belvoc:hasChild ?species .
            ?species a belvoc:ProteinAbundance .
            ?species belvoc:hasConcept ?speciesName .
            ?subject a belvoc:ModifiedProteinAbundance .
            ?subject belvoc:hasModificationType ?mod .
            ?subject belvoc:hasChild ?species .
            OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . }
            FILTER (?rel = belvoc:DirectlyIncreases ||
                    ?rel = belvoc:DirectlyDecreases)
        }
        """

    # Run the query and build an ActiveForm Statement for each result
    res_mods = self.g.query(q_mods)

    for stmt in res_mods:
        evidence = self._get_evidence(stmt[5])
        # Parse out the elements of the query
        species = self._get_agent(stmt[0], stmt[6])
        act_type = term_from_uri(stmt[1]).lower()
        mod = term_from_uri(stmt[2])
        mod_pos = term_from_uri(stmt[3])
        mc = self._get_mod_condition(mod, mod_pos)
        species.mods = [mc]
        rel = term_from_uri(stmt[4])
        if rel == 'DirectlyDecreases':
            is_active = False
        else:
            is_active = True
        stmt_str = strip_statement(stmt[5])
        # Mark this as a converted statement
        self.converted_direct_stmts.append(stmt_str)
        st = ActiveForm(species, act_type, is_active, evidence)
        self.statements.append(st)
[1][SEP1][None][For][If][None][None][None][SEP2][1][2][3,4][5][5][1][SEP3][1][4][8][0][0][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_BINOP_NAME_LOAD_ADD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_LIST_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD
def process_nxml_file(file_name, citation=None, offline=False,
                      output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML file.

    NXML is the format used by PubmedCentral for papers in the open
    access subset.

    Parameters
    ----------
    file_name : str
        The name of the NXML file to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted INDRA
        Statements. Default: None
    offline : Optional[bool]
        If set to True, the REACH system is run offline. Otherwise (by
        default) the web service is called. Default: False
    output_fname : Optional[str]
        The file to output the REACH JSON output to.
        Defaults to reach_output.json in current working directory.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements
        in rp.statements.
    """
    with open(file_name, 'rb') as f:
        nxml_str = f.read().decode('utf-8')
        return process_nxml_str(nxml_str, citation, offline, output_fname)
[4][SEP1][None][Return][SEP2][1][][SEP3][1][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_NAME_LOAD_EXPR_CONSTANT_WITH_WITHITEM_CALL_NAME_LOAD_NAME_LOAD_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_RETURN_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_CONSTANT_NAME_LOAD
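A usage sketch; the file name and PMID are hypothetical:

rp = process_nxml_file('PMC1234567.nxml', citation='12345678')
if rp is not None:
    print('%d Statements extracted' % len(rp.statements))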
def process_json(json_dict):
    """Return an EidosProcessor by processing an Eidos JSON-LD dict.

    Parameters
    ----------
    json_dict : dict
        The JSON-LD dict to be processed.

    Returns
    -------
    ep : EidosProcessor
        An EidosProcessor containing the extracted INDRA Statements in its
        statements attribute.
    """
    ep = EidosProcessor(json_dict)
    ep.extract_causal_relations()
    ep.extract_correlations()
    ep.extract_events()
    return ep
[1][SEP1][Return][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_NAME_LOAD
def _remove_bound_conditions(agent, keep_criterion):
    """Remove an agent's bound conditions for which keep_criterion is False.

    Parameters
    ----------
    agent: Agent
        The agent whose bound conditions we evaluate
    keep_criterion: function
        Evaluates keep_criterion(a) for each agent a in a bound condition
        and, if it evaluates to False, removes a from the agent's
        bound_conditions
    """
    new_bc = []
    for ind in range(len(agent.bound_conditions)):
        if keep_criterion(agent.bound_conditions[ind].agent):
            new_bc.append(agent.bound_conditions[ind])
    agent.bound_conditions = new_bc
[2][SEP1][None][For][If][None][None][SEP2][1][2,3][4,1][][1][SEP3][0][2][1][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_LIST_LOAD_FOR_NAME_STORE_CALL_NAME_LOAD_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_IF_CALL_NAME_LOAD_ATTRIBUTE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_NAME_LOAD
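A sketch of the filtering behavior, assuming INDRA's Agent and BoundCondition classes; the agents are illustrative:

from indra.statements import Agent, BoundCondition

braf = Agent('BRAF', bound_conditions=[BoundCondition(Agent('RAF1')),
                                       BoundCondition(Agent('HRAS'))])
# Keep only bound agents whose name starts with 'RAF':
_remove_bound_conditions(braf, lambda a: a.name.startswith('RAF'))
assert [bc.agent.name for bc in braf.bound_conditions] == ['RAF1']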
def export_kappa_cm(model, fname=None):
    """Return a pygraphviz graph representing the model's Kappa contact map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa CM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension in which
        the CM is rendered using pygraphviz.

    Returns
    -------
    pygraphviz.AGraph
        A graph object representing the contact map.
    """
    from .kappa_util import cm_json_to_graph
    kappa = _prepare_kappa(model)
    cmap = kappa.analyses_contact_map()
    cm = cm_json_to_graph(cmap)
    if fname:
        cm.draw(fname, prog='dot')
    return cm
[2][SEP1][If][None][Return][SEP2][1,2][2][][SEP3][3][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_CONSTANT_EXPR_CONSTANT_IMPORTFROM_ALIAS_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_CONSTANT_RETURN_NAME_LOAD
def assemble_pysb(): """Assemble INDRA Statements and return PySB model string.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') export_format = body.get('export_format') stmts = stmts_from_json(stmts_json) pa = PysbAssembler() pa.add_statements(stmts) pa.make_model() try: for m in pa.model.monomers: pysb_assembler.set_extended_initial_condition(pa.model, m, 0) except Exception as e: logger.exception(e) if not export_format: model_str = pa.print_model() elif export_format in ('kappa_im', 'kappa_cm'): fname = 'model_%s.png' % export_format root = os.path.dirname(os.path.abspath(fname)) graph = pa.export_model(format=export_format, file_name=fname) with open(fname, 'rb') as fh: data = 'data:image/png;base64,%s' % \ base64.b64encode(fh.read()).decode() return {'image': data} else: try: model_str = pa.export_model(format=export_format) except Exception as e: logger.exception(e) model_str = '' res = {'model': model_str} return res
[0][SEP1][If][Return][Try][For][None][None][If][None][If][Return][None][Try][Return][None][None][SEP2][1,2][][3,4][5,6][6][3][7,8][9][10,11][][12][13,14][][9][9][SEP3][0][0][9][0][1][1][0][1][0][0][4][0][3][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_EXPR_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_EQ_CONSTANT_RETURN_DICT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_TRY_FOR_NAME_STORE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_IN_TUPLE_CONSTANT_CONSTANT_LOAD_ASSIGN_NAME_STORE_BINOP_CONSTANT_MOD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_KEYWORD_NAME_LOAD_WITH_WITHITEM_CALL_NAME_LOAD_NAME_LOAD_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_BINOP_CONSTANT_MOD_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_RETURN_DICT_CONSTANT_NAME_LOAD_TRY_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_NAME_LOAD_RETURN_NAME_LOAD
def make_model(self, add_indra_json=True): """Assemble the CX network from the collected INDRA Statements. This method assembles a CX network from the set of INDRA Statements. The assembled network is set as the assembler's cx argument. Parameters ---------- add_indra_json : Optional[bool] If True, the INDRA Statement JSON annotation is added to each edge in the network. Default: True Returns ------- cx_str : str The json serialized CX model. """ self.add_indra_json = add_indra_json for stmt in self.statements: if isinstance(stmt, Modification): self._add_modification(stmt) if isinstance(stmt, SelfModification): self._add_self_modification(stmt) elif isinstance(stmt, RegulateActivity) or \ isinstance(stmt, RegulateAmount): self._add_regulation(stmt) elif isinstance(stmt, Complex): self._add_complex(stmt) elif isinstance(stmt, Gef): self._add_gef(stmt) elif isinstance(stmt, Gap): self._add_gap(stmt) elif isinstance(stmt, Influence): self._add_influence(stmt) network_description = '' self.cx['networkAttributes'].append({'n': 'name', 'v': self.network_name}) self.cx['networkAttributes'].append({'n': 'description', 'v': network_description}) cx_str = self.print_cx() return cx_str
[2][SEP1][None][For][If][Return][None][If][None][If][None][If][None][If][None][If][None][If][None][SEP2][1][2,3][4,5][][5][6,7][1][8,9][1][10,11][1][12,13][1][14,15][1][16,1][1][SEP3][0][13][1][3][1][1][1][2][1][1][1][1][1][1][1][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_CONSTANT_EXPR_CONSTANT_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_NAME_LOAD_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_BOOLOP_OR_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_EXPR_CALL_ATTRIBUTE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_LOAD_DICT_CONSTANT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_LOAD_DICT_CONSTANT_CONSTANT_CONSTANT_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_NAME_LOAD
def _get_edge_sign(im, edge): """Get the polarity of the influence by examining the edge sign.""" edge_data = im[edge[0]][edge[1]] # Handle possible multiple edges between nodes signs = list(set([v['sign'] for v in edge_data.values() if v.get('sign')])) if len(signs) > 1: logger.warning("Edge %s has conflicting polarities; choosing " "positive polarity by default" % str(edge)) sign = 1 else: sign = signs[0] if sign is None: raise Exception('No sign attribute for edge.') elif abs(sign) == 1: return sign else: raise Exception('Unexpected edge sign: %s' % edge.attr['sign'])
[2][SEP1][If][None][None][If][None][If][Return][None][SEP2][1,2][3][3][4,5][][6,7][][][SEP3][5][2][0][0][1][1][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_NAME_LOAD_LISTCOMP_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_COMPARE_CALL_NAME_LOAD_NAME_LOAD_GT_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_IF_COMPARE_NAME_LOAD_IS_CONSTANT_RAISE_CALL_NAME_LOAD_CONSTANT_IF_COMPARE_CALL_NAME_LOAD_NAME_LOAD_EQ_CONSTANT_RETURN_NAME_LOAD_RAISE_CALL_NAME_LOAD_BINOP_CONSTANT_MOD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD
def id_lookup(paper_id, idtype=None):
    """Take a PubMed ID, PubMed Central ID, or DOI and use the PubMed ID
    mapping service to look up all the other IDs. The IDs are returned in
    a dictionary."""
    if idtype is not None and idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    if paper_id.upper().startswith('PMC'):
        idtype = 'pmcid'
    # Strip off any prefix
    if paper_id.upper().startswith('PMID'):
        paper_id = paper_id[4:]
    elif paper_id.upper().startswith('DOI'):
        paper_id = paper_id[3:]
    data = {'ids': paper_id}
    if idtype is not None:
        data['idtype'] = idtype
    try:
        tree = pubmed_client.send_request(pmid_convert_url, data)
    except Exception as e:
        logger.error('Error looking up PMID in PMC: %s' % e)
        return {}
    if tree is None:
        return {}
    record = tree.find('record')
    if record is None:
        return {}
    doi = record.attrib.get('doi')
    pmid = record.attrib.get('pmid')
    pmcid = record.attrib.get('pmcid')
    ids = {'doi': doi,
           'pmid': pmid,
           'pmcid': pmcid}
    return ids
[2][SEP1][If][None][If][None][If][None][If][If][None][None][Try][None][Return][If][Return][If][Return][Return][SEP2][1,2][][3,4][4][5,6][7][8,7][9,10][7][10][11,12][13][][14,15][][16,17][][][SEP3][0][1][2][0][2][0][2][0][0][0][0][1][1][0][0][1][0][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_CONSTANT_EXPR_CONSTANT_IF_BOOLOP_AND_COMPARE_NAME_LOAD_ISNOT_CONSTANT_COMPARE_NAME_LOAD_NOTIN_TUPLE_CONSTANT_CONSTANT_CONSTANT_LOAD_RAISE_CALL_NAME_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_IF_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_SLICE_CONSTANT_LOAD_IF_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_SLICE_CONSTANT_LOAD_ASSIGN_NAME_STORE_DICT_CONSTANT_NAME_LOAD_IF_COMPARE_NAME_LOAD_ISNOT_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_TRY_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_RETURN_DICT_IF_COMPARE_NAME_LOAD_IS_CONSTANT_RETURN_DICT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_IF_COMPARE_NAME_LOAD_IS_CONSTANT_RETURN_DICT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD_RETURN_NAME_LOAD
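A usage sketch; this performs a live request against the PMC ID converter, and the input ID is illustrative:

ids = id_lookup('PMC4322985', idtype='pmcid')
# On success, ids has 'doi', 'pmid' and 'pmcid' keys; on failure it is {}.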
def _make_sentence(txt):
    """Make a sentence from a piece of text."""
    # Make sure the first letter is capitalized
    txt = txt.strip(' ')
    txt = txt[0].upper() + txt[1:] + '.'
    return txt
[1][SEP1][Return][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ASSIGN_NAME_STORE_BINOP_BINOP_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_ADD_SUBSCRIPT_NAME_LOAD_SLICE_CONSTANT_LOAD_ADD_CONSTANT_RETURN_NAME_LOAD
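A deterministic example:

print(_make_sentence('the cell was treated'))  # -> 'The cell was treated.'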
def join_json_files(prefix): """Join different REACH output JSON files into a single JSON object. The output of REACH is broken into three files that need to be joined before processing. Specifically, there will be three files of the form: `<prefix>.uaz.<subcategory>.json`. Parameters ---------- prefix : str The absolute path up to the extensions that reach will add. Returns ------- json_obj : dict The result of joining the files, keyed by the three subcategories. """ try: with open(prefix + '.uaz.entities.json', 'rt') as f: entities = json.load(f) with open(prefix + '.uaz.events.json', 'rt') as f: events = json.load(f) with open(prefix + '.uaz.sentences.json', 'rt') as f: sentences = json.load(f) except IOError as e: logger.error( 'Failed to open JSON files for %s; REACH error?' % prefix ) logger.exception(e) return None return {'events': events, 'entities': entities, 'sentences': sentences}
[1][SEP1][Try][None][Return][None][None][None][None][None][Return][SEP2][1,2][3][][4][5][6][7][8][][SEP3][4][1][2][1][1][1][1][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_TRY_WITH_WITHITEM_CALL_NAME_LOAD_BINOP_NAME_LOAD_ADD_CONSTANT_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_WITH_WITHITEM_CALL_NAME_LOAD_BINOP_NAME_LOAD_ADD_CONSTANT_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_WITH_WITHITEM_CALL_NAME_LOAD_BINOP_NAME_LOAD_ADD_CONSTANT_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_RETURN_CONSTANT_RETURN_DICT_CONSTANT_CONSTANT_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD
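A usage sketch; the prefix is a hypothetical path to which REACH appended .uaz.entities.json, .uaz.events.json and .uaz.sentences.json:

json_obj = join_json_files('/tmp/reach_output/paper1')
if json_obj is not None:
    print(sorted(json_obj))  # -> ['entities', 'events', 'sentences']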
def _make_concept(self, entity): """Return Concept from a Hume entity.""" # Use the canonical name as the name of the Concept by default name = self._sanitize(entity['canonicalName']) # But if there is a trigger head text, we prefer that since # it almost always results in a cleaner name # This is removed for now since the head word seems to be too # minimal for some concepts, e.g. it gives us only "security" # for "food security". """ trigger = entity.get('trigger') if trigger is not None: head_text = trigger.get('head text') if head_text is not None: name = head_text """ # Save raw text and Hume scored groundings as db_refs db_refs = _get_grounding(entity) concept = Concept(name, db_refs=db_refs) metadata = {arg['type']: arg['value']['@id'] for arg in entity['arguments']} return concept, metadata
[2][SEP1][Return][SEP2][][SEP3][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_DICTCOMP_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_LOAD_COMPREHENSION_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_RETURN_TUPLE_NAME_LOAD_NAME_LOAD_LOAD
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
    """Preprocess a plain text file for use with the ISI reader.

    Preprocessing results in a new text file with one sentence
    per line.

    Parameters
    ----------
    filename : str
        The name of the plain text file
    pmid : str
        The PMID from which it comes, or None if not specified
    extra_annotations : dict
        Extra annotations to be added to each statement, possibly including
        metadata about the source (annotations with the key "interaction"
        will be overridden)
    """
    with codecs.open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
        self.preprocess_plain_text_string(content, pmid,
                                          extra_annotations)
[4][SEP1][None][None][SEP2][1][][SEP3][1][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_EXPR_CONSTANT_WITH_WITHITEM_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_KEYWORD_CONSTANT_NAME_STORE_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD
def comment_update(self, comment_id, body):
    """Function to update a comment (Requires login).

    Parameters:
        comment_id (int): The ID of the comment to update.
        body (str): The new body of the comment.
    """
    params = {'comment[body]': body}
    return self._get('comments/{0}.json'.format(comment_id), params, 'PUT',
                     auth=True)
[3][SEP1][Return][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_NAME_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_NAME_LOAD_CONSTANT_KEYWORD_CONSTANT
def note_list(self, body_matches=None, post_id=None, post_tags_match=None, creator_name=None, creator_id=None, is_active=None): """Return list of notes. Parameters: body_matches (str): The note's body matches the given terms. post_id (int): A specific post. post_tags_match (str): The note's post's tags match the given terms. creator_name (str): The creator's name. Exact match. creator_id (int): The creator's user id. is_active (bool): Can be: True, False. """ params = { 'search[body_matches]': body_matches, 'search[post_id]': post_id, 'search[post_tags_match]': post_tags_match, 'search[creator_name]': creator_name, 'search[creator_id]': creator_id, 'search[is_active]': is_active } return self._get('notes.json', params)
[7][SEP1][Return][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD
def wiki_create(self, title, body, other_names=None):
    """Action that lets you create a wiki page (Requires login) (UNTESTED).

    Parameters:
        title (str): Page title.
        body (str): Page content.
        other_names (str): Other names.
    """
    params = {
        'wiki_page[title]': title,
        'wiki_page[body]': body,
        'wiki_page[other_names]': other_names
        }
    return self._get('wiki_pages.json', params, method='POST', auth=True)
[4][SEP1][Return][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_KEYWORD_CONSTANT_KEYWORD_CONSTANT
def artist_create(self, name, urls=None, alias=None, group=None):
    """Function to create an artist (Requires login) (UNTESTED).

    Parameters:
        name (str): The artist's name.
        urls (str): A list of URLs associated with the artist, whitespace
                    delimited.
        alias (str): The artist that this artist is an alias for. Simply
                     enter the alias artist's name.
        group (str): The group or circle that this artist is a member of.
                     Simply enter the group's name.
    """
    params = {
        'artist[name]': name,
        'artist[urls]': urls,
        'artist[alias]': alias,
        'artist[group]': group
        }
    return self._get('artist/create', params, method='POST')
[5][SEP1][Return][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_KEYWORD_CONSTANT
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
[1][SEP1][If][Return][For][For][Return][If][Return][SEP2][1,2][][3,4][5,2][][6,3][][SEP3][1][1][0][1][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_LISTCOMP_CALL_ATTRIBUTE_NAME_LOAD_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_IF_BOOLOP_AND_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CALL_NAME_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_FOR_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_LIST_CONSTANT_CONSTANT_CONSTANT_LOAD_IF_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_RETURN_CONSTANT_RETURN_CONSTANT
def datetime_match(data, dts): """ matching of datetimes in time columns for data filtering """ dts = dts if islistable(dts) else [dts] if any([not isinstance(i, datetime.datetime) for i in dts]): error_msg = ( "`time` can only be filtered by datetimes" ) raise TypeError(error_msg) return data.isin(dts)
[2][SEP1][If][None][Return][SEP2][1,2][][][SEP3][3][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_IFEXP_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_LIST_NAME_LOAD_LOAD_IF_CALL_NAME_LOAD_LISTCOMP_UNARYOP_NOT_CALL_NAME_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CONSTANT_RAISE_CALL_NAME_LOAD_NAME_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
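A sketch using a pandas Series as the data argument (the function only requires that data has an isin method; pandas here is illustrative):

import datetime
import pandas as pd

data = pd.Series([datetime.datetime(2020, 1, 1), datetime.datetime(2020, 6, 1)])
mask = datetime_match(data, datetime.datetime(2020, 1, 1))
print(mask.tolist())  # -> [True, False]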
def update(self, rc): """Add additional run control parameters Parameters ---------- rc : string, file, dictionary, optional a path to a YAML file, a file handle for a YAML file, or a dictionary describing run control configuration """ rc = self._load_yaml(rc) self.store = _recursive_update(self.store, rc)
[2][SEP1][None][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def mutate(self): """ Upgrades this Instance to the latest generation type """ self._client.post('{}/mutate'.format(Instance.api_endpoint), model=self) return True
[1][SEP1][Return][SEP2][][SEP3][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_RETURN_CONSTANT
def events_mark_seen(self, event): """ Marks event as the last event we have seen. If event is an int, it is treated as an event_id, otherwise it should be an event object whose id will be used. """ last_seen = event if isinstance(event, int) else event.id self.client.post('{}/seen'.format(Event.api_endpoint), model=Event(self.client, last_seen))
[2][SEP1][None][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_IFEXP_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def iter_chain(cur): """Iterate over all of the chains in the database. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. Yields: list: The chain. """ select = "SELECT nodes FROM chain" for nodes, in cur.execute(select): yield json.loads(nodes)
[1][SEP1][None][For][None][SEP2][1][2][1][SEP3][0][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_FOR_TUPLE_NAME_STORE_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_YIELD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
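A self-contained sketch with an in-memory database; the single-column schema is inferred from the SELECT statement in the function:

import json
import sqlite3

conn = sqlite3.connect(':memory:')
with conn:
    cur = conn.cursor()
    cur.execute('CREATE TABLE chain (nodes TEXT)')
    cur.execute('INSERT INTO chain VALUES (?)', (json.dumps([0, 1, 2]),))
    for chain in iter_chain(cur):
        print(chain)  # -> [0, 1, 2]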
def _contains_line(self, line): """Test if a chain of qubits is completely contained in ``self``. In particular, test if all qubits are present and the couplers connecting those qubits are also connected. NOTE: this function assumes that ``line`` is a list or tuple of qubits which satisfies the precondition that ``(line[i],line[i+1])`` is supposed to be a coupler for all ``i``. INPUTS: line: a list of qubits satisfying the above precondition OUTPUT: boolean """ return all(v in self for v in line) and all(u in self[v] for u, v in zip(line, line[1::]))
[2][SEP1][Return][SEP2][][SEP3][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_RETURN_BOOLOP_AND_CALL_NAME_LOAD_GENERATOREXP_COMPARE_NAME_LOAD_IN_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_CALL_NAME_LOAD_GENERATOREXP_COMPARE_NAME_LOAD_IN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_COMPREHENSION_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_NAME_LOAD_NAME_LOAD_SUBSCRIPT_NAME_LOAD_SLICE_CONSTANT_LOAD
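A sketch: since the method only uses `v in self` and `self[v]`, a plain dict of qubit-to-coupled-neighbour sets can stand in for self here:

graph = {1: {2}, 2: {1, 3}, 3: {2}}
print(_contains_line(graph, [1, 2, 3]))  # -> True
print(_contains_line(graph, [1, 3]))     # -> False: no (1, 3) coupler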
def __related_categories(self, category_id): """ Get all related categories to a given one """ related = [] for cat in self.categories_tree: if category_id in self.categories_tree[cat]: related.append(self.categories[cat]) return related
[2][SEP1][None][For][If][Return][None][SEP2][1][2,3][4,1][][1][SEP3][0][0][0][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_LIST_LOAD_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_IN_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_RETURN_NAME_LOAD
def get_identities(self, item): """ Return the identities from an item """ data = item['data'] if 'assigned_to' in data: user = self.get_sh_identity(data, 'assigned_to') yield user author = self.get_sh_identity(data, 'author') yield author
[2][SEP1][If][None][None][SEP2][1,2][2][][SEP3][0][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_IF_COMPARE_CONSTANT_IN_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_EXPR_YIELD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_EXPR_YIELD_NAME_LOAD
def delete_items(self, retention_time, time_field="metadata__updated_on"): """Delete documents updated before a given date :param retention_time: maximum number of minutes wrt the current date to retain the data :param time_field: time field to delete the data """ if retention_time is None: logger.debug("[items retention] Retention policy disabled, no items will be deleted.") return if retention_time <= 0: logger.debug("[items retention] Minutes to retain must be greater than 0.") return before_date = get_diff_current_date(minutes=retention_time) before_date_str = before_date.isoformat() es_query = ''' { "query": { "range": { "%s": { "lte": "%s" } } } } ''' % (time_field, before_date_str) r = self.requests.post(self.index_url + "/_delete_by_query?refresh", data=es_query, headers=HEADER_JSON, verify=False) try: r.raise_for_status() r_json = r.json() logger.debug("[items retention] %s items deleted from %s before %s.", r_json['deleted'], self.anonymize_url(self.index_url), before_date) except requests.exceptions.HTTPError as ex: logger.error("[items retention] Error deleted items from %s.", self.anonymize_url(self.index_url)) logger.error(ex) return
[3][SEP1][If][Return][If][Return][Try][None][Return][SEP2][1,2][][3,4][][5,6][][][SEP3][0][1][0][1][3][4][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_CONSTANT_EXPR_CONSTANT_IF_COMPARE_NAME_LOAD_IS_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_RETURN_IF_COMPARE_NAME_LOAD_LTE_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_RETURN_ASSIGN_NAME_STORE_CALL_NAME_LOAD_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_BINOP_CONSTANT_MOD_TUPLE_NAME_LOAD_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_BINOP_ATTRIBUTE_NAME_LOAD_LOAD_ADD_CONSTANT_KEYWORD_NAME_LOAD_KEYWORD_NAME_LOAD_KEYWORD_CONSTANT_TRY_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXCEPTHANDLER_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_RETURN
def get_params_parser_create_dash(): """Parse command line arguments""" parser = argparse.ArgumentParser(usage="usage: e2k.py [options]", description="Create a Kibana dashboard from a template") ElasticOcean.add_params(parser) parser.add_argument("-d", "--dashboard", help="dashboard to be used as template") parser.add_argument("-i", "--index", help="enriched index to be used as data source") parser.add_argument("--kibana", dest="kibana_index", default=".kibana", help="Kibana index name (.kibana default)") parser.add_argument('-g', '--debug', dest='debug', action='store_true') return parser
[0][SEP1][Return][SEP2][][SEP3][6]
MODULE_FUNCTIONDEF_ARGUMENTS_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CONSTANT_KEYWORD_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_KEYWORD_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_KEYWORD_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_KEYWORD_CONSTANT_KEYWORD_CONSTANT_KEYWORD_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_KEYWORD_CONSTANT_KEYWORD_CONSTANT_RETURN_NAME_LOAD
def get_domain(self, identity): """ Get the domain from a SH identity """ domain = None if identity['email']: try: domain = identity['email'].split("@")[1] except IndexError: # logger.warning("Bad email format: %s" % (identity['email'])) pass return domain
[2][SEP1][If][Try][Return][None][None][SEP2][1,2][3,4][][2][2][SEP3][0][0][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_TRY_ASSIGN_NAME_STORE_SUBSCRIPT_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_EXCEPTHANDLER_NAME_LOAD_PASS_RETURN_NAME_LOAD
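A deterministic sketch; self is unused in the body, so the method can be exercised directly, and the identity dict is hypothetical:

identity = {'email': 'jane@example.org'}  # hypothetical SH identity
print(get_domain(None, identity))         # -> 'example.org'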
def _fix_review_dates(self, item):
    """Convert dates so that ES detects them"""

    for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
        if date_field in item.keys():
            date_ts = item[date_field]
            item[date_field] = unixtime_to_datetime(date_ts).isoformat()

    if 'patchSets' in item.keys():
        for patch in item['patchSets']:
            pdate_ts = patch['createdOn']
            patch['createdOn'] = unixtime_to_datetime(pdate_ts).isoformat()

            if 'approvals' in patch:
                for approval in patch['approvals']:
                    adate_ts = approval['grantedOn']
                    approval['grantedOn'] = unixtime_to_datetime(adate_ts).isoformat()

    if 'comments' in item.keys():
        for comment in item['comments']:
            cdate_ts = comment['timestamp']
            comment['timestamp'] = unixtime_to_datetime(cdate_ts).isoformat()
[2][SEP1][None][For][If][If][None][For][If][If][For][For][None][None][SEP2][1][2,3][4,1][5,6][1][7,6][8][9,5][10][11,5][8][9][SEP3][0][0][1][1][2][0][1][2][0][0][2][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_FOR_NAME_STORE_LIST_CONSTANT_CONSTANT_CONSTANT_LOAD_IF_COMPARE_NAME_LOAD_IN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_CALL_ATTRIBUTE_CALL_NAME_LOAD_NAME_LOAD_LOAD_IF_COMPARE_CONSTANT_IN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_FOR_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_CALL_NAME_LOAD_NAME_LOAD_LOAD_IF_COMPARE_CONSTANT_IN_NAME_LOAD_FOR_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_CALL_NAME_LOAD_NAME_LOAD_LOAD_IF_COMPARE_CONSTANT_IN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_FOR_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_CALL_NAME_LOAD_NAME_LOAD_LOAD
def update_items(self, ocean_backend, enrich_backend): """Retrieve the commits not present in the original repository and delete the corresponding documents from the raw and enriched indexes""" fltr = { 'name': 'origin', 'value': [self.perceval_backend.origin] } logger.debug("[update-items] Checking commits for %s.", self.perceval_backend.origin) git_repo = GitRepository(self.perceval_backend.uri, self.perceval_backend.gitpath) try: current_hashes = set([commit for commit in git_repo.rev_list()]) except Exception as e: logger.error("Skip updating branch info for repo %s, git rev-list command failed: %s", git_repo.uri, e) return raw_hashes = set([item['data']['commit'] for item in ocean_backend.fetch(ignore_incremental=True, _filter=fltr)]) hashes_to_delete = list(raw_hashes.difference(current_hashes)) to_process = [] for _hash in hashes_to_delete: to_process.append(_hash) if len(to_process) != MAX_BULK_UPDATE_SIZE: continue # delete documents from the raw index self.remove_commits(to_process, ocean_backend.elastic.index_url, 'data.commit', self.perceval_backend.origin) # delete documents from the enriched index self.remove_commits(to_process, enrich_backend.elastic.index_url, 'hash', self.perceval_backend.origin) to_process = [] if to_process: # delete documents from the raw index self.remove_commits(to_process, ocean_backend.elastic.index_url, 'data.commit', self.perceval_backend.origin) # delete documents from the enriched index self.remove_commits(to_process, enrich_backend.elastic.index_url, 'hash', self.perceval_backend.origin) logger.debug("[update-items] %s commits deleted from %s with origin %s.", len(hashes_to_delete), ocean_backend.elastic.anonymize_url(ocean_backend.elastic.index_url), self.perceval_backend.origin) logger.debug("[update-items] %s commits deleted from %s with origin %s.", len(hashes_to_delete), enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url), self.perceval_backend.origin) # update branch info self.delete_commit_branches(enrich_backend) self.add_commit_branches(git_repo, enrich_backend)
[3][SEP1][Try][None][Return][None][For][If][If][None][None][None][SEP2][1,2][3][][4][5,6][7,4][8,9][4][9][][SEP3][2][2][1][4][2][2][0][2][2][8]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_LIST_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_TRY_ASSIGN_NAME_STORE_CALL_NAME_LOAD_LISTCOMP_NAME_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_RETURN_ASSIGN_NAME_STORE_CALL_NAME_LOAD_LISTCOMP_SUBSCRIPT_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_CONSTANT_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CONSTANT_KEYWORD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_FOR_NAME_STORE_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_COMPARE_CALL_NAME_LOAD_NAME_LOAD_NOTEQ_NAME_LOAD_CONTINUE_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_IF_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_NAME_LOAD_NAME_LOAD_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_NAME_LOAD_NAME_LOAD_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_NAME_LOAD
def _serialize_fields(point): """Field values can be floats, integers, strings, or Booleans.""" output = [] for k, v in point['fields'].items(): k = escape(k, key_escape) if isinstance(v, bool): output.append(f'{k}={v}') elif isinstance(v, int): output.append(f'{k}={v}i') elif isinstance(v, str): output.append(f'{k}="{v.translate(str_escape)}"') elif v is None: # Empty values continue else: # Floats output.append(f'{k}={v}') return ','.join(output)
[1][SEP1][None][For][If][Return][None][If][None][If][None][If][None][SEP2][1][2,3][4,5][][1][6,7][1][8,9][1][10,1][1][SEP3][0][1][2][1][1][1][1][1][2][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_LIST_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_JOINEDSTR_FORMATTEDVALUE_NAME_LOAD_CONSTANT_FORMATTEDVALUE_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_JOINEDSTR_FORMATTEDVALUE_NAME_LOAD_CONSTANT_FORMATTEDVALUE_NAME_LOAD_CONSTANT_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_JOINEDSTR_FORMATTEDVALUE_NAME_LOAD_CONSTANT_FORMATTEDVALUE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_IF_COMPARE_NAME_LOAD_IS_CONSTANT_CONTINUE_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_JOINEDSTR_FORMATTEDVALUE_NAME_LOAD_CONSTANT_FORMATTEDVALUE_NAME_LOAD_RETURN_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD
def user_login(self, email=None, password=None): """Login with email, password and get back a session cookie :type email: str :param email: The email used for authentication :type password: str :param password: The password used for authentication """ email = six.moves.input("Email: ") if email is None else email password = getpass.getpass() if password is None else password login_data = { "method": "user.login", "params": {"email": email, "pass": password} } # If the user/password match, the server respond will contain a # session cookie that you can use to authenticate future requests. r = self.session.post( self.base_api_urls["logic"], data=json.dumps(login_data), ) if r.json()["result"] not in ["OK"]: raise AuthenticationError("Could not authenticate.\n{}" .format(r.json()))
[3][SEP1][If][None][SEP2][1][][SEP3][5][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_CONSTANT_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_IFEXP_COMPARE_NAME_LOAD_IS_CONSTANT_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT_NAME_LOAD_ASSIGN_NAME_STORE_IFEXP_COMPARE_NAME_LOAD_IS_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_CONSTANT_DICT_CONSTANT_CONSTANT_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_KEYWORD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_COMPARE_SUBSCRIPT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_NOTIN_LIST_CONSTANT_LOAD_RAISE_CALL_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD
def resolve_post(self, post):
    """Mark post as resolved

    :type post: dict|str|int
    :param post: Either the post dict returned by another API
        method, or the `cid` field of that post.
    :returns: True if it is successful. False otherwise
    """
    try:
        cid = post["id"]
    except KeyError:
        cid = post

    params = {
        "cid": cid,
        "resolved": "true"
    }
    return self._rpc.content_mark_resolved(params)
[2][SEP1][Try][None][None][Return][SEP2][1,2][3][3][][SEP3][0][0][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_TRY_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_EXCEPTHANDLER_NAME_LOAD_ASSIGN_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_NAME_LOAD_CONSTANT_RETURN_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD
def metric_names(self, id, name=None, page=None):
    """
    Return a list of known metrics and their value names for the given resource.

    :type id: int
    :param id: Server ID

    :type name: str
    :param name: Filter metrics by name

    :type page: int
    :param page: Pagination index

    :rtype: dict
    :return: The JSON response of the API, with an additional 'pages' key
        if there are paginated results

    ::

        {
            "metrics": [
                {
                    "name": "string",
                    "values": [
                        "string"
                    ]
                }
            ],
            "pages": {
                "last": {
                    "url": "https://api.newrelic.com/v2/servers/{server_id}/metrics.json?page=2",
                    "rel": "last"
                },
                "next": {
                    "url": "https://api.newrelic.com/v2/servers/{server_id}/metrics.json?page=2",
                    "rel": "next"
                }
            }
        }

    """
    params = [
        'name={0}'.format(name) if name else None,
        'page={0}'.format(page) if page else None
    ]

    return self._get(
        url='{0}servers/{1}/metrics.json'.format(self.URL, id),
        headers=self.headers,
        params=self.build_param_string(params)
    )
[4][SEP1][Return][SEP2][][SEP3][5]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_LIST_IFEXP_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_CONSTANT_IFEXP_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_CONSTANT_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CALL_ATTRIBUTE_CONSTANT_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def list(
        self, application_id, filter_hostname=None, filter_ids=None,
        page=None):
    """
    This API endpoint returns a paginated list of instances associated with the given application.

    Application instances can be filtered by hostname, or the list of application instance IDs.

    :type application_id: int
    :param application_id: Application ID

    :type filter_hostname: str
    :param filter_hostname: Filter by server hostname

    :type filter_ids: list of ints
    :param filter_ids: Filter by application instance ids

    :type page: int
    :param page: Pagination index

    :rtype: dict
    :return: The JSON response of the API, with an additional 'pages' key
        if there are paginated results

    ::

        {
            "application_instances": [
                {
                    "id": "integer",
                    "application_name": "string",
                    "host": "string",
                    "port": "integer",
                    "language": "integer",
                    "health_status": "string",
                    "application_summary": {
                        "response_time": "float",
                        "throughput": "float",
                        "error_rate": "float",
                        "apdex_score": "float"
                    },
                    "end_user_summary": {
                        "response_time": "float",
                        "throughput": "float",
                        "apdex_score": "float"
                    },
                    "links": {
                        "application": "integer",
                        "application_host": "integer",
                        "server": "integer"
                    }
                }
            ],
            "pages": {
                "last": {
                    "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
                    "rel": "last"
                },
                "next": {
                    "url": "https://api.newrelic.com/v2/applications/{application_id}/instances.json?page=2",
                    "rel": "next"
                }
            }
        }

    """
    filters = [
        'filter[hostname]={0}'.format(filter_hostname) if filter_hostname else None,
        'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
        'page={0}'.format(page) if page else None
    ]

    return self._get(
        url='{root}applications/{application_id}/instances.json'.format(
            root=self.URL,
            application_id=application_id
        ),
        headers=self.headers,
        params=self.build_param_string(filters)
    )
[5][SEP1][Return][SEP2][][SEP3][8]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_LIST_IFEXP_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_CONSTANT_IFEXP_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_LISTCOMP_CALL_NAME_LOAD_NAME_LOAD_COMPREHENSION_NAME_STORE_NAME_LOAD_CONSTANT_IFEXP_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_CONSTANT_LOAD_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CALL_ATTRIBUTE_CONSTANT_LOAD_KEYWORD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_NAME_LOAD_KEYWORD_ATTRIBUTE_NAME_LOAD_LOAD_KEYWORD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def makeNodeTuple(citation, idVal, nodeInfo, fullInfo, nodeType, count, coreCitesDict, coreValues, detailedValues, addCR):
    """Makes a tuple of idVal and a dict of the selected attributes"""
    d = {}
    if nodeInfo:
        if nodeType == 'full':
            if coreValues:
                if citation in coreCitesDict:
                    R = coreCitesDict[citation]
                    d['MK-ID'] = R.id
                    if not detailedValues:
                        infoVals = []
                        for tag in coreValues:
                            tagVal = R.get(tag)
                            if isinstance(tagVal, str):
                                infoVals.append(tagVal.replace(',', ''))
                            elif isinstance(tagVal, list):
                                infoVals.append(tagVal[0].replace(',', ''))
                            else:
                                pass
                        d['info'] = ', '.join(infoVals)
                    else:
                        for tag in coreValues:
                            v = R.get(tag, None)
                            if isinstance(v, list):
                                d[tag] = '|'.join(sorted(v))
                            else:
                                d[tag] = v
                    d['inCore'] = True
                    if addCR:
                        d['citations'] = '|'.join((str(c) for c in R.get('citations', [])))
                else:
                    d['MK-ID'] = 'None'
                    d['info'] = citation.allButDOI()
                    d['inCore'] = False
                    if addCR:
                        d['citations'] = ''
            else:
                d['info'] = citation.allButDOI()
        elif nodeType == 'journal':
            if citation.isJournal():
                d['info'] = str(citation.FullJournalName())
            else:
                d['info'] = "None"
        elif nodeType == 'original':
            d['info'] = str(citation)
        else:
            d['info'] = idVal
    if fullInfo:
        d['fullCite'] = str(citation)
    if count:
        d['count'] = 1
    return (idVal, d)
[10][SEP1][If][If][If][If][If][None][If][If][None][If][If][None][Return][If][If][None][None][None][None][None][For][None][For][If][If][If][None][None][None][None][None][If][None][None][SEP2][1,2][3,4][5,6][7,8][9,10][6][11,12][13,14][2][15,16][17,18][12][][19,20][21,2][2][2][2][2][22][23,24][2][25,26][27,28][29,2][30,31][24][20][20][2][22][32,33][22][22][SEP3][0][0][0][0][0][1][0][3][1][1][0][0][0][8][1][2][0][1][0][0][0][0][0][2][0][2][1][2][0][3][2][1][2][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_ARG_ARG_ARG_ARG_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_IF_NAME_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_IF_NAME_LOAD_IF_COMPARE_NAME_LOAD_IN_NAME_LOAD_ASSIGN_NAME_STORE_SUBSCRIPT_NAME_LOAD_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_ATTRIBUTE_NAME_LOAD_LOAD_IF_UNARYOP_NOT_NAME_LOAD_ASSIGN_NAME_STORE_LIST_LOAD_FOR_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_SUBSCRIPT_NAME_LOAD_CONSTANT_LOAD_LOAD_CONSTANT_CONSTANT_PASS_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_CONSTANT_IF_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_CALL_ATTRIBUTE_CONSTANT_LOAD_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_IF_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_CONSTANT_LOAD_GENERATOREXP_CALL_NAME_LOAD_NAME_LOAD_COMPREHENSION_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LIST_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_IF_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_IF_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_IF_COMPARE_NAME_LOAD_EQ_CONSTANT_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_IF_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CALL_NAME_LOAD_NAME_LOAD_IF_NAME_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_CONSTANT_RETURN_TUPLE_NAME_LOAD_NAME_LOAD_LOAD
def removeID(self, idVal):
    """Checks if the collected items contain the given _idVal_ and removes it if it is found, will raise a `KeyError` if the item is not found

    # Parameters

    _idVal_ : `str`

    > The removed id string
    """
    for i in self:
        if i.id == idVal:
            self._collection.remove(i)
            return
    raise KeyError("A Record with the ID '{}' was not found in the RecordCollection: '{}'.".format(idVal, self))
[2][SEP1][None][For][If][None][Return][SEP2][1][2,3][4,1][][][SEP3][0][0][0][2][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_FOR_NAME_STORE_NAME_LOAD_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_EQ_NAME_LOAD_EXPR_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_RETURN_RAISE_CALL_NAME_LOAD_CALL_ATTRIBUTE_CONSTANT_LOAD_NAME_LOAD_NAME_LOAD
def j9urlGenerator(nameDict=False):
    """How to get all the urls for the WOS Journal Title Abbreviations. Each varies by only a few characters. These are the urls currently in use; they may change.

    They are of the form:

    > "https://images.webofknowledge.com/images/help/WOS/{VAL}_abrvjt.html"

    > Where {VAL} is a capital letter or the string "0-9"

    # Returns

    `list[str]`

    > A list of all the url's strings
    """
    start = "https://images.webofknowledge.com/images/help/WOS/"
    end = "_abrvjt.html"
    if nameDict:
        urls = {"0-9": start + "0-9" + end}
        for c in string.ascii_uppercase:
            urls[c] = start + c + end
    else:
        urls = [start + "0-9" + end]
        for c in string.ascii_uppercase:
            urls.append(start + c + end)
    return urls
[1][SEP1][If][None][None][For][For][None][Return][None][SEP2][1,2][3][4][5,6][7,6][3][][4][SEP3][1][0][0][0][0][0][0][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_ASSIGN_NAME_STORE_CONSTANT_IF_NAME_LOAD_ASSIGN_NAME_STORE_DICT_CONSTANT_BINOP_BINOP_NAME_LOAD_ADD_CONSTANT_ADD_NAME_LOAD_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_BINOP_BINOP_NAME_LOAD_ADD_NAME_LOAD_ADD_NAME_LOAD_ASSIGN_NAME_STORE_LIST_BINOP_BINOP_NAME_LOAD_ADD_CONSTANT_ADD_NAME_LOAD_LOAD_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_BINOP_NAME_LOAD_ADD_NAME_LOAD_ADD_NAME_LOAD_RETURN_NAME_LOAD
def _filter_pending_updates(self):
    """
    Return all the updates that need to be applied, from a list of all the updates that were called while the hold was active.

    This method is meant to be overridden by subclasses that want to customize how held updates are applied.

    The `self._pending_updates` member variable is a list containing a (method, args, kwargs) tuple for each update that was called while updates were being held. This list is in the order that the updates were actually called, and any updates that were called more than once will appear in this list more than once.

    This method should yield or return a list of the tuples in the same format representing the updates that should be applied, in the order they should be applied.

    The default implementation filters out duplicate updates without changing their order. In cases where it matters, the last call to each update is used to determine the order.
    """
    from more_itertools import unique_everseen as unique
    yield from reversed(list(unique(reversed(self._pending_updates))))
[1][SEP1][None][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_IMPORTFROM_ALIAS_EXPR_YIELDFROM_CALL_NAME_LOAD_CALL_NAME_LOAD_CALL_NAME_LOAD_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD
def cli_main(pid, include_greenlet, debugger, verbose):
    '''Print stack of python process.

    $ pystack <pid>
    '''
    try:
        print_stack(pid, include_greenlet, debugger, verbose)
    except DebuggerNotFound as e:
        click.echo('DebuggerNotFound: %s' % e.args[0], err=True)
        click.get_current_context().exit(1)
[4][SEP1][Try][None][None][SEP2][1,2][][][SEP3][0][1][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_EXPR_CONSTANT_TRY_EXPR_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_BINOP_CONSTANT_MOD_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LOAD_KEYWORD_CONSTANT_EXPR_CALL_ATTRIBUTE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_CONSTANT
def validate_email(addr):
    """Validate an email address.

    This function raises ``ValueError`` if the email address is not valid.

    >>> validate_email('foo@bar.com')
    'foo@bar.com'
    >>> validate_email('foo@bar com')
    Traceback (most recent call last):
        ...
    ValueError: Invalid domain: bar com
    """
    if '@' not in addr:
        raise ValueError('Invalid email address: %s' % addr)
    node, domain = addr.split('@', 1)
    try:
        domain = idna.encode(force_text(domain))
    except idna.core.IDNAError:
        raise ValueError('Invalid domain: %s' % domain)
    return '%s@%s' % (node, force_text(domain))
[1][SEP1][If][None][Try][None][None][Return][SEP2][1,2][][3,4][5][][][SEP3][0][1][1][2][1][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_IF_COMPARE_CONSTANT_NOTIN_NAME_LOAD_RAISE_CALL_NAME_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_ASSIGN_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_TRY_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_NAME_LOAD_NAME_LOAD_EXCEPTHANDLER_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_RAISE_CALL_NAME_LOAD_BINOP_CONSTANT_MOD_NAME_LOAD_RETURN_BINOP_CONSTANT_MOD_TUPLE_NAME_LOAD_CALL_NAME_LOAD_NAME_LOAD_LOAD
def bundle(self):
    """A list of any parent CAs, including this CA.

    The list is ordered so the Root CA will be the first.
    """
    ca = self
    bundle = [ca]
    while ca.parent is not None:
        bundle.append(ca.parent)
        ca = ca.parent
    return bundle
[1][SEP1][None][While][None][Return][SEP2][1][2,3][1][][SEP3][0][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_NAME_LOAD_ASSIGN_NAME_STORE_LIST_NAME_LOAD_LOAD_WHILE_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_ISNOT_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_NAME_LOAD
def include_theme_files(self, fragment):
    """
    Gets theme configuration and renders theme css into fragment
    """
    theme = self.get_theme()
    if not theme or 'package' not in theme:
        return

    theme_package, theme_files = theme.get('package', None), theme.get('locations', [])
    resource_loader = ResourceLoader(theme_package)
    for theme_file in theme_files:
        fragment.add_css(resource_loader.load_unicode(theme_file))
[2][SEP1][If][Return][None][For][None][SEP2][1,2][][3][4][3][SEP3][1][0][3][0][2]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_IF_BOOLOP_OR_UNARYOP_NOT_NAME_LOAD_COMPARE_CONSTANT_NOTIN_NAME_LOAD_RETURN_ASSIGN_TUPLE_NAME_STORE_NAME_STORE_STORE_TUPLE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_LIST_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD
def is_requirement(line):
    """
    Return True if the requirement line is a package requirement;
    that is, it is not blank, a comment, a URL, or an included file.
    """
    return not (
        line == '' or
        line.startswith('-r') or
        line.startswith('#') or
        line.startswith('-e') or
        line.startswith('git+')
    )
[1][SEP1][Return][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_RETURN_UNARYOP_NOT_BOOLOP_OR_COMPARE_NAME_LOAD_EQ_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT
def _find_content(self, cont_name):
    """Search for a content_name in the content data; if not found, the parent is searched."""
    try:
        a = self.content_data[cont_name]
        return a
    except KeyError:
        if self.parent:
            return self.parent._find_content(cont_name)
        else:
            # Fallback for no content (Raise NoContent?)
            return ""
[2][SEP1][Try][Return][If][Return][Return][SEP2][1,2][][3,4][][][SEP3][0][0][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_TRY_ASSIGN_NAME_STORE_SUBSCRIPT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_LOAD_RETURN_NAME_LOAD_EXCEPTHANDLER_NAME_LOAD_IF_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_RETURN_CONSTANT
def read_config(name):
    """
    Read a configuration value by name.
    """
    name = name.lower()
    conf_list = _read_config_list()
    for conf in conf_list:
        if conf.startswith(name):
            return conf.split('=')[1].split('#')[0].strip()
    return None
[1][SEP1][None][For][If][Return][Return][SEP2][1][2,3][4,1][][][SEP3][2][0][1][0][3]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_NAME_LOAD_FOR_NAME_STORE_NAME_LOAD_IF_CALL_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_RETURN_CALL_ATTRIBUTE_SUBSCRIPT_CALL_ATTRIBUTE_SUBSCRIPT_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_LOAD_CONSTANT_CONSTANT_LOAD_LOAD_RETURN_CONSTANT
def _dict_of_results(self):
    """
    Get the dictionary representation of results

    :return: dict (str -> dict (str -> str))
    """
    result_json = {}
    result_list = []
    for r in self.results:
        result_list.append({
            'name': r.check_name,
            'ok': r.ok,
            'status': r.status,
            'description': r.description,
            'message': r.message,
            'reference_url': r.reference_url,
            'logs': r.logs,
        })
    result_json["checks"] = result_list
    return result_json
[1][SEP1][None][For][None][Return][SEP2][1][2,3][1][][SEP3][0][0][1][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_ASSIGN_NAME_STORE_LIST_LOAD_FOR_NAME_STORE_ATTRIBUTE_NAME_LOAD_LOAD_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_DICT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_CONSTANT_STORE_NAME_LOAD_RETURN_NAME_LOAD
def ostree_path(self):
    """ ostree repository -- content """
    if self._ostree_path is None:
        self._ostree_path = os.path.join(self.tmpdir, "ostree-repo")
        subprocess.check_call(["ostree", "init", "--mode", "bare-user-only",
                               "--repo", self._ostree_path])
    return self._ostree_path
[1][SEP1][If][None][Return][SEP2][1,2][2][][SEP3][0][2][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_IF_COMPARE_ATTRIBUTE_NAME_LOAD_LOAD_IS_CONSTANT_ASSIGN_ATTRIBUTE_NAME_LOAD_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_LIST_CONSTANT_CONSTANT_CONSTANT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_RETURN_ATTRIBUTE_NAME_LOAD_LOAD
def quick_add(self, text, note=None, reminder=None):
    """Add a task using the 'Quick Add Task' syntax.

    :param text: The text of the task that is parsed. A project
        name starts with the `#` character, a label starts with a
        `@` and an assignee starts with a `+`.
    :type text: str
    :param note: The content of the note.
    :type note: str
    :param reminder: The date of the reminder, added in free form text.
    :type reminder: str
    :return: The added task.
    :rtype: :class:`pytodoist.todoist.Task`

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> task = user.quick_add('Install Pytodoist #personal @app')
    >>> print(task.content)
    Install PyTodoist
    """
    response = API.quick_add(self.api_token, text, note=note, reminder=reminder)
    _fail_if_contains_errors(response)
    task_json = response.json()
    return Task(task_json, self)
[4][SEP1][Return][SEP2][][SEP3][4]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_KEYWORD_NAME_LOAD_KEYWORD_NAME_LOAD_EXPR_CALL_NAME_LOAD_NAME_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_RETURN_CALL_NAME_LOAD_NAME_LOAD_NAME_LOAD
def delete_collaborator(self, email):
    """Remove a collaborating user from the shared project.

    :param email: The collaborator's email address.
    :type email: str

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> project = user.get_project('PyTodoist')
    >>> project.delete_collaborator('jane.doe@gmail.com')
    """
    args = {
        'project_id': self.id,
        'email': email,
    }
    _perform_command(self.owner, 'delete_collaborator', args)
[2][SEP1][None][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_DICT_CONSTANT_CONSTANT_ATTRIBUTE_NAME_LOAD_LOAD_NAME_LOAD_EXPR_CALL_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD
def rfind(self, sub, start=None, end=None):
    """Return the highest index where substring sub is found, such that sub is contained within string[start:end]. Optional arguments start and end are interpreted as in slice notation.

    :param str sub: Substring to search.
    :param int start: Beginning position.
    :param int end: Stop comparison at this position.
    """
    return self.value_no_colors.rfind(sub, start, end)
[4][SEP1][Return][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_ARG_CONSTANT_CONSTANT_EXPR_CONSTANT_RETURN_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_NAME_LOAD_NAME_LOAD_NAME_LOAD
def white(cls, string, auto=False):
    """Color-code entire string.

    :param str string: String to colorize.
    :param bool auto: Enable auto-color (dark/light terminal).

    :return: Class instance for colorized string.
    :rtype: Color
    """
    return cls.colorize('white', string, auto=auto)
[3][SEP1][Return][SEP2][][SEP3][1]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_ARG_CONSTANT_EXPR_CONSTANT_RETURN_CALL_ATTRIBUTE_NAME_LOAD_LOAD_CONSTANT_NAME_LOAD_KEYWORD_NAME_LOAD
def linked_parameters(self):
    """
    Get a dictionary with all parameters in this model in a linked status. A parameter is in a linked status if it is linked to another parameter (i.e. it is forced to have the same value of the other parameter), or if it is linked with another parameter or an independent variable through a law.

    :return: dictionary of linked parameters
    """
    # Refresh the list
    self._update_parameters()

    # Filter selecting only free parameters
    linked_parameter_dictionary = collections.OrderedDict()

    for parameter_name, parameter in self._parameters.iteritems():
        if parameter.has_auxiliary_variable():
            linked_parameter_dictionary[parameter_name] = parameter

    return linked_parameter_dictionary
[1][SEP1][None][For][If][Return][None][SEP2][1][2,3][4,1][][1][SEP3][2][1][1][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_EXPR_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_FOR_TUPLE_NAME_STORE_NAME_STORE_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_IF_CALL_ATTRIBUTE_NAME_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_NAME_LOAD_STORE_NAME_LOAD_RETURN_NAME_LOAD
def parameters(self):
    """
    Returns a dictionary of all parameters for this source. We use the parameter path as the key because it's guaranteed to be unique, unlike the parameter name.

    :return:
    """
    all_parameters = collections.OrderedDict()

    for component in self._components.values():
        for par in component.shape.parameters.values():
            all_parameters[par.path] = par

    return all_parameters
[1][SEP1][None][For][For][Return][None][SEP2][1][2,3][4,1][][2][SEP3][1][1][1][0][0]
MODULE_FUNCTIONDEF_ARGUMENTS_ARG_EXPR_CONSTANT_ASSIGN_NAME_STORE_CALL_ATTRIBUTE_NAME_LOAD_LOAD_FOR_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_FOR_NAME_STORE_CALL_ATTRIBUTE_ATTRIBUTE_ATTRIBUTE_NAME_LOAD_LOAD_LOAD_LOAD_ASSIGN_SUBSCRIPT_NAME_LOAD_ATTRIBUTE_NAME_LOAD_LOAD_STORE_NAME_LOAD_RETURN_NAME_LOAD

SRC-AST-CFG

This dataset was created from CodeSearchNet; it contains triplets of (function, AST, CFG).

It was made for training deep learning models to associate these different representations of the same code.
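
For reference, a minimal sketch of reading the triplets with the Hugging Face datasets library; the hub path below is a placeholder, not the dataset's actual ID:

    from datasets import load_dataset

    # "user/src-ast-cfg" is a hypothetical hub path -- substitute the real dataset ID.
    data = load_dataset("user/src-ast-cfg")

    sample = data["train"][0]
    print(sample["src"])  # the Python function, as text
    print(sample["ast"])  # linearized AST node-type string
    print(sample["cfg"])  # encoded control-flow graph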

The code used to build it can be found here.
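
The ast strings above read as a pre-order traversal of Python's ast node class names, upper-cased and joined with underscores (expression-context nodes such as Load/Store included). A minimal sketch of that convention, assuming this is how the field was produced:

    import ast

    def linearize(node):
        # Pre-order walk over the AST, emitting upper-cased node class
        # names joined by underscores (e.g. MODULE_FUNCTIONDEF_...).
        parts = [type(node).__name__.upper()]
        for child in ast.iter_child_nodes(node):
            parts.append(linearize(child))
        return "_".join(parts)

    source = "def findArgs(args, prefixes):\n    return args"
    print(linearize(ast.parse(source)))
    # MODULE_FUNCTIONDEF_ARGUMENTS_ARG_ARG_RETURN_NAME_LOAD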

Size

#train = 278620 samples

#test = 15169 samples

#validation = 15566 samples

Contact

e-mail: mrochkoulets@gmail.com
