idx: int64 (0 to 251k)
question: string (lengths 53 to 3.53k)
target: string (lengths 5 to 1.23k)
len_question: int64 (20 to 893)
len_target: int64 (3 to 238)
100
def dump_stats ( myStats ) : print ( "\n----%s PYTHON PING Statistics----" % ( myStats . thisIP ) ) if myStats . pktsSent > 0 : myStats . fracLoss = ( myStats . pktsSent - myStats . pktsRcvd ) / myStats . pktsSent print ( ( "%d packets transmitted, %d packets received, " "%0.1f%% packet loss" ) % ( myStats . pktsSent , myStats . pktsRcvd , 100.0 * myStats . fracLoss ) ) if myStats . pktsRcvd > 0 : print ( "round-trip (ms) min/avg/max = %d/%0.1f/%d" % ( myStats . minTime , myStats . totTime / myStats . pktsRcvd , myStats . maxTime ) ) print ( "" ) return
Show stats when pings are done
214
7
101
def updatable ( self ) : if self . latest_version > self . current_version : updatable_version = self . latest_version else : updatable_version = False return updatable_version
Is the bootstrap-py package updatable?
43
9
102
def show_message ( self ) : print ( 'current version: {current_version}\n' 'latest version : {latest_version}' . format ( current_version = self . current_version , latest_version = self . latest_version ) )
Show message with the current and latest versions.
55
5
103
def condense_otus ( otuF , nuniqueF ) : uniqueOTUs = set ( ) nuOTUs = { } # parse non-unique otu matrix for line in nuniqueF : line = line . split ( ) uOTU = line [ 0 ] for nuOTU in line [ 1 : ] : nuOTUs [ nuOTU ] = uOTU uniqueOTUs . add ( uOTU ) otuFilter = defaultdict ( list ) # parse otu sequence file for line in otuF : line = line . split ( ) otuID , seqIDs = line [ 0 ] , line [ 1 : ] if otuID in uniqueOTUs : otuFilter [ otuID ] . extend ( seqIDs ) elif otuID in nuOTUs : otuFilter [ nuOTUs [ otuID ] ] . extend ( seqIDs ) return otuFilter
Traverse the input otu-sequence file, collect the non-unique OTU IDs, and file the sequences associated with them under the unique OTU ID as defined by the input matrix.
194
37
104
def rna_bases ( rna_cov , scaffold , bases , line ) : start = int ( line [ 3 ] ) stop = start + bases - 1 if scaffold not in rna_cov : return rna_cov for pos in rna_cov [ scaffold ] [ 2 ] : ol = get_overlap ( [ start , stop ] , pos ) rna_cov [ scaffold ] [ 0 ] += ol return rna_cov
determine if read overlaps with rna; if so, count bases
105
14
105
def parse_s2bins ( s2bins ) : s2b = { } b2s = { } for line in s2bins : line = line . strip ( ) . split ( ) s , b = line [ 0 ] , line [ 1 ] if 'UNK' in b : continue if len ( line ) > 2 : g = ' ' . join ( line [ 2 : ] ) else : g = 'n/a' b = '%s\t%s' % ( b , g ) s2b [ s ] = b if b not in b2s : b2s [ b ] = [ ] b2s [ b ] . append ( s ) return s2b , b2s
parse ggKbase scaffold-to-bin mapping: scaffolds-to-bins and bins-to-scaffolds
157
26
106
def filter_missing_rna ( s2bins , bins2s , rna_cov ) : for bin , scaffolds in list ( bins2s . items ( ) ) : c = 0 for s in scaffolds : if s in rna_cov : c += 1 if c == 0 : del bins2s [ bin ] for scaffold , bin in list ( s2bins . items ( ) ) : if bin not in bins2s : del s2bins [ scaffold ] return s2bins , bins2s
remove any bins that don't have 16S
118
9
107
def calc_bin_cov ( scaffolds , cov ) : bases = sum ( [ cov [ i ] [ 0 ] for i in scaffolds if i in cov ] ) length = sum ( [ cov [ i ] [ 1 ] for i in scaffolds if i in cov ] ) if length == 0 : return 0 return float ( float ( bases ) / float ( length ) )
calculate bin coverage
80
5
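A tiny worked example of the bin-coverage calculation in the calc_bin_cov entry above, assuming cov maps each scaffold to (mapped bases, scaffold length); the scaffold names and numbers are invented for illustration.

# hypothetical coverage dictionary: scaffold -> (mapped bases, length)
cov = {'scaffold_1': (5000, 1000), 'scaffold_2': (2500, 500)}

def calc_bin_cov(scaffolds, cov):
    # sum mapped bases and lengths over the bin's scaffolds, skipping missing ones
    bases = sum(cov[s][0] for s in scaffolds if s in cov)
    length = sum(cov[s][1] for s in scaffolds if s in cov)
    return 0 if length == 0 else bases / length

print(calc_bin_cov(['scaffold_1', 'scaffold_2', 'missing'], cov))  # 5.0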
108
def clean ( self ) : # First make sure the super's clean method is called upon. super ( TranslationFormSet , self ) . clean ( ) if settings . HIDE_LANGUAGE : return if len ( self . forms ) > 0 : # If a default language has been provided, make sure a translation # is available if settings . DEFAULT_LANGUAGE and not any ( self . errors ) : # Don't bother validating the formset unless each form is # valid on its own. Reference: # http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation for form in self . forms : language_code = form . cleaned_data . get ( 'language_code' , None ) if language_code == settings . DEFAULT_LANGUAGE : # All is good, don't bother checking any further return raise forms . ValidationError ( _ ( 'No translation provided for default language \'%s\'.' ) % settings . DEFAULT_LANGUAGE ) else : raise forms . ValidationError ( _ ( 'At least one translation should be provided.' ) )
Make sure at least one translation has been filled in. If a default language has been specified, make sure that it exists amongst the translations.
250
28
109
def _get_default_language ( self ) : assert hasattr ( self , 'available_languages' ) , 'No available languages have been generated.' assert len ( self . available_languages ) > 0 , 'No available languages to select from.' if ( settings . DEFAULT_LANGUAGE and settings . DEFAULT_LANGUAGE in self . available_languages ) or ( 'language_code' not in self . form . base_fields ) : # Default language still available self . available_languages . remove ( settings . DEFAULT_LANGUAGE ) return settings . DEFAULT_LANGUAGE else : # Select the first item and return it return self . available_languages . pop ( 0 )
If a default language has been set and is still available in self.available_languages, return it and remove it from the list.
157
27
110
def _construct_form ( self , i , * * kwargs ) : if not settings . HIDE_LANGUAGE : self . _construct_available_languages ( ) form = super ( TranslationFormSet , self ) . _construct_form ( i , * * kwargs ) if settings . HIDE_LANGUAGE : form . instance . language_code = settings . DEFAULT_LANGUAGE else : language_code = form . instance . language_code if language_code : logger . debug ( u'Removing translation choice %s for instance %s' u' in form %d' , language_code , form . instance , i ) self . available_languages . remove ( language_code ) else : initial_language_code = self . _get_default_language ( ) logger . debug ( u'Preselecting language code %s for form %d' , initial_language_code , i ) form . initial [ 'language_code' ] = initial_language_code return form
Construct the form, overriding the initial value for language_code.
221
12
111
def fq_merge ( R1 , R2 ) : c = itertools . cycle ( [ 1 , 2 , 3 , 4 ] ) for r1 , r2 in zip ( R1 , R2 ) : n = next ( c ) if n == 1 : pair = [ [ ] , [ ] ] pair [ 0 ] . append ( r1 . strip ( ) ) pair [ 1 ] . append ( r2 . strip ( ) ) if n == 4 : yield pair
merge separate fastq files
103
6
112
def _build_circle ( self ) : total_weight = 0 for node in self . _nodes : total_weight += self . _weights . get ( node , 1 ) for node in self . _nodes : weight = self . _weights . get ( node , 1 ) ks = math . floor ( ( 40 * len ( self . _nodes ) * weight ) / total_weight ) for i in xrange ( 0 , int ( ks ) ) : b_key = self . _md5_digest ( '%s-%s-salt' % ( node , i ) ) for l in xrange ( 0 , 4 ) : key = ( ( b_key [ 3 + l * 4 ] << 24 ) | ( b_key [ 2 + l * 4 ] << 16 ) | ( b_key [ 1 + l * 4 ] << 8 ) | b_key [ l * 4 ] ) self . _hashring [ key ] = node self . _sorted_keys . append ( key ) self . _sorted_keys . sort ( )
Creates hash ring .
232
5
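To make the _build_circle entry above easier to follow, here is a minimal, self-contained sketch of the same consistent-hashing idea with equal node weights, using hashlib and bisect; build_ring, get_node, and the replica count of 40 are illustrative names and choices, not part of the original code.

import hashlib
import bisect

def build_ring(nodes, replicas=40):
    # map md5-derived integer keys to nodes on a sorted ring
    ring, sorted_keys = {}, []
    for node in nodes:
        for i in range(replicas):
            digest = hashlib.md5(('%s-%s' % (node, i)).encode()).digest()
            # pack 4 bytes of the digest into one integer key
            key = int.from_bytes(digest[:4], 'little')
            ring[key] = node
            sorted_keys.append(key)
    sorted_keys.sort()
    return ring, sorted_keys

def get_node(ring, sorted_keys, item):
    # find the first ring position clockwise from the item's hash
    h = int.from_bytes(hashlib.md5(item.encode()).digest()[:4], 'little')
    idx = bisect.bisect(sorted_keys, h) % len(sorted_keys)
    return ring[sorted_keys[idx]]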
113
def _gen_key ( self , key ) : b_key = self . _md5_digest ( key ) return self . _hashi ( b_key , lambda x : x )
Return a long integer for a given key that represents its place on the hash ring.
42
16
114
def has_custom_image ( user_context , app_id ) : possible_paths = _valid_custom_image_paths ( user_context , app_id ) return any ( map ( os . path . exists , possible_paths ) )
Returns True if there exists a custom image for app_id .
56
13
115
def get_custom_image ( user_context , app_id ) : possible_paths = _valid_custom_image_paths ( user_context , app_id ) existing_images = filter ( os . path . exists , possible_paths ) if len ( existing_images ) > 0 : return existing_images [ 0 ]
Returns the custom image associated with a given app . If there are multiple candidate images on disk one is chosen arbitrarily .
73
23
116
def set_custom_image ( user_context , app_id , image_path ) : if image_path is None : return False if not os . path . exists ( image_path ) : return False ( root , ext ) = os . path . splitext ( image_path ) if not is_valid_extension ( ext ) : # TODO: Maybe log that this happened? return False # If we don't remove the old image then theres no guarantee that Steam will # show our new image when it launches. if has_custom_image ( user_context , app_id ) : img = get_custom_image ( user_context , app_id ) assert ( img is not None ) os . remove ( img ) # Set the new image parent_dir = paths . custom_images_directory ( user_context ) new_path = os . path . join ( parent_dir , app_id + ext ) shutil . copyfile ( image_path , new_path ) return True
Sets the custom image for app_id to be the image located at image_path. If there already exists a custom image for app_id, it will be deleted. Returns True if setting the image was successful.
214
44
117
def from_file ( cls , fname , form = None ) : try : tg = TableGroup . from_file ( fname ) opfname = None except JSONDecodeError : tg = TableGroup . fromvalue ( cls . MD ) opfname = fname if len ( tg . tables ) != 1 : raise ValueError ( 'profile description must contain exactly one table' ) metadata = tg . common_props metadata . update ( fname = Path ( fname ) , form = form ) return cls ( * [ { k : None if ( k != cls . GRAPHEME_COL and v == cls . NULL ) else v for k , v in d . items ( ) } for d in tg . tables [ 0 ] . iterdicts ( fname = opfname ) ] , * * metadata )
Read an orthography profile from a metadata file or a default tab - separated profile file .
185
18
118
def from_text ( cls , text , mapping = 'mapping' ) : graphemes = Counter ( grapheme_pattern . findall ( text ) ) specs = [ OrderedDict ( [ ( cls . GRAPHEME_COL , grapheme ) , ( 'frequency' , frequency ) , ( mapping , grapheme ) ] ) for grapheme , frequency in graphemes . most_common ( ) ] return cls ( * specs )
Create a Profile instance from the Unicode graphemes found in text .
102
14
119
def split_fasta ( f , id2f ) : opened = { } for seq in parse_fasta ( f ) : id = seq [ 0 ] . split ( '>' ) [ 1 ] . split ( ) [ 0 ] if id not in id2f : continue fasta = id2f [ id ] if fasta not in opened : opened [ fasta ] = '%s.fa' % fasta seq [ 1 ] += '\n' with open ( opened [ fasta ] , 'a+' ) as f_out : f_out . write ( '\n' . join ( seq ) )
split fasta file into separate fasta files based on list of scaffolds that belong to each separate file
135
21
120
def _is_user_directory ( self , pathname ) : fullpath = os . path . join ( self . userdata_location ( ) , pathname ) # SteamOS puts a directory named 'anonymous' in the userdata directory # by default. Since we assume that pathname is a userID, ignore any name # that can't be converted to a number return os . path . isdir ( fullpath ) and pathname . isdigit ( )
Check whether pathname is a valid user data directory
97
10
121
def local_users ( self ) : # Any users on the machine will have an entry inside of the userdata # folder. As such, the easiest way to find a list of all users on the # machine is to just list the folders inside userdata userdirs = filter ( self . _is_user_directory , os . listdir ( self . userdata_location ( ) ) ) # Exploits the fact that the directory is named the same as the user id return map ( lambda userdir : user . User ( self , int ( userdir ) ) , userdirs )
Returns an array of user ids for users on the filesystem
123
12
122
def _calculate_degree_days ( temperature_equivalent , base_temperature , cooling = False ) : if cooling : ret = temperature_equivalent - base_temperature else : ret = base_temperature - temperature_equivalent # degree days cannot be negative ret [ ret < 0 ] = 0 prefix = 'CDD' if cooling else 'HDD' ret . name = '{}_{}' . format ( prefix , base_temperature ) return ret
Calculates degree days starting with a series of temperature equivalent values
101
13
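A hedged illustration of the degree-day logic in the entry above, assuming the temperature equivalents arrive as a pandas Series; the dates, temperatures, and the 16.5 base are made-up example values.

import pandas as pd

temperature_equivalent = pd.Series([2.0, 8.5, 17.0, 21.3],
                                   index=pd.date_range('2020-01-01', periods=4))
base_temperature = 16.5

# heating degree days: base minus temperature, floored at zero
hdd = (base_temperature - temperature_equivalent).clip(lower=0)
hdd.name = 'HDD_{}'.format(base_temperature)

# cooling degree days flip the subtraction
cdd = (temperature_equivalent - base_temperature).clip(lower=0)
cdd.name = 'CDD_{}'.format(base_temperature)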
123
def status ( self ) : return { self . _acronym_status ( l ) : l for l in self . resp_text . split ( '\n' ) if l . startswith ( self . prefix_status ) }
Development status .
51
3
124
def licenses ( self ) : return { self . _acronym_lic ( l ) : l for l in self . resp_text . split ( '\n' ) if l . startswith ( self . prefix_lic ) }
OSI Approved license .
51
6
125
def licenses_desc ( self ) : return { self . _acronym_lic ( l ) : l . split ( self . prefix_lic ) [ 1 ] for l in self . resp_text . split ( '\n' ) if l . startswith ( self . prefix_lic ) }
Remove prefix .
65
3
126
def _acronym_lic ( self , license_statement ) : pat = re . compile ( r'\(([\w+\W?\s?]+)\)' ) if pat . search ( license_statement ) : lic = pat . search ( license_statement ) . group ( 1 ) if lic . startswith ( 'CNRI' ) : acronym_licence = lic [ : 4 ] else : acronym_licence = lic . replace ( ' ' , '' ) else : acronym_licence = '' . join ( [ w [ 0 ] for w in license_statement . split ( self . prefix_lic ) [ 1 ] . split ( ) ] ) return acronym_licence
Convert license acronym .
148
5
127
def calcMD5 ( path ) : # check that file exists if os . path . exists ( path ) is False : yield False else : command = [ 'md5sum' , path ] p = Popen ( command , stdout = PIPE ) for line in p . communicate ( ) [ 0 ] . splitlines ( ) : yield line . decode ( 'ascii' ) . strip ( ) . split ( ) [ 0 ] p . wait ( ) yield False
calc MD5 based on path
100
7
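The calcMD5 entry above shells out to md5sum; as a rough platform-independent alternative sketch, the digest can be computed with hashlib (calc_md5 and the 1 MiB chunk size are illustrative choices, not from the original).

import hashlib
import os

def calc_md5(path, chunk_size=1 << 20):
    # return the hex MD5 of a file, or False if it does not exist
    if not os.path.exists(path):
        return False
    md5 = hashlib.md5()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()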
128
def wget ( ftp , f = False , exclude = False , name = False , md5 = False , tries = 10 ) : # file name if f is False : f = ftp . rsplit ( '/' , 1 ) [ - 1 ] # downloaded file if it does not already exist # check md5s on server (optional) t = 0 while md5check ( f , ftp , md5 , exclude ) is not True : t += 1 if name is not False : print ( '# downloading:' , name , f ) if exclude is False : command = 'wget -q --random-wait %s' % ( ftp ) else : command = 'wget -q --random-wait -R %s %s' % ( exclude , ftp ) p = Popen ( command , shell = True ) p . communicate ( ) if t >= tries : print ( 'not downloaded:' , name , f ) return [ f , False ] return [ f , True ]
download files with wget
210
5
129
def check ( line , queries ) : line = line . strip ( ) spLine = line . replace ( '.' , ' ' ) . split ( ) matches = set ( spLine ) . intersection ( queries ) if len ( matches ) > 0 : return matches , line . split ( '\t' ) return matches , False
check that at least one of queries is in list l
68
11
130
def entrez ( db , acc ) : c1 = [ 'esearch' , '-db' , db , '-query' , acc ] c2 = [ 'efetch' , '-db' , 'BioSample' , '-format' , 'docsum' ] p1 = Popen ( c1 , stdout = PIPE , stderr = PIPE ) p2 = Popen ( c2 , stdin = p1 . stdout , stdout = PIPE , stderr = PIPE ) return p2 . communicate ( )
search entrez using specified database and accession
125
9
131
def searchAccession ( acc ) : # try genbank file # genome database out , error = entrez ( 'genome' , acc ) for line in out . splitlines ( ) : line = line . decode ( 'ascii' ) . strip ( ) if 'Assembly_Accession' in line or 'BioSample' in line : newAcc = line . split ( '>' ) [ 1 ] . split ( '<' ) [ 0 ] . split ( '.' ) [ 0 ] . split ( ',' ) [ 0 ] if len ( newAcc ) > 0 : return ( True , acc , newAcc ) # nucleotide database out , error = entrez ( 'nucleotide' , acc ) for line in out . splitlines ( ) : line = line . decode ( 'ascii' ) . strip ( ) if 'Assembly_Accession' in line or 'BioSample' in line : newAcc = line . split ( '>' ) [ 1 ] . split ( '<' ) [ 0 ] . split ( '.' ) [ 0 ] . split ( ',' ) [ 0 ] if len ( newAcc ) > 0 : return ( True , acc , newAcc ) # assembly database out , error = entrez ( 'assembly' , acc ) for line in out . splitlines ( ) : line = line . decode ( 'ascii' ) . strip ( ) if 'Assembly_Accession' in line or 'BioSample' in line : newAcc = line . split ( '>' ) [ 1 ] . split ( '<' ) [ 0 ] . split ( '.' ) [ 0 ] . split ( ',' ) [ 0 ] if len ( newAcc ) > 0 : return ( True , acc , newAcc ) for error in error . splitlines ( ) : error = error . decode ( 'ascii' ) . strip ( ) if '500 Can' in error : return ( False , acc , 'no network' ) return ( False , acc , 'efetch failed' )
attempt to use NCBI Entrez to get BioSample ID
431
13
132
def getFTPs ( accessions , ftp , search , exclude , convert = False , threads = 1 , attempt = 1 , max_attempts = 2 ) : info = wget ( ftp ) [ 0 ] allMatches = [ ] for genome in open ( info , encoding = 'utf8' ) : genome = str ( genome ) matches , genomeInfo = check ( genome , accessions ) if genomeInfo is not False : f = genomeInfo [ 0 ] + search Gftp = genomeInfo [ 19 ] Gftp = Gftp + '/' + search allMatches . extend ( matches ) yield ( Gftp , f , exclude , matches ) # print accessions that could not be matched # and whether or not they could be converted (optional) newAccs = [ ] missing = accessions . difference ( set ( allMatches ) ) if convert is True : pool = Pool ( threads ) pool = pool . imap_unordered ( searchAccession , missing ) for newAcc in tqdm ( pool , total = len ( missing ) ) : status , accession , newAcc = newAcc if status is True : newAccs . append ( newAcc ) print ( 'not found:' , accession , '->' , newAcc ) else : for accession in missing : print ( 'not found:' , accession ) # re-try after converting accessions (optional) if len ( newAccs ) > 0 and attempt <= max_attempts : print ( 'convert accession attempt' , attempt ) attempt += 1 for hit in getFTPs ( set ( newAccs ) , ftp , search , exclude , convert , threads = 1 , attempt = attempt ) : yield hit
download genome info from NCBI
368
6
133
def download ( args ) : accessions , infoFTP = set ( args [ 'g' ] ) , args [ 'i' ] search , exclude = args [ 's' ] , args [ 'e' ] FTPs = getFTPs ( accessions , infoFTP , search , exclude , threads = args [ 't' ] , convert = args [ 'convert' ] ) if args [ 'test' ] is True : for genome in FTPs : print ( 'found:' , ';' . join ( genome [ - 1 ] ) , genome [ 0 ] ) return FTPs pool = Pool ( args [ 't' ] ) pool = pool . imap_unordered ( wgetGenome , FTPs ) files = [ ] for f in tqdm ( pool , total = len ( accessions ) ) : files . append ( f ) return files
download genomes from NCBI
186
5
134
def fix_fasta ( fasta ) : for seq in parse_fasta ( fasta ) : seq [ 0 ] = remove_char ( seq [ 0 ] ) if len ( seq [ 1 ] ) > 0 : yield seq
remove pesky characters from fasta file header
49
8
135
def _calc_frames ( stats ) : timings = [ ] callers = [ ] for key , values in iteritems ( stats . stats ) : timings . append ( pd . Series ( key + values [ : - 1 ] , index = timing_colnames , ) ) for caller_key , caller_values in iteritems ( values [ - 1 ] ) : callers . append ( pd . Series ( key + caller_key + caller_values , index = caller_columns , ) ) timings_df = pd . DataFrame ( timings ) callers_df = pd . DataFrame ( callers ) timings_df [ 'filename:funcname' ] = ( timings_df [ 'filename' ] + ':' + timings_df [ 'funcname' ] ) timings_df = timings_df . groupby ( 'filename:funcname' ) . sum ( ) return timings_df , callers_df
Compute a DataFrame summary of a Stats object .
209
11
136
def unmapped ( sam , mates ) : for read in sam : if read . startswith ( '@' ) is True : continue read = read . strip ( ) . split ( ) if read [ 2 ] == '*' and read [ 6 ] == '*' : yield read elif mates is True : if read [ 2 ] == '*' or read [ 6 ] == '*' : yield read for i in read : if i == 'YT:Z:UP' : yield read
get unmapped reads
107
4
137
def parallel ( processes , threads ) : pool = multithread ( threads ) pool . map ( run_process , processes ) pool . close ( ) pool . join ( )
execute jobs in processes using N threads
36
7
138
def define_log_renderer ( fmt , fpath , quiet ) : # it must accept a logger, method_name and event_dict (just like processors) # but must return the rendered string, not a dictionary. # TODO tty logic if fmt : return structlog . processors . JSONRenderer ( ) if fpath is not None : return structlog . processors . JSONRenderer ( ) if sys . stderr . isatty ( ) and not quiet : return structlog . dev . ConsoleRenderer ( ) return structlog . processors . JSONRenderer ( )
the final log processor that structlog requires to render .
127
11
139
def _structlog_default_keys_processor ( logger_class , log_method , event ) : global HOSTNAME if 'id' not in event : event [ 'id' ] = '%s_%s' % ( datetime . utcnow ( ) . strftime ( '%Y%m%dT%H%M%S' ) , uuid . uuid1 ( ) . hex ) if 'type' not in event : event [ 'type' ] = 'log' event [ 'host' ] = HOSTNAME return event
Add unique id, type, and hostname
120
7
140
def define_log_processors ( ) : # these processors should accept logger, method_name and event_dict # and return a new dictionary which will be passed as event_dict to the next one. return [ structlog . processors . TimeStamper ( fmt = "iso" ) , _structlog_default_keys_processor , structlog . stdlib . PositionalArgumentsFormatter ( ) , structlog . processors . StackInfoRenderer ( ) , structlog . processors . format_exc_info , ]
log processors that structlog executes before final rendering
112
9
141
def _configure_logger ( fmt , quiet , level , fpath , pre_hooks , post_hooks , metric_grouping_interval ) : # NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently. level = getattr ( logging , level . upper ( ) ) global _GLOBAL_LOG_CONFIGURED if _GLOBAL_LOG_CONFIGURED : return # since the hooks need to run through structlog, need to wrap them like processors def wrap_hook ( fn ) : @ wraps ( fn ) def processor ( logger , method_name , event_dict ) : fn ( event_dict ) return event_dict return processor processors = define_log_processors ( ) processors . extend ( [ wrap_hook ( h ) for h in pre_hooks ] ) if metric_grouping_interval : processors . append ( metrics_grouping_processor ) log_renderer = define_log_renderer ( fmt , fpath , quiet ) stderr_required = ( not quiet ) pretty_to_stderr = ( stderr_required and ( fmt == "pretty" or ( fmt is None and sys . stderr . isatty ( ) ) ) ) should_inject_pretty_renderer = ( pretty_to_stderr and not isinstance ( log_renderer , structlog . dev . ConsoleRenderer ) ) if should_inject_pretty_renderer : stderr_required = False processors . append ( StderrConsoleRenderer ( ) ) processors . append ( log_renderer ) processors . extend ( [ wrap_hook ( h ) for h in post_hooks ] ) streams = [ ] # we need to use a stream if we are writing to both file and stderr, and both are json if stderr_required : streams . append ( sys . stderr ) if fpath is not None : # TODO handle creating a directory for this log file ? # TODO set mode and encoding appropriately streams . append ( open ( fpath , 'a' ) ) assert len ( streams ) != 0 , "cannot configure logger for 0 streams" stream = streams [ 0 ] if len ( streams ) == 1 else Stream ( * streams ) atexit . register ( stream . close ) # a global level struct log config unless otherwise specified. structlog . configure ( processors = processors , context_class = dict , logger_factory = LevelLoggerFactory ( stream , level = level ) , wrapper_class = BoundLevelLogger , cache_logger_on_first_use = True , ) # TODO take care of removing other handlers stdlib_root_log = logging . getLogger ( ) stdlib_root_log . addHandler ( StdlibStructlogHandler ( ) ) stdlib_root_log . setLevel ( level ) _GLOBAL_LOG_CONFIGURED = True
configures a logger; when required, writes to stderr or a file
630
14
142
def _add_base_info ( self , event_dict ) : f = sys . _getframe ( ) level_method_frame = f . f_back caller_frame = level_method_frame . f_back return event_dict
Instead of using a processor, add basic information like caller, filename, etc. here.
52
14
143
def _proxy_to_logger ( self , method_name , event , * event_args , * * event_kw ) : if isinstance ( event , bytes ) : event = event . decode ( 'utf-8' ) if event_args : event_kw [ 'positional_args' ] = event_args return super ( BoundLevelLogger , self ) . _proxy_to_logger ( method_name , event = event , * * event_kw )
Propagate a method call to the wrapped logger .
103
10
144
def translate ( rect , x , y , width = 1 ) : return ( ( rect [ 0 ] [ 0 ] + x , rect [ 0 ] [ 1 ] + y ) , ( rect [ 1 ] [ 0 ] + x , rect [ 1 ] [ 1 ] + y ) , ( rect [ 2 ] [ 0 ] + x + width , rect [ 2 ] [ 1 ] + y ) , ( rect [ 3 ] [ 0 ] + x + width , rect [ 3 ] [ 1 ] + y ) )
Given four points of a rectangle, translate the rectangle to the specified x and y coordinates and optionally change the width.
108
22
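A short usage sketch for the translate helper above; the corner coordinates and offsets are invented, and the function body is repeated here only so the snippet runs on its own.

# four corners given as (x, y) pairs: top-left, bottom-left, top-right, bottom-right
rect = ((0, 0), (0, 10), (5, 0), (5, 10))

def translate(rect, x, y, width=1):
    return ((rect[0][0] + x, rect[0][1] + y),
            (rect[1][0] + x, rect[1][1] + y),
            (rect[2][0] + x + width, rect[2][1] + y),
            (rect[3][0] + x + width, rect[3][1] + y))

# shift by (3, 4) and widen the right edge by 2
moved = translate(rect, 3, 4, width=2)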
145
def remove_bad ( string ) : remove = [ ':' , ',' , '(' , ')' , ' ' , '|' , ';' , '\'' ] for c in remove : string = string . replace ( c , '_' ) return string
remove problem characters from string
57
5
146
def get_ids ( a ) : a_id = '%s.id.fa' % ( a . rsplit ( '.' , 1 ) [ 0 ] ) a_id_lookup = '%s.id.lookup' % ( a . rsplit ( '.' , 1 ) [ 0 ] ) if check ( a_id ) is True : return a_id , a_id_lookup a_id_f = open ( a_id , 'w' ) a_id_lookup_f = open ( a_id_lookup , 'w' ) ids = [ ] for seq in parse_fasta ( open ( a ) ) : id = id_generator ( ) while id in ids : id = id_generator ( ) ids . append ( id ) header = seq [ 0 ] . split ( '>' ) [ 1 ] name = remove_bad ( header ) seq [ 0 ] = '>%s %s' % ( id , header ) print ( '\n' . join ( seq ) , file = a_id_f ) print ( '%s\t%s\t%s' % ( id , name , header ) , file = a_id_lookup_f ) return a_id , a_id_lookup
make copy of sequences with short identifier
282
7
147
def convert2phylip ( convert ) : out = '%s.phy' % ( convert . rsplit ( '.' , 1 ) [ 0 ] ) if check ( out ) is False : convert = open ( convert , 'rU' ) out_f = open ( out , 'w' ) alignments = AlignIO . parse ( convert , "fasta" ) AlignIO . write ( alignments , out , "phylip" ) return out
convert fasta to phylip because RAxML is ridiculous
100
13
148
def run_iqtree ( phy , model , threads , cluster , node ) : # set ppn based on threads if threads > 24 : ppn = 24 else : ppn = threads tree = '%s.treefile' % ( phy ) if check ( tree ) is False : if model is False : model = 'TEST' dir = os . getcwd ( ) command = 'iqtree-omp -s %s -m %s -nt %s -quiet' % ( phy , model , threads ) if cluster is False : p = Popen ( command , shell = True ) else : if node is False : node = '1' qsub = 'qsub -l nodes=%s:ppn=%s -m e -N iqtree' % ( node , ppn ) command = 'cd /tmp; mkdir iqtree; cd iqtree; cp %s/%s .; %s; mv * %s/; rm -r ../iqtree' % ( dir , phy , command , dir ) re_call = 'cd %s; %s --no-fast --iq' % ( dir . rsplit ( '/' , 1 ) [ 0 ] , ' ' . join ( sys . argv ) ) p = Popen ( 'echo "%s;%s" | %s' % ( command , re_call , qsub ) , shell = True ) p . communicate ( ) return tree
run IQ - Tree
315
4
149
def fix_tree ( tree , a_id_lookup , out ) : if check ( out ) is False and check ( tree ) is True : tree = open ( tree ) . read ( ) for line in open ( a_id_lookup ) : id , name , header = line . strip ( ) . split ( '\t' ) tree = tree . replace ( id + ':' , name + ':' ) out_f = open ( out , 'w' ) print ( tree . strip ( ) , file = out_f ) return out
get the names for sequences in the raxml tree
118
11
150
def create_cluster ( settings ) : # Pull in our client settings = copy . deepcopy ( settings ) backend = settings . pop ( 'engine' , settings . pop ( 'backend' , None ) ) if isinstance ( backend , basestring ) : Conn = import_string ( backend ) elif backend : Conn = backend else : raise KeyError ( 'backend' ) # Pull in our cluster cluster = settings . pop ( 'cluster' , None ) if not cluster : Cluster = Conn . get_cluster ( ) elif isinstance ( cluster , basestring ) : Cluster = import_string ( cluster ) else : Cluster = cluster # Pull in our router router = settings . pop ( 'router' , None ) if not router : Router = BaseRouter elif isinstance ( router , basestring ) : Router = import_string ( router ) else : Router = router # Build the connection cluster return Cluster ( router = Router , backend = Conn , * * settings )
Creates a new Nydus cluster from the given settings .
211
13
151
def _get_translation ( self , field , code ) : if not code in self . _translation_cache : translations = self . translations . select_related ( ) logger . debug ( u'Matched with field %s for language %s. Attempting lookup.' , field , code ) try : translation_obj = translations . get ( language_code = code ) except ObjectDoesNotExist : translation_obj = None self . _translation_cache [ code ] = translation_obj logger . debug ( u'Translation not found in cache.' ) else : logger . debug ( u'Translation found in cache.' ) # Get the translation from the cache translation_obj = self . _translation_cache . get ( code ) # If this is none, it means that a translation does not exist # It is important to cache this one as well if not translation_obj : raise ObjectDoesNotExist field_value = getattr ( translation_obj , field ) logger . debug ( u'Found translation object %s, returning value %s.' , translation_obj , field_value ) return field_value
Gets the translation of a specific field for a specific language code .
230
14
152
def unicode_wrapper ( self , property , default = ugettext ( 'Untitled' ) ) : # TODO: Test coverage! try : value = getattr ( self , property ) except ValueError : logger . warn ( u'ValueError rendering unicode for %s object.' , self . _meta . object_name ) value = None if not value : value = default return value
Wrapper to allow for easy unicode representation of an object by the specified property . If this wrapper is not able to find the right translation of the specified property it will return the default value instead .
82
40
153
def strip_inserts ( fasta ) : for seq in parse_fasta ( fasta ) : seq [ 1 ] = '' . join ( [ b for b in seq [ 1 ] if b == '-' or b . isupper ( ) ] ) yield seq
remove insertion columns from aligned fasta file
56
8
154
def transform ( self , word , column = Profile . GRAPHEME_COL , error = errors . replace ) : assert self . op , 'method can only be called with orthography profile.' if column != Profile . GRAPHEME_COL and column not in self . op . column_labels : raise ValueError ( "Column {0} not found in profile." . format ( column ) ) word = self . op . tree . parse ( word , error ) if column == Profile . GRAPHEME_COL : return word out = [ ] for token in word : try : target = self . op . graphemes [ token ] [ column ] except KeyError : target = self . _errors [ 'replace' ] ( token ) if target is not None : if isinstance ( target , ( tuple , list ) ) : out . extend ( target ) else : out . append ( target ) return out
Transform a string's graphemes into the mappings given in a different column in the orthography profile.
192
22
155
def rules ( self , word ) : return self . _rules . apply ( word ) if self . _rules else word
Function to tokenize input string and return output of str with ortho rules applied .
25
17
156
def combine_modifiers ( self , graphemes ) : result = [ ] temp = "" count = len ( graphemes ) for grapheme in reversed ( graphemes ) : count -= 1 if len ( grapheme ) == 1 and unicodedata . category ( grapheme ) == "Lm" and not ord ( grapheme ) in [ 712 , 716 ] : temp = grapheme + temp # hack for the cases where a space modifier is the first character in the # string if count == 0 : result [ - 1 ] = temp + result [ - 1 ] continue # pragma: no cover # catch and repair stress marks if len ( grapheme ) == 1 and ord ( grapheme ) in [ 712 , 716 ] : result [ - 1 ] = grapheme + result [ - 1 ] temp = "" continue # combine contour tone marks (non-accents) if len ( grapheme ) == 1 and unicodedata . category ( grapheme ) == "Sk" : if len ( result ) == 0 : result . append ( grapheme ) temp = "" continue else : if unicodedata . category ( result [ - 1 ] [ 0 ] ) == "Sk" : result [ - 1 ] = grapheme + result [ - 1 ] temp = "" continue result . append ( grapheme + temp ) temp = "" # last check for tie bars segments = result [ : : - 1 ] i = 0 r = [ ] while i < len ( segments ) : # tie bars if ord ( segments [ i ] [ - 1 ] ) in [ 865 , 860 ] : r . append ( segments [ i ] + segments [ i + 1 ] ) i += 2 else : r . append ( segments [ i ] ) i += 1 return r
Given a string that is space-delimited on Unicode grapheme clusters, group Unicode modifier letters with their preceding base characters, deal with tie bars, etc.
385
30
157
def parse_catalytic ( insertion , gff ) : offset = insertion [ 'offset' ] GeneStrand = insertion [ 'strand' ] if type ( insertion [ 'intron' ] ) is not str : return gff for intron in parse_fasta ( insertion [ 'intron' ] . split ( '|' ) ) : ID , annot , strand , pos = intron [ 0 ] . split ( '>' ) [ 1 ] . split ( ) Start , End = [ int ( i ) for i in pos . split ( '-' ) ] if strand != GeneStrand : if strand == '+' : strand = '-' else : strand = '+' Start , End = End - 2 , Start - 2 Start , End = abs ( Start + offset ) - 1 , abs ( End + offset ) - 1 gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( 'Rfam' ) gff [ 'feature' ] . append ( 'Catalytic RNA' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s; Name=%s' % ( ID , annot ) ) return gff
parse catalytic RNAs to gff format
324
9
158
def parse_orf ( insertion , gff ) : offset = insertion [ 'offset' ] if type ( insertion [ 'orf' ] ) is not str : return gff for orf in parse_fasta ( insertion [ 'orf' ] . split ( '|' ) ) : ID = orf [ 0 ] . split ( '>' ) [ 1 ] . split ( ) [ 0 ] Start , End , strand = [ int ( i ) for i in orf [ 0 ] . split ( ' # ' ) [ 1 : 4 ] ] if strand == 1 : strand = '+' else : strand = '-' GeneStrand = insertion [ 'strand' ] if strand != GeneStrand : if strand == '+' : strand = '-' else : strand = '+' Start , End = End - 2 , Start - 2 Start , End = abs ( Start + offset ) - 1 , abs ( End + offset ) - 1 annot = orf [ 0 ] . split ( ) [ 1 ] if annot == 'n/a' : annot = 'unknown' gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( 'Prodigal and Pfam' ) gff [ 'feature' ] . append ( 'CDS' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s; Name=%s' % ( ID , annot ) ) return gff
parse ORF to gff format
375
7
159
def parse_insertion ( insertion , gff ) : offset = insertion [ 'offset' ] for ins in parse_fasta ( insertion [ 'insertion sequence' ] . split ( '|' ) ) : strand = insertion [ 'strand' ] ID = ins [ 0 ] . split ( '>' ) [ 1 ] . split ( ) [ 0 ] Start , End = [ int ( i ) for i in ins [ 0 ] . split ( 'gene-pos=' , 1 ) [ 1 ] . split ( ) [ 0 ] . split ( '-' ) ] Start , End = abs ( Start + offset ) , abs ( End + offset ) if strand == '-' : Start , End = End , Start gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( insertion [ 'source' ] ) gff [ 'feature' ] . append ( 'IVS' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) # same as rRNA gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s' % ( ID ) ) return gff
parse insertion to gff format
295
6
160
def parse_rRNA ( insertion , seq , gff ) : offset = insertion [ 'offset' ] strand = insertion [ 'strand' ] for rRNA in parse_masked ( seq , 0 ) [ 0 ] : rRNA = '' . join ( rRNA ) Start = seq [ 1 ] . find ( rRNA ) + 1 End = Start + len ( rRNA ) - 1 if strand == '-' : Start , End = End - 2 , Start - 2 pos = ( abs ( Start + offset ) - 1 , abs ( End + offset ) - 1 ) Start , End = min ( pos ) , max ( pos ) source = insertion [ 'source' ] annot = '%s rRNA' % ( source . split ( 'from' , 1 ) [ 0 ] ) gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( source ) gff [ 'feature' ] . append ( 'rRNA' ) gff [ 'start' ] . append ( Start ) gff [ 'end' ] . append ( End ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'Name=%s' % ( annot ) ) return gff
parse rRNA to gff format
299
7
161
def iTable2GFF ( iTable , fa , contig = False ) : columns = [ '#seqname' , 'source' , 'feature' , 'start' , 'end' , 'score' , 'strand' , 'frame' , 'attribute' ] gff = { c : [ ] for c in columns } for insertion in iTable . iterrows ( ) : insertion = insertion [ 1 ] if insertion [ 'ID' ] not in fa : continue # rRNA strand strand = insertion [ 'sequence' ] . split ( 'strand=' , 1 ) [ 1 ] . split ( ) [ 0 ] # set rRNA positions for reporting features on contig or extracted sequence if contig is True : gene = [ int ( i ) for i in insertion [ 'sequence' ] . split ( 'pos=' , 1 ) [ 1 ] . split ( ) [ 0 ] . split ( '-' ) ] if strand == '-' : offset = - 1 * ( gene [ 1 ] ) else : offset = gene [ 0 ] else : strand = '+' gene = [ 1 , int ( insertion [ 'sequence' ] . split ( 'total-len=' , 1 ) [ 1 ] . split ( ) [ 0 ] ) ] offset = gene [ 0 ] insertion [ 'strand' ] = strand insertion [ 'offset' ] = offset # source for prediction source = insertion [ 'sequence' ] . split ( '::model' , 1 ) [ 0 ] . rsplit ( ' ' , 1 ) [ - 1 ] insertion [ 'source' ] = source # rRNA gene geneAnnot = '%s rRNA gene' % ( source . split ( 'from' , 1 ) [ 0 ] ) geneNum = insertion [ 'sequence' ] . split ( 'seq=' , 1 ) [ 1 ] . split ( ) [ 0 ] gff [ '#seqname' ] . append ( insertion [ 'ID' ] ) gff [ 'source' ] . append ( source ) gff [ 'feature' ] . append ( 'Gene' ) gff [ 'start' ] . append ( gene [ 0 ] ) gff [ 'end' ] . append ( gene [ 1 ] ) gff [ 'score' ] . append ( '.' ) gff [ 'strand' ] . append ( strand ) gff [ 'frame' ] . append ( '.' ) gff [ 'attribute' ] . append ( 'ID=%s; Name=%s' % ( geneNum , geneAnnot ) ) # rRNA gff = parse_rRNA ( insertion , fa [ insertion [ 'ID' ] ] , gff ) # insertions gff = parse_insertion ( insertion , gff ) # orfs gff = parse_orf ( insertion , gff ) # catalytic RNAs gff = parse_catalytic ( insertion , gff ) return pd . DataFrame ( gff ) [ columns ] . drop_duplicates ( )
convert iTable to gff file
634
8
162
def summarize_taxa ( biom ) : tamtcounts = defaultdict ( int ) tot_seqs = 0.0 for row , col , amt in biom [ 'data' ] : tot_seqs += amt rtax = biom [ 'rows' ] [ row ] [ 'metadata' ] [ 'taxonomy' ] for i , t in enumerate ( rtax ) : t = t . strip ( ) if i == len ( rtax ) - 1 and len ( t ) > 3 and len ( rtax [ - 1 ] ) > 3 : t = 's__' + rtax [ i - 1 ] . strip ( ) . split ( '_' ) [ - 1 ] + '_' + t . split ( '_' ) [ - 1 ] tamtcounts [ t ] += amt lvlData = { lvl : levelData ( tamtcounts , tot_seqs , lvl ) for lvl in [ 'k' , 'p' , 'c' , 'o' , 'f' , 'g' , 's' ] } return tot_seqs , lvlData
Given an abundance table group the counts by every taxonomic level .
239
13
163
def custom_image ( self , user ) : for ext in self . valid_custom_image_extensions ( ) : image_location = self . _custom_image_path ( user , ext ) if os . path . isfile ( image_location ) : return image_location return None
Returns the path to the custom image set for this game or None if no image is set
62
18
164
def set_image ( self , user , image_path ) : _ , ext = os . path . splitext ( image_path ) shutil . copy ( image_path , self . _custom_image_path ( user , ext ) )
Sets a custom image for the game . image_path should refer to an image file on disk
53
20
165
def sam_list ( sam ) : list = [ ] for file in sam : for line in file : if line . startswith ( '@' ) is False : line = line . strip ( ) . split ( ) id , map = line [ 0 ] , int ( line [ 1 ] ) if map != 4 and map != 8 : list . append ( id ) return set ( list )
get a list of mapped reads
83
6
166
def sam_list_paired ( sam ) : list = [ ] pair = [ '1' , '2' ] prev = '' for file in sam : for line in file : if line . startswith ( '@' ) is False : line = line . strip ( ) . split ( ) id , map = line [ 0 ] , int ( line [ 1 ] ) if map != 4 and map != 8 : read = id . rsplit ( '/' ) [ 0 ] if read == prev : list . append ( read ) prev = read return set ( list )
get a list of mapped reads; require that both reads in a pair are mapped in the sam file in order to remove the reads
121
22
167
def filter_paired ( list ) : pairs = { } filtered = [ ] for id in list : read = id . rsplit ( '/' ) [ 0 ] if read not in pairs : pairs [ read ] = [ ] pairs [ read ] . append ( id ) for read in pairs : ids = pairs [ read ] if len ( ids ) == 2 : filtered . extend ( ids ) return set ( filtered )
require that both reads in a pair are mapped in the sam file in order to remove the reads
90
16
168
def sam2fastq ( line ) : fastq = [ ] fastq . append ( '@%s' % line [ 0 ] ) fastq . append ( line [ 9 ] ) fastq . append ( '+%s' % line [ 0 ] ) fastq . append ( line [ 10 ] ) return fastq
print fastq from sam
69
5
169
def check_mismatches ( read , pair , mismatches , mm_option , req_map ) : # if read is not paired, make sure it is mapped and that mm <= thresh if pair is False : mm = count_mismatches ( read ) if mm is False : return False # if no threshold is supplied, return True if mismatches is False : return True # passes threshold? if mm <= mismatches : return True # paired reads r_mm = count_mismatches ( read ) p_mm = count_mismatches ( pair ) # if neither read is mapped, return False if r_mm is False and p_mm is False : return False # if no threshold, return True if mismatches is False : return True # if req_map is True, both reads have to map if req_map is True : if r_mm is False or p_mm is False : return False ## if option is 'one,' only one read has to pass threshold if mm_option == 'one' : if ( r_mm is not False and r_mm <= mismatches ) or ( p_mm is not False and p_mm <= mismatches ) : return True ## if option is 'both,' both reads have to pass threshold if mm_option == 'both' : ## if one read in pair does not map to the scaffold, ## make sure the other read passes threshold if r_mm is False : if p_mm <= mismatches : return True elif p_mm is False : if r_mm <= mismatches : return True elif ( r_mm is not False and r_mm <= mismatches ) and ( p_mm is not False and p_mm <= mismatches ) : return True return False
- check to see if the read maps with <= threshold number of mismatches - mm_option = 'one' or 'both' depending on whether one or both reads in a pair need to pass the mismatch threshold - pair can be False if the read does not have a pair - make sure the alignment score is not 0, which would indicate that the read was not aligned to the reference
368
74
170
def check_region ( read , pair , region ) : if region is False : return True for mapping in read , pair : if mapping is False : continue start , length = int ( mapping [ 3 ] ) , len ( mapping [ 9 ] ) r = [ start , start + length - 1 ] if get_overlap ( r , region ) > 0 : return True return False
determine whether or not reads map to specific region of scaffold
79
14
171
def get_steam ( ) : # Helper function which checks if the potential userdata directory exists # and returns a new Steam instance with that userdata directory if it does. # If the directory doesnt exist it returns None instead helper = lambda udd : Steam ( udd ) if os . path . exists ( udd ) else None # For both OS X and Linux, Steam stores it's userdata in a consistent # location. plat = platform . system ( ) if plat == 'Darwin' : return helper ( paths . default_osx_userdata_path ( ) ) if plat == 'Linux' : return helper ( paths . default_linux_userdata_path ( ) ) # Windows is a bit trickier. The userdata directory is stored in the Steam # installation directory, meaning that theoretically it could be anywhere. # Luckily, Valve stores the installation directory in the registry, so its # still possible for us to figure out automatically if plat == 'Windows' : possible_dir = winutils . find_userdata_directory ( ) # Unlike the others, `possible_dir` might be None (if something odd # happened with the registry) return helper ( possible_dir ) if possible_dir is not None else None # This should never be hit. Windows, OS X, and Linux should be the only # supported platforms. # TODO: Add logging here so that the user (developer) knows that something # odd happened. return None
Returns a Steam object representing the current Steam installation on the user's computer. If the user doesn't have Steam installed, returns None.
304
25
172
def zero_to_one ( table , option ) : if option == 'table' : m = min ( min ( table ) ) ma = max ( max ( table ) ) t = [ ] for row in table : t_row = [ ] if option != 'table' : m , ma = min ( row ) , max ( row ) for i in row : if ma == m : t_row . append ( 0 ) else : t_row . append ( ( i - m ) / ( ma - m ) ) t . append ( t_row ) return t
normalize from zero to one for row or table
120
10
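A compact sketch of the row-wise min/max scaling described above (the non-'table' option of the original); zero_to_one_rows and the sample table are illustrative names and values, and constant rows are mapped to 0 just as in the original.

def zero_to_one_rows(table):
    # scale every row of a list-of-lists to the range [0, 1]
    scaled = []
    for row in table:
        lo, hi = min(row), max(row)
        scaled.append([0 if hi == lo else (v - lo) / (hi - lo) for v in row])
    return scaled

print(zero_to_one_rows([[1, 2, 3], [10, 10, 10]]))
# [[0.0, 0.5, 1.0], [0, 0, 0]]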
173
def pertotal ( table , option ) : if option == 'table' : total = sum ( [ i for line in table for i in line ] ) t = [ ] for row in table : t_row = [ ] if option != 'table' : total = sum ( row ) for i in row : if total == 0 : t_row . append ( 0 ) else : t_row . append ( i / total * 100 ) t . append ( t_row ) return t
calculate percent of total
102
6
174
def scale ( table ) : t = [ ] columns = [ [ ] for i in table [ 0 ] ] for row in table : for i , v in enumerate ( row ) : columns [ i ] . append ( v ) sums = [ float ( sum ( i ) ) for i in columns ] scale_to = float ( max ( sums ) ) scale_factor = [ scale_to / i for i in sums if i != 0 ] for row in table : t . append ( [ a * b for a , b in zip ( row , scale_factor ) ] ) return t
scale table based on the column with the largest sum
123
10
175
def norm ( table ) : print ( '# norm dist is broken' , file = sys . stderr ) exit ( ) from matplotlib . pyplot import hist as hist t = [ ] for i in table : t . append ( np . ndarray . tolist ( hist ( i , bins = len ( i ) , normed = True ) [ 0 ] ) ) return t
fit to normal distribution
83
4
176
def log_trans ( table ) : t = [ ] all = [ item for sublist in table for item in sublist ] if min ( all ) == 0 : scale = min ( [ i for i in all if i != 0 ] ) * 10e-10 else : scale = 0 for i in table : t . append ( np . ndarray . tolist ( np . log10 ( [ j + scale for j in i ] ) ) ) return t
log transform each value in table
98
6
177
def box_cox ( table ) : from scipy . stats import boxcox as bc t = [ ] for i in table : if min ( i ) == 0 : scale = min ( [ j for j in i if j != 0 ] ) * 10e-10 else : scale = 0 t . append ( np . ndarray . tolist ( bc ( np . array ( [ j + scale for j in i ] ) ) [ 0 ] ) ) return t
box - cox transform table
99
6
178
def inh ( table ) : t = [ ] for i in table : t . append ( np . ndarray . tolist ( np . arcsinh ( i ) ) ) return t
inverse hyperbolic sine transformation
40
8
179
def diri ( table ) : t = [ ] for i in table : a = [ j + 1 for j in i ] t . append ( np . ndarray . tolist ( np . random . mtrand . dirichlet ( a ) ) ) return t
from SparCC - randomly draw from the corresponding posterior Dirichlet distribution with a uniform prior
58
19
180
def generate_barcodes ( nIds , codeLen = 12 ) : def next_code ( b , c , i ) : return c [ : i ] + b + ( c [ i + 1 : ] if i < - 1 else '' ) def rand_base ( ) : return random . choice ( [ 'A' , 'T' , 'C' , 'G' ] ) def rand_seq ( n ) : return '' . join ( [ rand_base ( ) for _ in range ( n ) ] ) # homopolymer filter regex: match if 4 identical bases in a row hpf = re . compile ( 'aaaa|cccc|gggg|tttt' , re . IGNORECASE ) while True : codes = [ rand_seq ( codeLen ) ] if ( hpf . search ( codes [ 0 ] ) is None ) : break idx = 0 while len ( codes ) < nIds : idx -= 1 if idx < - codeLen : idx = - 1 codes . append ( rand_seq ( codeLen ) ) else : nc = next_code ( rand_base ( ) , codes [ - 1 ] , idx ) if hpf . search ( nc ) is None : codes . append ( nc ) codes = list ( set ( codes ) ) return codes
Given a list of sample IDs, generate unique n-base barcodes for each. Note that only 4^n unique barcodes are possible.
282
28
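To make the homopolymer filter in generate_barcodes concrete, here is a simplified sketch that rejection-samples random 12-base barcodes instead of mutating one base at a time as the original does; random_barcodes and the printed call are illustrative.

import random
import re

# reject any candidate containing four identical bases in a row
hpf = re.compile('aaaa|cccc|gggg|tttt', re.IGNORECASE)

def random_barcodes(n_ids, code_len=12):
    codes = set()
    while len(codes) < n_ids:
        candidate = ''.join(random.choice('ATCG') for _ in range(code_len))
        if hpf.search(candidate) is None:
            codes.add(candidate)
    return sorted(codes)

print(random_barcodes(3))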
181
def scrobble_data_dir ( dataDir , sampleMap , outF , qualF = None , idopt = None , utf16 = False ) : seqcount = 0 outfiles = [ osp . split ( outF . name ) [ 1 ] ] if qualF : outfiles . append ( osp . split ( qualF . name ) [ 1 ] ) for item in os . listdir ( dataDir ) : if item in outfiles or not osp . isfile ( os . path . join ( dataDir , item ) ) : continue # FASTA files if osp . splitext ( item ) [ 1 ] in file_types [ 'fasta' ] : fh = open_enc ( os . path . join ( dataDir , item ) , utf16 ) records = SeqIO . parse ( fh , 'fasta' ) for record in records : if isinstance ( idopt , tuple ) : sep , field = idopt sampleID = record . id . split ( sep ) [ field - 1 ] else : sampleID = osp . splitext ( item ) [ 0 ] record . seq = ( sampleMap [ sampleID ] . barcode + sampleMap [ sampleID ] . primer + record . seq ) SeqIO . write ( record , outF , 'fasta' ) seqcount += 1 fh . close ( ) # QUAL files elif qualF and osp . splitext ( item ) [ 1 ] in file_types [ 'qual' ] : fh = open_enc ( os . path . join ( dataDir , item ) , utf16 ) records = SeqIO . parse ( fh , 'qual' ) for record in records : mi = sampleMap [ sampleMap . keys ( ) [ 0 ] ] quals = [ 40 for _ in range ( len ( mi . barcode ) + len ( mi . primer ) ) ] record . letter_annotations [ 'phred_quality' ] [ 0 : 0 ] = quals SeqIO . write ( record , qualF , 'qual' ) fh . close ( ) return seqcount
Given a sample ID and a mapping, modify a Sanger FASTA file to include the barcode and primer in the sequence data and change the description line as needed.
456
34
182
def handle_program_options ( ) : parser = argparse . ArgumentParser ( description = "Convert Sanger-sequencing \ derived data files for use with the \ metagenomics analysis program QIIME, by \ extracting Sample ID information, adding\ barcodes and primers to the sequence \ data, and outputting a mapping file and\ single FASTA-formatted sequence file \ formed by concatenating all input data." ) parser . add_argument ( '-i' , '--input_dir' , required = True , help = "The directory containing sequence data files. \ Assumes all data files are placed in this \ directory. For files organized within folders by\ sample, use -s in addition." ) parser . add_argument ( '-m' , '--map_file' , default = 'map.txt' , help = "QIIME-formatted mapping file linking Sample IDs \ with barcodes and primers." ) parser . add_argument ( '-o' , '--output' , default = 'output.fasta' , metavar = 'OUTPUT_FILE' , help = "Single file containing all sequence data found \ in input_dir, FASTA-formatted with barcode and \ primer preprended to sequence. If the -q option \ is passed, any quality data will also be output \ to a single file of the same name with a .qual \ extension." ) parser . add_argument ( '-b' , '--barcode_length' , type = int , default = 12 , help = "Length of the generated barcode sequences. \ Default is 12 (QIIME default), minimum is 8." ) parser . add_argument ( '-q' , '--qual' , action = 'store_true' , default = False , help = "Instruct the program to look for quality \ input files" ) parser . add_argument ( '-u' , '--utf16' , action = 'store_true' , default = False , help = "UTF-16 encoded input files" ) parser . add_argument ( '-t' , '--treatment' , help = "Inserts an additional column into the mapping \ file specifying some treatment or other variable\ that separates the current set of sequences \ from any other set of seqeunces. For example:\ -t DiseaseState=healthy" ) # data input options sidGroup = parser . add_mutually_exclusive_group ( required = True ) sidGroup . add_argument ( '-d' , '--identifier_pattern' , action = ValidateIDPattern , nargs = 2 , metavar = ( 'SEPARATOR' , 'FIELD_NUMBER' ) , help = "Indicates how to extract the Sample ID from \ the description line. Specify two things: \ 1. Field separator, 2. Field number of Sample \ ID (1 or greater). If the separator is a space \ or tab, use \s or \\t respectively. \ Example: >ka-SampleID-2091, use -i - 2, \ indicating - is the separator and the Sample ID\ is field #2." ) sidGroup . add_argument ( '-f' , '--filename_sample_id' , action = 'store_true' , default = False , help = 'Specify that the program should\ the name of each fasta file as the Sample ID for use\ in the mapping file. This is meant to be used when \ all sequence data for a sample is stored in a single\ file.' ) return parser . parse_args ( )
Uses the built - in argparse module to handle command - line options for the program .
772
19
183
def arcsin_sqrt ( biom_tbl ) : arcsint = lambda data , id_ , md : np . arcsin ( np . sqrt ( data ) ) tbl_relabd = relative_abd ( biom_tbl ) tbl_asin = tbl_relabd . transform ( arcsint , inplace = False ) return tbl_asin
Applies the arcsine square root transform to the given BIOM - format table
81
16
184
def parse_sam ( sam , qual ) : for line in sam : if line . startswith ( '@' ) : continue line = line . strip ( ) . split ( ) if int ( line [ 4 ] ) == 0 or int ( line [ 4 ] ) < qual : continue yield line
parse sam file and check mapping quality
63
7
185
def rc_stats ( stats ) : rc_nucs = { 'A' : 'T' , 'T' : 'A' , 'G' : 'C' , 'C' : 'G' , 'N' : 'N' } rcs = [ ] for pos in reversed ( stats ) : rc = { } rc [ 'reference frequencey' ] = pos [ 'reference frequency' ] rc [ 'consensus frequencey' ] = pos [ 'consensus frequency' ] rc [ 'In' ] = pos [ 'In' ] rc [ 'Del' ] = pos [ 'Del' ] rc [ 'ref' ] = rc_nucs [ pos [ 'ref' ] ] rc [ 'consensus' ] = ( rc_nucs [ pos [ 'consensus' ] [ 0 ] ] , pos [ 'consensus' ] [ 1 ] ) for base , stat in list ( pos . items ( ) ) : if base in rc_nucs : rc [ rc_nucs [ base ] ] = stat rcs . append ( rc ) return rcs
reverse complement stats
238
5
186
def parse_codons ( ref , start , end , strand ) : codon = [ ] c = cycle ( [ 1 , 2 , 3 ] ) ref = ref [ start - 1 : end ] if strand == - 1 : ref = rc_stats ( ref ) for pos in ref : n = next ( c ) codon . append ( pos ) if n == 3 : yield codon codon = [ ]
parse codon nucleotide positions in range start - > end wrt strand
87
15
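The cycle([1, 2, 3]) pattern in parse_codons above simply groups positions into codons of three; a minimal standalone sketch of that grouping (chunk_codons is a hypothetical helper name):

from itertools import cycle

def chunk_codons(positions):
    # yield successive groups of three positions, mirroring the cycle(1, 2, 3) pattern
    codon, counter = [], cycle([1, 2, 3])
    for pos in positions:
        codon.append(pos)
        if next(counter) == 3:
            yield codon
            codon = []

print(list(chunk_codons(range(1, 10))))  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]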
187
def calc_coverage ( ref , start , end , length , nucs ) : ref = ref [ start - 1 : end ] bases = 0 for pos in ref : for base , count in list ( pos . items ( ) ) : if base in nucs : bases += count return float ( bases ) / float ( length )
calculate coverage for positions in range start - > end
71
12
188
def parse_gbk ( gbks ) : for gbk in gbks : for record in SeqIO . parse ( open ( gbk ) , 'genbank' ) : for feature in record . features : if feature . type == 'gene' : try : locus = feature . qualifiers [ 'locus_tag' ] [ 0 ] except : continue if feature . type == 'CDS' : try : locus = feature . qualifiers [ 'locus_tag' ] [ 0 ] except : pass start = int ( feature . location . start ) + int ( feature . qualifiers [ 'codon_start' ] [ 0 ] ) end , strand = int ( feature . location . end ) , feature . location . strand if strand is None : strand = 1 else : strand = - 1 contig = record . id # contig = record.id.rsplit('.', 1)[0] yield contig , [ locus , [ start , end , strand ] , feature . qualifiers ]
parse gbk file
217
5
189
def parse_fasta_annotations ( fastas , annot_tables , trans_table ) : if annot_tables is not False : annots = { } for table in annot_tables : for cds in open ( table ) : ID , start , end , strand = cds . strip ( ) . split ( ) annots [ ID ] = [ start , end , int ( strand ) ] for fasta in fastas : for seq in parse_fasta ( fasta ) : if ( '# ;gc_cont' not in seq [ 0 ] and '# ID=' not in seq [ 0 ] ) and annot_tables is False : print ( '# specify fasta from Prodigal or annotations table (-t)' , file = sys . stderr ) exit ( ) if 'ID=' in seq [ 0 ] : ID = seq [ 0 ] . rsplit ( 'ID=' , 1 ) [ 1 ] . split ( ';' , 1 ) [ 0 ] contig = seq [ 0 ] . split ( ) [ 0 ] . split ( '>' ) [ 1 ] . rsplit ( '_%s' % ( ID ) , 1 ) [ 0 ] else : contig = seq [ 0 ] . split ( ) [ 0 ] . split ( '>' ) [ 1 ] . rsplit ( '_' , 1 ) [ 0 ] locus = seq [ 0 ] . split ( ) [ 0 ] . split ( '>' ) [ 1 ] # annotation info from Prodigal if ( '# ;gc_cont' in seq [ 0 ] or '# ID=' in seq [ 0 ] ) : info = seq [ 0 ] . split ( ' # ' ) start , end , strand = int ( info [ 1 ] ) , int ( info [ 2 ] ) , info [ 3 ] if strand == '1' : strand = 1 else : strand = - 1 product = [ '' . join ( info [ 4 ] . split ( ) [ 1 : ] ) ] # annotation info from table else : start , end , strand = annots [ locus ] product = seq [ 0 ] . split ( ' ' , 1 ) [ 1 ] info = { 'transl_table' : [ trans_table ] , 'translation' : [ seq [ 1 ] ] , 'product' : product } yield contig , [ locus , [ start , end , strand ] , info ]
parse gene call information from Prodigal fasta output
513
11
190
def parse_annotations ( annots , fmt , annot_tables , trans_table ) : annotations = { } # annotations[contig] = [features] # gbk format if fmt is False : for contig , feature in parse_gbk ( annots ) : if contig not in annotations : annotations [ contig ] = [ ] annotations [ contig ] . append ( feature ) # fasta format else : for contig , feature in parse_fasta_annotations ( annots , annot_tables , trans_table ) : if contig not in annotations : annotations [ contig ] = [ ] annotations [ contig ] . append ( feature ) return annotations
parse annotations in either gbk or Prodigal fasta format
145
14
191
def codon2aa ( codon , trans_table ) : return Seq ( '' . join ( codon ) , IUPAC . ambiguous_dna ) . translate ( table = trans_table ) [ 0 ]
convert codon to amino acid
47
7
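Assuming Biopython is installed, the codon2aa entry above reduces to a single-codon translation; a minimal sketch that drops the IUPAC alphabet argument (removed in recent Biopython releases) and uses translation table 11 purely as an example:

from Bio.Seq import Seq

def codon_to_aa(codon, trans_table=11):
    # translate a single codon (list or string of three bases) to one amino acid
    return str(Seq(''.join(codon)).translate(table=trans_table))[0]

print(codon_to_aa(['A', 'T', 'G']))   # 'M'
print(codon_to_aa('TAA'))             # '*' (stop)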
192
def find_consensus ( bases ) : nucs = [ 'A' , 'T' , 'G' , 'C' , 'N' ] total = sum ( [ bases [ nuc ] for nuc in nucs if nuc in bases ] ) # save most common base as consensus (random nuc if there is a tie) try : top = max ( [ bases [ nuc ] for nuc in nucs if nuc in bases ] ) except : bases [ 'consensus' ] = ( 'N' , 'n/a' ) bases [ 'consensus frequency' ] = 'n/a' bases [ 'reference frequency' ] = 'n/a' return bases top = [ ( nuc , bases [ nuc ] ) for nuc in bases if bases [ nuc ] == top ] if top [ 0 ] [ 1 ] == 0 : bases [ 'consensus' ] = ( 'n/a' , 0 ) else : bases [ 'consensus' ] = random . choice ( top ) if total == 0 : c_freq = 'n/a' ref_freq = 'n/a' else : c_freq = float ( bases [ 'consensus' ] [ 1 ] ) / float ( total ) if bases [ 'ref' ] not in bases : ref_freq = 0 else : ref_freq = float ( bases [ bases [ 'ref' ] ] ) / float ( total ) bases [ 'consensus frequency' ] = c_freq bases [ 'reference frequency' ] = ref_freq return bases
find consensus base based on nucleotide frequencies
340
8
193
def print_consensus ( genomes ) : # generate consensus sequences cons = { } # cons[genome][sample][contig] = consensus for genome , contigs in list ( genomes . items ( ) ) : cons [ genome ] = { } for contig , samples in list ( contigs . items ( ) ) : for sample , stats in list ( samples . items ( ) ) : if sample not in cons [ genome ] : cons [ genome ] [ sample ] = { } seq = cons [ genome ] [ sample ] [ contig ] = [ ] for pos , ps in enumerate ( stats [ 'bp_stats' ] , 1 ) : ref , consensus = ps [ 'ref' ] , ps [ 'consensus' ] [ 0 ] if consensus == 'n/a' : consensus = ref . lower ( ) seq . append ( consensus ) # print consensus sequences for genome , samples in cons . items ( ) : for sample , contigs in samples . items ( ) : fn = '%s.%s.consensus.fa' % ( genome , sample ) f = open ( fn , 'w' ) for contig , seq in contigs . items ( ) : print ( '>%s' % ( contig ) , file = f ) print ( '' . join ( seq ) , file = f ) f . close ( ) return cons
print consensus sequences for each genome and sample
287
10
194
def parse_cov ( cov_table , scaffold2genome ) : size = { } # size[genome] = genome size mapped = { } # mapped[genome][sample] = mapped bases # parse coverage files for line in open ( cov_table ) : line = line . strip ( ) . split ( '\t' ) if line [ 0 ] . startswith ( '#' ) : samples = line [ 1 : ] samples = [ i . rsplit ( '/' , 1 ) [ - 1 ] . split ( '.' , 1 ) [ 0 ] for i in samples ] continue scaffold , length = line [ 0 ] . split ( ': ' ) length = float ( length ) covs = [ float ( i ) for i in line [ 1 : ] ] bases = [ c * length for c in covs ] if scaffold not in scaffold2genome : continue genome = scaffold2genome [ scaffold ] if genome not in size : size [ genome ] = 0 mapped [ genome ] = { sample : 0 for sample in samples } # keep track of genome size size [ genome ] += length # keep track of number of mapped bases for sample , count in zip ( samples , bases ) : mapped [ genome ] [ sample ] += count # calculate coverage from base counts and genome size coverage = { 'genome' : [ ] , 'genome size (bp)' : [ ] , 'sample' : [ ] , 'coverage' : [ ] } for genome , length in size . items ( ) : for sample in samples : cov = mapped [ genome ] [ sample ] / length coverage [ 'genome' ] . append ( genome ) coverage [ 'genome size (bp)' ] . append ( length ) coverage [ 'sample' ] . append ( sample ) coverage [ 'coverage' ] . append ( cov ) return pd . DataFrame ( coverage )
calculate genome coverage from scaffold coverage table
403
10
195
def genome_coverage ( covs , s2b ) : COV = [ ] for cov in covs : COV . append ( parse_cov ( cov , s2b ) ) return pd . concat ( COV )
calculate genome coverage from scaffold coverage
52
9
196
def parse_s2bs ( s2bs ) : s2b = { } for s in s2bs : for line in open ( s ) : line = line . strip ( ) . split ( '\t' ) s , b = line [ 0 ] , line [ 1 ] s2b [ s ] = b return s2b
convert s2b files to dictionary
73
8
197
def fa2s2b ( fastas ) : s2b = { } for fa in fastas : for seq in parse_fasta ( fa ) : s = seq [ 0 ] . split ( '>' , 1 ) [ 1 ] . split ( ) [ 0 ] s2b [ s ] = fa . rsplit ( '/' , 1 ) [ - 1 ] . rsplit ( '.' , 1 ) [ 0 ] return s2b
convert fastas to s2b dictionary
96
9
198
def filter_ambiguity ( records , percent = 0.5 ) : # , repeats=6) seqs = [ ] # Ns = ''.join(['N' for _ in range(repeats)]) count = 0 for record in records : if record . seq . count ( 'N' ) / float ( len ( record ) ) < percent : # pos = record.seq.find(Ns) # if pos >= 0: # record.seq = Seq(str(record.seq)[:pos]) seqs . append ( record ) count += 1 return seqs , count
Filters out sequences with too much ambiguity as defined by the method parameters .
126
15
199
def package_existent ( name ) : try : response = requests . get ( PYPI_URL . format ( name ) ) if response . ok : msg = ( '[error] "{0}" is registered already in PyPI.\n' '\tSpecify another package name.' ) . format ( name ) raise Conflict ( msg ) except ( socket . gaierror , Timeout , ConnectionError , HTTPError ) as exc : raise BackendFailure ( exc )
Search for the package on PyPI.
98
3