idx (int64: 0 – 63k) | question (string, length 53 – 5.28k) | target (string, length 5 – 805)
---|---|---
def append_index_id(id, ids):
    """Add a numeric index suffix to *id* to make it unique with respect to *ids*.

    Appends the new id to *ids* and returns ``(new_id, ids)``.
    """
    suffix = 1
    candidate = '%s_%s' % (id, suffix)
    # bump the suffix until the candidate no longer collides
    while candidate in ids:
        suffix += 1
        candidate = '%s_%s' % (id, suffix)
    ids.append(candidate)
    return candidate, ids
def de_rep(fastas, append_index, return_original=False):
    """De-replicate fastas based on sequence names.

    Yields each sequence the first time its id is seen; when *append_index*
    is True, duplicate ids are re-yielded with a unique numeric suffix.
    """
    seen = []
    for fasta in fastas:
        for seq in parse_fasta(fasta):
            header = seq[0].split('>')[1].split()
            id = header[0]
            if id not in seen:
                seen.append(id)
                yield [header, seq] if return_original is True else seq
            elif append_index == True:
                # duplicate id: mint a unique one and rebuild the header line
                new, seen = append_index_id(id, seen)
                renamed = ['>%s %s' % (new, ' '.join(header[1::])), seq[1]]
                yield [header, renamed] if return_original is True else renamed
def get(postcode):
    """Request data associated with *postcode* from the web service."""
    # the service expects postcodes without internal spaces, URL-quoted
    cleaned = quote(postcode.replace(' ', ''))
    url = '%s/postcode/%s.json' % (END_POINT, cleaned)
    return _get_json_resp(url)
def get_from_postcode(postcode, distance):
    """Request all postcode data within *distance* miles of *postcode*."""
    cleaned = quote(postcode.replace(' ', ''))
    return _get_from(distance, 'postcode=%s' % cleaned)
204 | def _check_point ( self , lat , lng ) : if abs ( lat ) > 90 or abs ( lng ) > 180 : msg = "Illegal lat and/or lng, (%s, %s) provided." % ( lat , lng ) raise IllegalPointException ( msg ) | Checks if latitude and longitude correct |
205 | def _lookup ( self , skip_cache , fun , * args , ** kwargs ) : if args not in self . cache or skip_cache : self . cache [ args ] = fun ( * args , ** kwargs ) return self . cache [ args ] | Checks for cached responses before requesting from web - service |
def get_nearest(self, lat, lng, skip_cache=False):
    """Call postcodes.get_nearest with validated lat/lng, using the local cache."""
    lat = float(lat)
    lng = float(lng)
    self._check_point(lat, lng)
    return self._lookup(skip_cache, get_nearest, lat, lng)
def get_from_postcode(self, postcode, distance, skip_cache=False):
    """Call postcodes.get_from_postcode with a validated distance, using the cache."""
    distance = float(distance)
    if distance < 0:
        raise IllegalDistanceException("Distance must not be negative")
    # the service expects lower-case postcodes without spaces
    normalised = postcode.lower().replace(' ', '')
    return self._lookup(skip_cache, get_from_postcode, normalised, float(distance))
def get_from_geo(self, lat, lng, distance, skip_cache=False):
    """Call postcodes.get_from_geo after validating all arguments, using the cache."""
    lat = float(lat)
    lng = float(lng)
    distance = float(distance)
    if distance < 0:
        raise IllegalDistanceException("Distance must not be negative")
    self._check_point(lat, lng)
    return self._lookup(skip_cache, get_from_geo, lat, lng, distance)
def insertions_from_masked(seq):
    """Get [start, end] coordinates of insertions from an insertion-masked sequence.

    Lowercase bases mark insertions; uppercase bases are model positions.
    Returns a list of [min_index, max_index] pairs, one per lowercase run.
    """
    insertions = []
    prev = True
    for i, base in enumerate(seq):
        if base.isupper() and prev is True:
            insertions.append([])
            prev = False
        elif base.islower():
            # bug fix: a sequence that begins with lowercase used to raise
            # IndexError because no bucket had been opened yet
            if not insertions:
                insertions.append([])
            insertions[-1].append(i)
            prev = True
    return [[min(i), max(i)] for i in insertions if i != []]
def seq_info(names, id2names, insertions, sequences):
    """Get insertion information from each sequence header.

    Returns {id: [gene, model, insertion_info]} where insertion_info is a list
    of [gene_pos_pair, model_pos, length, [header, subseq], [], []] records.
    """
    seqs = {}
    for name in names:
        id = id2names[name]
        # gene name is the token just before 'fromHMM::'; model follows 'model='
        gene = name.split('fromHMM::', 1)[0].rsplit(' ', 1)[1]
        model = name.split('fromHMM::', 1)[1].split('=', 1)[1].split()[0]
        i_gene_pos = insertions[id]
        # 'model-pos(ins-len)=' encodes semicolon-separated "start-end(len)" items
        i_model_pos = name.split('fromHMM::', 1)[1].split('model-pos(ins-len)=')[1].split()[0].split(';')
        i_info = []
        for i, ins in enumerate(i_gene_pos):
            model_pos = i_model_pos[i].split('-')[1].split('(')[0]
            length = i_model_pos[i].split('(')[1].split(')')[0]
            # NOTE(review): 'seq=%s' is filled with (i + 1), same as the id suffix —
            # confirm this is the intended value for the seq field
            iheader = '>%s_%s insertion::seq=%s type=insertion strand=n/a gene-pos=%s-%s model-pos=%s' % (id, (i + 1), (i + 1), ins[0], ins[1], model_pos)
            # slice is end-inclusive: gene-pos coordinates include both endpoints
            iseq = sequences[id][1][ins[0]:(ins[1] + 1)]
            iseq = [iheader, iseq]
            info = [ins, model_pos, length, iseq, [], []]
            i_info.append(info)
        seqs[id] = [gene, model, i_info]
    return seqs
def check_overlap(pos, ins, thresh):
    """Return True when at least *thresh* fraction of the feature at *pos*
    is contained within the insertion *ins*."""
    shared = overlap(ins[0], pos)
    feature_span = pos[1] - pos[0] + 1  # end-inclusive length
    return float(shared) / float(feature_span) >= thresh
def max_insertion(seqs, gene, domain):
    """Length of the largest insertion recorded for *gene*/*domain*.

    Returns 100 as a fallback when no insertions are recorded.
    """
    lengths = [
        int(ins[2])
        for info in list(seqs.values())
        if info[2] != [] and info[0] == gene and info[1] == domain
        for ins in info[2]
    ]
    if not lengths:
        return 100
    return max(lengths)
def model_length(gene, domain):
    """Get the expected model length for a 16S/23S gene in the given domain.

    Exits the program (after printing to stderr) for unknown gene names;
    raises KeyError for an unknown domain, as before.
    """
    lengths = {
        '16S': {'E_coli_K12': 1538, 'bacteria': 1689, 'archaea': 1563, 'eukarya': 2652},
        '23S': {'E_coli_K12': 2903, 'bacteria': 3146, 'archaea': 3774, 'eukarya': 9079},
    }
    if gene not in lengths:
        # bug fix: the original passed sys.stderr as the first *argument* to
        # print(), printing the file object instead of writing to stderr
        print('# length unknown for gene: %s, domain: %s' % (gene, domain),
              file=sys.stderr)
        exit()
    return lengths[gene][domain]
def setup_markers(seqs):
    """Assign a unique plot marker (and size) to every ORF annotation family.

    The '^' marker is drawn at half size; the reduced size persists for all
    subsequently assigned markers, matching the original behaviour.
    """
    marker_pool = cycle(['^', 'p', '*', '+', 'x', 'd', '|', 'v', '>', '<', '8'])
    size = 60
    families = []
    for seq in list(seqs.values()):
        for insertion in seq[2]:
            for family in list(insertion[-1].values()):
                if family not in families:
                    families.append(family)
    family2marker = {}
    for family in families:
        marker = next(marker_pool)
        if marker == '^':
            size = size * 0.5
        family2marker[family] = [marker, size]
    return family2marker
def plot_by_gene_and_domain(name, seqs, tax, id2name):
    """Plot insertions for every (gene, domain) combination present in *seqs*."""
    records = list(seqs.values())
    genes = set(rec[0] for rec in records)
    domains = set(rec[1] for rec in records)
    for gene in genes:
        for domain in domains:
            plot_insertions(name, seqs, gene, domain, tax, id2name)
def get_descriptions(fastas):
    """Get the source fasta, description, and length for each ORF.

    Returns {orf_id: [fasta, description, length]}.
    """
    id2desc = {}
    for fasta in fastas:
        for seq in parse_fasta(fasta):
            header = seq[0].split('>')[1].split(' ')
            id = header[0]
            desc = ' '.join(header[1:]) if len(header) > 1 else 'n/a'
            # length excludes stop-codon '*' characters
            length = float(len([i for i in seq[1].strip() if i != '*']))
            id2desc[id] = [fasta, desc, length]
    return id2desc
def print_genome_matrix(hits, fastas, id2desc, file_name):
    """Write two genome-by-genome matrices to *file_name*:

    1. average percent identity between each pair of genomes, and
    2. percent of each genome's ORFs that are orthologous to the other genome.

    Diagonal cells are written as '-'.
    """
    fastas = sorted(fastas)
    header = '# - \t %s' % ('\t'.join(fastas))
    # bug fix: the output handle was never closed; use a context manager
    with open(file_name, 'w') as out:
        print('## percent identity between genomes', file=out)
        print(header, file=out)
        for fasta in fastas:
            line = [fasta]
            for other in fastas:
                if other == fasta:
                    average = '-'
                else:
                    # column 3 of each hit record is the percent identity
                    average = numpy.average(
                        [hits[fasta][other][i][3] for i in hits[fasta][other]])
                line.append(str(average))
            print('\t'.join(line), file=out)
        print('', file=out)
        print('## percent of orfs that are orthologous between genomes', file=out)
        print(header, file=out)
        for fasta in fastas:
            line = [fasta]
            for other in fastas:
                if other == fasta:
                    percent = '-'
                else:
                    orthologs = float(len(hits[fasta][other]))
                    orfs = float(len([i for i in id2desc if id2desc[i][0] == fasta]))
                    percent = float(orthologs / orfs) * 100
                line.append(str(percent))
            print('\t'.join(line), file=out)
def self_compare(fastas, id2desc, algorithm):
    """Compare each genome to itself to record the best possible bit score per ORF."""
    for fasta in fastas:
        results = open(search(fasta, fasta, method=algorithm, alignment='local'))
        for hit in best_blast(results, 1):
            orf_id = hit[0].split()[0]
            best_bit = float(hit[-1])
            id2desc[orf_id].append(best_bit)
    return id2desc
def calc_thresholds(rbh, file_name, thresholds=[False, False, False, False], stdevs=2):
    """Summarise rbh score distributions to *file_name*; when the last
    threshold slot is True, also derive a normalized-bit-score threshold
    per genome pair as (mean - stdevs * std).

    NOTE(review): mutable default argument; safe only because `thresholds`
    is never mutated (a new list is returned) — confirm before refactoring.
    """
    calc_threshold = thresholds[-1]
    norm_threshold = {}
    # pre-build the nested {genome: {genome: {}}} structure for every ordered pair
    for pair in itertools.permutations([i for i in rbh], 2):
        if pair[0] not in norm_threshold:
            norm_threshold[pair[0]] = {}
        norm_threshold[pair[0]][pair[1]] = {}
    out = open(file_name, 'w')
    print('#### summary of rbh comparisons\n', file=out)
    comparisons = []
    for genome in rbh:
        for compare in rbh[genome]:
            # de-duplicate unordered pairs via a sorted key
            pair = ''.join(sorted([genome, compare]))
            if pair in comparisons:
                continue
            comparisons.append(pair)
            scores = {'percent identity': [], 'e-value': [], 'bit score': [], 'normalized bit score': [], 'alignment length fraction': []}
            print('### blast between %s and %s\n' % (genome, compare), file=out)
            for id in rbh[genome][compare]:
                # columns 3.. of each rbh record: pident, length fraction, e, bit, norm bit
                pident, length_fraction, e, bit, norm_bit = rbh[genome][compare][id][3:]
                scores['percent identity'].append(pident)
                scores['alignment length fraction'].append(length_fraction)
                scores['e-value'].append(e)
                scores['bit score'].append(bit)
                scores['normalized bit score'].append(norm_bit)
            if calc_threshold is True:
                norms = scores['normalized bit score']
                average = numpy.average(norms)
                std = numpy.std(norms)
                normal_thresh = average - (std * stdevs)
                print('## average normalized bit score: %s' % average, file=out)
                print('## standard deviation of normalized bit scores: %s' % std, file=out)
                print('## normalized bit score threshold set to: %s\n' % (normal_thresh), file=out)
                # threshold is symmetric for the pair
                norm_threshold[genome][compare], norm_threshold[compare][genome] = normal_thresh, normal_thresh
            for score in scores:
                print('## %s' % (score), file=out)
                if len(scores[score]) > 0:
                    print('## average: %s' % numpy.average(scores[score]), file=out)
                print('', file=out)
    out.close()
    if calc_threshold is True:
        # replace the sentinel in the last slot with the computed thresholds
        return thresholds[0:-1] + [norm_threshold]
    else:
        return thresholds
def neto(fastas, algorithm='usearch', e=0.01, bit=40, length=.65, norm_bit=False):
    """Make and split a reciprocal-best-hit (rbh) network from genome fastas.

    Pipeline: describe ORFs -> self-compare -> all-vs-all compare ->
    summarise fbh scores -> find rbh -> derive thresholds -> filter network ->
    write node/edge/matrix reports -> split the filtered network.
    Returns the split network graph.
    """
    thresholds = [e, bit, length, norm_bit]
    id2desc = get_descriptions(fastas)
    id2desc = self_compare(fastas, id2desc, algorithm)
    hits = compare_genomes(fastas, id2desc, algorithm)
    # forward-best-hit (fbh) summaries before rbh filtering
    calc_thresholds(hits, file_name='fbh.scores.summary.txt')
    rbh_network(id2desc, hits, file_name='fbh.network.edges.txt')
    hits, rbh = find_rbh(hits, id2desc)
    # may replace the norm_bit sentinel with computed per-pair thresholds
    thresholds = calc_thresholds(rbh, 'rbh.scores.summary.txt', thresholds)
    g = rbh_network(id2desc, rbh, file_name='rbh.network.edges.txt')
    filtered_g, filtered_rbh = rbh_network(id2desc, rbh, 'rbh.filtered.network.edges.txt', thresholds)
    calc_thresholds(filtered_rbh, file_name='rbh.filtered.scores.summary.txt')
    print_summary(filtered_g, fastas, id2desc, file_name='rbh.filtered.network.nodes.txt')
    print_network_matrix(filtered_g, fastas, id2desc, file_name='rbh.filtered.network.matrix.txt')
    print_genome_matrix(filtered_rbh, fastas, id2desc, file_name='rbh.filtered.network.genome_matrix.txt')
    split_g = split_network(filtered_g, id2desc, file_name='rbh.filtered.split.network.edges.txt')
    print_summary(split_g, fastas, id2desc, file_name='rbh.filtered.split.network.nodes.txt')
    print_network_matrix(split_g, fastas, id2desc, file_name='rbh.filtered.split.network.matrix.txt')
    return split_g
def _parse_raster_info(self, prop=RASTER_INFO):
    """Collapses multiple dimensions into a single raster_info complex struct.

    Returns {} when every parsed field is empty.
    """
    raster_info = {}.fromkeys(_iso_definitions[prop], u'')
    # number of dimensions comes from its own mapped xpath
    raster_info['dimensions'] = get_default_for_complex_sub(
        prop=prop,
        subprop='dimensions',
        value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),
        xpath=self._data_map['_ri_num_dims'])
    xpath_root = self._get_xroot_for(prop)
    xpath_map = self._data_structures[prop]
    for dimension in parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):
        dimension_type = dimension['type'].lower()
        if dimension_type == 'vertical':
            raster_info['vertical_count'] = dimension['size']
        elif dimension_type == 'column':
            raster_info['column_count'] = dimension['size']
            # resolution is "<value> <units>", omitting empty parts
            raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
        elif dimension_type == 'row':
            raster_info['row_count'] = dimension['size']
            raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
    return raster_info if any(raster_info[k] for k in raster_info) else {}
def _update_raster_info(self, **update_props):
    """Derives multiple dimensions from a single raster_info complex struct."""
    tree_to_update = update_props['tree_to_update']
    prop = update_props['prop']
    values = update_props.pop('values')
    # write the dimension count at its dedicated xpath first
    xroot, xpath = None, self._data_map['_ri_num_dims']
    raster_info = [update_property(tree_to_update, xroot, xpath, prop, values.get('dimensions', u''))]
    xpath_root = self._get_xroot_for(prop)
    xpath_map = self._data_structures[prop]
    # build one dimension dict per axis, only when the relevant values exist
    v_dimension = {}
    if values.get('vertical_count'):
        v_dimension = v_dimension.fromkeys(xpath_map, u'')
        v_dimension['type'] = 'vertical'
        v_dimension['size'] = values.get('vertical_count', u'')
    x_dimension = {}
    if values.get('column_count') or values.get('x_resolution'):
        x_dimension = x_dimension.fromkeys(xpath_map, u'')
        x_dimension['type'] = 'column'
        x_dimension['size'] = values.get('column_count', u'')
        x_dimension['value'] = values.get('x_resolution', u'')
    y_dimension = {}
    if values.get('row_count') or values.get('y_resolution'):
        y_dimension = y_dimension.fromkeys(xpath_map, u'')
        y_dimension['type'] = 'row'
        y_dimension['size'] = values.get('row_count', u'')
        y_dimension['value'] = values.get('y_resolution', u'')
    update_props['prop'] = RASTER_DIMS
    update_props['values'] = [v_dimension, x_dimension, y_dimension]
    raster_info += update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
    return raster_info
def _trim_xpath(self, xpath, prop):
    """Remove a trailing primitive-type tag from an XPATH, returning the root."""
    xroot = self._get_xroot_for(prop)
    if xroot is None and isinstance(xpath, string_types):
        xtags = xpath.split(XPATH_DELIM)
        # only trim when the final tag is a known primitive
        if xtags[-1] in _iso_tag_primitives:
            return XPATH_DELIM.join(xtags[:-1])
    return xroot
def shortcut_app_id(shortcut):
    """Generate the app id for a given shortcut.

    Steam uses app ids as a unique identifier for games, but since shortcuts
    don't have a canonical serverside representation they need to be generated
    on the fly. This reproduces Steam's own CRC-based scheme so the id matches
    the one Steam computes.
    """
    algorithm = Crc(width=32, poly=0x04C11DB7, reflect_in=True, xor_in=0xffffffff,
                    reflect_out=True, xor_out=0xffffffff)
    crc_input = shortcut.exe + shortcut.name
    # high 32 bits: CRC with the top bit forced on; low 32 bits: 0x02000000
    top = algorithm.bit_by_bit(crc_input) | 0x80000000
    return str((top << 32) | 0x02000000)
225 | def _config ( self ) : cfg_wr = self . repo . config_writer ( ) cfg_wr . add_section ( 'user' ) cfg_wr . set_value ( 'user' , 'name' , self . metadata . author ) cfg_wr . set_value ( 'user' , 'email' , self . metadata . email ) cfg_wr . release ( ) | Execute git config . |
226 | def _remote_add ( self ) : self . repo . create_remote ( 'origin' , 'git@github.com:{username}/{repo}.git' . format ( username = self . metadata . username , repo = self . metadata . name ) ) | Execute git remote add . |
def start(self):
    """Starts execution of the script: runs args.func() and always flushes
    the metrics queue on the way out."""
    try:
        self.args.func()
    except SystemExit as e:
        # a zero exit code is a normal return; re-raise anything else
        if e.code != 0:
            raise
    except KeyboardInterrupt:
        self.log.warning("exited via keyboard interrupt")
    except:
        # NOTE(review): bare except also swallows BaseException subclasses;
        # the failure is at least logged with a traceback here
        self.log.exception("exited start function")
    finally:
        # NOTE(review): two sentinel puts in a row — presumably one per
        # consumer of the flush queue; confirm both are intended
        self._flush_metrics_q.put(None, block=True)
        self._flush_metrics_q.put(None, block=True, timeout=1)
        self.log.debug("exited_successfully")
def define_baseargs(self, parser):
    """Define basic command-line arguments required by the script."""
    add = parser.add_argument
    add('--name', default=sys.argv[0],
        help='Name to identify this instance')
    add('--log-level', default=None,
        help='Logging level as picked from the logging module')
    add('--log-format', default=None, choices=("json", "pretty",),
        help=("Force the format of the logs. By default, if the "
              "command is from a terminal, print colorful logs. "
              "Otherwise print json."),)
    add('--log-file', default=None,
        help='Writes logs to log file if specified, default: %(default)s',)
    add('--quiet', default=False, action="store_true",
        help='if true, does not print logs to stderr, default: %(default)s',)
    add('--metric-grouping-interval', default=None, type=int,
        help='To group metrics based on time interval ex:10 i.e;(10 sec)',)
    add('--debug', default=False, action="store_true",
        help='To run the code in debug mode',)
def cleanup_payload(self, payload):
    """Strip embedded newlines and surrounding whitespace from *payload*.

    The caller skips payloads for which this returns an empty string.
    """
    flattened = payload.replace('\n', '')
    # strip() is equivalent to the original rstrip() followed by lstrip()
    return flattened.strip()
def get_default_for(prop, value):
    """Ensure complex property types have the correct default values."""
    prop = prop.strip('_')
    val = reduce_value(value)
    if prop in _COMPLEX_LISTS:
        return wrap_value(val)
    if prop in _COMPLEX_STRUCTS:
        return val or {}
    # simple properties default to an empty string
    return u'' if val is None else val
def update_property(tree_to_update, xpath_root, xpaths, prop, values, supported=None):
    """Either update the tree the default way or call the custom updater."""
    if supported and prop.startswith('_') and prop.strip('_') in supported:
        # supported private alias: clear the value instead of defaulting it
        values = u''
    else:
        values = get_default_for(prop, values)
    if not xpaths:
        return []
    if isinstance(xpaths, ParserProperty):
        # custom updater configured for this property
        return xpaths.set_prop(tree_to_update=tree_to_update, prop=prop, values=values)
    return _update_property(tree_to_update, xpath_root, xpaths, values)
def _update_property(tree_to_update, xpath_root, xpaths, values):
    """Default update operation for a single parser property.

    If xpaths contains one xpath then one element per value will be inserted
    at that location in the tree_to_update; otherwise the number of values
    must match the number of xpaths.
    """
    def update_element(elem, idx, root, path, vals):
        # Remove what is already at the path (element, attribute, or rooted
        # subtree), then insert one element/attribute per value.
        has_root = bool(root and len(path) > len(root) and path.startswith(root))
        path, attr = get_xpath_tuple(path)
        if attr:
            removed = [get_element(elem, path)]
            remove_element_attributes(removed[0], attr)
        elif not has_root:
            removed = wrap_value(remove_element(elem, path))
        else:
            path = get_xpath_branch(root, path)
            # only the first xpath clears the rooted elements
            removed = [] if idx != 0 else [remove_element(e, path, True) for e in get_elements(elem, root)]
        if not vals:
            return removed
        items = []
        for i, val in enumerate(wrap_value(vals)):
            elem_to_update = elem
            if has_root:
                elem_to_update = insert_element(elem, (i + idx), root)
            # bytes are decoded; strings pass through unchanged
            val = val.decode('utf-8') if not isinstance(val, string_types) else val
            if not attr:
                items.append(insert_element(elem_to_update, i, path, val))
            elif path:
                items.append(insert_element(elem_to_update, i, path, **{attr: val}))
            else:
                # attribute directly on the updated element, no sub-path
                set_element_attributes(elem_to_update, **{attr: val})
                items.append(elem_to_update)
        return items
    xpaths = reduce_value(xpaths)
    values = filter_empty(values)
    if isinstance(xpaths, string_types):
        return update_element(tree_to_update, 0, xpath_root, xpaths, values)
    else:
        each = []
        for index, xpath in enumerate(xpaths):
            # one value per xpath when multiple xpaths are configured
            value = values[index] if values else None
            each.extend(update_element(tree_to_update, index, xpath_root, xpath, value))
        return each
def validate_complex(prop, value, xpath_map=None):
    """Default validation for a single complex data structure."""
    if value is None:
        return
    validate_type(prop, value, dict)
    if prop in _complex_definitions:
        complex_keys = _complex_definitions[prop]
    else:
        complex_keys = {} if xpath_map is None else xpath_map
    for complex_prop, complex_val in iteritems(value):
        complex_key = '.'.join((prop, complex_prop))
        if complex_prop not in complex_keys:
            _validation_error(prop, None, value,
                              ('keys: {0}'.format(','.join(complex_keys))))
        validate_type(complex_key, complex_val, (string_types, list))
def validate_complex_list(prop, value, xpath_map=None):
    """Default validation for Attribute Details data structure."""
    if value is not None:
        validate_type(prop, value, (dict, list))
        # allowed keys come from the static definitions, else the xpath map
        if prop in _complex_definitions:
            complex_keys = _complex_definitions[prop]
        else:
            complex_keys = {} if xpath_map is None else xpath_map
        for idx, complex_struct in enumerate(wrap_value(value)):
            cs_idx = prop + '[' + str(idx) + ']'
            validate_type(cs_idx, complex_struct, dict)
            for cs_prop, cs_val in iteritems(complex_struct):
                cs_key = '.'.join((cs_idx, cs_prop))
                if cs_prop not in complex_keys:
                    _validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
                if not isinstance(cs_val, list):
                    validate_type(cs_key, cs_val, (string_types, list))
                else:
                    # list values must contain only strings
                    for list_idx, list_val in enumerate(cs_val):
                        list_prop = cs_key + '[' + str(list_idx) + ']'
                        validate_type(list_prop, list_val, string_types)
def validate_dates(prop, value, xpath_map=None):
    """Default validation for Date Types data structure."""
    if value is not None:
        validate_type(prop, value, dict)
        date_keys = set(value)
        if date_keys:
            # both the type key and the values key must be present
            if DATE_TYPE not in date_keys or DATE_VALUES not in date_keys:
                if prop in _complex_definitions:
                    complex_keys = _complex_definitions[prop]
                else:
                    complex_keys = _complex_definitions[DATES] if xpath_map is None else xpath_map
                _validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
            date_type = value[DATE_TYPE]
            if date_type not in DATE_TYPES:
                _validation_error('dates.type', None, date_type, DATE_TYPES)
            date_vals = value[DATE_VALUES]
            validate_type('dates.values', date_vals, list)
            dates_len = len(date_vals)
            # each date type constrains how many values are allowed
            if date_type == DATE_TYPE_MISSING and dates_len != 0:
                _validation_error('len(dates.values)', None, dates_len, 0)
            if date_type == DATE_TYPE_SINGLE and dates_len != 1:
                _validation_error('len(dates.values)', None, dates_len, 1)
            if date_type == DATE_TYPE_RANGE and dates_len != 2:
                _validation_error('len(dates.values)', None, dates_len, 2)
            if date_type == DATE_TYPE_MULTIPLE and dates_len < 2:
                _validation_error('len(dates.values)', None, dates_len, 'at least two')
            for idx, date in enumerate(date_vals):
                date_key = 'dates.value[' + str(idx) + ']'
                validate_type(date_key, date, string_types)
def validate_process_steps(prop, value):
    """Default validation for Process Steps data structure."""
    if value is not None:
        validate_type(prop, value, (dict, list))
        procstep_keys = set(_complex_definitions[prop])
        for idx, procstep in enumerate(wrap_value(value)):
            ps_idx = prop + '[' + str(idx) + ']'
            validate_type(ps_idx, procstep, dict)
            for ps_prop, ps_val in iteritems(procstep):
                ps_key = '.'.join((ps_idx, ps_prop))
                if ps_prop not in procstep_keys:
                    _validation_error(prop, None, value, ('keys: {0}'.format(','.join(procstep_keys))))
                if ps_prop != 'sources':
                    validate_type(ps_key, ps_val, string_types)
                else:
                    # 'sources' may be a string or a list of strings
                    validate_type(ps_key, ps_val, (string_types, list))
                    for src_idx, src_val in enumerate(wrap_value(ps_val)):
                        src_key = ps_key + '[' + str(src_idx) + ']'
                        validate_type(src_key, src_val, string_types)
def validate_type(prop, value, expected):
    """Default validation for all types; None is always accepted."""
    if value is None:
        return
    if not isinstance(value, expected):
        _validation_error(prop, type(value).__name__, None, expected)
def _validation_error(prop, prop_type, prop_value, expected):
    """Raise a ValidationError describing an invalid property value or type."""
    if prop_type is None:
        attrib, assigned = 'value', prop_value
    else:
        attrib, assigned = 'type', prop_type
    raise ValidationError(
        'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}',
        attrib=attrib,
        prop=prop,
        assigned=assigned,
        expected=expected,
        # the invalid mapping is only populated for value errors
        invalid={prop: prop_value} if attrib == 'value' else {},
    )
def get_prop(self, prop):
    """Call the configured parser getter; with a falsy *prop*, call it with no arguments."""
    if self._parser is None:
        raise ConfigurationError('Cannot call ParserProperty."get_prop" with no parser configured')
    if prop:
        return self._parser(prop)
    return self._parser()
def can_group_commands(command, next_command):
    """Return whether two consecutive commands can be grouped into one
    multi-command request."""
    if next_command is None:
        return False
    name = command.get_name()
    # only these commands support multi-key grouping
    if name not in ('get', 'set', 'delete'):
        return False
    if name != next_command.get_name():
        return False
    if grouped_args_for_command(command) != grouped_args_for_command(next_command):
        return False
    return command.get_kwargs() == next_command.get_kwargs()
def find_databases(databases):
    """Define the ribosomal proteins and the location of their curated databases.

    Returns (protein_names, {protein: path}) with paths rooted at *databases*.
    """
    proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14', 'S17',
                'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10']
    # curated database file name for each protein
    filenames = {
        'L14': 'rpL14_JGI_MDM.filtered.faa', 'L15': 'rpL15_JGI_MDM.filtered.faa',
        'L16': 'rpL16_JGI_MDM.filtered.faa', 'L18': 'rpL18_JGI_MDM.filtered.faa',
        'L22': 'rpL22_JGI_MDM.filtered.faa', 'L24': 'rpL24_JGI_MDM.filtered.faa',
        'L2': 'rpL2_JGI_MDM.filtered.faa', 'L3': 'rpL3_JGI_MDM.filtered.faa',
        'L4': 'rpL4_JGI_MDM.filtered.faa', 'L5': 'rpL5_JGI_MDM.filtered.faa',
        'L6': 'rpL6_JGI_MDM.filtered.faa', 'S10': 'rpS10_JGI_MDM.filtered.faa',
        'S17': 'rpS17_JGI_MDM.filtered.faa', 'S19': 'rpS19_JGI_MDM.filtered.faa',
        'S3': 'rpS3_JGI_MDM.filtered.faa', 'S8': 'rpS8_JGI_MDM.filtered.faa',
    }
    protein_databases = {key: '%s/%s' % (databases, fname)
                         for key, fname in list(filenames.items())}
    return proteins, protein_databases
def find_next(start, stop, i2hits):
    """Decide which neighbouring position (left = start or right = stop) has
    the better hit, and advance the search window on that side.

    Returns (index, next_start, next_stop, rp, match).
    """
    if start not in i2hits and stop in i2hits:
        index = stop
    elif stop not in i2hits and start in i2hits:
        index = start
    elif start not in i2hits and stop not in i2hits:
        # neither side has a hit: pick one at random and record a placeholder
        index = choice([start, stop])
        i2hits[index] = [[False]]
    else:
        # both sides hit: compare e-values (column 10), ties go to stop
        A, B = i2hits[start][0], i2hits[stop][0]
        if B[10] <= A[10]:
            index = stop
        else:
            index = start
    # extend the window one position past whichever side was chosen
    if index == start:
        nstart = start - 1
        nstop = stop
    else:
        nstop = stop + 1
        nstart = start
    match = i2hits[index][0]
    rp = match[-1]  # ribosomal protein label is the last field of the hit
    return index, nstart, nstop, rp, match
def find_ribosomal(rps, scaffolds, s2rp, min_hits, max_hits_rp, max_errors):
    """Determine which hits represent real ribosomal proteins by finding them
    in a syntenic block on each scaffold.

    max_hits_rp caps the hits considered per ribosomal protein per scaffold.
    Yields (scaffold, block) for scaffolds with at least min_hits proteins.
    """
    for scaffold, proteins in list(s2rp.items()):
        # keep the best max_hits_rp hits per protein, sorted by e-value (col 10)
        hits = {p: [i for i in sorted(hits, key=itemgetter(10))][0:max_hits_rp]
                for p, hits in list(proteins.items()) if len(hits) > 0}
        if len(hits) < min_hits:
            continue
        # overall best hit (with its protein label appended) seeds the block
        best = sorted([hit[0] + [p] for p, hit in list(hits.items())],
                      key=itemgetter(10))[0]
        block = find_block(rps, scaffolds[scaffold], hits, best, max_errors)
        if (len(block) - 1) >= min_hits:
            yield scaffold, block
def filter_rep_set(inF, otuSet):
    """Parse the rep-set fasta file and keep only the sequences whose ids
    belong to the given set of unique OTUs."""
    return [record for record in SeqIO.parse(inF, "fasta") if record.id in otuSet]
def _update_report_item(self, **update_props):
    """Update the text for each element at the configured path if its type
    attribute matches the one mapped to this property."""
    tree_to_update = update_props['tree_to_update']
    prop = update_props['prop']
    values = wrap_value(update_props['values'])
    xroot = self._get_xroot_for(prop)
    attr_key = 'type'
    attr_val = u''
    # each supported report property maps to a fixed type-attribute value
    if prop == 'attribute_accuracy':
        attr_val = 'DQQuanAttAcc'
    elif prop == 'dataset_completeness':
        attr_val = 'DQCompOm'
    # clear existing elements whose type attribute matches, then drop empties
    for elem in get_elements(tree_to_update, xroot):
        if get_element_attributes(elem).get(attr_key) == attr_val:
            clear_element(elem)
    remove_empty_element(tree_to_update, xroot)
    attrs = {attr_key: attr_val}
    updated = []
    for idx, value in enumerate(values):
        elem = insert_element(tree_to_update, idx, xroot, **attrs)
        updated.append(insert_element(elem, idx, 'measDesc', value))
    return updated
def _clear_interrupt(self, intbit):
    """Clear the specified interrupt bit in the interrupt status register."""
    status = self._device.readU8(VCNL4010_INTSTAT)
    # read-modify-write: drop only the requested bit
    self._device.write8(VCNL4010_INTSTAT, status & ~intbit)
def move(self):
    """Swap two randomly chosen nodes in the current state."""
    last = len(self.state) - 1
    a = random.randint(0, last)
    b = random.randint(0, last)
    # fancy-index assignment swaps in place (RHS is evaluated first)
    self.state[[a, b]] = self.state[[b, a]]
def self_signed(self, value):
    """A bool - whether the certificate should be self-signed.

    Setting a truthy value clears any configured issuer.
    """
    self._self_signed = bool(value)
    if self._self_signed:
        self._issuer = None
249 | def _get_crl_url ( self , distribution_points ) : if distribution_points is None : return None for distribution_point in distribution_points : name = distribution_point [ 'distribution_point' ] if name . name == 'full_name' and name . chosen [ 0 ] . name == 'uniform_resource_identifier' : return name . chosen [ 0 ] . chosen . native return None | Grabs the first URL out of a asn1crypto . x509 . CRLDistributionPoints object |
def ocsp_no_check(self, value):
    """A bool - whether the certificate should carry the OCSP no-check
    extension.

    Only applicable to certificates created for signing OCSP responses;
    such certificates should normally be short-lived since clients
    effectively whitelist them. None clears the setting.
    """
    self._ocsp_no_check = None if value is None else bool(value)
def emptylineless(parser, token):
    """Template tag: removes empty lines from the content between
    {% emptylineless %} and {% endemptylineless %}."""
    nodelist = parser.parse(('endemptylineless',))
    # consume the end tag so it is not rendered
    parser.delete_first_token()
    return EmptylinelessNode(nodelist)
def http_purge_url(url):
    """Issue an HTTP PURGE for the given asset.

    The URL is parsed with urlparse and must point at the Varnish instance
    itself (not varnishadm). Returns the HTTP response; a non-200 status is
    logged as an error.
    """
    parts = urlparse(url)
    conn = HTTPConnection(parts.hostname, parts.port or 80)
    path = parts.path or '/'
    target = '%s?%s' % (path, parts.query) if parts.query else path
    host = '%s:%s' % (parts.hostname, parts.port) if parts.port else parts.hostname
    conn.request('PURGE', target, '', {'Host': host})
    response = conn.getresponse()
    if response.status != 200:
        logging.error('Purge failed with status: %s' % response.status)
    return response
def run(addr, *commands, **kwargs):
    """Non-threaded batch command runner returning output results.

    Each command may be a plain string ('ban.url', dots become underscores to
    find the handler method) or a tuple of (command, args...) tuples.

    NOTE(review): in the non-tuple branch the remaining ``commands[1:]`` are
    passed as *arguments* to the first command and the loop breaks — so mixing
    plain strings runs only the first one. This looks deliberate (legacy
    python-varnish behavior); confirm before relying on multi-command strings.
    """
    results = []
    handler = VarnishHandler(addr, **kwargs)
    for cmd in commands:
        if isinstance(cmd, tuple) and len(cmd) > 1:
            results.extend([getattr(handler, c[0].replace('.', '_'))(*c[1:]) for c in cmd])
        else:
            results.append(getattr(handler, cmd.replace('.', '_'))(*commands[1:]))
            break
    handler.close()
    return results
def add_stylesheets(self, *css_files):
    """Append the contents of each CSS file to the <style> tag in the HTML head."""
    for path in css_files:
        self.main_soup.style.append(self._text_file(path))
def add_javascripts(self, *js_files):
    """Append the contents of each JS file to a <script> tag in the HTML body."""
    # Create a <script> tag in the body the first time this is called.
    if self.main_soup.script is None:
        tag = self.main_soup.new_tag('script')
        self.main_soup.body.append(tag)
    for path in js_files:
        self.main_soup.script.append(self._text_file(path))
def export(self):
    """Write the built HTML document to ``self.export_url``.

    Optionally opens the exported file in a new browser tab when
    ``self.open_browser`` is set.
    """
    with open(self.export_url, 'w', encoding='utf-8') as out:
        out.write(self.build())
    if self.open_browser:
        webbrowser.open_new_tab(self.export_url)
def build(self):
    """Convert the Markdown text to HTML and return the full document as a string."""
    rendered = markdown.markdown(
        self.markdown_text,
        extensions=[TocExtension(), 'fenced_code', 'markdown_checklist.extension',
                    'markdown.extensions.tables'])
    content = BeautifulSoup(rendered, 'html.parser')
    # Pull in the mermaid assets only when a mermaid code block is present.
    if content.find('code', attrs={'class': 'mermaid'}):
        self._add_mermaid_js()
    # Replace each graphviz code block with its rendered SVG.
    for dot_tag in content.find_all('code', attrs={'class': 'dotgraph'}):
        svg = self._text_to_graphiz(dot_tag.string)
        dot_tag.parent.replaceWith(BeautifulSoup(svg, 'html.parser'))
    self.main_soup.body.append(content)
    return self.main_soup.prettify()
def _text_file(self, url):
    """Return the content of the UTF-8 text file at ``url``; exit if missing."""
    try:
        with open(url, 'r', encoding='utf-8') as handle:
            return handle.read()
    except FileNotFoundError:
        # NOTE(review): exits with status 0 even though this is an error path;
        # preserved as-is since callers may rely on it.
        print('File `{}` not found'.format(url))
        sys.exit(0)
def _text_to_graphiz(self, text):
    """Render graphviz dot source ``text`` to an SVG string."""
    graph = Source(text, format='svg')
    return graph.pipe().decode('utf-8')
def _add_mermaid_js(self):
    """Attach the mermaid JS/CSS assets and its initialisation snippet."""
    base = self.resources_path
    self.add_javascripts('{}/js/jquery-1.11.3.min.js'.format(base))
    self.add_javascripts('{}/js/mermaid.min.js'.format(base))
    self.add_stylesheets('{}/css/mermaid.css'.format(base))
    self.main_soup.script.append('mermaid.initialize({startOnLoad:true });')
def getCharacterSet(self):
    """Get a character set with individual members or ranges.

    Scans the pattern after '[' collecting literal members, 'a-z' style
    ranges and escaped/coded characters until the closing ']', which may be
    followed by a '{n:m}' quantifier. Returns a CharacterSet node.
    Raises StringGenerator.SyntaxError on malformed input.
    """
    chars = u''
    c = None
    cnt = 1
    start = 0
    while True:
        escaped_slash = False
        c = self.next()
        # A '-' following an unescaped char starts a range like 'a-z'.
        if self.lookahead() == u'-' and not c == u'\\':
            f = c
            self.next()  # skip the '-'
            c = self.next()
            if not c or (c in self.meta_chars):
                raise StringGenerator.SyntaxError(u"unexpected end of class range")
            chars += self.getCharacterRange(f, c)
        elif c == u'\\':
            # Escaped meta char is taken literally; '\d' etc. expand via string_code.
            if self.lookahead() in self.meta_chars:
                c = self.next()
                chars += c
                continue
            elif self.lookahead() in self.string_code:
                c = self.next()
                chars += self.string_code[c]
        elif c and c not in self.meta_chars:
            chars += c
        if c == u']':
            # Optional quantifier immediately after the closing bracket.
            if self.lookahead() == u'{':
                [start, cnt] = self.getQuantifier()
            else:
                start = -1
                cnt = 1
            break
        if c and c in self.meta_chars and not self.last() == u"\\":
            raise StringGenerator.SyntaxError(u"Un-escaped character in class definition: %s" % c)
        if not c:
            break
    return StringGenerator.CharacterSet(chars, start, cnt)
def getLiteral(self):
    """Get a sequence of non-special characters.

    Consumes characters starting at the current one until a meta character
    or end of input is reached; backslash escapes the following character.
    Returns a Literal node.
    """
    chars = u''
    c = self.current()
    while True:
        if c and c == u"\\":
            # Escape: take the next character literally (if any).
            c = self.next()
            if c:
                chars += c
            continue
        elif not c or (c in self.meta_chars):
            break
        else:
            chars += c
        # Stop before the next meta character so the caller can dispatch on it.
        if self.lookahead() and self.lookahead() in self.meta_chars:
            break
        c = self.next()
    return StringGenerator.Literal(chars)
def getSequence(self, level=0):
    """Get a sequence of nodes.

    Recursively parses literals, '${...}' sources, '[...]' character sets and
    parenthesised sub-sequences, combining adjacent operands with the binary
    '|' (OR) and '&' (AND) operators. ``level`` tracks parenthesis nesting;
    a ')' at level 0, a dangling operator, or an unclosed '(' raises
    StringGenerator.SyntaxError. Returns a Sequence node.
    """
    seq = []
    op = ''
    left_operand = None
    right_operand = None
    sequence_closed = False
    while True:
        c = self.next()
        if not c:
            break
        if c and c not in self.meta_chars:
            seq.append(self.getLiteral())
        elif c and c == u'$' and self.lookahead() == u'{':
            seq.append(self.getSource())
        elif c == u'[' and not self.last() == u'\\':
            seq.append(self.getCharacterSet())
        elif c == u'(' and not self.last() == u'\\':
            seq.append(self.getSequence(level + 1))
        elif c == u')' and not self.last() == u'\\':
            if level == 0:
                raise StringGenerator.SyntaxError(u"Extra closing parenthesis")
            sequence_closed = True
            break
        elif c == u'|' and not self.last() == u'\\':
            op = c
        elif c == u'&' and not self.last() == u'\\':
            op = c
        else:
            if c in self.meta_chars and not self.last() == u"\\":
                raise StringGenerator.SyntaxError(u"Un-escaped special character: %s" % c)
        # A pending operator first captures its left operand, then — once the
        # next node has been parsed — pops the right operand and folds both
        # into an OR/AND node.
        if op and not left_operand:
            if not seq or len(seq) < 1:
                raise StringGenerator.SyntaxError(u"Operator: %s with no left operand" % op)
            left_operand = seq.pop()
        elif op and len(seq) >= 1 and left_operand:
            right_operand = seq.pop()
            if op == u'|':
                seq.append(StringGenerator.SequenceOR([left_operand, right_operand]))
            elif op == u'&':
                seq.append(StringGenerator.SequenceAND([left_operand, right_operand]))
            op = u''
            left_operand = None
            right_operand = None
    if op:
        raise StringGenerator.SyntaxError(u"Operator: %s with no right operand" % op)
    if level > 0 and not sequence_closed:
        raise StringGenerator.SyntaxError(u"Missing closing parenthesis")
    return StringGenerator.Sequence(seq)
def dump(self, **kwargs):
    """Print the parse tree plus version info, then render and return one example."""
    import sys
    # Lazily parse the pattern on first use.
    if not self.seq:
        self.seq = self.getSequence()
    for line in ("StringGenerator version: %s" % __version__,
                 "Python version: %s" % sys.version):
        print(line)
    self.seq.dump()
    return self.render(**kwargs)
def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
    """Return a list of ``cnt`` generated strings.

    With ``unique=True`` duplicate renderings are discarded; gives up with
    StringGenerator.UniquenessError after cnt * unique_attempts_factor total
    attempts. ``progress_callback(done, total)`` is invoked after each attempt.
    """
    results = []
    attempts = 0
    while len(results) < cnt:
        if attempts > cnt * self.unique_attempts_factor:
            raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
        candidate = self.render(**kwargs)
        if not unique or candidate not in results:
            results.append(candidate)
        attempts += 1
        if progress_callback and callable(progress_callback):
            progress_callback(len(results), cnt)
    return results
def connect(self):
    """Establish the S3 connection. This is done automatically for you.

    Sets ``self.conn`` (boto S3 connection), ``self.bucket`` (the configured
    storage bucket) and ``self.k`` (a reusable Key bound to that bucket).
    """
    self.conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY,
                                debug=self.S3UTILS_DEBUG_LEVEL)
    self.bucket = self.conn.get_bucket(self.AWS_STORAGE_BUCKET_NAME)
    self.k = Key(self.bucket)
def connect_cloudfront(self):
    """Connect to Cloud Front. This is done automatically for you when needed."""
    # Calls the module-level boto ``connect_cloudfront`` helper (same name as
    # this method, not recursion).
    self.conn_cloudfront = connect_cloudfront(self.AWS_ACCESS_KEY_ID,
                                              self.AWS_SECRET_ACCESS_KEY,
                                              debug=self.S3UTILS_DEBUG_LEVEL)
def mkdir(self, target_folder):
    """Create a pseudo-folder on S3 by writing an empty key ending in '/'."""
    self.printv("Making directory: %s" % target_folder)
    # S3 has no real directories; strip surrounding slashes and re-add one.
    folder_key = re.sub(r"^/|/$", "", target_folder) + "/"
    self.k.key = folder_key
    self.k.set_contents_from_string('')
    self.k.close()
def rm(self, path):
    """Delete the path and anything under the path on S3."""
    targets = list(self.ls(path))
    if not targets:
        logger.error("There was nothing to remove under %s", path)
        return
    # Single-key delete uses the cheaper API call.
    if len(targets) == 1:
        self.bucket.delete_key(targets[0])
    else:
        self.bucket.delete_keys(targets)
    self.printv("Deleted: %s" % targets)
def __put_key(self, local_file, target_file, acl='public-read', del_after_upload=False,
              overwrite=True, source="filename"):
    """Copy a file to S3 from a filename, file object or string.

    Returns True on success, False on failure. When ``del_after_upload`` is
    set and the source is a filename, the local file is removed afterwards.
    """
    action_word = "moving" if del_after_upload else "copying"
    try:
        self.k.key = target_file
        if source == "filename":
            self.k.set_contents_from_filename(local_file, self.AWS_HEADERS)
        elif source == "fileobj":
            self.k.set_contents_from_file(local_file, self.AWS_HEADERS)
        elif source == "string":
            self.k.set_contents_from_string(local_file, self.AWS_HEADERS)
        else:
            raise Exception("%s is not implemented as a source." % source)
        self.k.set_acl(acl)
        self.k.close()
        self.printv("%s %s to %s" % (action_word, local_file, target_file))
        if del_after_upload and source == "filename":
            try:
                os.remove(local_file)
            # Narrowed from a bare except: only file-system errors are expected here.
            except OSError:
                # Fixed: the original passed local_file as a stray logging arg
                # with no %s placeholder, which itself raised a logging error.
                logger.error("Unable to delete the file: %s", local_file, exc_info=True)
        return True
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        logger.error("Error in writing to %s", target_file, exc_info=True)
        return False
def cp(self, local_path, target_path, acl='public-read', del_after_upload=False,
       overwrite=True, invalidate=False):
    """Copy a file or folder from local to s3.

    A trailing '/*' on ``local_path`` copies the folder *contents* directly
    into ``target_path``; otherwise the folder's basename is appended to the
    target. Returns the result of the copy, or a
    ``{'file_does_not_exist': local_path}`` dict when the source is missing.
    """
    result = None
    # When not overwriting, fetch the existing keys so they can be skipped.
    if overwrite:
        list_of_files = []
    else:
        list_of_files = self.ls(folder=target_path, begin_from_file="", num=-1,
                                get_grants=False, all_grant_data=False)
    if local_path.endswith("/*"):
        local_path = local_path[:-2]
        target_path = re.sub(r"^/|/$", "", target_path)
    else:
        # Mirror `cp` semantics: copying a folder places it *inside* the target.
        local_base_name = os.path.basename(local_path)
        local_path = re.sub(r"/$", "", local_path)
        target_path = re.sub(r"^/", "", target_path)
        if not target_path.endswith(local_base_name):
            target_path = os.path.join(target_path, local_base_name)
    if os.path.exists(local_path):
        result = self.__find_files_and_copy(local_path, target_path, acl,
                                            del_after_upload, overwrite,
                                            invalidate, list_of_files)
    else:
        result = {'file_does_not_exist': local_path}
        logger.error("trying to upload to s3 but file doesn't exist: %s" % local_path)
    return result
def mv(self, local_file, target_file, acl='public-read', overwrite=True, invalidate=False):
    """Move a local file or folder to S3, similar to the Linux ``mv`` command.

    Delegates to :meth:`cp` with ``del_after_upload=True``. Fixed: the
    original discarded ``cp``'s result dict; it is now returned so callers
    can detect failures (e.g. ``{'file_does_not_exist': ...}``).
    """
    return self.cp(local_file, target_file, acl=acl, del_after_upload=True,
                   overwrite=overwrite, invalidate=invalidate)
def cp_cropduster_image(self, the_image_path, del_after_upload=False,
                        overwrite=False, invalidate=False):
    """Copy a cropduster image and its generated crops to S3.

    Cropduster is a Django library for resizing editorial images; this
    uploads the original image plus everything under its crops folder
    (the image path with its extension stripped).
    """
    source = os.path.join(settings.MEDIA_ROOT, the_image_path)
    if not os.path.exists(source):
        return
    crops_rel = os.path.splitext(the_image_path)[0]
    crops_abs = os.path.join(settings.MEDIA_ROOT, crops_rel)
    common = dict(del_after_upload=del_after_upload, overwrite=overwrite,
                  invalidate=invalidate)
    self.cp(local_path=source,
            target_path=os.path.join(settings.S3_ROOT_BASE, the_image_path),
            **common)
    self.cp(local_path=crops_abs + "/*",
            target_path=os.path.join(settings.S3_ROOT_BASE, crops_rel),
            **common)
def chmod(self, target_file, acl='public-read'):
    """Set the ACL ('permissions') for a key on S3."""
    key = self.k
    key.key = target_file
    key.set_acl(acl)
    key.close()
def ll(self, folder="", begin_from_file="", num=-1, all_grant_data=False):
    """Like :meth:`ls`, but also fetches grant (permission) data for each key."""
    return self.ls(folder=folder, begin_from_file=begin_from_file, num=num,
                   get_grants=True, all_grant_data=all_grant_data)
def get_path(url):
    """Return the path portion of *url*, keeping any querystring."""
    parts = urlsplit(url)
    if parts.query:
        return "{}?{}".format(parts.path, parts.query)
    return parts.path
def run(self):
    """Read JSON exports from the input directory and generate CSV files.

    Creates the output directory if needed (exiting with status 1 when the
    path is not a usable directory), then feeds every parseable ``*.json``
    file under ``self.input`` to each CSV visitor. Python 2 code.
    """
    if not os.path.exists(self.output):
        try:
            os.mkdir(self.output)
        # NOTE(review): bare except silently swallows mkdir failures; the
        # isdir check below is what actually aborts the run.
        except:
            print 'failed to create output directory %s' % self.output
    if not os.path.isdir(self.output):
        print 'invalid output directory %s' % self.output
        sys.exit(1)
    visitors = [
        _CompaniesCSV(self.output),
        _ActivitiesCSV(self.output),
        _ActivitiesSeenCSV(self.output),
        _QSACSV(self.output),
    ]
    for path in glob.glob(os.path.join(self.input, '*.json')):
        with open(path, 'r') as f:
            try:
                data = json.load(f, encoding='utf-8')
            except ValueError:
                # Skip files that are not valid JSON.
                continue
        for visitor in visitors:
            visitor.visit(data)
def process_fields(self, fields):
    """Map simple string field definitions to (name, direction) pairs.

    A leading prefix character (a key of PREFIX_MAP) selects the sort
    direction; all prefix characters are stripped from the field name.
    Fields without a recognised prefix get the default ('') direction.
    """
    prefixes = ''.join(self.PREFIX_MAP)
    processed = []
    for raw in fields:
        if raw[0] in self.PREFIX_MAP:
            direction = self.PREFIX_MAP[raw[0]]
        else:
            direction = self.PREFIX_MAP['']
        processed.append((raw.lstrip(prefixes), direction))
    return processed
def search_in_rubric(self, **kwargs):
    """Firms search in rubric.

    Flattens the ``point``, ``bound`` and ``filters`` keyword arguments into
    the wire format expected by the API, then delegates to
    ``_search_in_rubric``.
    """
    point = kwargs.pop('point', False)
    if point:
        kwargs['point'] = '%s,%s' % point
    bound = kwargs.pop('bound', False)
    if bound:
        kwargs['bound[point1]'], kwargs['bound[point2]'] = bound[0], bound[1]
    filters = kwargs.pop('filters', False)
    if filters:
        for key, value in filters.items():
            kwargs['filters[%s]' % key] = value
    return self._search_in_rubric(**kwargs)
def refresh(self):
    """Force a redraw of the screen, then update the frame."""
    screen = self._screen
    screen.force_update()
    screen.refresh()
    self._update(1)
def start(self, activity, action):
    """Mark an action as started; an unknown action is logged, not raised."""
    try:
        self._start_action(activity, action)
    except ValueError:
        message = "Could not find action %s in env %s" % (activity, self.name)
        retox_log.debug(message)
    self.refresh()
def stop(self, activity, action):
    """Mark a task as completed, moving it out of the running list."""
    try:
        self._remove_running_action(activity, action)
    except ValueError:
        message = "Could not find action %s in env %s" % (activity, self.name)
        retox_log.debug(message)
    # Completion is recorded even when the action was not found running.
    self._mark_action_completed(activity, action)
    self.refresh()
def finish(self, status):
    """Colour the frame title by result and move any remaining tasks to completed."""
    retox_log.info("Completing %s with status %s" % (self.name, status))
    # A falsy status means success (green); anything else is a failure (red).
    colour = Screen.COLOUR_RED if status else Screen.COLOUR_GREEN
    self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, colour)
    for item in list(self._task_view.options):
        self._task_view.options.remove(item)
        self._completed_view.options.append(item)
    self.refresh()
def reset(self):
    """Reset the frame between jobs: restore the blue title and clear both task lists."""
    self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLUE)
    self._completed_view.options, self._task_view.options = [], []
    self.refresh()
def default_arguments(cls):
    """Return a mapping of ``cls.__init__``'s keyword arguments to their defaults.

    Fixed two bugs in the original: it sliced ``co_varnames`` directly, which
    also contains *local* variables, so any ``__init__`` with locals reported
    wrong names; and it crashed with TypeError when ``__init__`` had no
    defaults (``__defaults__`` is None).
    """
    func = cls.__init__
    code = func.__code__
    defaults = func.__defaults__ or ()
    if not defaults:
        return {}
    # Only the first co_argcount varnames are parameters; the rest are locals.
    params = code.co_varnames[:code.co_argcount]
    return dict(zip(params[-len(defaults):], defaults))
def recreate(cls, *args, **kwargs):
    """Recreate the class based on the given args (supports multiple uses).

    Builds a new instance when positional args or recognised keyword args are
    supplied (and the first positional arg is not a callable); otherwise
    returns the most recent instance, or a fresh default one.
    """
    # Warn about (but do not reject) keyword args the class does not accept.
    cls.check_arguments(kwargs)
    first_is_callable = True if any(args) and callable(args[0]) else False
    signature = cls.default_arguments()
    # Keep only kwargs that match __init__'s keyword parameters.
    allowed_arguments = {k: v for k, v in kwargs.items() if k in signature}
    if (any(allowed_arguments) or any(args)) and not first_is_callable:
        if any(args) and not first_is_callable:
            return cls(args[0], **allowed_arguments)
        elif any(allowed_arguments):
            return cls(**allowed_arguments)
    # NOTE(review): any(args) is False for falsy positionals like (0,) or
    # ('',), which then fall through to instance reuse — confirm intended.
    return cls.instances[-1] if any(cls.instances) else cls()
def check_arguments(cls, passed):
    """Warn about keyword arguments the class cannot handle (possible typos).

    Returns True when at least one unknown argument was passed.

    Fixed: the original template used ``{argument:!r}`` / ``{cname:!r}``,
    which are invalid format specs (the ``!r`` conversion must come before
    the colon), so the warning path raised ValueError instead of warning.
    """
    defaults = list(cls.default_arguments().keys())
    template = ("Pass arg {argument!r} in {cname!r}, can be a typo? "
                "Supported key arguments: {defaults}")
    unknown = []
    for arg in passed:
        if arg not in defaults:
            warn(template.format(argument=arg, cname=cls.__name__, defaults=defaults))
            unknown.append(arg)
    return any(unknown)
def process(self, data, type, history):
    """Process the specified type, then recursively process its children.

    Builds a skeleton attribute on ``data`` for ``type``: a list for
    multi-occurrence types, a Factory-built object/property for complex
    types, or None for simple ones. ``history`` guards against cycles.
    """
    # Cycle guard: don't revisit a type already on this path.
    if type in history:
        return
    if type.enum():
        return
    history.append(type)
    resolved = type.resolve()
    value = None
    if type.multi_occurrence():
        value = []
    else:
        if len(resolved) > 0:
            if resolved.mixed():
                value = Factory.property(resolved.name)
                md = value.__metadata__
                md.sxtype = resolved
            else:
                value = Factory.object(resolved.name)
                md = value.__metadata__
                md.sxtype = resolved
                md.ordering = self.ordering(resolved)
    setattr(data, type.name, value)
    # Descend into the freshly created child object (lists stay leaves).
    if value is not None:
        data = value
    if not isinstance(data, list):
        self.add_attributes(data, resolved)
        for child, ancestry in resolved.children():
            if self.skip_child(child, ancestry):
                continue
            # Copy history so sibling branches don't share visited state.
            self.process(data, child, history[:])
def skip_child(self, child, ancestry):
    """Return True when the child should be skipped (a wildcard, or inside a choice)."""
    if child.any():
        return True
    return any(node.choice() for node in ancestry)
def active_knocks(obj):
    """Return whether knocks are enabled for the model of ``obj`` (default True)."""
    if hasattr(_thread_locals, 'knock_enabled'):
        return _thread_locals.knock_enabled.get(obj.__class__, True)
    return True
def pause_knocks(obj):
    """Context manager that suspends sending knocks for the given model."""
    model = obj.__class__
    if not hasattr(_thread_locals, 'knock_enabled'):
        _thread_locals.knock_enabled = {}
    # Disconnect signal handlers and flag the model as disabled for the block.
    model._disconnect()
    _thread_locals.knock_enabled[model] = False
    yield
    _thread_locals.knock_enabled[model] = True
    model._connect()
def _loopreport(self):
    """Poll running tox actions and redraw the screen in an endless loop.

    Every 0.2s (cooperative eventlet sleep) this groups still-running
    subprocesses by activity, finalises actions whose subprocesses have all
    exited, and draws the next screen frame.
    """
    while 1:
        eventlet.sleep(0.2)
        ac2popenlist = {}
        for action in self.session._actions:
            for popen in action._popenlist:
                # poll() is None while the subprocess is still running.
                if popen.poll() is None:
                    lst = ac2popenlist.setdefault(action.activity, [])
                    lst.append(popen)
            # All subprocesses done and finish was deferred: log it now.
            if not action._popenlist and action in self._actionmayfinish:
                super(RetoxReporter, self).logaction_finish(action)
                self._actionmayfinish.remove(action)
        self.screen.draw_next_frame(repeat=False)
def send(email, subject=None, from_email=None, to_email=None, cc=None, bcc=None,
         reply_to=None, smtp=None):
    """Send a markdown email.

    ``email`` may be raw markdown text (converted to EmailContent) or an
    EmailContent instance. Explicit keyword arguments override the
    corresponding headers embedded in the email content; addresses are
    sanitized before use. Inline images from the content are attached, and
    the message is sent via the given ``smtp`` configuration.
    """
    if is_string(email):
        email = EmailContent(email)
    # Explicit arguments win over headers parsed from the markdown content.
    from_email = sanitize_email_address(from_email or email.headers.get('from'))
    to_email = sanitize_email_address(to_email or email.headers.get('to'))
    cc = sanitize_email_address(cc or email.headers.get('cc'))
    bcc = sanitize_email_address(bcc or email.headers.get('bcc'))
    reply_to = sanitize_email_address(reply_to or email.headers.get('reply-to'))
    message_args = {'html': email.html,
                    'text': email.text,
                    'subject': (subject or email.headers.get('subject', '')),
                    'mail_from': from_email,
                    'mail_to': to_email}
    if cc:
        message_args['cc'] = cc
    if bcc:
        message_args['bcc'] = bcc
    if reply_to:
        message_args['headers'] = {'reply-to': reply_to}
    message = emails.Message(**message_args)
    for filename, data in email.inline_images:
        message.attach(filename=filename, content_disposition='inline', data=data)
    message.send(smtp=smtp)
def _process_tz(self, dt, naive, tz):
    """Process timezone casting and conversion for a datetime value.

    ``naive`` is the timezone to *assume* for naive datetimes; ``tz`` is the
    timezone to convert to. Either may be None, the string 'naive' or
    'local', a tzname string, or a tzinfo instance.
    """
    def _tz(t):
        """Normalise a timezone argument to a tzinfo (or None / 'naive')."""
        if t in (None, 'naive'):
            return t
        if t == 'local':
            if __debug__ and not localtz:
                raise ValueError("Requested conversion to local timezone, but `localtz` not installed.")
            t = localtz
        if not isinstance(t, tzinfo):
            # A string name needs pytz to resolve it.
            if __debug__ and not localtz:
                raise ValueError("The `pytz` package must be installed to look up timezone: " + repr(t))
            t = get_tz(t)
        # Re-resolve zones lacking pytz's normalize() through their tzname.
        if not hasattr(t, 'normalize') and get_tz:
            t = get_tz(t.tzname(dt))
        return t
    naive = _tz(naive)
    tz = _tz(tz)
    # Attach the assumed zone to naive datetimes first.
    if not dt.tzinfo and naive:
        if hasattr(naive, 'localize'):
            dt = naive.localize(dt)
        else:
            dt = dt.replace(tzinfo=naive)
    if not tz:
        return dt
    # pytz zones need normalize() after astimezone for correct DST handling.
    if hasattr(tz, 'normalize'):
        dt = tz.normalize(dt.astimezone(tz))
    elif tz == 'naive':
        dt = dt.replace(tzinfo=None)
    else:
        dt = dt.astimezone(tz)
    return dt
def _prepare_defaults(self):
    """Trigger assignment of default values on fields marked ``assign``."""
    for field_name, field_obj in self.__fields__.items():
        if not field_obj.assign:
            continue
        # Attribute access materialises the field's default value.
        getattr(self, field_name)
def from_mongo(cls, doc):
    """Convert data coming in from the MongoDB wire driver into a Document instance.

    Passes through None and existing Document instances unchanged. When the
    class records its type in the stored document (``__type_store__``), the
    concrete subclass is loaded from the 'marrow.mongo.document' registry.
    """
    if doc is None:
        return None
    if isinstance(doc, Document):
        return doc
    # Polymorphic load: pick the subclass recorded in the document itself.
    if cls.__type_store__ and cls.__type_store__ in doc:
        cls = load(doc[cls.__type_store__], 'marrow.mongo.document')
    # Defaults are applied only after the raw data is attached.
    instance = cls(_prepare_defaults=False)
    instance.__data__ = doc
    instance._prepare_defaults()
    return instance
def pop(self, name, default=SENTINEL):
    """Retrieve and remove a value from the backing store, optionally with a default."""
    if default is not SENTINEL:
        return self.__data__.pop(name, default)
    # No default supplied: let the underlying mapping raise KeyError.
    return self.__data__.pop(name)
def _op(self, operation, other, *allowed):
    """A basic filter operation operating on a single value.

    Builds ``{name: {operation: value}}``, transforming ``other`` into its
    foreign (MongoDB) representation. Combining queries distribute the
    operation across their fields via reduce.
    """
    f = self._field
    if self._combining:
        # Distribute the operation over each combined field, then fold.
        return reduce(self._combining, (q._op(operation, other, *allowed) for q in f))
    # Development-time guard: stripped under -O via __debug__.
    if __debug__ and _complex_safety_check(f, {operation} | set(allowed)):
        raise NotImplementedError("{self!r} does not allow {op} comparison.".format(self=self, op=operation))
    if other is not None:
        other = f.transformer.foreign(other, (f, self._document))
    return Filter({self._name: {operation: other}})
def _iop(self, operation, other, *allowed):
    """An iterative filter operation operating on multiple values.

    Consumes iterators to construct a concrete list at time of execution,
    transforming each value to its foreign (MongoDB) representation. A single
    argument is treated as the iterable of values itself.
    """
    f = self._field
    if self._combining:
        # Distribute the operation over each combined field, then fold.
        return reduce(self._combining, (q._iop(operation, other, *allowed) for q in f))
    # Development-time guard: stripped under -O via __debug__.
    if __debug__ and _complex_safety_check(f, {operation} | set(allowed)):
        raise NotImplementedError("{self!r} does not allow {op} comparison.".format(self=self, op=operation))
    def _t(o):
        for value in o:
            yield None if value is None else f.transformer.foreign(value, (f, self._document))
    # One positional argument means "here is the iterable"; several mean
    # "these are the values".
    other = other if len(other) > 1 else other[0]
    values = list(_t(other))
    return Filter({self._name: {operation: values}})