Dataset columns:
  query      string   (lengths 5 - 1.23k)
  positive   string   (lengths 53 - 15.2k)
  id_        int64    (values 0 - 252k)
  task_name  string   (lengths 87 - 242)
  negative   sequence (lengths 20 - 553)
add index to id to make it unique with respect to ids
def append_index_id(id, ids):
    index = 1
    mod = '%s_%s' % (id, index)
    while mod in ids:
        index += 1
        mod = '%s_%s' % (id, index)
    ids.append(mod)
    return mod, ids
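A minimal usage sketch of the helper above; the sample ids and the expected suffix are illustrative assumptions, not values from the source.

# Hypothetical example: 'seq1' and 'seq1_1' are already taken, so the new copy becomes 'seq1_2'.
ids = ['seq1', 'seq1_1']
mod, ids = append_index_id('seq1', ids)
print(mod)  # seq1_2
print(ids)  # ['seq1', 'seq1_1', 'seq1_2']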
200
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/nr_fasta.py#L11-L21
[ "def", "_parseDelayImportDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "return", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")" ]
de-replicate fastas based on sequence names
def de_rep(fastas, append_index, return_original=False):
    ids = []
    for fasta in fastas:
        for seq in parse_fasta(fasta):
            header = seq[0].split('>')[1].split()
            id = header[0]
            if id not in ids:
                ids.append(id)
                if return_original is True:
                    yield [header, seq]
                else:
                    yield seq
            elif append_index == True:
                new, ids = append_index_id(id, ids)
                if return_original is True:
                    yield [header, ['>%s %s' % (new, ' '.join(header[1::])), seq[1]]]
                else:
                    yield ['>%s %s' % (new, ' '.join(header[1::])), seq[1]]
201
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/nr_fasta.py#L23-L43
[ "def", "open", "(", "self", ")", ":", "sess_id", "=", "self", ".", "_get_sess_id", "(", ")", "if", "sess_id", ":", "self", ".", "application", ".", "pc", ".", "websockets", "[", "self", ".", "_get_sess_id", "(", ")", "]", "=", "self", "self", ".", "write_message", "(", "json", ".", "dumps", "(", "{", "\"cmd\"", ":", "\"status\"", ",", "\"status\"", ":", "\"open\"", "}", ")", ")", "else", ":", "self", ".", "write_message", "(", "json", ".", "dumps", "(", "{", "\"cmd\"", ":", "\"error\"", ",", "\"error\"", ":", "\"Please login\"", ",", "\"code\"", ":", "401", "}", ")", ")" ]
Request data associated with postcode.
def get(postcode):
    postcode = quote(postcode.replace(' ', ''))
    url = '%s/postcode/%s.json' % (END_POINT, postcode)
    return _get_json_resp(url)
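A minimal sketch of how the request URL is assembled; the END_POINT value and the postcode used here are assumptions for illustration only.

from urllib.parse import quote

END_POINT = 'http://www.uk-postcodes.com/api'  # assumed base URL, purely illustrative
postcode = quote('SW1A 1AA'.replace(' ', ''))
print('%s/postcode/%s.json' % (END_POINT, postcode))
# http://www.uk-postcodes.com/api/postcode/SW1A1AA.json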
202
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L22-L34
[ "def", "_randomize_subject_list", "(", "data_list", ",", "random", ")", ":", "if", "random", "==", "RandomType", ".", "REPRODUCIBLE", ":", "for", "i", "in", "range", "(", "len", "(", "data_list", ")", ")", ":", "_randomize_single_subject", "(", "data_list", "[", "i", "]", ",", "seed", "=", "i", ")", "elif", "random", "==", "RandomType", ".", "UNREPRODUCIBLE", ":", "for", "data", "in", "data_list", ":", "_randomize_single_subject", "(", "data", ")" ]
Request all postcode data within distance miles of postcode.
def get_from_postcode(postcode, distance):
    postcode = quote(postcode.replace(' ', ''))
    return _get_from(distance, 'postcode=%s' % postcode)
203
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L56-L69
[ "def", "load_config", "(", "logdir", ")", ":", "# pylint: disable=missing-raises-doc", "config_path", "=", "logdir", "and", "os", ".", "path", ".", "join", "(", "logdir", ",", "'config.yaml'", ")", "if", "not", "config_path", "or", "not", "tf", ".", "gfile", ".", "Exists", "(", "config_path", ")", ":", "message", "=", "(", "'Cannot resume an existing run since the logging directory does not '", "'contain a configuration file.'", ")", "raise", "IOError", "(", "message", ")", "with", "tf", ".", "gfile", ".", "FastGFile", "(", "config_path", ",", "'r'", ")", "as", "file_", ":", "config", "=", "yaml", ".", "load", "(", "file_", ",", "Loader", "=", "yaml", ".", "Loader", ")", "message", "=", "'Resume run and write summaries and checkpoints to {}.'", "tf", ".", "logging", ".", "info", "(", "message", ".", "format", "(", "config", ".", "logdir", ")", ")", "return", "config" ]
Checks if latitude and longitude are correct
def _check_point(self, lat, lng):
    if abs(lat) > 90 or abs(lng) > 180:
        msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
        raise IllegalPointException(msg)
204
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L123-L127
[ "def", "_parse_materials", "(", "header", ",", "views", ")", ":", "try", ":", "import", "PIL", ".", "Image", "except", "ImportError", ":", "log", ".", "warning", "(", "\"unable to load textures without pillow!\"", ")", "return", "None", "# load any images", "images", "=", "None", "if", "\"images\"", "in", "header", ":", "# images are referenced by index", "images", "=", "[", "None", "]", "*", "len", "(", "header", "[", "\"images\"", "]", ")", "# loop through images", "for", "i", ",", "img", "in", "enumerate", "(", "header", "[", "\"images\"", "]", ")", ":", "# get the bytes representing an image", "blob", "=", "views", "[", "img", "[", "\"bufferView\"", "]", "]", "# i.e. 'image/jpeg'", "# mime = img['mimeType']", "try", ":", "# load the buffer into a PIL image", "images", "[", "i", "]", "=", "PIL", ".", "Image", ".", "open", "(", "util", ".", "wrap_as_stream", "(", "blob", ")", ")", "except", "BaseException", ":", "log", ".", "error", "(", "\"failed to load image!\"", ",", "exc_info", "=", "True", ")", "# store materials which reference images", "materials", "=", "[", "]", "if", "\"materials\"", "in", "header", ":", "for", "mat", "in", "header", "[", "\"materials\"", "]", ":", "# flatten key structure so we can loop it", "loopable", "=", "mat", ".", "copy", "(", ")", "# this key stores another dict of crap", "if", "\"pbrMetallicRoughness\"", "in", "loopable", ":", "# add keys of keys to top level dict", "loopable", ".", "update", "(", "loopable", ".", "pop", "(", "\"pbrMetallicRoughness\"", ")", ")", "# save flattened keys we can use for kwargs", "pbr", "=", "{", "}", "for", "k", ",", "v", "in", "loopable", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "dict", ")", ":", "pbr", "[", "k", "]", "=", "v", "elif", "\"index\"", "in", "v", ":", "# get the index of image for texture", "idx", "=", "header", "[", "\"textures\"", "]", "[", "v", "[", "\"index\"", "]", "]", "[", "\"source\"", "]", "# store the actual image as the value", "pbr", "[", "k", "]", "=", "images", "[", "idx", "]", "# create a PBR material object for the GLTF material", "materials", ".", "append", "(", "visual", ".", "texture", ".", "PBRMaterial", "(", "*", "*", "pbr", ")", ")", "return", "materials" ]
Checks for cached responses before requesting from the web service
def _lookup(self, skip_cache, fun, *args, **kwargs):
    if args not in self.cache or skip_cache:
        self.cache[args] = fun(*args, **kwargs)
    return self.cache[args]
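A self-contained sketch of the same memoisation pattern, using a stand-in class and a plain dict cache; all names below are assumptions for illustration, not part of the source.

class CachedClient:
    def __init__(self):
        self.cache = {}

    def _lookup(self, skip_cache, fun, *args, **kwargs):
        # Re-fetch when the args are unseen or the caller forces a refresh.
        if args not in self.cache or skip_cache:
            self.cache[args] = fun(*args, **kwargs)
        return self.cache[args]

client = CachedClient()
fetch = lambda lat, lng: {'lat': lat, 'lng': lng}   # stand-in for the web-service call
first = client._lookup(False, fetch, 51.5, -0.1)    # performs the call
second = client._lookup(False, fetch, 51.5, -0.1)   # served from the cache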
205
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L129-L136
[ "def", "get_fptr", "(", "self", ")", ":", "cmpfunc", "=", "ctypes", ".", "CFUNCTYPE", "(", "ctypes", ".", "c_int", ",", "WPARAM", ",", "LPARAM", ",", "ctypes", ".", "POINTER", "(", "KBDLLHookStruct", ")", ")", "return", "cmpfunc", "(", "self", ".", "handle_input", ")" ]
Calls postcodes.get_nearest but checks correctness of lat and lng and by default utilises a local cache.
def get_nearest(self, lat, lng, skip_cache=False):
    lat, lng = float(lat), float(lng)
    self._check_point(lat, lng)
    return self._lookup(skip_cache, get_nearest, lat, lng)
206
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L152-L167
[ "def", "update_task_redundancy", "(", "config", ",", "task_id", ",", "redundancy", ")", ":", "if", "task_id", "is", "None", ":", "msg", "=", "(", "\"Are you sure you want to update all the tasks redundancy?\"", ")", "if", "click", ".", "confirm", "(", "msg", ")", ":", "res", "=", "_update_tasks_redundancy", "(", "config", ",", "task_id", ",", "redundancy", ")", "click", ".", "echo", "(", "res", ")", "else", ":", "click", ".", "echo", "(", "\"Aborting.\"", ")", "else", ":", "res", "=", "_update_tasks_redundancy", "(", "config", ",", "task_id", ",", "redundancy", ")", "click", ".", "echo", "(", "res", ")" ]
Calls postcodes.get_from_postcode but checks correctness of distance and by default utilises a local cache.
def get_from_postcode(self, postcode, distance, skip_cache=False):
    distance = float(distance)
    if distance < 0:
        raise IllegalDistanceException("Distance must not be negative")
    # remove spaces and change case here due to caching
    postcode = postcode.lower().replace(' ', '')
    return self._lookup(skip_cache, get_from_postcode, postcode, float(distance))
207
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L169-L189
[ "def", "rewind", "(", "self", ")", ":", "if", "self", ".", "mode", "!=", "READ", ":", "raise", "OSError", "(", "\"Can't rewind in write mode\"", ")", "self", ".", "fileobj", ".", "seek", "(", "0", ")", "self", ".", "_new_member", "=", "True", "self", ".", "extrabuf", "=", "b\"\"", "self", ".", "extrasize", "=", "0", "self", ".", "extrastart", "=", "0", "self", ".", "offset", "=", "0" ]
Calls postcodes.get_from_geo but checks the correctness of all arguments and by default utilises a local cache.
def get_from_geo(self, lat, lng, distance, skip_cache=False):
    # remove spaces and change case here due to caching
    lat, lng, distance = float(lat), float(lng), float(distance)
    if distance < 0:
        raise IllegalDistanceException("Distance must not be negative")
    self._check_point(lat, lng)
    return self._lookup(skip_cache, get_from_geo, lat, lng, distance)
208
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L191-L210
[ "def", "toggle", "(", "self", ",", "rows", ")", ":", "for", "r", "in", "Progress", "(", "rows", ",", "'toggling'", ",", "total", "=", "len", "(", "self", ".", "rows", ")", ")", ":", "if", "not", "self", ".", "unselectRow", "(", "r", ")", ":", "self", ".", "selectRow", "(", "r", ")" ]
get coordinates of insertions from insertion-masked sequence
def insertions_from_masked(seq):
    insertions = []
    prev = True
    for i, base in enumerate(seq):
        if base.isupper() and prev is True:
            insertions.append([])
            prev = False
        elif base.islower():
            insertions[-1].append(i)
            prev = True
    return [[min(i), max(i)] for i in insertions if i != []]
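A small illustrative call, assuming the usual convention that lowercase bases mark inserted positions within an otherwise uppercase sequence; the example string is made up.

# Made-up masked sequence: positions 4-6 are lowercase, i.e. one insertion.
print(insertions_from_masked('ACGTacgTTGC'))  # [[4, 6]]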
209
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L15-L28
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
get insertion information from header
def seq_info(names, id2names, insertions, sequences):
    seqs = {}  # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
    for name in names:
        id = id2names[name]
        gene = name.split('fromHMM::', 1)[0].rsplit(' ', 1)[1]
        model = name.split('fromHMM::', 1)[1].split('=', 1)[1].split()[0]
        # coordinates of each insertion wrt gene
        i_gene_pos = insertions[id]
        # model overlap
        i_model_pos = name.split('fromHMM::', 1)[1].split('model-pos(ins-len)=')[1].split()[0].split(';')
        i_info = []
        for i, ins in enumerate(i_gene_pos):
            model_pos = i_model_pos[i].split('-')[1].split('(')[0]
            length = i_model_pos[i].split('(')[1].split(')')[0]
            iheader = '>%s_%s insertion::seq=%s type=insertion strand=n/a gene-pos=%s-%s model-pos=%s' % \
                (id, (i + 1), (i + 1), ins[0], ins[1], model_pos)
            iseq = sequences[id][1][ins[0]:(ins[1] + 1)]
            iseq = [iheader, iseq]
            info = [ins, model_pos, length, iseq, [], []]
            i_info.append(info)
        seqs[id] = [gene, model, i_info]
    return seqs
210
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L64-L86
[ "def", "remove_matching_braces", "(", "latex", ")", ":", "if", "latex", ".", "startswith", "(", "'{'", ")", "and", "latex", ".", "endswith", "(", "'}'", ")", ":", "opened", "=", "1", "matches", "=", "True", "for", "char", "in", "latex", "[", "1", ":", "-", "1", "]", ":", "if", "char", "==", "'{'", ":", "opened", "+=", "1", "elif", "char", "==", "'}'", ":", "opened", "-=", "1", "if", "opened", "==", "0", ":", "matches", "=", "False", "if", "matches", ":", "latex", "=", "latex", "[", "1", ":", "-", "1", "]", "return", "latex" ]
make sure thresh % of the feature is contained within the insertion
def check_overlap(pos, ins, thresh):
    ins_pos = ins[0]
    ins_len = ins[2]
    ol = overlap(ins_pos, pos)
    feat_len = pos[1] - pos[0] + 1
    # print float(ol) / float(feat_len)
    if float(ol) / float(feat_len) >= thresh:
        return True
    return False
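A self-contained sketch of the fraction check above; overlap() is not shown in this record, so a plausible interval-overlap helper and the list shape of ins are assumed here purely for illustration.

def overlap(a, b):
    # Assumed helper: length of the intersection of two inclusive intervals.
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

ins = [[100, 200], None, 101]   # [position, model_pos, length] -- shape assumed from check_overlap
feature = [150, 180]            # feature coordinates
ol = overlap(ins[0], feature)   # 31 overlapping positions
print(ol / (feature[1] - feature[0] + 1) >= 0.75)  # True: the whole feature sits in the insertion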
211
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L91-L102
[ "def", "list_configs", "(", ")", ":", "try", ":", "configs", "=", "snapper", ".", "ListConfigs", "(", ")", "return", "dict", "(", "(", "config", "[", "0", "]", ",", "config", "[", "2", "]", ")", "for", "config", "in", "configs", ")", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Error encountered while listing configurations: {0}'", ".", "format", "(", "_dbus_exception_to_reason", "(", "exc", ",", "locals", "(", ")", ")", ")", ")" ]
length of largest insertion
def max_insertion(seqs, gene, domain):
    seqs = [i[2] for i in list(seqs.values()) if i[2] != [] and i[0] == gene and i[1] == domain]
    lengths = []
    for seq in seqs:
        for ins in seq:
            lengths.append(int(ins[2]))
    if lengths == []:
        return 100
    return max(lengths)
212
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L305-L316
[ "def", "status", "(", "sec", ")", ":", "if", "_meta_", ".", "prg_bar", "in", "[", "\"on\"", ",", "\"ON\"", "]", ":", "syms", "=", "[", "\"|\"", ",", "\"/\"", ",", "\"-\"", ",", "\"\\\\\"", "]", "for", "sym", "in", "syms", ":", "sys", ".", "stdout", ".", "write", "(", "\"\\b{0}{1}{2}\"", ".", "format", "(", "_meta_", ".", "color", "[", "\"GREY\"", "]", ",", "sym", ",", "_meta_", ".", "color", "[", "\"ENDC\"", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "float", "(", "sec", ")", ")" ]
get length of model
def model_length(gene, domain):
    if gene == '16S':
        domain2max = {'E_coli_K12': 1538, 'bacteria': 1689, 'archaea': 1563, 'eukarya': 2652}
        return domain2max[domain]
    elif gene == '23S':
        domain2max = {'E_coli_K12': 2903, 'bacteria': 3146, 'archaea': 3774, 'eukarya': 9079}
        return domain2max[domain]
    else:
        # report the unknown combination to stderr before exiting
        print('# length unknown for gene: %s, domain: %s' % (gene, domain), file=sys.stderr)
        exit()
213
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L318-L330
[ "def", "_do_http", "(", "opts", ",", "profile", "=", "'default'", ")", ":", "ret", "=", "{", "}", "url", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:url'", ".", "format", "(", "profile", ")", ",", "''", ")", "user", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:user'", ".", "format", "(", "profile", ")", ",", "''", ")", "passwd", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:pass'", ".", "format", "(", "profile", ")", ",", "''", ")", "realm", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:realm'", ".", "format", "(", "profile", ")", ",", "''", ")", "timeout", "=", "__salt__", "[", "'config.get'", "]", "(", "'modjk:{0}:timeout'", ".", "format", "(", "profile", ")", ",", "''", ")", "if", "not", "url", ":", "raise", "Exception", "(", "'missing url in profile {0}'", ".", "format", "(", "profile", ")", ")", "if", "user", "and", "passwd", ":", "auth", "=", "_auth", "(", "url", "=", "url", ",", "realm", "=", "realm", ",", "user", "=", "user", ",", "passwd", "=", "passwd", ")", "_install_opener", "(", "auth", ")", "url", "+=", "'?{0}'", ".", "format", "(", "_urlencode", "(", "opts", ")", ")", "for", "line", "in", "_urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "splt", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "splt", "[", "0", "]", "in", "ret", ":", "ret", "[", "splt", "[", "0", "]", "]", "+=", "',{0}'", ".", "format", "(", "splt", "[", "1", "]", ")", "else", ":", "ret", "[", "splt", "[", "0", "]", "]", "=", "splt", "[", "1", "]", "return", "ret" ]
set up a unique marker for every ORF annotation; change the marker size if necessary
def setup_markers(seqs):
    family2marker = {}  # family2marker[family] = [marker, size]
    markers = cycle(['^', 'p', '*', '+', 'x', 'd', '|', 'v', '>', '<', '8'])
    size = 60
    families = []
    for seq in list(seqs.values()):
        for insertion in seq[2]:
            for family in list(insertion[-1].values()):
                if family not in families:
                    families.append(family)
    for family in families:
        marker = next(markers)
        if marker == '^':
            size = size * 0.5
        family2marker[family] = [marker, size]
    return family2marker
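A short sketch of the marker-cycling idea above, using itertools.cycle directly; the family names are invented for illustration.

from itertools import cycle

markers = cycle(['^', 'p', '*', '+', 'x'])
families = ['rnase', 'gtpase', 'unknown']                 # made-up ORF families
family2marker = {family: next(markers) for family in families}
print(family2marker)  # {'rnase': '^', 'gtpase': 'p', 'unknown': '*'}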
214
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L332-L351
[ "def", "on_end_validation", "(", "self", ",", "event", ")", ":", "self", ".", "Enable", "(", ")", "self", ".", "Show", "(", ")", "self", ".", "magic_gui_frame", ".", "Destroy", "(", ")" ]
plot insertions for each gene and domain
def plot_by_gene_and_domain(name, seqs, tax, id2name):
    for gene in set([seq[0] for seq in list(seqs.values())]):
        for domain in set([seq[1] for seq in list(seqs.values())]):
            plot_insertions(name, seqs, gene, domain, tax, id2name)
215
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L551-L557
[ "def", "configure_logger", "(", "self", ",", "tc_config_log_filename", "=", "None", ",", "tc_output_log_filename", "=", "None", ")", ":", "# Get config logger filename", "config_log_filename", "=", "DriverWrappersPool", ".", "get_configured_value", "(", "'Config_log_filename'", ",", "tc_config_log_filename", ",", "'logging.conf'", ")", "config_log_filename", "=", "os", ".", "path", ".", "join", "(", "DriverWrappersPool", ".", "config_directory", ",", "config_log_filename", ")", "# Configure logger only if logging filename has changed", "if", "self", ".", "config_log_filename", "!=", "config_log_filename", ":", "# Get output logger filename", "output_log_filename", "=", "DriverWrappersPool", ".", "get_configured_value", "(", "'Output_log_filename'", ",", "tc_output_log_filename", ",", "'toolium.log'", ")", "output_log_filename", "=", "os", ".", "path", ".", "join", "(", "DriverWrappersPool", ".", "output_directory", ",", "output_log_filename", ")", "output_log_filename", "=", "output_log_filename", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "try", ":", "logging", ".", "config", ".", "fileConfig", "(", "config_log_filename", ",", "{", "'logfilename'", ":", "output_log_filename", "}", ",", "False", ")", "except", "Exception", "as", "exc", ":", "print", "(", "\"[WARN] Error reading logging config file '{}': {}\"", ".", "format", "(", "config_log_filename", ",", "exc", ")", ")", "self", ".", "config_log_filename", "=", "config_log_filename", "self", ".", "output_log_filename", "=", "output_log_filename", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")" ]
get the description for each ORF
def get_descriptions(fastas):
    id2desc = {}
    for fasta in fastas:
        for seq in parse_fasta(fasta):
            header = seq[0].split('>')[1].split(' ')
            id = header[0]
            if len(header) > 1:
                desc = ' '.join(header[1:])
            else:
                desc = 'n/a'
            length = float(len([i for i in seq[1].strip() if i != '*']))
            id2desc[id] = [fasta, desc, length]
    return id2desc
216
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L37-L52
[ "def", "parse_setup", "(", "filepath", ")", ":", "# TODO: Need to parse setup.cfg and merge with the data from below", "# Monkey patch setuptools.setup to capture keyword arguments", "setup_kwargs", "=", "{", "}", "def", "setup_interceptor", "(", "*", "*", "kwargs", ")", ":", "setup_kwargs", ".", "update", "(", "kwargs", ")", "import", "setuptools", "setuptools_setup", "=", "setuptools", ".", "setup", "setuptools", ".", "setup", "=", "setup_interceptor", "# Manually compile setup.py", "with", "open", "(", "filepath", ",", "'r'", ")", "as", "f", ":", "code", "=", "compile", "(", "f", ".", "read", "(", ")", ",", "''", ",", "'exec'", ")", "setup", "=", "ModuleType", "(", "'setup'", ")", "exec", "(", "code", ",", "setup", ".", "__dict__", ")", "# Remove monkey patch", "setuptools", ".", "setup", "=", "setuptools_setup", "return", "setup_kwargs" ]
optimize later? slow ... should combine with calculate_threshold module
def print_genome_matrix(hits, fastas, id2desc, file_name):
    out = open(file_name, 'w')
    fastas = sorted(fastas)
    print('## percent identity between genomes', file=out)
    print('# - \t %s' % ('\t'.join(fastas)), file=out)
    for fasta in fastas:
        line = [fasta]
        for other in fastas:
            if other == fasta:
                average = '-'
            else:
                average = numpy.average([hits[fasta][other][i][3] for i in hits[fasta][other]])
            line.append(str(average))
        print('\t'.join(line), file=out)
    print('', file=out)
    print('## percent of orfs that are orthologous between genomes', file=out)
    print('# - \t %s' % ('\t'.join(fastas)), file=out)
    for fasta in fastas:
        line = [fasta]
        for other in fastas:
            if other == fasta:
                percent = '-'
            else:
                orthologs = float(len(hits[fasta][other]))
                orfs = float(len([i for i in id2desc if id2desc[i][0] == fasta]))
                percent = float(orthologs / orfs) * 100
            line.append(str(percent))
        print('\t'.join(line), file=out)
217
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L140-L171
[ "def", "from_non_aligned_residue_IDs", "(", "Chain", ",", "StartResidueID", ",", "EndResidueID", ",", "Sequence", "=", "None", ")", ":", "return", "PDBSection", "(", "Chain", ",", "PDB", ".", "ResidueID2String", "(", "StartResidueID", ")", ",", "PDB", ".", "ResidueID2String", "(", "EndResidueID", ")", ",", "Sequence", "=", "Sequence", ")" ]
compare genome to self to get the best possible bit score for each ORF
def self_compare(fastas, id2desc, algorithm):
    for fasta in fastas:
        blast = open(search(fasta, fasta, method=algorithm, alignment='local'))
        for hit in best_blast(blast, 1):
            id, bit = hit[0].split()[0], float(hit[-1])
            id2desc[id].append(bit)
    return id2desc
218
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L240-L249
[ "def", "_is_accepted_input", "(", "self", ",", "input_string", ")", ":", "ret", "=", "False", "accept_filter", "=", "(", "self", ".", "volume_string", ",", "\"http stream debug: \"", ")", "reject_filter", "=", "(", ")", "for", "n", "in", "accept_filter", ":", "if", "n", "in", "input_string", ":", "ret", "=", "True", "break", "if", "ret", ":", "for", "n", "in", "reject_filter", ":", "if", "n", "in", "input_string", ":", "ret", "=", "False", "break", "return", "ret" ]
if thresholds are not specified, calculate them based on the distribution of normalized bit scores
def calc_thresholds(rbh, file_name, thresholds=[False, False, False, False], stdevs=2):
    calc_threshold = thresholds[-1]
    norm_threshold = {}
    for pair in itertools.permutations([i for i in rbh], 2):
        if pair[0] not in norm_threshold:
            norm_threshold[pair[0]] = {}
        norm_threshold[pair[0]][pair[1]] = {}
    out = open(file_name, 'w')
    print('#### summary of rbh comparisons\n', file=out)
    comparisons = []
    for genome in rbh:
        for compare in rbh[genome]:
            pair = ''.join(sorted([genome, compare]))
            if pair in comparisons:
                continue
            comparisons.append(pair)
            scores = {'percent identity': [], 'e-value': [], 'bit score': [],
                      'normalized bit score': [], 'alignment length fraction': []}
            print('### blast between %s and %s\n' % (genome, compare), file=out)
            for id in rbh[genome][compare]:
                pident, length_fraction, e, bit, norm_bit = rbh[genome][compare][id][3:]
                scores['percent identity'].append(pident)
                scores['alignment length fraction'].append(length_fraction)
                scores['e-value'].append(e)
                scores['bit score'].append(bit)
                scores['normalized bit score'].append(norm_bit)
            if calc_threshold is True:
                norms = scores['normalized bit score']
                average = numpy.average(norms)
                std = numpy.std(norms)
                normal_thresh = average - (std * stdevs)
                print('## average normalized bit score: %s' % average, file=out)
                print('## standard deviation of normalized bit scores: %s' % std, file=out)
                print('## normalized bit score threshold set to: %s\n' % (normal_thresh), file=out)
                norm_threshold[genome][compare], norm_threshold[compare][genome] = normal_thresh, normal_thresh
            for score in scores:
                print('## %s' % (score), file=out)
                if len(scores[score]) > 0:
                    print('## average: %s' % numpy.average(scores[score]), file=out)
                # hist = histogram(scores[score], [])
                # for line in hist:
                #     print >> out, line
                print('', file=out)
    out.close()
    if calc_threshold is True:
        return thresholds[0:-1] + [norm_threshold]
    else:
        return thresholds
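A tiny numeric sketch of the cutoff rule used above (mean minus stdevs standard deviations of the normalized bit scores); the score values are invented.

import numpy

norms = [0.92, 0.88, 0.95, 0.60, 0.90]   # made-up normalized bit scores
stdevs = 2
threshold = numpy.average(norms) - stdevs * numpy.std(norms)
print(round(threshold, 3))  # pairs scoring below this value would be filtered out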
219
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L303-L352
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
make and split an rbh network
def neto(fastas, algorithm='usearch', e=0.01, bit=40, length=.65, norm_bit=False):
    thresholds = [e, bit, length, norm_bit]
    # get [fasta, description, length] for ORF id
    id2desc = get_descriptions(fastas)
    # get best possible bit score for each ORF (comparing with itself)
    # [fasta, description, length, bestbit]
    id2desc = self_compare(fastas, id2desc, algorithm)
    # pair wise genome comparisons {genome: {id: [match_type = 'rbh' or 'fbh', scores]}}
    hits = compare_genomes(fastas, id2desc, algorithm)
    calc_thresholds(hits, file_name='fbh.scores.summary.txt')
    rbh_network(id2desc, hits, file_name='fbh.network.edges.txt')
    # remove hits that are not reciprocal best blast hits
    hits, rbh = find_rbh(hits, id2desc)
    # print rbh score summary to rbh_score_summary.txt and calculate normalized
    # bit score cutoff for each pair of genomes, if desired
    thresholds = calc_thresholds(rbh, 'rbh.scores.summary.txt', thresholds)
    g = rbh_network(id2desc, rbh, file_name='rbh.network.edges.txt')
    filtered_g, filtered_rbh = rbh_network(id2desc, rbh, 'rbh.filtered.network.edges.txt', thresholds)
    calc_thresholds(filtered_rbh, file_name='rbh.filtered.scores.summary.txt')
    print_summary(filtered_g, fastas, id2desc, file_name='rbh.filtered.network.nodes.txt')
    print_network_matrix(filtered_g, fastas, id2desc, file_name='rbh.filtered.network.matrix.txt')
    print_genome_matrix(filtered_rbh, fastas, id2desc, file_name='rbh.filtered.network.genome_matrix.txt')
    split_g = split_network(filtered_g, id2desc, file_name='rbh.filtered.split.network.edges.txt')
    print_summary(split_g, fastas, id2desc, file_name='rbh.filtered.split.network.nodes.txt')
    print_network_matrix(split_g, fastas, id2desc, file_name='rbh.filtered.split.network.matrix.txt')
    return split_g
220
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L416-L445
[ "def", "feeds", "(", "self", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'feeds'", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_get", "(", "url", ")", ",", "200", ")", "del", "json", "[", "'ETag'", "]", "del", "json", "[", "'Last-Modified'", "]", "urls", "=", "[", "'timeline_url'", ",", "'user_url'", ",", "'current_user_public_url'", ",", "'current_user_url'", ",", "'current_user_actor_url'", ",", "'current_user_organization_url'", ",", "]", "for", "url", "in", "urls", ":", "json", "[", "url", "]", "=", "URITemplate", "(", "json", "[", "url", "]", ")", "links", "=", "json", ".", "get", "(", "'_links'", ",", "{", "}", ")", "for", "d", "in", "links", ".", "values", "(", ")", ":", "d", "[", "'href'", "]", "=", "URITemplate", "(", "d", "[", "'href'", "]", ")", "return", "json" ]
Collapses multiple dimensions into a single raster_info complex struct
def _parse_raster_info(self, prop=RASTER_INFO):
    raster_info = {}.fromkeys(_iso_definitions[prop], u'')
    # Ensure conversion of lists to newlines is in place
    raster_info['dimensions'] = get_default_for_complex_sub(
        prop=prop,
        subprop='dimensions',
        value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),
        xpath=self._data_map['_ri_num_dims']
    )
    xpath_root = self._get_xroot_for(prop)
    xpath_map = self._data_structures[prop]
    for dimension in parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):
        dimension_type = dimension['type'].lower()
        if dimension_type == 'vertical':
            raster_info['vertical_count'] = dimension['size']
        elif dimension_type == 'column':
            raster_info['column_count'] = dimension['size']
            raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
        elif dimension_type == 'row':
            raster_info['row_count'] = dimension['size']
            raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
    return raster_info if any(raster_info[k] for k in raster_info) else {}
221
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/iso_metadata_parser.py#L472-L502
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Derives multiple dimensions from a single raster_info complex struct
def _update_raster_info(self, **update_props):
    tree_to_update = update_props['tree_to_update']
    prop = update_props['prop']
    values = update_props.pop('values')

    # Update number of dimensions at raster_info root (applies to all dimensions below)
    xroot, xpath = None, self._data_map['_ri_num_dims']
    raster_info = [update_property(tree_to_update, xroot, xpath, prop, values.get('dimensions', u''))]

    # Derive vertical, longitude, and latitude dimensions from raster_info
    xpath_root = self._get_xroot_for(prop)
    xpath_map = self._data_structures[prop]

    v_dimension = {}
    if values.get('vertical_count'):
        v_dimension = v_dimension.fromkeys(xpath_map, u'')
        v_dimension['type'] = 'vertical'
        v_dimension['size'] = values.get('vertical_count', u'')

    x_dimension = {}
    if values.get('column_count') or values.get('x_resolution'):
        x_dimension = x_dimension.fromkeys(xpath_map, u'')
        x_dimension['type'] = 'column'
        x_dimension['size'] = values.get('column_count', u'')
        x_dimension['value'] = values.get('x_resolution', u'')

    y_dimension = {}
    if values.get('row_count') or values.get('y_resolution'):
        y_dimension = y_dimension.fromkeys(xpath_map, u'')
        y_dimension['type'] = 'row'
        y_dimension['size'] = values.get('row_count', u'')
        y_dimension['value'] = values.get('y_resolution', u'')

    # Update derived dimensions as complex list, and append affected elements for return
    update_props['prop'] = RASTER_DIMS
    update_props['values'] = [v_dimension, x_dimension, y_dimension]
    raster_info += update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)

    return raster_info
222
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/iso_metadata_parser.py#L622-L666
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Removes primitive type tags from an XPATH
def _trim_xpath(self, xpath, prop):
    xroot = self._get_xroot_for(prop)
    if xroot is None and isinstance(xpath, string_types):
        xtags = xpath.split(XPATH_DELIM)
        if xtags[-1] in _iso_tag_primitives:
            xroot = XPATH_DELIM.join(xtags[:-1])
    return xroot
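An illustrative trace of the trimming logic, with an invented XPath and an invented primitive-tag set; the real XPATH_DELIM and _iso_tag_primitives come from the parser module and may differ.

XPATH_DELIM = '/'                                      # assumed delimiter
_iso_tag_primitives = {'CharacterString', 'Decimal'}   # invented subset for the example

xpath = 'identificationInfo/citation/title/CharacterString'
xtags = xpath.split(XPATH_DELIM)
if xtags[-1] in _iso_tag_primitives:
    print(XPATH_DELIM.join(xtags[:-1]))  # identificationInfo/citation/title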
223
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/iso_metadata_parser.py#L691-L702
[ "def", "cli", "(", "sock", ",", "configs", ",", "modules", ",", "files", ",", "log", ",", "debug", ")", ":", "setup_logging", "(", "log", ",", "debug", ")", "config", "=", "join_configs", "(", "configs", ")", "# load python modules", "load_modules", "(", "modules", ")", "# load python files", "load_files", "(", "files", ")", "# summarize active events and callbacks", "summarize_events", "(", ")", "gloop", "=", "gevent", ".", "Greenlet", ".", "spawn", "(", "loop", ",", "sock", "=", "sock", ",", "config", "=", "config", ")", "gloop", ".", "start", "(", ")", "gloop", ".", "join", "(", ")" ]
Generates the app id for a given shortcut. Steam uses app ids as a unique identifier for games, but since shortcuts don't have a canonical server-side representation they need to be generated on the fly. The important part about this function is that it will generate the same app id as Steam does for a given shortcut.
def shortcut_app_id(shortcut):
    algorithm = Crc(width=32, poly=0x04C11DB7, reflect_in=True, xor_in=0xffffffff,
                    reflect_out=True, xor_out=0xffffffff)
    crc_input = ''.join([shortcut.exe, shortcut.name])
    high_32 = algorithm.bit_by_bit(crc_input) | 0x80000000
    full_64 = (high_32 << 32) | 0x02000000
    return str(full_64)
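The CRC parameters above (poly 0x04C11DB7, reflected, 0xFFFFFFFF in/out) are those of the standard CRC-32, so a minimal sketch of the same bit layout can be written with zlib; this equivalence is an assumption of the sketch, not something stated in the source, and the input strings are invented.

import zlib

def shortcut_app_id_sketch(exe, name):
    # High 32 bits: CRC-32 of exe+name with the top bit forced on;
    # low 32 bits: the constant 0x02000000.
    high_32 = zlib.crc32((exe + name).encode('utf-8')) | 0x80000000
    return str((high_32 << 32) | 0x02000000)

print(shortcut_app_id_sketch('"C:\\Games\\game.exe"', 'My Game'))  # illustrative values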
224
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/shortcuts.py#L9-L21
[ "def", "set_end_date", "(", "self", ",", "lifetime", ")", ":", "self", ".", "end_date", "=", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "datetime", ".", "timedelta", "(", "0", ",", "lifetime", ")", ")" ]
Execute git config.
def _config(self):
    cfg_wr = self.repo.config_writer()
    cfg_wr.add_section('user')
    cfg_wr.set_value('user', 'name', self.metadata.author)
    cfg_wr.set_value('user', 'email', self.metadata.email)
    cfg_wr.release()
225
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/vcs.py#L35-L41
[ "def", "revoke_session", "(", "self", ",", "sid", "=", "''", ",", "token", "=", "''", ")", ":", "if", "not", "sid", ":", "if", "token", ":", "sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "else", ":", "raise", "ValueError", "(", "'Need one of \"sid\" or \"token\"'", ")", "for", "typ", "in", "[", "'access_token'", ",", "'refresh_token'", ",", "'code'", "]", ":", "try", ":", "self", ".", "revoke_token", "(", "self", "[", "sid", "]", "[", "typ", "]", ",", "typ", ")", "except", "KeyError", ":", "# If no such token has been issued", "pass", "self", ".", "update", "(", "sid", ",", "revoked", "=", "True", ")" ]
Execute git remote add.
def _remote_add(self):
    self.repo.create_remote(
        'origin',
        'git@github.com:{username}/{repo}.git'.format(
            username=self.metadata.username,
            repo=self.metadata.name))
226
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/vcs.py#L47-L53
[ "def", "delete_everything", "(", "self", ")", ":", "for", "k", "in", "self", ".", "_backup_list", "(", "prefix", "=", "self", ".", "layout", ".", "basebackups", "(", ")", ")", ":", "self", ".", "_maybe_delete_key", "(", "k", ",", "'part of a base backup'", ")", "for", "k", "in", "self", ".", "_backup_list", "(", "prefix", "=", "self", ".", "layout", ".", "wal_directory", "(", ")", ")", ":", "self", ".", "_maybe_delete_key", "(", "k", ",", "'part of wal logs'", ")", "if", "self", ".", "deleter", ":", "self", ".", "deleter", ".", "close", "(", ")" ]
Starts execution of the script
def start(self):
    # invoke the appropriate sub-command as requested from command-line
    try:
        self.args.func()
    except SystemExit as e:
        if e.code != 0:
            raise
    except KeyboardInterrupt:
        self.log.warning("exited via keyboard interrupt")
    except:
        self.log.exception("exited start function")
        # set exit code so we know it did not end successfully
        # TODO different exit codes based on signals ?
    finally:
        self._flush_metrics_q.put(None, block=True)
        self._flush_metrics_q.put(None, block=True, timeout=1)

    self.log.debug("exited_successfully")
227
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/basescript.py#L67-L87
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
Define basic command-line arguments required by the script.
def define_baseargs(self, parser):
    parser.add_argument('--name', default=sys.argv[0],
        help='Name to identify this instance')
    parser.add_argument('--log-level', default=None,
        help='Logging level as picked from the logging module')
    parser.add_argument('--log-format', default=None,
        # TODO add more formats
        choices=("json", "pretty",),
        help=("Force the format of the logs. By default, if the "
              "command is from a terminal, print colorful logs. "
              "Otherwise print json."),
    )
    parser.add_argument('--log-file', default=None,
        help='Writes logs to log file if specified, default: %(default)s',
    )
    parser.add_argument('--quiet', default=False, action="store_true",
        help='if true, does not print logs to stderr, default: %(default)s',
    )
    parser.add_argument('--metric-grouping-interval', default=None, type=int,
        help='To group metrics based on time interval ex:10 i.e;(10 sec)',
    )
    parser.add_argument('--debug', default=False, action="store_true",
        help='To run the code in debug mode',
    )
228
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/basescript.py#L123-L151
[ "def", "cache_affected_objects_review_history", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Caching review_history ...\"", ")", "query", "=", "dict", "(", "portal_type", "=", "NEW_SENAITE_WORKFLOW_BINDINGS", ")", "brains", "=", "api", ".", "search", "(", "query", ",", "UID_CATALOG", ")", "total", "=", "len", "(", "brains", ")", "for", "num", ",", "brain", "in", "enumerate", "(", "brains", ")", ":", "if", "num", "%", "100", "==", "0", ":", "logger", ".", "info", "(", "\"Caching review_history: {}/{}\"", ".", "format", "(", "num", ",", "total", ")", ")", "review_history", "=", "get_review_history_for", "(", "brain", ")", "review_history_cache", "[", "api", ".", "get_uid", "(", "brain", ")", "]", "=", "review_history" ]
Basically turns a payload that looks like ' \n ' into ''. In the calling function, if this function returns '' then no object is added for that payload.
def cleanup_payload(self, payload):
    p = payload.replace('\n', '')
    p = p.rstrip()
    p = p.lstrip()
    return p
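A quick standalone trace of the cleanup described above, on a made-up payload.

# A payload of only a newline and surrounding spaces collapses to an empty string,
# which the caller then skips.
payload = ' \n '
p = payload.replace('\n', '').rstrip().lstrip()
print(repr(p))  # ''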
229
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/parser/source_coder.py#L73-L82
[ "def", "MemoryExceeded", "(", "self", ")", ":", "rss_size", "=", "self", ".", "proc", ".", "memory_info", "(", ")", ".", "rss", "return", "rss_size", "//", "1024", "//", "1024", ">", "config", ".", "CONFIG", "[", "\"Client.rss_max\"", "]" ]
Ensures complex property types have the correct default values
def get_default_for(prop, value):
    prop = prop.strip('_')     # Handle alternate props (leading underscores)
    val = reduce_value(value)  # Filtering of value happens here
    if prop in _COMPLEX_LISTS:
        return wrap_value(val)
    elif prop in _COMPLEX_STRUCTS:
        return val or {}
    else:
        return u'' if val is None else val
230
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L223-L234
[ "def", "write", "(", "self", ",", "text", ")", ":", "if", "text", ":", "if", "text", "[", "0", "]", "in", "'(w'", ":", "# write thread and wsgi messages to debug only", "self", ".", "log", ".", "debug", "(", "text", "[", ":", "-", "1", "]", ")", "return", "if", "self", ".", "access_log", ":", "self", ".", "access_log", ".", "write", "(", "text", ")", "self", ".", "log", ".", "info", "(", "text", "[", ":", "-", "1", "]", ")" ]
Either update the tree the default way or call the custom updater
def update_property(tree_to_update, xpath_root, xpaths, prop, values, supported=None):
    if supported and prop.startswith('_') and prop.strip('_') in supported:
        values = u''  # Remove alternate elements: write values only to primary location
    else:
        values = get_default_for(prop, values)  # Enforce defaults as required per property

    if not xpaths:
        return []
    elif not isinstance(xpaths, ParserProperty):
        return _update_property(tree_to_update, xpath_root, xpaths, values)
    else:
        # Call ParserProperty.set_prop without xpath_root (managed internally)
        return xpaths.set_prop(tree_to_update=tree_to_update, prop=prop, values=values)
231
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L392-L423
[ "def", "end_stream", "(", "self", ",", "stream_id", ")", ":", "with", "(", "yield", "from", "self", ".", "_get_stream", "(", "stream_id", ")", ".", "wlock", ")", ":", "yield", "from", "self", ".", "_resumed", ".", "wait", "(", ")", "self", ".", "_conn", ".", "end_stream", "(", "stream_id", ")", "self", ".", "_flush", "(", ")" ]
Default update operation for a single parser property. If xpaths contains one xpath, then one element per value will be inserted at that location in the tree_to_update; otherwise the number of values must match the number of xpaths.
def _update_property(tree_to_update, xpath_root, xpaths, values):
    # Inner function to update a specific XPATH with the values provided
    def update_element(elem, idx, root, path, vals):
        """ Internal helper function to encapsulate single item update """
        has_root = bool(root and len(path) > len(root) and path.startswith(root))
        path, attr = get_xpath_tuple(path)  # 'path/@attr' to ('path', 'attr')

        if attr:
            removed = [get_element(elem, path)]
            remove_element_attributes(removed[0], attr)
        elif not has_root:
            removed = wrap_value(remove_element(elem, path))
        else:
            path = get_xpath_branch(root, path)
            removed = [] if idx != 0 else [remove_element(e, path, True) for e in get_elements(elem, root)]

        if not vals:
            return removed

        items = []
        for i, val in enumerate(wrap_value(vals)):
            elem_to_update = elem
            if has_root:
                elem_to_update = insert_element(elem, (i + idx), root)
            val = val.decode('utf-8') if not isinstance(val, string_types) else val
            if not attr:
                items.append(insert_element(elem_to_update, i, path, val))
            elif path:
                items.append(insert_element(elem_to_update, i, path, **{attr: val}))
            else:
                set_element_attributes(elem_to_update, **{attr: val})
                items.append(elem_to_update)
        return items

    # Code to update each of the XPATHs with each of the values
    xpaths = reduce_value(xpaths)
    values = filter_empty(values)

    if isinstance(xpaths, string_types):
        return update_element(tree_to_update, 0, xpath_root, xpaths, values)
    else:
        each = []
        for index, xpath in enumerate(xpaths):
            value = values[index] if values else None
            each.extend(update_element(tree_to_update, index, xpath_root, xpath, value))
        return each
232
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L426-L486
[ "def", "create", "(", "cls", ",", "destination", ")", ":", "mdb_gz_b64", "=", "\"\"\"\\\n H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a\n jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+\n qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7\n 9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz\n w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/\n +qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7\n FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy\n +XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ\n l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm\n j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH\n uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE\n qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T\n xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq\n 1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF\n ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR\n lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq\n loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5\n hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5\n ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9\n Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4\n q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn\n pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b\n KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1\n vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD\n /ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN\n tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I\n Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA\n ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy\n tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+\n +QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A\n AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA\n AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V\n HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI\n qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz\n XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp\n fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc\n f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l\n 1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj\n vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX\n q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA\n \"\"\"", "pristine", "=", "StringIO", "(", ")", "pristine", ".", "write", "(", "base64", ".", "b64decode", "(", "mdb_gz_b64", ")", ")", "pristine", ".", "seek", "(", "0", ")", "pristine", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "pristine", ",", "mode", "=", "'rb'", ")", "with", "open", "(", 
"destination", ",", "'wb'", ")", "as", "handle", ":", "shutil", ".", "copyfileobj", "(", "pristine", ",", "handle", ")", "return", "cls", "(", "destination", ")" ]
Default validation for single complex data structure
def validate_complex(prop, value, xpath_map=None):
    if value is not None:
        validate_type(prop, value, dict)

        if prop in _complex_definitions:
            complex_keys = _complex_definitions[prop]
        else:
            complex_keys = {} if xpath_map is None else xpath_map

        for complex_prop, complex_val in iteritems(value):
            complex_key = '.'.join((prop, complex_prop))

            if complex_prop not in complex_keys:
                _validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))

            validate_type(complex_key, complex_val, (string_types, list))
233
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L572-L589
[ "def", "get_vocab", "(", "self", ",", "vocab_name", ",", "*", "*", "kwargs", ")", ":", "vocab_dict", "=", "self", ".", "__get_vocab_dict__", "(", "vocab_name", ",", "*", "*", "kwargs", ")", "filepaths", "=", "list", "(", "set", "(", "[", "os", ".", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "vocab_dict", "[", "'filename'", "]", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "vocab_dir", ",", "vocab_dict", "[", "'filename'", "]", ")", "]", ")", ")", "for", "path", "in", "filepaths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f_obj", ":", "vocab_dict", ".", "update", "(", "{", "\"name\"", ":", "vocab_name", ",", "\"data\"", ":", "f_obj", ".", "read", "(", ")", ",", "\"modified\"", ":", "os", ".", "path", ".", "getmtime", "(", "path", ")", "}", ")", "return", "vocab_dict", "download_locs", "=", "make_list", "(", "vocab_dict", ".", "get", "(", "'download'", ",", "[", "]", ")", ")", "for", "loc", "in", "download_locs", ":", "loc_web", "=", "urllib", ".", "request", ".", "urlopen", "(", "loc", ")", "# loc_file_date = date_parse(loc_web.info()['Last-Modified'])", "urllib", ".", "request", ".", "urlretrieve", "(", "loc", ",", "filepaths", "[", "0", "]", ")", "with", "open", "(", "filepaths", "[", "0", "]", ",", "'rb'", ")", "as", "f_obj", ":", "vocab_dict", ".", "update", "(", "{", "\"name\"", ":", "vocab_name", ",", "\"data\"", ":", "f_obj", ".", "read", "(", ")", ",", "\"modified\"", ":", "os", ".", "path", ".", "getmtime", "(", "filepaths", "[", "0", "]", ")", "}", ")", "return", "vocab_dict" ]
Default validation for Attribute Details data structure
def validate_complex_list(prop, value, xpath_map=None):
    if value is not None:
        validate_type(prop, value, (dict, list))

        if prop in _complex_definitions:
            complex_keys = _complex_definitions[prop]
        else:
            complex_keys = {} if xpath_map is None else xpath_map

        for idx, complex_struct in enumerate(wrap_value(value)):
            cs_idx = prop + '[' + str(idx) + ']'
            validate_type(cs_idx, complex_struct, dict)

            for cs_prop, cs_val in iteritems(complex_struct):
                cs_key = '.'.join((cs_idx, cs_prop))

                if cs_prop not in complex_keys:
                    _validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))

                if not isinstance(cs_val, list):
                    validate_type(cs_key, cs_val, (string_types, list))
                else:
                    for list_idx, list_val in enumerate(cs_val):
                        list_prop = cs_key + '[' + str(list_idx) + ']'
                        validate_type(list_prop, list_val, string_types)
234
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L592-L618
[ "def", "compress_pdf", "(", "pdf_fpath", ",", "output_fname", "=", "None", ")", ":", "import", "utool", "as", "ut", "ut", ".", "assertpath", "(", "pdf_fpath", ")", "suffix", "=", "'_'", "+", "ut", ".", "get_datestamp", "(", "False", ")", "+", "'_compressed'", "print", "(", "'pdf_fpath = %r'", "%", "(", "pdf_fpath", ",", ")", ")", "output_pdf_fpath", "=", "ut", ".", "augpath", "(", "pdf_fpath", ",", "suffix", ",", "newfname", "=", "output_fname", ")", "print", "(", "'output_pdf_fpath = %r'", "%", "(", "output_pdf_fpath", ",", ")", ")", "gs_exe", "=", "find_ghostscript_exe", "(", ")", "cmd_list", "=", "(", "gs_exe", ",", "'-sDEVICE=pdfwrite'", ",", "'-dCompatibilityLevel=1.4'", ",", "'-dNOPAUSE'", ",", "'-dQUIET'", ",", "'-dBATCH'", ",", "'-sOutputFile='", "+", "output_pdf_fpath", ",", "pdf_fpath", ")", "ut", ".", "cmd", "(", "*", "cmd_list", ")", "return", "output_pdf_fpath" ]
Default validation for Date Types data structure
def validate_dates ( prop , value , xpath_map = None ) : if value is not None : validate_type ( prop , value , dict ) date_keys = set ( value ) if date_keys : if DATE_TYPE not in date_keys or DATE_VALUES not in date_keys : if prop in _complex_definitions : complex_keys = _complex_definitions [ prop ] else : complex_keys = _complex_definitions [ DATES ] if xpath_map is None else xpath_map _validation_error ( prop , None , value , ( 'keys: {0}' . format ( ',' . join ( complex_keys ) ) ) ) date_type = value [ DATE_TYPE ] if date_type not in DATE_TYPES : _validation_error ( 'dates.type' , None , date_type , DATE_TYPES ) date_vals = value [ DATE_VALUES ] validate_type ( 'dates.values' , date_vals , list ) dates_len = len ( date_vals ) if date_type == DATE_TYPE_MISSING and dates_len != 0 : _validation_error ( 'len(dates.values)' , None , dates_len , 0 ) if date_type == DATE_TYPE_SINGLE and dates_len != 1 : _validation_error ( 'len(dates.values)' , None , dates_len , 1 ) if date_type == DATE_TYPE_RANGE and dates_len != 2 : _validation_error ( 'len(dates.values)' , None , dates_len , 2 ) if date_type == DATE_TYPE_MULTIPLE and dates_len < 2 : _validation_error ( 'len(dates.values)' , None , dates_len , 'at least two' ) for idx , date in enumerate ( date_vals ) : date_key = 'dates.value[' + str ( idx ) + ']' validate_type ( date_key , date , string_types )
235
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L621-L663
[ "def", "_unregister_bundle_factories", "(", "self", ",", "bundle", ")", ":", "# type: (Bundle) -> None", "with", "self", ".", "__factories_lock", ":", "# Find out which factories must be removed", "to_remove", "=", "[", "factory_name", "for", "factory_name", "in", "self", ".", "__factories", "if", "self", ".", "get_factory_bundle", "(", "factory_name", ")", "is", "bundle", "]", "# Remove all of them", "for", "factory_name", "in", "to_remove", ":", "try", ":", "self", ".", "unregister_factory", "(", "factory_name", ")", "except", "ValueError", "as", "ex", ":", "_logger", ".", "warning", "(", "\"Error unregistering factory '%s': %s\"", ",", "factory_name", ",", "ex", ")" ]
Default validation for Process Steps data structure
def validate_process_steps ( prop , value ) : if value is not None : validate_type ( prop , value , ( dict , list ) ) procstep_keys = set ( _complex_definitions [ prop ] ) for idx , procstep in enumerate ( wrap_value ( value ) ) : ps_idx = prop + '[' + str ( idx ) + ']' validate_type ( ps_idx , procstep , dict ) for ps_prop , ps_val in iteritems ( procstep ) : ps_key = '.' . join ( ( ps_idx , ps_prop ) ) if ps_prop not in procstep_keys : _validation_error ( prop , None , value , ( 'keys: {0}' . format ( ',' . join ( procstep_keys ) ) ) ) if ps_prop != 'sources' : validate_type ( ps_key , ps_val , string_types ) else : validate_type ( ps_key , ps_val , ( string_types , list ) ) for src_idx , src_val in enumerate ( wrap_value ( ps_val ) ) : src_key = ps_key + '[' + str ( src_idx ) + ']' validate_type ( src_key , src_val , string_types )
236
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L666-L691
[ "def", "create_api_call", "(", "func", ",", "settings", ")", ":", "def", "base_caller", "(", "api_call", ",", "_", ",", "*", "args", ")", ":", "\"\"\"Simply call api_call and ignore settings.\"\"\"", "return", "api_call", "(", "*", "args", ")", "def", "inner", "(", "request", ",", "options", "=", "None", ")", ":", "\"\"\"Invoke with the actual settings.\"\"\"", "this_options", "=", "_merge_options_metadata", "(", "options", ",", "settings", ")", "this_settings", "=", "settings", ".", "merge", "(", "this_options", ")", "if", "this_settings", ".", "retry", "and", "this_settings", ".", "retry", ".", "retry_codes", ":", "api_call", "=", "gax", ".", "retry", ".", "retryable", "(", "func", ",", "this_settings", ".", "retry", ",", "*", "*", "this_settings", ".", "kwargs", ")", "else", ":", "api_call", "=", "gax", ".", "retry", ".", "add_timeout_arg", "(", "func", ",", "this_settings", ".", "timeout", ",", "*", "*", "this_settings", ".", "kwargs", ")", "api_call", "=", "_catch_errors", "(", "api_call", ",", "gax", ".", "config", ".", "API_ERRORS", ")", "return", "api_caller", "(", "api_call", ",", "this_settings", ",", "request", ")", "if", "settings", ".", "page_descriptor", ":", "if", "settings", ".", "bundler", "and", "settings", ".", "bundle_descriptor", ":", "raise", "ValueError", "(", "'The API call has incompatible settings: '", "'bundling and page streaming'", ")", "api_caller", "=", "_page_streamable", "(", "settings", ".", "page_descriptor", ")", "elif", "settings", ".", "bundler", "and", "settings", ".", "bundle_descriptor", ":", "api_caller", "=", "_bundleable", "(", "settings", ".", "bundle_descriptor", ")", "else", ":", "api_caller", "=", "base_caller", "return", "inner" ]
Default validation for all types
def validate_type ( prop , value , expected ) : # Validate on expected type(s), but ignore None: defaults handled elsewhere if value is not None and not isinstance ( value , expected ) : _validation_error ( prop , type ( value ) . __name__ , None , expected )
237
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L710-L715
[ "def", "handlePortfolio", "(", "self", ",", "msg", ")", ":", "# log handler msg", "self", ".", "log_msg", "(", "\"portfolio\"", ",", "msg", ")", "# contract identifier", "contract_tuple", "=", "self", ".", "contract_to_tuple", "(", "msg", ".", "contract", ")", "contractString", "=", "self", ".", "contractString", "(", "contract_tuple", ")", "# try creating the contract", "self", ".", "registerContract", "(", "msg", ".", "contract", ")", "# new account?", "if", "msg", ".", "accountName", "not", "in", "self", ".", "_portfolios", ".", "keys", "(", ")", ":", "self", ".", "_portfolios", "[", "msg", ".", "accountName", "]", "=", "{", "}", "self", ".", "_portfolios", "[", "msg", ".", "accountName", "]", "[", "contractString", "]", "=", "{", "\"symbol\"", ":", "contractString", ",", "\"position\"", ":", "int", "(", "msg", ".", "position", ")", ",", "\"marketPrice\"", ":", "float", "(", "msg", ".", "marketPrice", ")", ",", "\"marketValue\"", ":", "float", "(", "msg", ".", "marketValue", ")", ",", "\"averageCost\"", ":", "float", "(", "msg", ".", "averageCost", ")", ",", "\"unrealizedPNL\"", ":", "float", "(", "msg", ".", "unrealizedPNL", ")", ",", "\"realizedPNL\"", ":", "float", "(", "msg", ".", "realizedPNL", ")", ",", "\"totalPNL\"", ":", "float", "(", "msg", ".", "realizedPNL", ")", "+", "float", "(", "msg", ".", "unrealizedPNL", ")", ",", "\"account\"", ":", "msg", ".", "accountName", "}", "# fire callback", "self", ".", "ibCallback", "(", "caller", "=", "\"handlePortfolio\"", ",", "msg", "=", "msg", ")" ]
Default validation for updated properties
def _validation_error ( prop , prop_type , prop_value , expected ) : if prop_type is None : attrib = 'value' assigned = prop_value else : attrib = 'type' assigned = prop_type raise ValidationError ( 'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}' , attrib = attrib , prop = prop , assigned = assigned , expected = expected , invalid = { prop : prop_value } if attrib == 'value' else { } )
238
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L718-L732
[ "def", "handle_exit_code", "(", "d", ",", "code", ")", ":", "# d is supposed to be a Dialog instance", "if", "code", "in", "(", "d", ".", "DIALOG_CANCEL", ",", "d", ".", "DIALOG_ESC", ")", ":", "if", "code", "==", "d", ".", "DIALOG_CANCEL", ":", "msg", "=", "\"You chose cancel in the last dialog box. Do you want to \"", "\"exit this demo?\"", "else", ":", "msg", "=", "\"You pressed ESC in the last dialog box. Do you want to \"", "\"exit this demo?\"", "# \"No\" or \"ESC\" will bring the user back to the demo.", "# DIALOG_ERROR is propagated as an exception and caught in main().", "# So we only need to handle OK here.", "if", "d", ".", "yesno", "(", "msg", ")", "==", "d", ".", "DIALOG_OK", ":", "sys", ".", "exit", "(", "0", ")", "return", "0", "else", ":", "return", "1" ]
Calls the getter with no arguments and returns its value
def get_prop ( self , prop ) : if self . _parser is None : raise ConfigurationError ( 'Cannot call ParserProperty."get_prop" with no parser configured' ) return self . _parser ( prop ) if prop else self . _parser ( )
239
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L765-L771
[ "def", "_write_packet", "(", "self", ",", "packet", ",", "sec", "=", "None", ",", "usec", "=", "None", ",", "caplen", "=", "None", ",", "wirelen", "=", "None", ")", ":", "if", "caplen", "is", "None", ":", "caplen", "=", "len", "(", "packet", ")", "if", "wirelen", "is", "None", ":", "wirelen", "=", "caplen", "if", "sec", "is", "None", "or", "usec", "is", "None", ":", "t", "=", "time", ".", "time", "(", ")", "it", "=", "int", "(", "t", ")", "if", "sec", "is", "None", ":", "sec", "=", "it", "if", "usec", "is", "None", ":", "usec", "=", "int", "(", "round", "(", "(", "t", "-", "it", ")", "*", "1000000", ")", ")", "self", ".", "f", ".", "write", "(", "struct", ".", "pack", "(", "self", ".", "endian", "+", "\"IIII\"", ",", "sec", ",", "usec", ",", "caplen", ",", "wirelen", ")", ")", "self", ".", "f", ".", "write", "(", "packet", ")", "if", "self", ".", "gz", "and", "self", ".", "sync", ":", "self", ".", "f", ".", "flush", "(", ")" ]
Returns a boolean indicating whether these commands can be grouped together.
def can_group_commands ( command , next_command ) : multi_capable_commands = ( 'get' , 'set' , 'delete' ) if next_command is None : return False name = command . get_name ( ) # TODO: support multi commands if name not in multi_capable_commands : return False if name != next_command . get_name ( ) : return False # if the shared args (key, or key/value) do not match, we cannot group if grouped_args_for_command ( command ) != grouped_args_for_command ( next_command ) : return False # If the keyword arguments do not much (e.g. key_prefix, or timeout on set) # then we cannot group if command . get_kwargs ( ) != next_command . get_kwargs ( ) : return False return True
240
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/backends/memcache.py#L97-L135
[ "def", "get_stream_duration", "(", "stream", ")", ":", "epoch", "=", "lal", ".", "LIGOTimeGPS", "(", "stream", ".", "epoch", ".", "gpsSeconds", ",", "stream", ".", "epoch", ".", "gpsNanoSeconds", ")", "# loop over each file in the stream cache and query its duration", "nfile", "=", "stream", ".", "cache", ".", "length", "duration", "=", "0", "for", "dummy_i", "in", "range", "(", "nfile", ")", ":", "for", "dummy_j", "in", "range", "(", "lalframe", ".", "FrFileQueryNFrame", "(", "stream", ".", "file", ")", ")", ":", "duration", "+=", "lalframe", ".", "FrFileQueryDt", "(", "stream", ".", "file", ",", "0", ")", "lalframe", ".", "FrStreamNext", "(", "stream", ")", "# rewind stream and return", "lalframe", ".", "FrStreamSeek", "(", "stream", ",", "epoch", ")", "return", "duration" ]
Define the ribosomal proteins and the locations of the curated databases.
def find_databases ( databases ) : # 16 ribosomal proteins in their expected order proteins = [ 'L15' , 'L18' , 'L6' , 'S8' , 'L5' , 'L24' , 'L14' , 'S17' , 'L16' , 'S3' , 'L22' , 'S19' , 'L2' , 'L4' , 'L3' , 'S10' ] # curated databases protein_databases = { 'L14' : 'rpL14_JGI_MDM.filtered.faa' , 'L15' : 'rpL15_JGI_MDM.filtered.faa' , 'L16' : 'rpL16_JGI_MDM.filtered.faa' , 'L18' : 'rpL18_JGI_MDM.filtered.faa' , 'L22' : 'rpL22_JGI_MDM.filtered.faa' , 'L24' : 'rpL24_JGI_MDM.filtered.faa' , 'L2' : 'rpL2_JGI_MDM.filtered.faa' , 'L3' : 'rpL3_JGI_MDM.filtered.faa' , 'L4' : 'rpL4_JGI_MDM.filtered.faa' , 'L5' : 'rpL5_JGI_MDM.filtered.faa' , 'L6' : 'rpL6_JGI_MDM.filtered.faa' , 'S10' : 'rpS10_JGI_MDM.filtered.faa' , 'S17' : 'rpS17_JGI_MDM.filtered.faa' , 'S19' : 'rpS19_JGI_MDM.filtered.faa' , 'S3' : 'rpS3_JGI_MDM.filtered.faa' , 'S8' : 'rpS8_JGI_MDM.filtered.faa' } protein_databases = { key : '%s/%s' % ( databases , database ) for key , database in list ( protein_databases . items ( ) ) } return proteins , protein_databases
241
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L21-L48
[ "def", "_sendStatCmd", "(", "self", ",", "cmd", ")", ":", "try", ":", "self", ".", "_conn", ".", "write", "(", "\"%s\\r\\n\"", "%", "cmd", ")", "regex", "=", "re", ".", "compile", "(", "'^(END|ERROR)\\r\\n'", ",", "re", ".", "MULTILINE", ")", "(", "idx", ",", "mobj", ",", "text", ")", "=", "self", ".", "_conn", ".", "expect", "(", "[", "regex", ",", "]", ",", "self", ".", "_timeout", ")", "#@UnusedVariable", "except", ":", "raise", "Exception", "(", "\"Communication with %s failed\"", "%", "self", ".", "_instanceName", ")", "if", "mobj", "is", "not", "None", ":", "if", "mobj", ".", "group", "(", "1", ")", "==", "'END'", ":", "return", "text", ".", "splitlines", "(", ")", "[", ":", "-", "1", "]", "elif", "mobj", ".", "group", "(", "1", ")", "==", "'ERROR'", ":", "raise", "Exception", "(", "\"Protocol error in communication with %s.\"", "%", "self", ".", "_instanceName", ")", "else", ":", "raise", "Exception", "(", "\"Connection with %s timed out.\"", "%", "self", ".", "_instanceName", ")" ]
Which protein has the best hit: the one to the right or the one to the left?
def find_next ( start , stop , i2hits ) : if start not in i2hits and stop in i2hits : index = stop elif stop not in i2hits and start in i2hits : index = start elif start not in i2hits and stop not in i2hits : index = choice ( [ start , stop ] ) i2hits [ index ] = [ [ False ] ] else : A , B = i2hits [ start ] [ 0 ] , i2hits [ stop ] [ 0 ] if B [ 10 ] <= A [ 10 ] : index = stop else : index = start if index == start : nstart = start - 1 nstop = stop else : nstop = stop + 1 nstart = start match = i2hits [ index ] [ 0 ] rp = match [ - 1 ] return index , nstart , nstop , rp , match
242
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L77-L102
[ "def", "construct_error_message", "(", "driver_id", ",", "error_type", ",", "message", ",", "timestamp", ")", ":", "builder", "=", "flatbuffers", ".", "Builder", "(", "0", ")", "driver_offset", "=", "builder", ".", "CreateString", "(", "driver_id", ".", "binary", "(", ")", ")", "error_type_offset", "=", "builder", ".", "CreateString", "(", "error_type", ")", "message_offset", "=", "builder", ".", "CreateString", "(", "message", ")", "ray", ".", "core", ".", "generated", ".", "ErrorTableData", ".", "ErrorTableDataStart", "(", "builder", ")", "ray", ".", "core", ".", "generated", ".", "ErrorTableData", ".", "ErrorTableDataAddDriverId", "(", "builder", ",", "driver_offset", ")", "ray", ".", "core", ".", "generated", ".", "ErrorTableData", ".", "ErrorTableDataAddType", "(", "builder", ",", "error_type_offset", ")", "ray", ".", "core", ".", "generated", ".", "ErrorTableData", ".", "ErrorTableDataAddErrorMessage", "(", "builder", ",", "message_offset", ")", "ray", ".", "core", ".", "generated", ".", "ErrorTableData", ".", "ErrorTableDataAddTimestamp", "(", "builder", ",", "timestamp", ")", "error_data_offset", "=", "ray", ".", "core", ".", "generated", ".", "ErrorTableData", ".", "ErrorTableDataEnd", "(", "builder", ")", "builder", ".", "Finish", "(", "error_data_offset", ")", "return", "bytes", "(", "builder", ".", "Output", "(", ")", ")" ]
Determine which hits represent real ribosomal proteins and identify each one within a syntenic block. max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold.
def find_ribosomal ( rps , scaffolds , s2rp , min_hits , max_hits_rp , max_errors ) : for scaffold , proteins in list ( s2rp . items ( ) ) : # for each scaffold, get best hits for each rp hits = { p : [ i for i in sorted ( hits , key = itemgetter ( 10 ) ) ] [ 0 : max_hits_rp ] for p , hits in list ( proteins . items ( ) ) if len ( hits ) > 0 } # skip if fewer than min_hits RPs are identified if len ( hits ) < min_hits : continue best = sorted ( [ hit [ 0 ] + [ p ] for p , hit in list ( hits . items ( ) ) ] , key = itemgetter ( 10 ) ) [ 0 ] block = find_block ( rps , scaffolds [ scaffold ] , hits , best , max_errors ) if ( len ( block ) - 1 ) >= min_hits : yield scaffold , block
243
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L134-L150
[ "def", "row_value", "(", "self", ",", "row", ")", ":", "irow", "=", "int", "(", "row", ")", "i", "=", "self", ".", "_get_key_index", "(", "irow", ")", "if", "i", "==", "-", "1", ":", "return", "0.0", "# Are we dealing with the last key?", "if", "i", "==", "len", "(", "self", ".", "keys", ")", "-", "1", ":", "return", "self", ".", "keys", "[", "-", "1", "]", ".", "value", "return", "TrackKey", ".", "interpolate", "(", "self", ".", "keys", "[", "i", "]", ",", "self", ".", "keys", "[", "i", "+", "1", "]", ",", "row", ")" ]
Parse the rep set file and remove all sequences not associated with unique OTUs .
def filter_rep_set ( inF , otuSet ) : seqs = [ ] for record in SeqIO . parse ( inF , "fasta" ) : if record . id in otuSet : seqs . append ( record ) return seqs
244
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/filter_rep_set.py#L32-L47
[ "def", "set_energy_range", "(", "self", ",", "logemin", ",", "logemax", ")", ":", "if", "logemin", "is", "None", ":", "logemin", "=", "self", ".", "log_energies", "[", "0", "]", "if", "logemax", "is", "None", ":", "logemax", "=", "self", ".", "log_energies", "[", "-", "1", "]", "imin", "=", "int", "(", "utils", ".", "val_to_edge", "(", "self", ".", "log_energies", ",", "logemin", ")", "[", "0", "]", ")", "imax", "=", "int", "(", "utils", ".", "val_to_edge", "(", "self", ".", "log_energies", ",", "logemax", ")", "[", "0", "]", ")", "if", "imin", "-", "imax", "==", "0", ":", "imin", "=", "int", "(", "len", "(", "self", ".", "log_energies", ")", "-", "1", ")", "imax", "=", "int", "(", "len", "(", "self", ".", "log_energies", ")", "-", "1", ")", "klims", "=", "self", ".", "like", ".", "logLike", ".", "klims", "(", ")", "if", "imin", "!=", "klims", "[", "0", "]", "or", "imax", "!=", "klims", "[", "1", "]", ":", "self", ".", "like", ".", "selectEbounds", "(", "imin", ",", "imax", ")", "return", "np", ".", "array", "(", "[", "self", ".", "log_energies", "[", "imin", "]", ",", "self", ".", "log_energies", "[", "imax", "]", "]", ")" ]
Update the text for each element at the configured path if its attribute matches.
def _update_report_item ( self , * * update_props ) : tree_to_update = update_props [ 'tree_to_update' ] prop = update_props [ 'prop' ] values = wrap_value ( update_props [ 'values' ] ) xroot = self . _get_xroot_for ( prop ) attr_key = 'type' attr_val = u'' if prop == 'attribute_accuracy' : attr_val = 'DQQuanAttAcc' elif prop == 'dataset_completeness' : attr_val = 'DQCompOm' # Clear (make empty) all elements of the appropriate type for elem in get_elements ( tree_to_update , xroot ) : if get_element_attributes ( elem ) . get ( attr_key ) == attr_val : clear_element ( elem ) # Remove all empty elements, including those previously cleared remove_empty_element ( tree_to_update , xroot ) # Insert elements with correct attributes for each new value attrs = { attr_key : attr_val } updated = [ ] for idx , value in enumerate ( values ) : elem = insert_element ( tree_to_update , idx , xroot , * * attrs ) updated . append ( insert_element ( elem , idx , 'measDesc' , value ) ) return updated
245
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/arcgis_metadata_parser.py#L407-L440
[ "def", "remove_headers", "(", "self", ",", "header_name", ")", ":", "if", "self", ".", "__ro_flag", ":", "raise", "RuntimeError", "(", "'ro'", ")", "header_name", "=", "self", ".", "normalize_name", "(", "header_name", ")", "if", "header_name", "in", "self", ".", "__headers", ".", "keys", "(", ")", ":", "self", ".", "__headers", ".", "pop", "(", "header_name", ")" ]
Clear the specified interrupt bit in the interrupt status register .
def _clear_interrupt ( self , intbit ) : int_status = self . _device . readU8 ( VCNL4010_INTSTAT ) int_status &= ~ intbit self . _device . write8 ( VCNL4010_INTSTAT , int_status )
246
https://github.com/adafruit/Adafruit_Python_VCNL40xx/blob/f88ec755fd23017028b6dec1be0607ff4a018e10/Adafruit_VCNL40xx/VCNL40xx.py#L123-L128
[ "def", "create_dirs", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "_path", ")", ":", "os", ".", "makedirs", "(", "self", ".", "_path", ")", "for", "dir_name", "in", "[", "self", ".", "OBJ_DIR", ",", "self", ".", "TMP_OBJ_DIR", ",", "self", ".", "PKG_DIR", ",", "self", ".", "CACHE_DIR", "]", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "dir_name", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "mkdir", "(", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_version_path", "(", ")", ")", ":", "self", ".", "_write_format_version", "(", ")" ]
Swaps two nodes
def move ( self ) : a = random . randint ( 0 , len ( self . state ) - 1 ) b = random . randint ( 0 , len ( self . state ) - 1 ) self . state [ [ a , b ] ] = self . state [ [ b , a ] ]
247
https://github.com/skojaku/core-periphery-detection/blob/d724e6441066622506ddb54d81ee9a1cfd15f766/cpalgorithm/Rombach.py#L19-L23
[ "def", "signalize_extensions", "(", ")", ":", "warnings", ".", "warn", "(", "\"DB-API extension cursor.rownumber used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension connection.<exception> used\"", ",", "SalesforceWarning", ")", "# TODO", "warnings", ".", "warn", "(", "\"DB-API extension cursor.connection used\"", ",", "SalesforceWarning", ")", "# not implemented DB-API extension cursor.scroll(, SalesforceWarning)", "warnings", ".", "warn", "(", "\"DB-API extension cursor.messages used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension connection.messages used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension cursor.next(, SalesforceWarning) used\"", ")", "warnings", ".", "warn", "(", "\"DB-API extension cursor.__iter__(, SalesforceWarning) used\"", ")", "warnings", ".", "warn", "(", "\"DB-API extension cursor.lastrowid used\"", ",", "SalesforceWarning", ")", "warnings", ".", "warn", "(", "\"DB-API extension .errorhandler used\"", ",", "SalesforceWarning", ")" ]
A bool indicating whether the certificate should be self-signed.
def self_signed ( self , value ) : self . _self_signed = bool ( value ) if self . _self_signed : self . _issuer = None
248
https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L122-L130
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "TabView", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "try", ":", "tab_group", "=", "self", ".", "get_tabs", "(", "self", ".", "request", ",", "*", "*", "kwargs", ")", "context", "[", "\"tab_group\"", "]", "=", "tab_group", "# Make sure our data is pre-loaded to capture errors.", "context", "[", "\"tab_group\"", "]", ".", "load_tab_data", "(", ")", "except", "Exception", ":", "exceptions", ".", "handle", "(", "self", ".", "request", ")", "return", "context" ]
Grabs the first URL out of an asn1crypto.x509.CRLDistributionPoints object.
def _get_crl_url ( self , distribution_points ) : if distribution_points is None : return None for distribution_point in distribution_points : name = distribution_point [ 'distribution_point' ] if name . name == 'full_name' and name . chosen [ 0 ] . name == 'uniform_resource_identifier' : return name . chosen [ 0 ] . chosen . native return None
249
https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L544-L564
[ "def", "set_sync_limit", "(", "self", ",", "limit", ":", "int", ")", "->", "Optional", "[", "int", "]", ":", "try", ":", "prev_limit", "=", "json", ".", "loads", "(", "self", ".", "sync_filter", ")", "[", "'room'", "]", "[", "'timeline'", "]", "[", "'limit'", "]", "except", "(", "json", ".", "JSONDecodeError", ",", "KeyError", ")", ":", "prev_limit", "=", "None", "self", ".", "sync_filter", "=", "json", ".", "dumps", "(", "{", "'room'", ":", "{", "'timeline'", ":", "{", "'limit'", ":", "limit", "}", "}", "}", ")", "return", "prev_limit" ]
A bool indicating whether the certificate should have the OCSP No Check extension. Only applicable to certificates created for signing OCSP responses; such certificates should normally be issued for a very short period of time, since they are effectively whitelisted by clients.
def ocsp_no_check ( self , value ) : if value is None : self . _ocsp_no_check = None else : self . _ocsp_no_check = bool ( value )
250
https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L688-L699
[ "def", "add_to_virtualbox", "(", "self", ")", ":", "# VirtualBox Image", "if", "'vmname'", "not", "in", "self", ".", "node", "[", "'properties'", "]", ":", "self", ".", "node", "[", "'properties'", "]", "[", "'vmname'", "]", "=", "self", ".", "hypervisor", "[", "'VBoxDevice'", "]", "[", "'image'", "]", "# Number of adapters", "if", "'adapters'", "not", "in", "self", ".", "node", "[", "'properties'", "]", ":", "self", ".", "node", "[", "'properties'", "]", "[", "'adapters'", "]", "=", "self", ".", "hypervisor", "[", "'VBoxDevice'", "]", "[", "'nics'", "]", "# Console Port", "if", "'console'", "not", "in", "self", ".", "node", "[", "'properties'", "]", ":", "self", ".", "node", "[", "'properties'", "]", "[", "'console'", "]", "=", "self", ".", "base_ports", "[", "'vbox_console'", "]", "+", "self", ".", "node", "[", "'vbox_id'", "]", "-", "1" ]
Removes empty lines.
def emptylineless ( parser , token ) : nodelist = parser . parse ( ( 'endemptylineless' , ) ) parser . delete_first_token ( ) return EmptylinelessNode ( nodelist )
251
https://github.com/tell-k/django-modelsdoc/blob/c9d336e76251feb142347b3a41365430d3365436/modelsdoc/templatetags/modelsdoc_tags.py#L31-L54
[ "def", "_GetPrimitiveEncoder", "(", "self", ")", ":", "# Decide what should the primitive type be for packing the target rdfvalue", "# into the protobuf and create a delegate descriptor to control that.", "primitive_cls", "=", "self", ".", "_PROTO_DATA_STORE_LOOKUP", "[", "self", ".", "type", ".", "data_store_type", "]", "self", ".", "primitive_desc", "=", "primitive_cls", "(", "*", "*", "self", ".", "_kwargs", ")", "# Our wiretype is the same as the delegate's.", "self", ".", "wire_type", "=", "self", ".", "primitive_desc", ".", "wire_type", "self", ".", "proto_type_name", "=", "self", ".", "primitive_desc", ".", "proto_type_name", "# Recalculate our tags.", "self", ".", "CalculateTags", "(", ")" ]
Do an HTTP PURGE of the given asset. The URL is run through urlparse and must point to the Varnish instance, not to varnishadm.
def http_purge_url ( url ) : url = urlparse ( url ) connection = HTTPConnection ( url . hostname , url . port or 80 ) path = url . path or '/' connection . request ( 'PURGE' , '%s?%s' % ( path , url . query ) if url . query else path , '' , { 'Host' : '%s:%s' % ( url . hostname , url . port ) if url . port else url . hostname } ) response = connection . getresponse ( ) if response . status != 200 : logging . error ( 'Purge failed with status: %s' % response . status ) return response
252
https://github.com/justquick/python-varnish/blob/8f114c74898e6c5ade2ce49c8b595040bd150465/varnish.py#L47-L60
[ "def", "row_completed", "(", "self", ",", "index", ")", ":", "self", ".", "_completed_rows", ".", "append", "(", "index", ")", "for", "row_completed", "in", "self", ".", "_on_row_completed", ":", "row_completed", "(", "index", ")" ]
Non-threaded batch command runner that returns output results.
def run ( addr , * commands , * * kwargs ) : results = [ ] handler = VarnishHandler ( addr , * * kwargs ) for cmd in commands : if isinstance ( cmd , tuple ) and len ( cmd ) > 1 : results . extend ( [ getattr ( handler , c [ 0 ] . replace ( '.' , '_' ) ) ( * c [ 1 : ] ) for c in cmd ] ) else : results . append ( getattr ( handler , cmd . replace ( '.' , '_' ) ) ( * commands [ 1 : ] ) ) break handler . close ( ) return results
253
https://github.com/justquick/python-varnish/blob/8f114c74898e6c5ade2ce49c8b595040bd150465/varnish.py#L289-L302
[ "def", "isomorphic_to", "(", "self", ",", "other", ")", ":", "if", "self", ".", "molecule", ".", "composition", "!=", "other", ".", "molecule", ".", "composition", ":", "return", "False", "else", ":", "self_undir", "=", "self", ".", "graph", ".", "to_undirected", "(", ")", "other_undir", "=", "other", ".", "graph", ".", "to_undirected", "(", ")", "nm", "=", "iso", ".", "categorical_node_match", "(", "\"specie\"", ",", "\"ERROR\"", ")", "isomorphic", "=", "nx", ".", "is_isomorphic", "(", "self_undir", ",", "other_undir", ",", "node_match", "=", "nm", ")", "return", "isomorphic" ]
Add stylesheet files to the HTML head.
def add_stylesheets ( self , * css_files ) : for css_file in css_files : self . main_soup . style . append ( self . _text_file ( css_file ) )
254
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L43-L46
[ "def", "get_command", "(", "self", ",", "version", "=", "2", ")", ":", "try", ":", "options", "=", "_C", "[", "'options'", "]", "options_str", "=", "\" -o \"", ".", "join", "(", "options", ")", "if", "options_str", ":", "options_str", "=", "\"-o \"", "+", "options_str", "+", "\" \"", "except", "KeyError", ":", "options_str", "=", "\"\"", "if", "self", ".", "username", ":", "# Not supported on SunOS", "# \"-o ConnectTimeout={}", "command", "=", "\"ssh {}\"", "\"-{} \"", "\"-p {} {}@{}\"", ".", "format", "(", "options_str", ",", "version", ",", "self", ".", "port", ",", "self", ".", "username", ",", "self", ".", "hostname", ")", "else", ":", "command", "=", "\"ssh {} \"", "\"-{} \"", "\"-p {} {}\"", ".", "format", "(", "options_str", ",", "version", ",", "self", ".", "port", ",", "self", ".", "hostname", ")", "return", "command" ]
Add JavaScript files to the HTML body.
def add_javascripts ( self , * js_files ) : # create the script tag if don't exists if self . main_soup . script is None : script_tag = self . main_soup . new_tag ( 'script' ) self . main_soup . body . append ( script_tag ) for js_file in js_files : self . main_soup . script . append ( self . _text_file ( js_file ) )
255
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L48-L56
[ "def", "plugin_counts", "(", "self", ")", ":", "ret", "=", "{", "'total'", ":", "0", ",", "}", "# As ususal, we need data before we can actually do anything ;)", "data", "=", "self", ".", "raw_query", "(", "'plugin'", ",", "'init'", ")", "# For backwards compatability purposes, we will be handling this a bit", "# differently than I would like. We are going to check to see if each", "# value exists and override the default value of 0. The only value that", "# I know existed in bost 4.2 and 4.4 is pluginCount, the rest aren't", "# listed in the API docs, however return back from my experimentation.", "ret", "[", "'total'", "]", "=", "data", "[", "'pluginCount'", "]", "if", "'lastUpdates'", "in", "data", ":", "for", "item", "in", "[", "'active'", ",", "'passive'", ",", "'compliance'", ",", "'custom'", ",", "'event'", "]", ":", "itemdata", "=", "{", "}", "if", "item", "in", "data", "[", "'lastUpdates'", "]", ":", "itemdata", "=", "data", "[", "'lastUpdates'", "]", "[", "item", "]", "if", "item", "in", "data", ":", "itemdata", "[", "'count'", "]", "=", "data", "[", "item", "]", "else", ":", "itemdata", "[", "'count'", "]", "=", "0", "ret", "[", "item", "]", "=", "itemdata", "return", "ret" ]
Write the object to a file.
def export ( self ) : with open ( self . export_url , 'w' , encoding = 'utf-8' ) as file : file . write ( self . build ( ) ) if self . open_browser : webbrowser . open_new_tab ( self . export_url )
256
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L58-L64
[ "def", "delete", "(", "name", ",", "timeout", "=", "90", ")", ":", "handle_scm", "=", "win32service", ".", "OpenSCManager", "(", "None", ",", "None", ",", "win32service", ".", "SC_MANAGER_CONNECT", ")", "try", ":", "handle_svc", "=", "win32service", ".", "OpenService", "(", "handle_scm", ",", "name", ",", "win32service", ".", "SERVICE_ALL_ACCESS", ")", "except", "pywintypes", ".", "error", "as", "exc", ":", "win32service", ".", "CloseServiceHandle", "(", "handle_scm", ")", "if", "exc", ".", "winerror", "!=", "1060", ":", "raise", "CommandExecutionError", "(", "'Failed to open {0}. {1}'", ".", "format", "(", "name", ",", "exc", ".", "strerror", ")", ")", "log", ".", "debug", "(", "'Service \"%s\" is not present'", ",", "name", ")", "return", "True", "try", ":", "win32service", ".", "DeleteService", "(", "handle_svc", ")", "except", "pywintypes", ".", "error", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Failed to delete {0}. {1}'", ".", "format", "(", "name", ",", "exc", ".", "strerror", ")", ")", "finally", ":", "log", ".", "debug", "(", "'Cleaning up'", ")", "win32service", ".", "CloseServiceHandle", "(", "handle_scm", ")", "win32service", ".", "CloseServiceHandle", "(", "handle_svc", ")", "end_time", "=", "time", ".", "time", "(", ")", "+", "int", "(", "timeout", ")", "while", "name", "in", "get_all", "(", ")", "and", "time", ".", "time", "(", ")", "<", "end_time", ":", "time", ".", "sleep", "(", "1", ")", "return", "name", "not", "in", "get_all", "(", ")" ]
Convert the Markdown text to HTML and return the HTML as a string.
def build ( self ) : markdown_html = markdown . markdown ( self . markdown_text , extensions = [ TocExtension ( ) , 'fenced_code' , 'markdown_checklist.extension' , 'markdown.extensions.tables' ] ) markdown_soup = BeautifulSoup ( markdown_html , 'html.parser' ) # include jquery & mermaid.js only if there are Mermaid graph if markdown_soup . find ( 'code' , attrs = { 'class' : 'mermaid' } ) : self . _add_mermaid_js ( ) # search in markdown html if there are Dot Graph & replace it with .svg result for dot_tag in markdown_soup . find_all ( 'code' , attrs = { 'class' : 'dotgraph' } ) : grap_svg = self . _text_to_graphiz ( dot_tag . string ) graph_soup = BeautifulSoup ( grap_svg , 'html.parser' ) dot_tag . parent . replaceWith ( graph_soup ) self . main_soup . body . append ( markdown_soup ) return self . main_soup . prettify ( )
257
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L66-L84
[ "def", "__normalize", "(", "self", ")", ":", "# Don't normalize if we're already normalizing or intializing", "if", "self", ".", "__normalizing", "is", "True", "or", "self", ".", "__initialized", "is", "False", ":", "return", "self", ".", "__normalizing", "=", "True", "self", ".", "__normalize_grades", "(", ")", "self", ".", "__normalize_progress", "(", ")", "self", ".", "__normalizing", "=", "False" ]
return the content of a file
def _text_file ( self , url ) : try : with open ( url , 'r' , encoding = 'utf-8' ) as file : return file . read ( ) except FileNotFoundError : print ( 'File `{}` not found' . format ( url ) ) sys . exit ( 0 )
258
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L86-L93
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Create a Graphviz graph from text.
def _text_to_graphiz ( self , text ) : dot = Source ( text , format = 'svg' ) return dot . pipe ( ) . decode ( 'utf-8' )
259
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L95-L98
[ "def", "__get_vibration_code", "(", "self", ",", "left_motor", ",", "right_motor", ",", "duration", ")", ":", "inner_event", "=", "struct", ".", "pack", "(", "'2h6x2h2x2H28x'", ",", "0x50", ",", "-", "1", ",", "duration", ",", "0", ",", "int", "(", "left_motor", "*", "65535", ")", ",", "int", "(", "right_motor", "*", "65535", ")", ")", "buf_conts", "=", "ioctl", "(", "self", ".", "_write_device", ",", "1076905344", ",", "inner_event", ")", "return", "int", "(", "codecs", ".", "encode", "(", "buf_conts", "[", "1", ":", "3", "]", ",", "'hex'", ")", ",", "16", ")" ]
Add the Mermaid JS libraries and CSS files.
def _add_mermaid_js ( self ) : self . add_javascripts ( '{}/js/jquery-1.11.3.min.js' . format ( self . resources_path ) ) self . add_javascripts ( '{}/js/mermaid.min.js' . format ( self . resources_path ) ) self . add_stylesheets ( '{}/css/mermaid.css' . format ( self . resources_path ) ) self . main_soup . script . append ( 'mermaid.initialize({startOnLoad:true });' )
260
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L100-L105
[ "def", "shape", "(", "self", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "return", "(", ")", "elif", "self", ".", "is_power_space", ":", "try", ":", "sub_shape", "=", "self", "[", "0", "]", ".", "shape", "except", "AttributeError", ":", "sub_shape", "=", "(", ")", "else", ":", "sub_shape", "=", "(", ")", "return", "(", "len", "(", "self", ")", ",", ")", "+", "sub_shape" ]
Get a character set with individual members or ranges .
def getCharacterSet ( self ) : chars = u'' c = None cnt = 1 start = 0 while True : escaped_slash = False c = self . next ( ) # print "pattern : ", self.pattern # print "C : ", c # print "Slash : ", c == u'\\' # print 'chars : ', chars # print 'index : ', self.index # print 'last : ', self.last() # print 'lookahead : ', self.lookahead() if self . lookahead ( ) == u'-' and not c == u'\\' : f = c self . next ( ) # skip hyphen c = self . next ( ) # get far range if not c or ( c in self . meta_chars ) : raise StringGenerator . SyntaxError ( u"unexpected end of class range" ) chars += self . getCharacterRange ( f , c ) elif c == u'\\' : if self . lookahead ( ) in self . meta_chars : c = self . next ( ) chars += c continue elif self . lookahead ( ) in self . string_code : c = self . next ( ) chars += self . string_code [ c ] elif c and c not in self . meta_chars : chars += c if c == u']' : if self . lookahead ( ) == u'{' : [ start , cnt ] = self . getQuantifier ( ) else : start = - 1 cnt = 1 break if c and c in self . meta_chars and not self . last ( ) == u"\\" : raise StringGenerator . SyntaxError ( u"Un-escaped character in class definition: %s" % c ) if not c : break return StringGenerator . CharacterSet ( chars , start , cnt )
261
https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L368-L419
[ "def", "WaitForVirtualMachineShutdown", "(", "self", ",", "vm_to_poll", ",", "timeout_seconds", ",", "sleep_period", "=", "5", ")", ":", "seconds_waited", "=", "0", "# wait counter", "while", "seconds_waited", "<", "timeout_seconds", ":", "# sleep first, since nothing shuts down instantly", "seconds_waited", "+=", "sleep_period", "time", ".", "sleep", "(", "sleep_period", ")", "vm", "=", "self", ".", "get_vm", "(", "vm_to_poll", ".", "name", ")", "if", "vm", ".", "runtime", ".", "powerState", "==", "vim", ".", "VirtualMachinePowerState", ".", "poweredOff", ":", "return", "True", "return", "False" ]
Get a sequence of non-special characters.
def getLiteral ( self ) : # we are on the first non-special character chars = u'' c = self . current ( ) while True : if c and c == u"\\" : c = self . next ( ) if c : chars += c continue elif not c or ( c in self . meta_chars ) : break else : chars += c if self . lookahead ( ) and self . lookahead ( ) in self . meta_chars : break c = self . next ( ) return StringGenerator . Literal ( chars )
262
https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L421-L439
[ "def", "initialize_communities_bucket", "(", ")", ":", "bucket_id", "=", "UUID", "(", "current_app", ".", "config", "[", "'COMMUNITIES_BUCKET_UUID'", "]", ")", "if", "Bucket", ".", "query", ".", "get", "(", "bucket_id", ")", ":", "raise", "FilesException", "(", "\"Bucket with UUID {} already exists.\"", ".", "format", "(", "bucket_id", ")", ")", "else", ":", "storage_class", "=", "current_app", ".", "config", "[", "'FILES_REST_DEFAULT_STORAGE_CLASS'", "]", "location", "=", "Location", ".", "get_default", "(", ")", "bucket", "=", "Bucket", "(", "id", "=", "bucket_id", ",", "location", "=", "location", ",", "default_storage_class", "=", "storage_class", ")", "db", ".", "session", ".", "add", "(", "bucket", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Get a sequence of nodes .
def getSequence ( self , level = 0 ) : seq = [ ] op = '' left_operand = None right_operand = None sequence_closed = False while True : c = self . next ( ) if not c : break if c and c not in self . meta_chars : seq . append ( self . getLiteral ( ) ) elif c and c == u'$' and self . lookahead ( ) == u'{' : seq . append ( self . getSource ( ) ) elif c == u'[' and not self . last ( ) == u'\\' : seq . append ( self . getCharacterSet ( ) ) elif c == u'(' and not self . last ( ) == u'\\' : seq . append ( self . getSequence ( level + 1 ) ) elif c == u')' and not self . last ( ) == u'\\' : # end of this sequence if level == 0 : # there should be no parens here raise StringGenerator . SyntaxError ( u"Extra closing parenthesis" ) sequence_closed = True break elif c == u'|' and not self . last ( ) == u'\\' : op = c elif c == u'&' and not self . last ( ) == u'\\' : op = c else : if c in self . meta_chars and not self . last ( ) == u"\\" : raise StringGenerator . SyntaxError ( u"Un-escaped special character: %s" % c ) #print( op,len(seq) ) if op and not left_operand : if not seq or len ( seq ) < 1 : raise StringGenerator . SyntaxError ( u"Operator: %s with no left operand" % op ) left_operand = seq . pop ( ) elif op and len ( seq ) >= 1 and left_operand : right_operand = seq . pop ( ) #print( "popped: [%s] %s:%s"%( op, left_operand, right_operand) ) if op == u'|' : seq . append ( StringGenerator . SequenceOR ( [ left_operand , right_operand ] ) ) elif op == u'&' : seq . append ( StringGenerator . SequenceAND ( [ left_operand , right_operand ] ) ) op = u'' left_operand = None right_operand = None # check for syntax errors if op : raise StringGenerator . SyntaxError ( u"Operator: %s with no right operand" % op ) if level > 0 and not sequence_closed : # it means we are finishing a non-first-level sequence without closing parens raise StringGenerator . SyntaxError ( u"Missing closing parenthesis" ) return StringGenerator . Sequence ( seq )
263
https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L441-L501
[ "def", "crtPwBoxCarFn", "(", "varNumVol", ",", "aryPngData", ",", "aryPresOrd", ",", "vecMtDrctn", ")", ":", "print", "(", "'------Create pixel-wise boxcar functions'", ")", "aryBoxCar", "=", "np", ".", "empty", "(", "aryPngData", ".", "shape", "[", "0", ":", "2", "]", "+", "(", "len", "(", "vecMtDrctn", ")", ",", ")", "+", "(", "varNumVol", ",", ")", ",", "dtype", "=", "'int64'", ")", "for", "ind", ",", "num", "in", "enumerate", "(", "vecMtDrctn", ")", ":", "aryCondTemp", "=", "np", ".", "zeros", "(", "(", "aryPngData", ".", "shape", ")", ",", "dtype", "=", "'int64'", ")", "lgcTempMtDrctn", "=", "[", "aryPresOrd", "==", "num", "]", "[", "0", "]", "aryCondTemp", "[", ":", ",", ":", ",", "lgcTempMtDrctn", "]", "=", "np", ".", "copy", "(", "aryPngData", "[", ":", ",", ":", ",", "lgcTempMtDrctn", "]", ")", "aryBoxCar", "[", ":", ",", ":", ",", "ind", ",", ":", "]", "=", "aryCondTemp", "return", "aryBoxCar" ]
Print the parse tree and then call render for an example .
def dump ( self , * * kwargs ) : import sys if not self . seq : self . seq = self . getSequence ( ) print ( "StringGenerator version: %s" % ( __version__ ) ) print ( "Python version: %s" % sys . version ) # this doesn't work anymore in p3 # print("Random method provider class: %s" % randint.im_class.__name__) self . seq . dump ( ) return self . render ( * * kwargs )
264
https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L521-L531
[ "def", "wait_for_keypresses", "(", "self", ",", "refresh_rate", "=", "1", ")", ":", "if", "not", "self", ".", "_enable_keyboard", ":", "return", "with", "self", ".", "_blessed_term", ".", "cbreak", "(", ")", ":", "while", "True", ":", "yield", "self", ".", "_blessed_term", ".", "inkey", "(", "timeout", "=", "refresh_rate", ")" ]
Return a list of generated strings .
def render_list ( self , cnt , unique = False , progress_callback = None , * * kwargs ) : rendered_list = [ ] i = 0 total_attempts = 0 while True : if i >= cnt : break if total_attempts > cnt * self . unique_attempts_factor : raise StringGenerator . UniquenessError ( u"couldn't satisfy uniqueness" ) s = self . render ( * * kwargs ) if unique : if not s in rendered_list : rendered_list . append ( s ) i += 1 else : rendered_list . append ( s ) i += 1 total_attempts += 1 # Optionally trigger the progress indicator to inform others about our progress if progress_callback and callable ( progress_callback ) : progress_callback ( i , cnt ) return rendered_list
265
https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L533-L570
[ "def", "_is_binary_stl", "(", "data", ")", ":", "is_bin", "=", "False", "start_byte", "=", "0", "end_byte", "=", "80", "_", "=", "data", "[", "start_byte", ":", "end_byte", "]", "# header data", "start_byte", "=", "end_byte", "end_byte", "+=", "4", "facet_count", "=", "struct", ".", "unpack", "(", "'I'", ",", "data", "[", "start_byte", ":", "end_byte", "]", ")", "[", "0", "]", "if", "facet_count", ">", "0", ":", "is_bin", "=", "True", "return", "is_bin" ]
Establish the connection . This is done automatically for you .
def connect ( self ) : self . conn = boto . connect_s3 ( self . AWS_ACCESS_KEY_ID , self . AWS_SECRET_ACCESS_KEY , debug = self . S3UTILS_DEBUG_LEVEL ) self . bucket = self . conn . get_bucket ( self . AWS_STORAGE_BUCKET_NAME ) self . k = Key ( self . bucket )
266
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L145-L155
[ "def", "setOverlayTexelAspect", "(", "self", ",", "ulOverlayHandle", ",", "fTexelAspect", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTexelAspect", "result", "=", "fn", "(", "ulOverlayHandle", ",", "fTexelAspect", ")", "return", "result" ]
Connect to CloudFront. This is done automatically for you when needed.
def connect_cloudfront ( self ) : self . conn_cloudfront = connect_cloudfront ( self . AWS_ACCESS_KEY_ID , self . AWS_SECRET_ACCESS_KEY , debug = self . S3UTILS_DEBUG_LEVEL )
267
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L166-L168
[ "def", "remove_organization", "(", "self", ",", "service_desk_id", ",", "organization_id", ")", ":", "log", ".", "warning", "(", "'Removing organization...'", ")", "url", "=", "'rest/servicedeskapi/servicedesk/{}/organization'", ".", "format", "(", "service_desk_id", ")", "data", "=", "{", "'organizationId'", ":", "organization_id", "}", "return", "self", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
Create a folder on S3 .
def mkdir ( self , target_folder ) : self . printv ( "Making directory: %s" % target_folder ) self . k . key = re . sub ( r"^/|/$" , "" , target_folder ) + "/" self . k . set_contents_from_string ( '' ) self . k . close ( )
268
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L171-L183
[ "def", "get_changed_devices", "(", "self", ",", "timestamp", ")", ":", "if", "timestamp", "is", "None", ":", "payload", "=", "{", "}", "else", ":", "payload", "=", "{", "'timeout'", ":", "SUBSCRIPTION_WAIT", ",", "'minimumdelay'", ":", "SUBSCRIPTION_MIN_WAIT", "}", "payload", ".", "update", "(", "timestamp", ")", "# double the timeout here so requests doesn't timeout before vera", "payload", ".", "update", "(", "{", "'id'", ":", "'lu_sdata'", ",", "}", ")", "logger", ".", "debug", "(", "\"get_changed_devices() requesting payload %s\"", ",", "str", "(", "payload", ")", ")", "r", "=", "self", ".", "data_request", "(", "payload", ",", "TIMEOUT", "*", "2", ")", "r", ".", "raise_for_status", "(", ")", "# If the Vera disconnects before writing a full response (as lu_sdata", "# will do when interrupted by a Luup reload), the requests module will", "# happily return 200 with an empty string. So, test for empty response,", "# so we don't rely on the JSON parser to throw an exception.", "if", "r", ".", "text", "==", "\"\"", ":", "raise", "PyveraError", "(", "\"Empty response from Vera\"", ")", "# Catch a wide swath of what the JSON parser might throw, within", "# reason. Unfortunately, some parsers don't specifically return", "# json.decode.JSONDecodeError, but so far most seem to derive what", "# they do throw from ValueError, so that's helpful.", "try", ":", "result", "=", "r", ".", "json", "(", ")", "except", "ValueError", "as", "ex", ":", "raise", "PyveraError", "(", "\"JSON decode error: \"", "+", "str", "(", "ex", ")", ")", "if", "not", "(", "type", "(", "result", ")", "is", "dict", "and", "'loadtime'", "in", "result", "and", "'dataversion'", "in", "result", ")", ":", "raise", "PyveraError", "(", "\"Unexpected/garbled response from Vera\"", ")", "# At this point, all good. Update timestamp and return change data.", "device_data", "=", "result", ".", "get", "(", "'devices'", ")", "timestamp", "=", "{", "'loadtime'", ":", "result", ".", "get", "(", "'loadtime'", ")", ",", "'dataversion'", ":", "result", ".", "get", "(", "'dataversion'", ")", "}", "return", "[", "device_data", ",", "timestamp", "]" ]
Delete the path and anything under the path .
def rm ( self , path ) : list_of_files = list ( self . ls ( path ) ) if list_of_files : if len ( list_of_files ) == 1 : self . bucket . delete_key ( list_of_files [ 0 ] ) else : self . bucket . delete_keys ( list_of_files ) self . printv ( "Deleted: %s" % list_of_files ) else : logger . error ( "There was nothing to remove under %s" , path )
269
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L186-L204
[ "def", "map_system_entities", "(", "engine", ",", "metadata", ",", "reset", ")", ":", "# Map the user message system entity.", "msg_tbl", "=", "Table", "(", "'_user_messages'", ",", "metadata", ",", "Column", "(", "'guid'", ",", "String", ",", "nullable", "=", "False", ",", "primary_key", "=", "True", ")", ",", "Column", "(", "'text'", ",", "String", ",", "nullable", "=", "False", ")", ",", "Column", "(", "'time_stamp'", ",", "DateTime", "(", "timezone", "=", "True", ")", ",", "nullable", "=", "False", ",", "default", "=", "sa_func", ".", "now", "(", ")", ")", ",", ")", "mapper", "(", "UserMessage", ",", "msg_tbl", ",", "id_attribute", "=", "'guid'", ")", "if", "reset", ":", "metadata", ".", "drop_all", "(", "bind", "=", "engine", ",", "tables", "=", "[", "msg_tbl", "]", ")", "metadata", ".", "create_all", "(", "bind", "=", "engine", ",", "tables", "=", "[", "msg_tbl", "]", ")" ]
Copy a file to S3.
def __put_key ( self , local_file , target_file , acl = 'public-read' , del_after_upload = False , overwrite = True , source = "filename" ) : action_word = "moving" if del_after_upload else "copying" try : self . k . key = target_file # setting the path (key) of file in the container if source == "filename" : # grabs the contents from local_file address. Note that it loads the whole file into memory self . k . set_contents_from_filename ( local_file , self . AWS_HEADERS ) elif source == "fileobj" : self . k . set_contents_from_file ( local_file , self . AWS_HEADERS ) elif source == "string" : self . k . set_contents_from_string ( local_file , self . AWS_HEADERS ) else : raise Exception ( "%s is not implemented as a source." % source ) self . k . set_acl ( acl ) # setting the file permissions self . k . close ( ) # not sure if it is needed. Somewhere I read it is recommended. self . printv ( "%s %s to %s" % ( action_word , local_file , target_file ) ) # if it is supposed to delete the local file after uploading if del_after_upload and source == "filename" : try : os . remove ( local_file ) except : logger . error ( "Unable to delete the file: " , local_file , exc_info = True ) return True except : logger . error ( "Error in writing to %s" , target_file , exc_info = True ) return False
270
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L207-L238
[ "def", "parse_recommendations", "(", "self", ",", "recommendations_page", ")", ":", "user_info", "=", "self", ".", "parse_sidebar", "(", "recommendations_page", ")", "second_col", "=", "recommendations_page", ".", "find", "(", "u'div'", ",", "{", "u'id'", ":", "u'content'", "}", ")", ".", "find", "(", "u'table'", ")", ".", "find", "(", "u'tr'", ")", ".", "find_all", "(", "u'td'", ",", "recursive", "=", "False", ")", "[", "1", "]", "try", ":", "recommendations", "=", "second_col", ".", "find_all", "(", "u\"div\"", ",", "{", "u\"class\"", ":", "u\"spaceit borderClass\"", "}", ")", "if", "recommendations", ":", "user_info", "[", "u'recommendations'", "]", "=", "{", "}", "for", "row", "in", "recommendations", "[", "1", ":", "]", ":", "anime_table", "=", "row", ".", "find", "(", "u'table'", ")", "animes", "=", "anime_table", ".", "find_all", "(", "u'td'", ")", "liked_media_link", "=", "animes", "[", "0", "]", ".", "find", "(", "u'a'", ",", "recursive", "=", "False", ")", "link_parts", "=", "liked_media_link", ".", "get", "(", "u'href'", ")", ".", "split", "(", "u'/'", ")", "# of the form /anime|manga/64/Rozen_Maiden", "liked_media", "=", "getattr", "(", "self", ".", "session", ",", "link_parts", "[", "1", "]", ")", "(", "int", "(", "link_parts", "[", "2", "]", ")", ")", ".", "set", "(", "{", "u'title'", ":", "liked_media_link", ".", "text", "}", ")", "recommended_media_link", "=", "animes", "[", "1", "]", ".", "find", "(", "u'a'", ",", "recursive", "=", "False", ")", "link_parts", "=", "recommended_media_link", ".", "get", "(", "u'href'", ")", ".", "split", "(", "u'/'", ")", "# of the form /anime|manga/64/Rozen_Maiden", "recommended_media", "=", "getattr", "(", "self", ".", "session", ",", "link_parts", "[", "1", "]", ")", "(", "int", "(", "link_parts", "[", "2", "]", ")", ")", ".", "set", "(", "{", "u'title'", ":", "recommended_media_link", ".", "text", "}", ")", "recommendation_text", "=", "row", ".", "find", "(", "u'p'", ")", ".", "text", "recommendation_menu", "=", "row", ".", "find", "(", "u'div'", ",", "recursive", "=", "False", ")", "utilities", ".", "extract_tags", "(", "recommendation_menu", ")", "recommendation_date", "=", "utilities", ".", "parse_profile_date", "(", "recommendation_menu", ".", "text", ".", "split", "(", "u' - '", ")", "[", "1", "]", ")", "user_info", "[", "u'recommendations'", "]", "[", "liked_media", "]", "=", "{", "link_parts", "[", "1", "]", ":", "recommended_media", ",", "'text'", ":", "recommendation_text", ",", "'date'", ":", "recommendation_date", "}", "except", ":", "if", "not", "self", ".", "session", ".", "suppress_parse_exceptions", ":", "raise", "return", "user_info" ]
Copy a file or folder from local to s3 .
def cp ( self , local_path , target_path , acl = 'public-read' , del_after_upload = False , overwrite = True , invalidate = False ) : result = None if overwrite : list_of_files = [ ] else : list_of_files = self . ls ( folder = target_path , begin_from_file = "" , num = - 1 , get_grants = False , all_grant_data = False ) # copying the contents of the folder and not folder itself if local_path . endswith ( "/*" ) : local_path = local_path [ : - 2 ] target_path = re . sub ( r"^/|/$" , "" , target_path ) # Amazon S3 doesn't let the name to begin with / # copying folder too else : local_base_name = os . path . basename ( local_path ) local_path = re . sub ( r"/$" , "" , local_path ) target_path = re . sub ( r"^/" , "" , target_path ) if not target_path . endswith ( local_base_name ) : target_path = os . path . join ( target_path , local_base_name ) if os . path . exists ( local_path ) : result = self . __find_files_and_copy ( local_path , target_path , acl , del_after_upload , overwrite , invalidate , list_of_files ) else : result = { 'file_does_not_exist' : local_path } logger . error ( "trying to upload to s3 but file doesn't exist: %s" % local_path ) return result
271
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L240-L336
[ "def", "stop_experiment", "(", "args", ")", ":", "experiment_id_list", "=", "parse_ids", "(", "args", ")", "if", "experiment_id_list", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "for", "experiment_id", "in", "experiment_id_list", ":", "print_normal", "(", "'Stoping experiment %s'", "%", "experiment_id", ")", "nni_config", "=", "Config", "(", "experiment_dict", "[", "experiment_id", "]", "[", "'fileName'", "]", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "rest_pid", ":", "kill_command", "(", "rest_pid", ")", "tensorboard_pid_list", "=", "nni_config", ".", "get_config", "(", "'tensorboardPidList'", ")", "if", "tensorboard_pid_list", ":", "for", "tensorboard_pid", "in", "tensorboard_pid_list", ":", "try", ":", "kill_command", "(", "tensorboard_pid", ")", "except", "Exception", "as", "exception", ":", "print_error", "(", "exception", ")", "nni_config", ".", "set_config", "(", "'tensorboardPidList'", ",", "[", "]", ")", "print_normal", "(", "'Stop experiment success!'", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'status'", ",", "'STOPPED'", ")", "time_now", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'endTime'", ",", "str", "(", "time_now", ")", ")" ]
Similar to Linux mv command .
def mv ( self , local_file , target_file , acl = 'public-read' , overwrite = True , invalidate = False ) : self . cp ( local_file , target_file , acl = acl , del_after_upload = True , overwrite = overwrite , invalidate = invalidate )
272
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L483-L507
[ "def", "__json_strnum_to_bignum", "(", "json_object", ")", ":", "for", "key", "in", "(", "'id'", ",", "'week'", ",", "'in_reply_to_id'", ",", "'in_reply_to_account_id'", ",", "'logins'", ",", "'registrations'", ",", "'statuses'", ")", ":", "if", "(", "key", "in", "json_object", "and", "isinstance", "(", "json_object", "[", "key", "]", ",", "six", ".", "text_type", ")", ")", ":", "try", ":", "json_object", "[", "key", "]", "=", "int", "(", "json_object", "[", "key", "]", ")", "except", "ValueError", ":", "pass", "return", "json_object" ]
Deal with saving cropduster images to S3 . Cropduster is a Django library for resizing editorial images . S3utils was originally written to put cropduster images on an S3 bucket .
def cp_cropduster_image ( self , the_image_path , del_after_upload = False , overwrite = False , invalidate = False ) : local_file = os . path . join ( settings . MEDIA_ROOT , the_image_path ) # only try to upload things if the origin cropduster file exists (so it is not already uploaded to the CDN) if os . path . exists ( local_file ) : the_image_crops_path = os . path . splitext ( the_image_path ) [ 0 ] the_image_crops_path_full_path = os . path . join ( settings . MEDIA_ROOT , the_image_crops_path ) self . cp ( local_path = local_file , target_path = os . path . join ( settings . S3_ROOT_BASE , the_image_path ) , del_after_upload = del_after_upload , overwrite = overwrite , invalidate = invalidate , ) self . cp ( local_path = the_image_crops_path_full_path + "/*" , target_path = os . path . join ( settings . S3_ROOT_BASE , the_image_crops_path ) , del_after_upload = del_after_upload , overwrite = overwrite , invalidate = invalidate , )
273
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L510-L551
[ "def", "revert_timefactor", "(", "cls", ",", "values", ")", ":", "if", "cls", ".", "TIME", "is", "True", ":", "return", "values", "/", "cls", ".", "get_timefactor", "(", ")", "if", "cls", ".", "TIME", "is", "False", ":", "return", "values", "*", "cls", ".", "get_timefactor", "(", ")", "return", "values" ]
sets permissions for a file on S3
def chmod ( self , target_file , acl = 'public-read' ) : self . k . key = target_file # setting the path (key) of file in the container self . k . set_acl ( acl ) # setting the file permissions self . k . close ( )
274
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L582-L610
[ "def", "group_experiments_greedy", "(", "tomo_expt", ":", "TomographyExperiment", ")", ":", "diag_sets", "=", "_max_tpb_overlap", "(", "tomo_expt", ")", "grouped_expt_settings_list", "=", "list", "(", "diag_sets", ".", "values", "(", ")", ")", "grouped_tomo_expt", "=", "TomographyExperiment", "(", "grouped_expt_settings_list", ",", "program", "=", "tomo_expt", ".", "program", ")", "return", "grouped_tomo_expt" ]
Get the list of files and permissions from S3 .
def ll ( self , folder = "" , begin_from_file = "" , num = - 1 , all_grant_data = False ) : return self . ls ( folder = folder , begin_from_file = begin_from_file , num = num , get_grants = True , all_grant_data = all_grant_data )
275
https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L669-L764
[ "def", "from_indexed_arrays", "(", "cls", ",", "name", ",", "verts", ",", "normals", ")", ":", "# Put header in string", "wavefront_str", "=", "\"o {name}\\n\"", ".", "format", "(", "name", "=", "name", ")", "new_verts", ",", "face_indices", "=", "face_index", "(", "verts", ")", "assert", "new_verts", ".", "shape", "[", "1", "]", "==", "3", ",", "\"verts should be Nx3 array\"", "assert", "face_indices", ".", "ndim", "==", "2", "face_indices", "=", "fan_triangulate", "(", "face_indices", ")", "# Write Vertex data from vert_dict", "for", "vert", "in", "new_verts", ":", "wavefront_str", "+=", "\"v {0} {1} {2}\\n\"", ".", "format", "(", "*", "vert", ")", "# Write (false) UV Texture data", "wavefront_str", "+=", "\"vt 1.0 1.0\\n\"", "for", "norm", "in", "normals", ":", "wavefront_str", "+=", "\"vn {0} {1} {2}\\n\"", ".", "format", "(", "*", "norm", ")", "assert", "len", "(", "face_indices", ")", "==", "len", "(", "normals", ")", "*", "2", "for", "norm_idx", ",", "vert_idx", ",", "in", "enumerate", "(", "face_indices", ")", ":", "wavefront_str", "+=", "\"f\"", "for", "vv", "in", "vert_idx", ":", "wavefront_str", "+=", "\" {}/{}/{}\"", ".", "format", "(", "vv", "+", "1", ",", "1", ",", "(", "norm_idx", "//", "2", ")", "+", "1", ")", "wavefront_str", "+=", "\"\\n\"", "return", "cls", "(", "string", "=", "wavefront_str", ")" ]
Get the path from a given url including the querystring .
def get_path ( url ) : url = urlsplit ( url ) path = url . path if url . query : path += "?{}" . format ( url . query ) return path
276
https://github.com/uktrade/directory-signature-auth/blob/1a1b1e887b25a938133d7bcc146d3fecf1079313/sigauth/helpers.py#L79-L94
[ "def", "hasReaders", "(", "self", ",", "ulBuffer", ")", ":", "fn", "=", "self", ".", "function_table", ".", "hasReaders", "result", "=", "fn", "(", "ulBuffer", ")", "return", "result" ]
Reads data from disk and generates CSV files .
def run ( self ) : # Try to create the directory if not os . path . exists ( self . output ) : try : os . mkdir ( self . output ) except : print 'failed to create output directory %s' % self . output # Be sure it is a directory if not os . path . isdir ( self . output ) : print 'invalid output directory %s' % self . output sys . exit ( 1 ) # Create the CSV handlers visitors = [ _CompaniesCSV ( self . output ) , _ActivitiesCSV ( self . output ) , _ActivitiesSeenCSV ( self . output ) , _QSACSV ( self . output ) , ] # Run by each company populating the CSV files for path in glob . glob ( os . path . join ( self . input , '*.json' ) ) : with open ( path , 'r' ) as f : try : data = json . load ( f , encoding = 'utf-8' ) except ValueError : continue for visitor in visitors : visitor . visit ( data )
277
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/build.py#L144-L175
[ "def", "get_queryset", "(", "self", ")", ":", "return", "Event", ".", "objects", ".", "filter", "(", "Q", "(", "startTime__gte", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "90", ")", ")", "&", "(", "Q", "(", "series__isnull", "=", "False", ")", "|", "Q", "(", "publicevent__isnull", "=", "False", ")", ")", ")", ".", "annotate", "(", "count", "=", "Count", "(", "'eventregistration'", ")", ")", ".", "annotate", "(", "*", "*", "self", ".", "get_annotations", "(", ")", ")", ".", "exclude", "(", "Q", "(", "count", "=", "0", ")", "&", "Q", "(", "status__in", "=", "[", "Event", ".", "RegStatus", ".", "hidden", ",", "Event", ".", "RegStatus", ".", "regHidden", ",", "Event", ".", "RegStatus", ".", "disabled", "]", ")", ")" ]
Process a list of simple string field definitions and assign their order based on prefix .
def process_fields ( self , fields ) : result = [ ] strip = '' . join ( self . PREFIX_MAP ) for field in fields : direction = self . PREFIX_MAP [ '' ] if field [ 0 ] in self . PREFIX_MAP : direction = self . PREFIX_MAP [ field [ 0 ] ] field = field . lstrip ( strip ) result . append ( ( field , direction ) ) return result
278
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/index.py#L60-L75
[ "def", "start", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "self", ".", "websock_url", "=", "self", ".", "chrome", ".", "start", "(", "*", "*", "kwargs", ")", "self", ".", "websock", "=", "websocket", ".", "WebSocketApp", "(", "self", ".", "websock_url", ")", "self", ".", "websock_thread", "=", "WebsockReceiverThread", "(", "self", ".", "websock", ",", "name", "=", "'WebsockThread:%s'", "%", "self", ".", "chrome", ".", "port", ")", "self", ".", "websock_thread", ".", "start", "(", ")", "self", ".", "_wait_for", "(", "lambda", ":", "self", ".", "websock_thread", ".", "is_open", ",", "timeout", "=", "30", ")", "# tell browser to send us messages we're interested in", "self", ".", "send_to_chrome", "(", "method", "=", "'Network.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Page.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Console.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'Runtime.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'ServiceWorker.enable'", ")", "self", ".", "send_to_chrome", "(", "method", "=", "'ServiceWorker.setForceUpdateOnPageLoad'", ")", "# disable google analytics", "self", ".", "send_to_chrome", "(", "method", "=", "'Network.setBlockedURLs'", ",", "params", "=", "{", "'urls'", ":", "[", "'*google-analytics.com/analytics.js'", ",", "'*google-analytics.com/ga.js'", "]", "}", ")" ]
Search for firms within a rubric
def search_in_rubric ( self , * * kwargs ) : point = kwargs . pop ( 'point' , False ) if point : kwargs [ 'point' ] = '%s,%s' % point bound = kwargs . pop ( 'bound' , False ) if bound : kwargs [ 'bound[point1]' ] = bound [ 0 ] kwargs [ 'bound[point2]' ] = bound [ 1 ] filters = kwargs . pop ( 'filters' , False ) if filters : for k , v in filters . items ( ) : kwargs [ 'filters[%s]' % k ] = v return self . _search_in_rubric ( * * kwargs )
279
https://github.com/svartalf/python-2gis/blob/6eccd6073c99494b7abf20b38a5455cbd55d6420/dgis/__init__.py#L89-L109
[ "def", "__write_to_character_device", "(", "self", ",", "event_list", ",", "timeval", "=", "None", ")", ":", "# Remember the position of the stream", "pos", "=", "self", ".", "_character_device", ".", "tell", "(", ")", "# Go to the end of the stream", "self", ".", "_character_device", ".", "seek", "(", "0", ",", "2", ")", "# Write the new data to the end", "for", "event", "in", "event_list", ":", "self", ".", "_character_device", ".", "write", "(", "event", ")", "# Add a sync marker", "sync", "=", "self", ".", "create_event_object", "(", "\"Sync\"", ",", "0", ",", "0", ",", "timeval", ")", "self", ".", "_character_device", ".", "write", "(", "sync", ")", "# Put the stream back to its original position", "self", ".", "_character_device", ".", "seek", "(", "pos", ")" ]
Refresh the list and the screen
def refresh ( self ) : self . _screen . force_update ( ) self . _screen . refresh ( ) self . _update ( 1 )
280
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L54-L60
[ "def", "import_file", "(", "filename", ")", ":", "#file_path = os.path.relpath(filename)", "file_path", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "log", "(", "DEBUG", ",", "\"Loading prices from %s\"", ",", "file_path", ")", "prices", "=", "__read_prices_from_file", "(", "file_path", ")", "with", "BookAggregate", "(", "for_writing", "=", "True", ")", "as", "svc", ":", "svc", ".", "prices", ".", "import_prices", "(", "prices", ")", "print", "(", "\"Saving book...\"", ")", "svc", ".", "book", ".", "save", "(", ")" ]
Mark an action as started
def start ( self , activity , action ) : try : self . _start_action ( activity , action ) except ValueError : retox_log . debug ( "Could not find action %s in env %s" % ( activity , self . name ) ) self . refresh ( )
281
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L233-L247
[ "def", "_read", "(", "self", ",", "size", ")", ":", "if", "self", ".", "comptype", "==", "\"tar\"", ":", "return", "self", ".", "__read", "(", "size", ")", "c", "=", "len", "(", "self", ".", "dbuf", ")", "while", "c", "<", "size", ":", "buf", "=", "self", ".", "__read", "(", "self", ".", "bufsize", ")", "if", "not", "buf", ":", "break", "try", ":", "buf", "=", "self", ".", "cmp", ".", "decompress", "(", "buf", ")", "except", "IOError", ":", "raise", "ReadError", "(", "\"invalid compressed data\"", ")", "self", ".", "dbuf", "+=", "buf", "c", "+=", "len", "(", "buf", ")", "buf", "=", "self", ".", "dbuf", "[", ":", "size", "]", "self", ".", "dbuf", "=", "self", ".", "dbuf", "[", "size", ":", "]", "return", "buf" ]
Mark a task as completed
def stop ( self , activity , action ) : try : self . _remove_running_action ( activity , action ) except ValueError : retox_log . debug ( "Could not find action %s in env %s" % ( activity , self . name ) ) self . _mark_action_completed ( activity , action ) self . refresh ( )
282
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L249-L264
[ "def", "rate_limit", "(", "f", ")", ":", "def", "new_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "errors", "=", "0", "while", "True", ":", "resp", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resp", ".", "status_code", "==", "200", ":", "errors", "=", "0", "return", "resp", "elif", "resp", ".", "status_code", "==", "401", ":", "# Hack to retain the original exception, but augment it with", "# additional context for the user to interpret it. In a Python", "# 3 only future we can raise a new exception of the same type", "# with a new message from the old error.", "try", ":", "resp", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", "as", "e", ":", "message", "=", "\"\\nThis is a protected or locked account, or\"", "+", "\" the credentials provided are no longer valid.\"", "e", ".", "args", "=", "(", "e", ".", "args", "[", "0", "]", "+", "message", ",", ")", "+", "e", ".", "args", "[", "1", ":", "]", "log", ".", "warning", "(", "\"401 Authentication required for %s\"", ",", "resp", ".", "url", ")", "raise", "elif", "resp", ".", "status_code", "==", "429", ":", "reset", "=", "int", "(", "resp", ".", "headers", "[", "'x-rate-limit-reset'", "]", ")", "now", "=", "time", ".", "time", "(", ")", "seconds", "=", "reset", "-", "now", "+", "10", "if", "seconds", "<", "1", ":", "seconds", "=", "10", "log", ".", "warning", "(", "\"rate limit exceeded: sleeping %s secs\"", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "elif", "resp", ".", "status_code", ">=", "500", ":", "errors", "+=", "1", "if", "errors", ">", "30", ":", "log", ".", "warning", "(", "\"too many errors from Twitter, giving up\"", ")", "resp", ".", "raise_for_status", "(", ")", "seconds", "=", "60", "*", "errors", "log", ".", "warning", "(", "\"%s from Twitter API, sleeping %s\"", ",", "resp", ".", "status_code", ",", "seconds", ")", "time", ".", "sleep", "(", "seconds", ")", "else", ":", "resp", ".", "raise_for_status", "(", ")", "return", "new_f" ]
Move laggard tasks over
def finish ( self , status ) : retox_log . info ( "Completing %s with status %s" % ( self . name , status ) ) result = Screen . COLOUR_GREEN if not status else Screen . COLOUR_RED self . palette [ 'title' ] = ( Screen . COLOUR_WHITE , Screen . A_BOLD , result ) for item in list ( self . _task_view . options ) : self . _task_view . options . remove ( item ) self . _completed_view . options . append ( item ) self . refresh ( )
283
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L266-L279
[ "def", "_session_check", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "SESSION_FILE", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Session file does not exist\"", ")", "return", "False", "with", "open", "(", "SESSION_FILE", ",", "'rb'", ")", "as", "f", ":", "cookies", "=", "requests", ".", "utils", ".", "cookiejar_from_dict", "(", "pickle", ".", "load", "(", "f", ")", ")", "self", ".", "_session", ".", "cookies", "=", "cookies", "self", ".", "_log", ".", "debug", "(", "\"Loaded cookies from session file\"", ")", "response", "=", "self", ".", "_session", ".", "get", "(", "url", "=", "self", ".", "TEST_URL", ",", "headers", "=", "self", ".", "HEADERS", ")", "if", "self", ".", "TEST_KEY", "in", "str", "(", "response", ".", "content", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Session file appears invalid\"", ")", "return", "False", "self", ".", "_is_authenticated", "=", "True", "self", ".", "_process_state", "(", ")", "return", "True" ]
Reset the frame between jobs
def reset ( self ) : self . palette [ 'title' ] = ( Screen . COLOUR_WHITE , Screen . A_BOLD , Screen . COLOUR_BLUE ) self . _completed_view . options = [ ] self . _task_view . options = [ ] self . refresh ( )
284
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L281-L288
[ "def", "get_grid_district_polygon", "(", "config", ",", "subst_id", "=", "None", ",", "projection", "=", "4326", ")", ":", "# make DB session", "conn", "=", "connection", "(", "section", "=", "config", "[", "'db_connection'", "]", "[", "'section'", "]", ")", "Session", "=", "sessionmaker", "(", "bind", "=", "conn", ")", "session", "=", "Session", "(", ")", "# get polygon from versioned schema", "if", "config", "[", "'data_source'", "]", "[", "'oedb_data_source'", "]", "==", "'versioned'", ":", "version", "=", "config", "[", "'versioned'", "]", "[", "'version'", "]", "query", "=", "session", ".", "query", "(", "EgoDpMvGriddistrict", ".", "subst_id", ",", "EgoDpMvGriddistrict", ".", "geom", ")", "Regions", "=", "[", "(", "subst_id", ",", "shape", ".", "to_shape", "(", "geom", ")", ")", "for", "subst_id", ",", "geom", "in", "query", ".", "filter", "(", "EgoDpMvGriddistrict", ".", "version", "==", "version", ",", "EgoDpMvGriddistrict", ".", "subst_id", "==", "subst_id", ")", ".", "all", "(", ")", "]", "# get polygon from model_draft", "else", ":", "query", "=", "session", ".", "query", "(", "EgoGridMvGriddistrict", ".", "subst_id", ",", "EgoGridMvGriddistrict", ".", "geom", ")", "Regions", "=", "[", "(", "subst_id", ",", "shape", ".", "to_shape", "(", "geom", ")", ")", "for", "subst_id", ",", "geom", "in", "query", ".", "filter", "(", "EgoGridMvGriddistrict", ".", "subst_id", ".", "in_", "(", "subst_id", ")", ")", ".", "all", "(", ")", "]", "crs", "=", "{", "'init'", ":", "'epsg:3035'", "}", "region", "=", "gpd", ".", "GeoDataFrame", "(", "Regions", ",", "columns", "=", "[", "'subst_id'", ",", "'geometry'", "]", ",", "crs", "=", "crs", ")", "region", "=", "region", ".", "to_crs", "(", "epsg", "=", "projection", ")", "return", "region" ]
Returns the available kwargs of the called class
def default_arguments ( cls ) : func = cls . __init__ args = func . __code__ . co_varnames defaults = func . __defaults__ index = - len ( defaults ) return { k : v for k , v in zip ( args [ index : ] , defaults ) }
285
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L134-L140
[ "def", "_get_image_paths", "(", "im1", ",", "im2", ")", ":", "paths", "=", "[", "]", "for", "im", "in", "[", "im1", ",", "im2", "]", ":", "if", "im", "is", "None", ":", "# Groupwise registration: only one image (ndim+1 dimensions)", "paths", ".", "append", "(", "paths", "[", "0", "]", ")", "continue", "if", "isinstance", "(", "im", ",", "str", ")", ":", "# Given a location", "if", "os", ".", "path", ".", "isfile", "(", "im1", ")", ":", "paths", ".", "append", "(", "im", ")", "else", ":", "raise", "ValueError", "(", "'Image location does not exist.'", ")", "elif", "isinstance", "(", "im", ",", "np", ".", "ndarray", ")", ":", "# Given a numpy array", "id", "=", "len", "(", "paths", ")", "+", "1", "p", "=", "_write_image_data", "(", "im", ",", "id", ")", "paths", ".", "append", "(", "p", ")", "else", ":", "# Given something else ...", "raise", "ValueError", "(", "'Invalid input image.'", ")", "# Done", "return", "tuple", "(", "paths", ")" ]
Recreate the class based on your args for multiple uses
def recreate ( cls , * args , * * kwargs ) : cls . check_arguments ( kwargs ) first_is_callable = True if any ( args ) and callable ( args [ 0 ] ) else False signature = cls . default_arguments ( ) allowed_arguments = { k : v for k , v in kwargs . items ( ) if k in signature } if ( any ( allowed_arguments ) or any ( args ) ) and not first_is_callable : if any ( args ) and not first_is_callable : return cls ( args [ 0 ] , * * allowed_arguments ) elif any ( allowed_arguments ) : return cls ( * * allowed_arguments ) return cls . instances [ - 1 ] if any ( cls . instances ) else cls ( )
286
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L143-L155
[ "def", "_PrintProcessingTime", "(", "self", ",", "processing_status", ")", ":", "if", "not", "processing_status", ":", "processing_time", "=", "'00:00:00'", "else", ":", "processing_time", "=", "time", ".", "time", "(", ")", "-", "processing_status", ".", "start_time", "time_struct", "=", "time", ".", "gmtime", "(", "processing_time", ")", "processing_time", "=", "time", ".", "strftime", "(", "'%H:%M:%S'", ",", "time_struct", ")", "self", ".", "_output_writer", ".", "Write", "(", "'Processing time\\t\\t: {0:s}\\n'", ".", "format", "(", "processing_time", ")", ")" ]
Put warnings for arguments that can't be handled by the class
def check_arguments ( cls , passed ) : defaults = list ( cls . default_arguments ( ) . keys ( ) ) template = ( "Pass arg {argument:!r} in {cname:!r}, can be a typo? " "Supported key arguments: {defaults}" ) fails = [ ] for arg in passed : if arg not in defaults : warn ( template . format ( argument = arg , cname = cls . __name__ , defaults = defaults ) ) fails . append ( arg ) return any ( fails )
287
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L158-L171
[ "def", "queries", "(", "self", ",", "request", ")", ":", "queries", "=", "self", ".", "get_queries", "(", "request", ")", "worlds", "=", "[", "]", "with", "self", ".", "mapper", ".", "begin", "(", ")", "as", "session", ":", "for", "_", "in", "range", "(", "queries", ")", ":", "world", "=", "session", ".", "query", "(", "World", ")", ".", "get", "(", "randint", "(", "1", ",", "MAXINT", ")", ")", "worlds", ".", "append", "(", "self", ".", "get_json", "(", "world", ")", ")", "return", "Json", "(", "worlds", ")", ".", "http_response", "(", "request", ")" ]
process the specified type then process its children
def process ( self , data , type , history ) : if type in history : return if type . enum ( ) : return history . append ( type ) resolved = type . resolve ( ) value = None if type . multi_occurrence ( ) : value = [ ] else : if len ( resolved ) > 0 : if resolved . mixed ( ) : value = Factory . property ( resolved . name ) md = value . __metadata__ md . sxtype = resolved else : value = Factory . object ( resolved . name ) md = value . __metadata__ md . sxtype = resolved md . ordering = self . ordering ( resolved ) setattr ( data , type . name , value ) if value is not None : data = value if not isinstance ( data , list ) : self . add_attributes ( data , resolved ) for child , ancestry in resolved . children ( ) : if self . skip_child ( child , ancestry ) : continue self . process ( data , child , history [ : ] )
288
https://github.com/ovnicraft/suds2/blob/e5b540792206a41efc22f5d5b9cfac2dbe7a7992/suds/builder.py#L60-L90
[ "def", "gc_velocity_update", "(", "particle", ",", "social", ",", "state", ")", ":", "gbest", "=", "state", ".", "swarm", "[", "gbest_idx", "(", "state", ".", "swarm", ")", "]", ".", "position", "if", "not", "np", ".", "array_equal", "(", "gbest", ",", "particle", ".", "position", ")", ":", "return", "std_velocity", "(", "particle", ",", "social", ",", "state", ")", "rho", "=", "state", ".", "params", "[", "'rho'", "]", "inertia", "=", "state", ".", "params", "[", "'inertia'", "]", "v_max", "=", "state", ".", "params", "[", "'v_max'", "]", "size", "=", "particle", ".", "position", ".", "size", "r2", "=", "state", ".", "rng", ".", "uniform", "(", "0.0", ",", "1.0", ",", "size", ")", "velocity", "=", "__gc_velocity_equation__", "(", "inertia", ",", "rho", ",", "r2", ",", "particle", ",", "gbest", ")", "return", "__clamp__", "(", "velocity", ",", "v_max", ")" ]
get whether or not to skip the specified child
def skip_child ( self , child , ancestry ) : if child . any ( ) : return True for x in ancestry : if x . choice ( ) : return True return False
289
https://github.com/ovnicraft/suds2/blob/e5b540792206a41efc22f5d5b9cfac2dbe7a7992/suds/builder.py#L99-L105
[ "def", "_load_cache", "(", "self", ")", ":", "# If the cached file exist, read-it", "max_refresh_date", "=", "timedelta", "(", "days", "=", "7", ")", "cached_data", "=", "{", "}", "try", ":", "with", "open", "(", "self", ".", "cache_file", ",", "'rb'", ")", "as", "f", ":", "cached_data", "=", "pickle", ".", "load", "(", "f", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "\"Cannot read version from cache file: {} ({})\"", ".", "format", "(", "self", ".", "cache_file", ",", "e", ")", ")", "else", ":", "logger", ".", "debug", "(", "\"Read version from cache file\"", ")", "if", "(", "cached_data", "[", "'installed_version'", "]", "!=", "self", ".", "installed_version", "(", ")", "or", "datetime", ".", "now", "(", ")", "-", "cached_data", "[", "'refresh_date'", "]", ">", "max_refresh_date", ")", ":", "# Reset the cache if:", "# - the installed version is different", "# - the refresh_date is > max_refresh_date", "cached_data", "=", "{", "}", "return", "cached_data" ]
Checks whether knocks are enabled for the model given as argument
def active_knocks ( obj ) : if not hasattr ( _thread_locals , 'knock_enabled' ) : return True return _thread_locals . knock_enabled . get ( obj . __class__ , True )
290
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/signals.py#L34-L43
[ "def", "_parse_array_bitmap", "(", "self", ",", "block", ")", ":", "array_bitmap_regexp", "=", "re", ".", "compile", "(", "'^ *bitmap: (?P<allocated_pages>[0-9]*)/'", "'(?P<total_pages>[0-9]*) pages '", "'\\[(?P<page_size>[0-9]*)KB\\], '", "'(?P<chunk_size>[0-9]*)KB chunk.*$'", ",", "re", ".", "MULTILINE", ")", "regexp_res", "=", "array_bitmap_regexp", ".", "search", "(", "block", ")", "# bitmap is optionally in mdstat", "if", "not", "regexp_res", ":", "return", "None", "array_bitmap_dict", "=", "regexp_res", ".", "groupdict", "(", ")", "array_bitmap_dict_sanitizied", "=", "{", "}", "# convert all values to int", "for", "key", ",", "value", "in", "array_bitmap_dict", ".", "iteritems", "(", ")", ":", "if", "not", "value", ":", "continue", "array_bitmap_dict_sanitizied", "[", "key", "]", "=", "int", "(", "value", ")", "# convert page_size to bytes", "array_bitmap_dict_sanitizied", "[", "'page_size'", "]", "*=", "1024", "# convert chunk_size to bytes", "array_bitmap_dict_sanitizied", "[", "'chunk_size'", "]", "*=", "1024", "return", "array_bitmap_dict" ]
Context manager to suspend sending knocks for the given model
def pause_knocks ( obj ) : if not hasattr ( _thread_locals , 'knock_enabled' ) : _thread_locals . knock_enabled = { } obj . __class__ . _disconnect ( ) _thread_locals . knock_enabled [ obj . __class__ ] = False yield _thread_locals . knock_enabled [ obj . __class__ ] = True obj . __class__ . _connect ( )
291
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/signals.py#L47-L59
[ "def", "stats", "(", "args", ")", ":", "from", "jcvi", ".", "utils", ".", "cbook", "import", "percentage", "p", "=", "OptionParser", "(", "stats", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "blocksfile", ",", "=", "args", "fp", "=", "open", "(", "blocksfile", ")", "counts", "=", "defaultdict", "(", "int", ")", "total", "=", "orthologous", "=", "0", "for", "row", "in", "fp", ":", "atoms", "=", "row", ".", "rstrip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "hits", "=", "[", "x", "for", "x", "in", "atoms", "[", "1", ":", "]", "if", "x", "!=", "'.'", "]", "counts", "[", "len", "(", "hits", ")", "]", "+=", "1", "total", "+=", "1", "if", "atoms", "[", "1", "]", "!=", "'.'", ":", "orthologous", "+=", "1", "print", "(", "\"Total lines: {0}\"", ".", "format", "(", "total", ")", ",", "file", "=", "sys", ".", "stderr", ")", "for", "i", ",", "n", "in", "sorted", "(", "counts", ".", "items", "(", ")", ")", ":", "print", "(", "\"Count {0}: {1}\"", ".", "format", "(", "i", ",", "percentage", "(", "n", ",", "total", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "file", "=", "sys", ".", "stderr", ")", "matches", "=", "sum", "(", "n", "for", "i", ",", "n", "in", "counts", ".", "items", "(", ")", "if", "i", "!=", "0", ")", "print", "(", "\"Total lines with matches: {0}\"", ".", "format", "(", "percentage", "(", "matches", ",", "total", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "for", "i", ",", "n", "in", "sorted", "(", "counts", ".", "items", "(", ")", ")", ":", "if", "i", "==", "0", ":", "continue", "print", "(", "\"Count {0}: {1}\"", ".", "format", "(", "i", ",", "percentage", "(", "n", ",", "matches", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"Orthologous matches: {0}\"", ".", "format", "(", "percentage", "(", "orthologous", ",", "matches", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
Loop over the report progress
def _loopreport ( self ) : while 1 : eventlet . sleep ( 0.2 ) ac2popenlist = { } for action in self . session . _actions : for popen in action . _popenlist : if popen . poll ( ) is None : lst = ac2popenlist . setdefault ( action . activity , [ ] ) lst . append ( popen ) if not action . _popenlist and action in self . _actionmayfinish : super ( RetoxReporter , self ) . logaction_finish ( action ) self . _actionmayfinish . remove ( action ) self . screen . draw_next_frame ( repeat = False )
292
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/reporter.py#L49-L65
[ "def", "union", "(", "self", ",", "rdds", ")", ":", "first_jrdd_deserializer", "=", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", "if", "any", "(", "x", ".", "_jrdd_deserializer", "!=", "first_jrdd_deserializer", "for", "x", "in", "rdds", ")", ":", "rdds", "=", "[", "x", ".", "_reserialize", "(", ")", "for", "x", "in", "rdds", "]", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "api", ".", "java", ".", "JavaRDD", "jrdds", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "rdds", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "rdds", ")", ")", ":", "jrdds", "[", "i", "]", "=", "rdds", "[", "i", "]", ".", "_jrdd", "return", "RDD", "(", "self", ".", "_jsc", ".", "union", "(", "jrdds", ")", ",", "self", ",", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Send markdown email
def send ( email , subject = None , from_email = None , to_email = None , cc = None , bcc = None , reply_to = None , smtp = None ) : if is_string ( email ) : email = EmailContent ( email ) from_email = sanitize_email_address ( from_email or email . headers . get ( 'from' ) ) to_email = sanitize_email_address ( to_email or email . headers . get ( 'to' ) ) cc = sanitize_email_address ( cc or email . headers . get ( 'cc' ) ) bcc = sanitize_email_address ( bcc or email . headers . get ( 'bcc' ) ) reply_to = sanitize_email_address ( reply_to or email . headers . get ( 'reply-to' ) ) message_args = { 'html' : email . html , 'text' : email . text , 'subject' : ( subject or email . headers . get ( 'subject' , '' ) ) , 'mail_from' : from_email , 'mail_to' : to_email } if cc : message_args [ 'cc' ] = cc if bcc : message_args [ 'bcc' ] = bcc if reply_to : message_args [ 'headers' ] = { 'reply-to' : reply_to } message = emails . Message ( * * message_args ) for filename , data in email . inline_images : message . attach ( filename = filename , content_disposition = 'inline' , data = data ) message . send ( smtp = smtp )
293
https://github.com/yejianye/mdmail/blob/ef03da8d5836b5ae0a4ad8c44f2fe4936a896644/mdmail/api.py#L11-L63
[ "def", "set_size", "(", "self", ",", "size", ",", "surface_size", ")", ":", "self", ".", "size", "=", "size", "self", ".", "position", "=", "(", "0", ",", "surface_size", "[", "1", "]", "-", "self", ".", "size", "[", "1", "]", ")", "y", "=", "self", ".", "position", "[", "1", "]", "+", "self", ".", "padding", "max_length", "=", "self", ".", "max_length", "for", "row", "in", "self", ".", "rows", ":", "r", "=", "len", "(", "row", ")", "width", "=", "(", "r", "*", "self", ".", "key_size", ")", "+", "(", "(", "r", "+", "1", ")", "*", "self", ".", "padding", ")", "x", "=", "(", "surface_size", "[", "0", "]", "-", "width", ")", "/", "2", "if", "row", ".", "space", "is", "not", "None", ":", "x", "-=", "(", "(", "row", ".", "space", ".", "length", "-", "1", ")", "*", "self", ".", "key_size", ")", "/", "2", "row", ".", "set_size", "(", "(", "x", ",", "y", ")", ",", "self", ".", "key_size", ",", "self", ".", "padding", ")", "y", "+=", "self", ".", "padding", "+", "self", ".", "key_size" ]
Process timezone casting and conversion .
def _process_tz ( self , dt , naive , tz ) : def _tz ( t ) : if t in ( None , 'naive' ) : return t if t == 'local' : if __debug__ and not localtz : raise ValueError ( "Requested conversion to local timezone, but `localtz` not installed." ) t = localtz if not isinstance ( t , tzinfo ) : if __debug__ and not localtz : raise ValueError ( "The `pytz` package must be installed to look up timezone: " + repr ( t ) ) t = get_tz ( t ) if not hasattr ( t , 'normalize' ) and get_tz : # Attempt to handle non-pytz tzinfo. t = get_tz ( t . tzname ( dt ) ) return t naive = _tz ( naive ) tz = _tz ( tz ) if not dt . tzinfo and naive : if hasattr ( naive , 'localize' ) : dt = naive . localize ( dt ) else : dt = dt . replace ( tzinfo = naive ) if not tz : return dt if hasattr ( tz , 'normalize' ) : dt = tz . normalize ( dt . astimezone ( tz ) ) elif tz == 'naive' : dt = dt . replace ( tzinfo = None ) else : dt = dt . astimezone ( tz ) # Warning: this might not always be entirely correct! return dt
294
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/field/date.py#L59-L102
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "SmartView", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "# derive our field config", "self", ".", "field_config", "=", "self", ".", "derive_field_config", "(", ")", "# add our fields", "self", ".", "fields", "=", "self", ".", "derive_fields", "(", ")", "# build up our current parameter string, EXCLUSIVE of our page. These", "# are used to build pagination URLs", "url_params", "=", "\"?\"", "order_params", "=", "\"\"", "for", "key", "in", "self", ".", "request", ".", "GET", ".", "keys", "(", ")", ":", "if", "key", "!=", "'page'", "and", "key", "!=", "'pjax'", "and", "(", "len", "(", "key", ")", "==", "0", "or", "key", "[", "0", "]", "!=", "'_'", ")", ":", "for", "value", "in", "self", ".", "request", ".", "GET", ".", "getlist", "(", "key", ")", ":", "url_params", "+=", "\"%s=%s&\"", "%", "(", "key", ",", "urlquote", "(", "value", ")", ")", "elif", "key", "==", "'_order'", ":", "order_params", "=", "\"&\"", ".", "join", "(", "[", "\"%s=%s\"", "%", "(", "key", ",", "_", ")", "for", "_", "in", "self", ".", "request", ".", "GET", ".", "getlist", "(", "key", ")", "]", ")", "context", "[", "'url_params'", "]", "=", "url_params", "context", "[", "'order_params'", "]", "=", "order_params", "+", "\"&\"", "context", "[", "'pjax'", "]", "=", "self", ".", "pjax", "# set our blocks", "context", "[", "'blocks'", "]", "=", "dict", "(", ")", "# stuff it all in our context", "context", "[", "'fields'", "]", "=", "self", ".", "fields", "context", "[", "'view'", "]", "=", "self", "context", "[", "'field_config'", "]", "=", "self", ".", "field_config", "context", "[", "'title'", "]", "=", "self", ".", "derive_title", "(", ")", "# and any extra context the user specified", "context", ".", "update", "(", "self", ".", "extra_context", ")", "# by default, our base is 'base.html', but we might be pjax", "base_template", "=", "\"base.html\"", "if", "'pjax'", "in", "self", ".", "request", ".", "GET", "or", "'pjax'", "in", "self", ".", "request", ".", "POST", ":", "base_template", "=", "\"smartmin/pjax.html\"", "if", "'HTTP_X_PJAX'", "in", "self", ".", "request", ".", "META", ":", "base_template", "=", "\"smartmin/pjax.html\"", "context", "[", "'base_template'", "]", "=", "base_template", "# set our refresh if we have one", "refresh", "=", "self", ".", "derive_refresh", "(", ")", "if", "refresh", ":", "context", "[", "'refresh'", "]", "=", "refresh", "return", "context" ]
Trigger assignment of default values .
def _prepare_defaults ( self ) : for name , field in self . __fields__ . items ( ) : if field . assign : getattr ( self , name )
295
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/document.py#L71-L76
[ "def", "trace_integration", "(", "tracer", "=", "None", ")", ":", "log", ".", "info", "(", "'Integrated module: {}'", ".", "format", "(", "MODULE_NAME", ")", ")", "# Wrap the httplib request function", "request_func", "=", "getattr", "(", "httplib", ".", "HTTPConnection", ",", "HTTPLIB_REQUEST_FUNC", ")", "wrapped_request", "=", "wrap_httplib_request", "(", "request_func", ")", "setattr", "(", "httplib", ".", "HTTPConnection", ",", "request_func", ".", "__name__", ",", "wrapped_request", ")", "# Wrap the httplib response function", "response_func", "=", "getattr", "(", "httplib", ".", "HTTPConnection", ",", "HTTPLIB_RESPONSE_FUNC", ")", "wrapped_response", "=", "wrap_httplib_response", "(", "response_func", ")", "setattr", "(", "httplib", ".", "HTTPConnection", ",", "response_func", ".", "__name__", ",", "wrapped_response", ")" ]
Convert data coming in from the MongoDB wire driver into a Document instance .
def from_mongo ( cls , doc ) : if doc is None : # To support simplified iterative use, None should return None. return None if isinstance ( doc , Document ) : # No need to perform processing on existing Document instances. return doc if cls . __type_store__ and cls . __type_store__ in doc : # Instantiate specific class mentioned in the data. cls = load ( doc [ cls . __type_store__ ] , 'marrow.mongo.document' ) # Prepare a new instance in such a way that changes to the instance will be reflected in the originating doc. instance = cls ( _prepare_defaults = False ) # Construct an instance, but delay default value processing. instance . __data__ = doc # I am Popeye of Borg (pattern); you will be askimilgrated. instance . _prepare_defaults ( ) # pylint:disable=protected-access -- deferred default value processing. return instance
296
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/document.py#L81-L98
[ "def", "variance", "(", "self", ")", ":", "alpha", "=", "self", ".", "__success", "+", "self", ".", "__default_alpha", "beta", "=", "self", ".", "__failure", "+", "self", ".", "__default_beta", "try", ":", "variance", "=", "alpha", "*", "beta", "/", "(", "(", "alpha", "+", "beta", ")", "**", "2", ")", "*", "(", "alpha", "+", "beta", "+", "1", ")", "except", "ZeroDivisionError", ":", "variance", "=", "0.0", "return", "variance" ]
Retrieve and remove a value from the backing store optionally with a default .
def pop ( self , name , default = SENTINEL ) : if default is SENTINEL : return self . __data__ . pop ( name ) return self . __data__ . pop ( name , default )
297
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/document.py#L246-L252
[ "def", "update_table", "(", "event", ")", ":", "update_number", "=", "0", "for", "update", "in", "event", ".", "get", "(", "'updates'", ",", "[", "]", ")", ":", "header", "=", "\"======= Update #%s on %s =======\"", "%", "(", "update_number", ",", "utils", ".", "clean_time", "(", "update", ".", "get", "(", "'startDate'", ")", ")", ")", "click", ".", "secho", "(", "header", ",", "fg", "=", "'green'", ")", "update_number", "=", "update_number", "+", "1", "text", "=", "update", ".", "get", "(", "'contents'", ")", "# deals with all the \\r\\n from the API", "click", ".", "secho", "(", "utils", ".", "clean_splitlines", "(", "text", ")", ")" ]
A basic operation operating on a single value .
def _op ( self , operation , other , * allowed ) : f = self . _field if self . _combining : # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz). return reduce ( self . _combining , ( q . _op ( operation , other , * allowed ) for q in f ) ) # pylint:disable=protected-access # Optimize this away in production; diagnosic aide. if __debug__ and _complex_safety_check ( f , { operation } | set ( allowed ) ) : # pragma: no cover raise NotImplementedError ( "{self!r} does not allow {op} comparison." . format ( self = self , op = operation ) ) if other is not None : other = f . transformer . foreign ( other , ( f , self . _document ) ) return Filter ( { self . _name : { operation : other } } )
298
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L154-L170
[ "def", "create", "(", "cls", ",", "destination", ")", ":", "mdb_gz_b64", "=", "\"\"\"\\\n H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a\n jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+\n qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7\n 9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz\n w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/\n +qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7\n FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy\n +XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ\n l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm\n j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH\n uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE\n qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T\n xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq\n 1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF\n ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR\n lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq\n loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5\n hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5\n ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9\n Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4\n q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn\n pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b\n KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1\n vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD\n /ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN\n tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I\n Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA\n ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy\n tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+\n +QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A\n AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA\n AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V\n HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI\n qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz\n XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp\n fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc\n f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l\n 1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj\n vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX\n q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA\n \"\"\"", "pristine", "=", "StringIO", "(", ")", "pristine", ".", "write", "(", "base64", ".", "b64decode", "(", "mdb_gz_b64", ")", ")", "pristine", ".", "seek", "(", "0", ")", "pristine", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "pristine", ",", "mode", "=", "'rb'", ")", "with", "open", "(", 
"destination", ",", "'wb'", ")", "as", "handle", ":", "shutil", ".", "copyfileobj", "(", "pristine", ",", "handle", ")", "return", "cls", "(", "destination", ")" ]
An iterative operation operating on multiple values . Consumes iterators to construct a concrete list at time of execution .
def _iop ( self , operation , other , * allowed ) : f = self . _field if self . _combining : # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz). return reduce ( self . _combining , ( q . _iop ( operation , other , * allowed ) for q in f ) ) # pylint:disable=protected-access # Optimize this away in production; diagnosic aide. if __debug__ and _complex_safety_check ( f , { operation } | set ( allowed ) ) : # pragma: no cover raise NotImplementedError ( "{self!r} does not allow {op} comparison." . format ( self = self , op = operation ) ) def _t ( o ) : for value in o : yield None if value is None else f . transformer . foreign ( value , ( f , self . _document ) ) other = other if len ( other ) > 1 else other [ 0 ] values = list ( _t ( other ) ) return Filter ( { self . _name : { operation : values } } )
299
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L172-L196
[ "def", "reset_flags", "(", "self", ")", ":", "self", ".", "C", "=", "None", "self", ".", "Z", "=", "None", "self", ".", "P", "=", "None", "self", ".", "S", "=", "None" ]