def use(self, obj, parent_form=None):
    """Note: if title is None, will be replaced with obj.filename"""
    if not isinstance(obj, self.input_classes):
        raise RuntimeError('{0!s} cannot handle a {1!s}'.format(
            self.__class__.__name__, obj.__class__.__name__))
    self.parent_form = parent_form
    if self.title is None:
        self.title = 'file: ' + obj.filename
    self._do_use(obj)
def _all(confs=None, **kwargs):
    """True iff all input confs are True.

    :param confs: confs to check.
    :type confs: list or dict or str
    :param kwargs: additional task kwargs.
    :return: True if all conditions are checked. False otherwise.
    :rtype: bool
    """
    result = False

    if confs is not None:
        # ensure confs is a list
        if isinstance(confs, string_types) or isinstance(confs, dict):
            confs = [confs]
        # if at least one conf exists, result is True by default
        result = True
        for conf in confs:
            result = run(conf, **kwargs)
            # stop when a result is False
            if not result:
                break

    return result
def get_targets(self, predicate=None):
    """Returns the candidate targets this task should act on.

    This method is a convenience for processing optional transitivity. Tasks may
    bypass it and make their own decisions on which targets to act on.

    NOTE: This method was introduced in 2018, so at the time of writing few tasks
    consult it. Instead, they query self.context.targets directly.
    TODO: Fix up existing tasks to consult this method, for uniformity.

    Note that returned targets have not been checked for invalidation. The caller
    should do so as needed, typically by calling self.invalidated().

    :API: public
    """
    initial_targets = (self.context.targets(predicate) if self.act_transitively
                       else list(filter(predicate, self.context.target_roots)))
    if not self.target_filtering_enabled:
        return initial_targets
    else:
        return self._filter_targets(initial_targets)
def get_extra_info(self, name, default=None):
    """Called by the client protocol to return optional transport information.

    Information requests not recognized by the ``FramerProtocol`` are passed on
    to the underlying transport. The values of ``name`` recognized directly by
    ``FramerProtocol`` are:

    ================ =====================================================
    Value            Description
    ================ =====================================================
    send_framer      The active framer for the send direction.
    send_state       The state for the send framer.
    recv_framer      The active framer for the receive direction.
    recv_state       The state for the receive framer.
    recv_buf         The current receive buffer.
    recv_paused      ``True`` if reading is paused.
    client_protocol  The client ``FramedProtocol``.
    transport        The underlying transport.
    ================ =====================================================

    :param name: A string representing the piece of transport-specific
                 information to get.
    :param default: The value to return if the information doesn't exist.

    :returns: The requested data.
    """
    # Handle data we know about
    if name in self._handlers:
        return self._handlers[name](self)

    # Call get_extra_info() on the transport
    return self._transport.get_extra_info(name, default=default)
def from_oauth_file(cls, filepath=None):
    """Get an object bound to the Twitter API using your own credentials.

    The `twitter` library ships with a `twitter` command that uses PIN OAuth.
    Generate your own OAuth credentials by running `twitter` from the shell,
    which will open a browser window to authenticate you. Once successfully run,
    even just one time, you will have a credential file at ~/.twitter_oauth.

    This factory function reuses your credential file to get a `Twitter` object.
    (Really, this code is just lifted from the `twitter.cmdline` module to
    minimize OAuth dancing.)
    """
    if filepath is None:
        # Use default OAuth filepath from `twitter` command-line program.
        home = os.environ.get('HOME', os.environ.get('USERPROFILE', ''))
        filepath = os.path.join(home, '.twitter_oauth')

    oauth_token, oauth_token_secret = read_token_file(filepath)
    twitter = cls(
        auth=OAuth(oauth_token, oauth_token_secret, CONSUMER_KEY, CONSUMER_SECRET),
        api_version='1.1',
        domain='api.twitter.com')
    return twitter
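# Hedged usage sketch: reusing the credential file written by the `twitter` CLI.
# `MyTwitter` below is a placeholder for whatever class defines this classmethod;
# it is not a name taken from the source.
#   api = MyTwitter.from_oauth_file()                   # reads ~/.twitter_oauth
#   api = MyTwitter.from_oauth_file('/tmp/alt_oauth')   # or point at another file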
def get_fileb64_image_prediction(self, model_id, filename, token=None,
                                 url=API_GET_PREDICTION_IMAGE_URL):
    """Gets a prediction from a supplied image on your machine, by encoding the
    image data as b64 and posting to the API.

    :param model_id: string, once you train a model you'll be given a model id to use.
    :param filename: string, the name of a file to be posted to the api.
    :returns: requests object
    """
    auth = 'Bearer ' + self.check_for_token(token)
    h = {'Authorization': auth, 'Cache-Control': 'no-cache'}
    the_url = url
    with open(filename, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read())
    m = MultipartEncoder(fields={
        'sampleBase64Content': encoded_string,
        'modelId': model_id})
    h = {'Authorization': auth,
         'Cache-Control': 'no-cache',
         'Content-Type': m.content_type}
    r = requests.post(the_url, headers=h, data=m)
    return r
def _check_for_definition(iface, cls, tag, defines):
    """Check for a valid definition of a value.

    Args:
        iface (Iface): An Iface specification.
        cls (type): Some type to check for a definition.
        tag (str): The name of the tag attribute used to mark the abstract methods.
        defines (callable): A callable that accepts an attribute and returns True
            if the attribute is a valid definition.

    Returns:
        bool: Whether or not the definition is found.
    """
    attributes = (
        attr
        for attr in iface.__abstractmethods__
        if hasattr(getattr(iface, attr), tag)
    )

    for attribute in attributes:
        for node in cls.__mro__:
            if hasattr(node, attribute) and defines(getattr(node, attribute)):
                return True

    try:
        attribute
        return False
    except NameError:
        # Pass the test if the loop was never executed. This indicates there
        # were no iface elements defined in the search.
        return True
def rpc_get_subdomains_owned_by_address(self, address, **con_info):
    """Get the list of subdomains owned by an address.

    Return {'status': True, 'subdomains': ...} on success
    Return {'error': ...} on error
    """
    if not check_address(address):
        return {'error': 'Invalid address', 'http_status': 400}

    res = get_subdomains_owned_by_address(address)
    return self.success_response({'subdomains': res})
def getCredentialValues(self, new=False):
    """Return the values in credentials.*."""
    credentials_base = "credentials."
    return tuple([self.getValue(credentials_base + p)
                  for p in ["username", "password", "private_key"]])
def update_configuration(self):
    """Asynchronously applies or re-applies the SAS Logical Interconnect
    configuration to all managed interconnects of a SAS Logical Interconnect.

    Returns:
        dict: SAS Logical Interconnect.
    """
    uri = "{}/configuration".format(self.data["uri"])
    result = self._helper.update({}, uri)
    self.refresh()

    return result
def differentiate(self, n=1):
    """n-th derivative, default 1."""
    ak = self.coefficients()
    a_, b_ = self.domain()
    for _ in range(n):
        ak = self.differentiator(ak)
    return self.from_coeff((2. / (b_ - a_)) ** n * ak, domain=self.domain())
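# A note inferred from the code above (not stated in the source): if the coefficients
# live on a reference interval [-1, 1] and the function's domain is [a, b] via the
# affine map x = (a + b)/2 + (b - a)/2 * t, then dt/dx = 2/(b - a), so each chain-rule
# application contributes one factor of 2/(b - a); after n derivatives this gives the
# (2./(b_ - a_))**n scaling applied to the differentiated coefficients.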
def prior_model_name_prior_tuples_dict(self):
    """
    Returns
    -------
    class_priors_dict: {String: [Prior]}
        A dictionary mapping the names of priors to lists of associated priors
    """
    return {name: list(prior_model.prior_tuples)
            for name, prior_model in self.prior_model_tuples}
def editors(self, value):
    """Update editors.

    DEPRECATED: use ``policy["roles/editors"] = value`` instead.
    """
    warnings.warn(
        _ASSIGNMENT_DEPRECATED_MSG.format("editors", EDITOR_ROLE),
        DeprecationWarning,
    )
    self[EDITOR_ROLE] = value
def pseudo_dir(source_path, cache_path=None, delete=Ellipsis, credentials=None,
               meta_data=None):
    '''
    pseudo_dir(source_path) yields a pseudo-directory object that represents files in
    the given source path.

    Pseudo-dir objects act as an interface for loading data from abstract sources.
    The given source path may be either a directory, a (possibly zipped) tarball, or
    a URL. In all cases but the local directory, the pseudo-dir object will quietly
    extract/download the requested files to a cache directory as their paths are
    requested. This is managed through two methods:
      * find(args...) joins the argument list as in os.path.join, then, if the
        resulting file is found in the source_path, this (relative) path-name is
        returned; otherwise None is returned.
      * local_path(args...) joins the argument list as in os.path.join, then, if the
        resulting file is found in the source_path, it is extracted/downloaded to the
        local cache directory if necessary, and this path (or the original path when
        no cache directory is used) is returned.

    The following optional arguments may be given:
      * cache_path (default: None) specifies the cache directory in which to put any
        extracted or downloaded contents. If None, then a temporary directory is
        created and used. If the source path is a local directory, then the cache
        path is not needed and is instead ignored. Note that if the cache path is not
        deleted, it can be reused across sessions--the pseudo-dir will always check
        for files in the cache path before extracting or downloading them.
      * delete (default: Ellipsis) may be set to True or False to declare that the
        cache directory should be deleted at system exit (assuming a normal Python
        system exit). If Ellipsis, then the cache_path is deleted only if it is
        created by the pseudo-dir object--given cache paths are never deleted.
      * credentials (default: None) may be set to a valid set of Amazon S3
        credentials for use if the source path is an S3 path. The contents are passed
        through the to_credentials function.
      * meta_data (default: None) specifies an optional map of meta-data for the
        pseudo-dir.
    '''
    return PseudoDir(source_path, cache_path=cache_path, delete=delete,
                     credentials=credentials, meta_data=meta_data)
def scatter(self, *args, **kwargs):
    """Add a scatter plot."""
    cls = _make_class(ScatterVisual,
                      _default_marker=kwargs.pop('marker', None),
                      )
    return self._add_item(cls, *args, **kwargs)
def dict_given_run_array(samples, thread_min_max):
    """Converts an array of information about samples back into a nested sampling run
    dictionary (see data_processing module docstring for more details).

    N.B. the output dict only contains the following keys: 'logl', 'thread_label',
    'nlive_array', 'theta'. Any other keys giving additional information about the
    run output cannot be reproduced from the function arguments, and are therefore
    omitted.

    Parameters
    ----------
    samples: numpy array
        Numpy array containing columns
        [logl, thread label, change in nlive at sample, (thetas)]
        with each row representing a single sample.
    thread_min_max: numpy array, optional
        2d array with a row for each thread containing the likelihoods at which it
        begins and ends. Needed to calculate nlive_array (otherwise this is set to
        None).

    Returns
    -------
    ns_run: dict
        Nested sampling run dict (see data_processing module docstring for more
        details).
    """
    ns_run = {'logl': samples[:, 0],
              'thread_labels': samples[:, 1],
              'thread_min_max': thread_min_max,
              'theta': samples[:, 3:]}
    if np.all(~np.isnan(ns_run['thread_labels'])):
        ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)
        assert np.array_equal(samples[:, 1], ns_run['thread_labels']), (
            ('Casting thread labels from samples array to int has changed '
             'their values!\nsamples[:, 1]={}\nthread_labels={}').format(
                 samples[:, 1], ns_run['thread_labels']))
    nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()
    assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)
    nlive_array = np.zeros(samples.shape[0]) + nlive_0
    nlive_array[1:] += np.cumsum(samples[:-1, 2])
    # Check if there are multiple threads starting on the first logl point
    dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()
    if dup_th_starts > 1:
        # In this case we approximate the true nlive (which we dont really
        # know) by making sure the array's final point is 1 and setting all
        # points with logl = logl.min() to have the same nlive
        nlive_array += (1 - nlive_array[-1])
        n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()
        nlive_array[:n_logl_min] = nlive_0
        warnings.warn((
            'duplicate starting logls: {} threads start at logl.min()={}, '
            'and {} points have logl=logl.min(). nlive_array may only be '
            'approximately correct.').format(
                dup_th_starts, ns_run['logl'].min(), n_logl_min), UserWarning)
    assert nlive_array.min() > 0, (
        ('nlive contains 0s or negative values. nlive_0={}'
         '\nnlive_array = {}\nthread_min_max={}').format(
             nlive_0, nlive_array, thread_min_max))
    assert nlive_array[-1] == 1, (
        'final point in nlive_array != 1.\nnlive_array = ' + str(nlive_array))
    ns_run['nlive_array'] = nlive_array
    return ns_run
def next(self, now=None, increments=_increments, delta=True, default_utc=WARN_CHANGE):
    '''How long to wait in seconds before this crontab entry can next be executed.'''
    if default_utc is WARN_CHANGE and (isinstance(now, _number_types) or
                                       (now and not now.tzinfo) or now is None):
        warnings.warn(WARNING_CHANGE_MESSAGE, FutureWarning, 2)
        default_utc = False

    now = now or (datetime.utcnow()
                  if default_utc and default_utc is not WARN_CHANGE else
                  datetime.now())

    if isinstance(now, _number_types):
        now = (datetime.utcfromtimestamp(now)
               if default_utc else
               datetime.fromtimestamp(now))

    # handle timezones if the datetime object has a timezone and get a
    # reasonable future/past start time
    onow, now = now, now.replace(tzinfo=None)
    tz = onow.tzinfo
    future = now.replace(microsecond=0) + increments[0]()
    if future < now:
        # we are going backwards...
        _test = lambda: future.year < self.matchers.year
        if now.microsecond:
            future = now.replace(microsecond=0)
    else:
        # we are going forwards
        _test = lambda: self.matchers.year < future.year

    # Start from the year and work our way down. Any time we increment a
    # higher-magnitude value, we reset all lower-magnitude values. This
    # gets us performance without sacrificing correctness. Still more
    # complicated than a brute-force approach, but also orders of
    # magnitude faster in basically all cases.
    to_test = ENTRIES - 1
    while to_test >= 0:
        if not self._test_match(to_test, future):
            inc = increments[to_test](future, self.matchers)
            future += inc
            for i in xrange(0, to_test):
                future = increments[ENTRIES + i](future, inc)
            try:
                if _test():
                    return None
            except:
                print(future, type(future), type(inc))
                raise
            to_test = ENTRIES - 1
            continue
        to_test -= 1

    # verify the match
    match = [self._test_match(i, future) for i in xrange(ENTRIES)]
    _assert(all(match),
            "\nYou have discovered a bug with crontab, please notify the\n"
            "author with the following information:\n"
            "crontab: %r\n"
            "now: %r", ' '.join(m.input for m in self.matchers), now)
    if not delta:
        onow = now = datetime(1970, 1, 1)
    delay = future - now
    if tz:
        delay += _fix_none(onow.utcoffset())
        if hasattr(tz, 'localize'):
            delay -= _fix_none(tz.localize(future).utcoffset())
        else:
            delay -= _fix_none(future.replace(tzinfo=tz).utcoffset())
    return delay.days * 86400 + delay.seconds + delay.microseconds / 1000000.
def find_files(folder):
    """Discover stereo photos and return them as a pairwise sorted list."""
    files = [i for i in os.listdir(folder) if i.startswith("left")]
    files.sort()
    for i in range(len(files)):
        insert_string = "right{}".format(files[i * 2][4:])
        files.insert(i * 2 + 1, insert_string)
    files = [os.path.join(folder, filename) for filename in files]
    return files
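# Hypothetical usage sketch (folder name and file layout are illustrative only): a
# directory holding left*/right* stereo pairs with matching suffixes.
#   find_files("stereo_shots")
#   # -> ['stereo_shots/left_01.png', 'stereo_shots/right_01.png',
#   #     'stereo_shots/left_02.png', 'stereo_shots/right_02.png']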
def _hardware_count(self):
    """Amount of hardware resources.

    :return: integer
    """
    return (self._counts.get("hardware") + self._counts.get("serial") +
            self._counts.get("mbed"))
def train():
    """Training process"""
    start_pipeline_time = time.time()

    # Training/Testing
    best_valid_acc = 0
    stop_early = 0
    for epoch in range(args.epochs):
        # Epoch training stats
        start_epoch_time = time.time()
        epoch_L = 0.0
        epoch_sent_num = 0
        epoch_wc = 0
        # Log interval training stats
        start_log_interval_time = time.time()
        log_interval_wc = 0
        log_interval_sent_num = 0
        log_interval_L = 0.0

        for i, ((data, valid_length), label) in enumerate(train_dataloader):
            data = mx.nd.transpose(data.as_in_context(context))
            label = label.as_in_context(context)
            valid_length = valid_length.as_in_context(context).astype(np.float32)
            wc = valid_length.sum().asscalar()
            log_interval_wc += wc
            epoch_wc += wc
            log_interval_sent_num += data.shape[1]
            epoch_sent_num += data.shape[1]
            with autograd.record():
                output = net(data, valid_length)
                L = loss(output, label).mean()
            L.backward()
            # Clip gradient
            if args.clip is not None:
                grads = [p.grad(context) for p in net.collect_params().values()]
                gluon.utils.clip_global_norm(grads, args.clip)
            # Update parameter
            trainer.step(1)
            log_interval_L += L.asscalar()
            epoch_L += L.asscalar()
            if (i + 1) % args.log_interval == 0:
                print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
                    epoch, i + 1, len(train_dataloader),
                    log_interval_L / log_interval_sent_num,
                    log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
                # Clear log interval training stats
                start_log_interval_time = time.time()
                log_interval_wc = 0
                log_interval_sent_num = 0
                log_interval_L = 0
        end_epoch_time = time.time()
        valid_avg_L, valid_acc = evaluate(valid_dataloader)
        test_avg_L, test_acc = evaluate(test_dataloader)
        print('[Epoch %d] train avg loss %g, '
              'valid acc %.4f, valid avg loss %g, '
              'test acc %.4f, test avg loss %g, throughput %gK wps' % (
                  epoch, epoch_L / epoch_sent_num,
                  valid_acc, valid_avg_L, test_acc, test_avg_L,
                  epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))

        if valid_acc < best_valid_acc:
            print('No Improvement.')
            stop_early += 1
            if stop_early == 3:
                break
        else:
            # Reset stop_early if the validation loss finds a new low value
            print('Observed Improvement.')
            stop_early = 0
            net.save_parameters(args.save_prefix + '_{:04d}.params'.format(epoch))
            best_valid_acc = valid_acc

    net.load_parameters(glob.glob(args.save_prefix + '_*.params')[-1], context)
    valid_avg_L, valid_acc = evaluate(valid_dataloader)
    test_avg_L, test_acc = evaluate(test_dataloader)
    print('Best validation loss %g, validation acc %.4f' % (valid_avg_L, valid_acc))
    print('Best test loss %g, test acc %.4f' % (test_avg_L, test_acc))
    print('Total time cost %.2fs' % (time.time() - start_pipeline_time))
def finish(self):
    """Finalize the MAR file.

    The MAR header, index and signatures need to be updated once we've finished
    adding all the files.
    """
    # Update the last_offset in the mar header
    self.write_header()
    # Write out the index of contents
    self.write_index()

    if not self.use_old_format:
        # Refresh the signature
        sigs = self.calculate_signatures()
        self.write_signatures(sigs)
def __driver_completer(self, toks, text, state):
    """Driver level completer.

    Arguments:
        toks: A list of tokens, tokenized from the original input line.
        text: A string, the text to be replaced if a completion candidate is chosen.
        state: An integer, the index of the candidate out of the list of candidates.

    Returns:
        A string, the candidate.
    """
    if state != 0:
        return self.__completion_candidates[state]

    # Update the cache when this method is first called, i.e., state == 0.

    # If the line is empty or the user is still inputting the first token,
    # complete with available commands.
    if not toks or (len(toks) == 1 and text == toks[0]):
        try:
            self.__completion_candidates = self.__complete_cmds(text)
        except:
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
        return self.__completion_candidates[state]

    # Otherwise, try to complete with the registered completer method.
    cmd = toks[0]
    args = toks[1:] if len(toks) > 1 else None
    if text and args:
        del args[-1]
    if cmd in self._completer_map.keys():
        completer_name = self._completer_map[cmd]
        completer_method = getattr(self, completer_name)
        try:
            self.__completion_candidates = completer_method(cmd, args, text)
        except:
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
    else:
        self.__completion_candidates = []

    return self.__completion_candidates[state]
def pre_save(self, instance, add):
    """Ensure slug uniqueness before save."""
    slug = self.value_from_object(instance)

    # We don't want to change slug defined by user.
    predefined_slug = bool(slug)

    if not slug and self.populate_from:
        slug = self._get_populate_from_value(instance)

    if slug:
        slug = slugify(slug)

    if not slug:
        slug = None

        if not self.blank:
            slug = instance._meta.model_name  # pylint: disable=protected-access
        elif not self.null:
            slug = ''

    if slug:
        # Make sure that auto generated slug with added sequence
        # won't exceed maximal length.
        # Validation of predefined slugs is handled by Django.
        if not predefined_slug:
            slug = slug[:(self.max_length - MAX_SLUG_SEQUENCE_DIGITS - 1)]

        constraints_placeholder, constraints_values = self._get_unique_constraints(instance)

        instance_pk_name = instance._meta.pk.name  # pylint: disable=protected-access

        # Safe values - make sure that there is no chance of SQL injection.
        query_params = {
            'constraints_placeholder': constraints_placeholder,
            'slug_column': connection.ops.quote_name(self.column),
            'slug_len': len(slug),
            'table_name': connection.ops.quote_name(self.model._meta.db_table),  # pylint: disable=protected-access
            'pk_neq_placeholder': 'AND {} != %(instance_pk)s'.format(instance_pk_name) if instance.pk else ''
        }

        # SQL injection unsafe values - will be escaped.
        # Keys prefixed with `unique_` are reserved for `constraints_values` dict.
        query_escape_params = {
            'slug': slug,
            'slug_regex': '^{}(-[0-9]*)?$'.format(slug),
        }
        query_escape_params.update(constraints_values)
        if instance.pk:
            query_escape_params['instance_pk'] = instance.pk

        with connection.cursor() as cursor:
            # TODO: Slowest part of this query is `MAX` function. It can
            #       be optimized by indexing slug column by slug sequence.
            #       https://www.postgresql.org/docs/9.4/static/indexes-expressional.html
            cursor.execute(
                """
                SELECT
                    CASE
                        WHEN (
                            EXISTS(
                                SELECT 1 FROM {table_name} WHERE (
                                    {slug_column} = %(slug)s
                                    {pk_neq_placeholder}
                                    {constraints_placeholder}
                                )
                            )
                        ) THEN MAX(slug_sequence) + 1
                        ELSE NULL
                    END
                FROM (
                    SELECT COALESCE(
                        NULLIF(RIGHT({slug_column}, -{slug_len}-1), ''), '1'
                    )::text::integer AS slug_sequence
                    FROM {table_name} WHERE (
                        {slug_column} ~ %(slug_regex)s
                        {pk_neq_placeholder}
                        {constraints_placeholder}
                    )
                ) AS tmp
                """.format(**query_params),
                params=query_escape_params
            )
            result = cursor.fetchone()[0]

        if result is not None:
            if predefined_slug:
                raise SlugError(
                    "Slug '{}' (version {}) is already taken.".format(slug, instance.version))

            if len(str(result)) > MAX_SLUG_SEQUENCE_DIGITS:
                raise SlugError(
                    "Auto-generated slug sequence too long - please choose a different slug.")

            slug = '{}-{}'.format(slug, result)

    # Make the updated slug available as instance attribute.
    setattr(instance, self.name, slug)

    return slug
def _process_input_wcs_single(fname, wcskey, updatewcs):
    """See docs for _process_input_wcs.

    This is separated to be spawned in parallel.
    """
    if wcskey in ['', ' ', 'INDEF', None]:
        if updatewcs:
            uw.updatewcs(fname, checkfiles=False)
    else:
        numext = fileutil.countExtn(fname)
        extlist = []
        for extn in range(1, numext + 1):
            extlist.append(('SCI', extn))
        if wcskey in string.ascii_uppercase:
            wkey = wcskey
            wname = ' '
        else:
            wname = wcskey
            wkey = ' '
        altwcs.restoreWCS(fname, extlist, wcskey=wkey, wcsname=wname)
    # make an asn table at the end
    # Make sure there is a WCSCORR table for each input image
    if wcskey not in ['', ' ', 'INDEF', None] or updatewcs:
        wcscorr.init_wcscorr(fname)
def _list_nodes(call=None):
    '''List the nodes, ask all 'vagrant' minions, return dict of grains.'''
    local = salt.client.LocalClient()
    ret = local.cmd('salt-cloud:driver:vagrant', 'grains.items', '', tgt_type='grain')
    return ret
def get_page(self, url):
    """Get the HTML for an URL, possibly from an in-memory cache.

    XXX TODO Note: this cache is never actually cleared. It's assumed
    that the data won't get stale over the lifetime of a locator instance
    (not necessarily true for the default_locator).
    """
    # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
    scheme, netloc, path, _, _, _ = urlparse(url)
    if scheme == 'file' and os.path.isdir(url2pathname(path)):
        url = urljoin(ensure_slash(url), 'index.html')
    if url in self._page_cache:
        result = self._page_cache[url]
        logger.debug('Returning %s from cache: %s', url, result)
    else:
        host = netloc.split(':', 1)[0]
        result = None
        if host in self._bad_hosts:
            logger.debug('Skipping %s due to bad host %s', url, host)
        else:
            req = Request(url, headers={'Accept-encoding': 'identity'})
            try:
                logger.debug('Fetching %s', url)
                resp = self.opener.open(req, timeout=self.timeout)
                logger.debug('Fetched %s', url)
                headers = resp.info()
                content_type = headers.get('Content-Type', '')
                if HTML_CONTENT_TYPE.match(content_type):
                    final_url = resp.geturl()
                    data = resp.read()
                    encoding = headers.get('Content-Encoding')
                    if encoding:
                        decoder = self.decoders[encoding]   # fail if not found
                        data = decoder(data)
                    encoding = 'utf-8'
                    m = CHARSET.search(content_type)
                    if m:
                        encoding = m.group(1)
                    try:
                        data = data.decode(encoding)
                    except UnicodeError:
                        data = data.decode('latin-1')    # fallback
                    result = Page(data, final_url)
                    self._page_cache[final_url] = result
            except HTTPError as e:
                if e.code != 404:
                    logger.exception('Fetch failed: %s: %s', url, e)
            except URLError as e:
                logger.exception('Fetch failed: %s: %s', url, e)
                with self._lock:
                    self._bad_hosts.add(host)
            except Exception as e:
                logger.exception('Fetch failed: %s: %s', url, e)
            finally:
                self._page_cache[url] = result   # even if None (failure)
    return result
def history(namespace_module):
    """Hash all versions on Artifactory."""
    for path in get_namespace_history(namespace_module):
        h = get_bel_resource_hash(path.as_posix())
        click.echo('{}\t{}'.format(path, h))
def conv2d(self, filter_size, output_channels, stride=1, padding='SAME',
           activation_fn=tf.nn.relu, b_value=0.0, s_value=1.0, bn=True, stoch=False):
    """
    :param filter_size: int. assumes square filter
    :param output_channels: int
    :param stride: int
    :param padding: 'VALID' or 'SAME'
    :param activation_fn: tf.nn function
    :param b_value: float
    :param s_value: float
    """
    self.count['conv'] += 1
    self._layer_count += 1
    scope = 'conv_' + str(self.count['conv'])
    if stoch is True:
        clean = False
    else:
        clean = True
    with tf.variable_scope(scope):
        input_channels = self.input.get_shape()[3]
        output_shape = [filter_size, filter_size, input_channels, output_channels]
        w = self.weight_variable(name='weights', shape=output_shape)
        self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1],
                                  padding=padding)
        if bn is True:
            self.input = self.conv_batch_norm(self.input, clean=clean,
                                              count=self._layer_count)
        if stoch is True:
            self.input = tf.random_normal(tf.shape(self.input)) + self.input
            self._noisy_z_dict[self._layer_count] = self.input
        if b_value is not None:
            b = self.const_variable(name='bias', shape=[output_channels], value=b_value)
            self.input = tf.add(self.input, b)
        if s_value is not None:
            s = self.const_variable(name='scale', shape=[output_channels], value=s_value)
            self.input = tf.multiply(self.input, s)
        if activation_fn is not None:
            self.input = activation_fn(self.input)
    self.print_log(scope + ' output: ' + str(self.input.get_shape()))
def between(self, minimum: int = 1, maximum: int = 1000) -> int:
    """Generate a random number between minimum and maximum.

    :param minimum: Minimum of range.
    :param maximum: Maximum of range.
    :return: Number.
    """
    return self.random.randint(minimum, maximum)
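# Minimal usage sketch (`Numbers` is a placeholder for whatever provider class defines
# this method; it delegates to random.Random.randint, so both bounds are inclusive):
#   n = Numbers().between(minimum=10, maximum=20)
#   assert 10 <= n <= 20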
def get_course(self, courseid):
    """
    :param courseid: the course id of the course
    :raise InvalidNameException, CourseNotFoundException, CourseUnreadableException
    :return: an object representing the course, of the type given in the constructor
    """
    if not id_checker(courseid):
        raise InvalidNameException("Course with invalid name: " + courseid)
    if self._cache_update_needed(courseid):
        self._update_cache(courseid)

    return self._cache[courseid][0]
def get_id_token_hint(self, request_args=None, **kwargs):
    """Add id_token_hint to request.

    :param request_args:
    :param kwargs:
    :return:
    """
    request_args = self.multiple_extend_request_args(
        request_args, kwargs['state'], ['id_token'],
        ['auth_response', 'token_response', 'refresh_token_response'],
        orig=True)

    try:
        request_args['id_token_hint'] = request_args['id_token']
    except KeyError:
        pass
    else:
        del request_args['id_token']

    return request_args, {}
def acp_users_delete():
    """Delete or undelete a user account."""
    if not current_user.is_admin:
        return error("Not authorized to edit users.", 401)

    if not db:
        return error('The ACP is not available in single-user mode.', 500)

    form = UserDeleteForm()
    if not form.validate():
        return error("Bad Request", 400)

    user = get_user(int(request.form['uid']))
    direction = request.form['direction']

    if not user:
        return error("User does not exist.", 404)
    else:
        for p in PERMISSIONS:
            setattr(user, p, False)
        user.active = direction == 'undel'
        write_user(user)
        return redirect(url_for('acp_users') +
                        '?status={_del}eted'.format(_del=direction))
def fprint(fmt, *args, **kwargs):
    """Parse and print a colored and perhaps formatted string.

    The remaining keyword arguments are the same as for Python's built-in print
    function. Colors are returned to their defaults before the function returns.
    """
    if not fmt:
        return

    hascolor = False
    target = kwargs.get("target", sys.stdout)

    # Format the string before feeding it to the parser
    fmt = fmt.format(*args, **kwargs)

    for txt, markups in _color_format_parser.parse(fmt):
        if markups != (None, None):
            _color_manager.set_color(*markups)
            hascolor = True
        else:
            if hascolor:
                _color_manager.set_defaults()
                hascolor = False

        target.write(txt)
        target.flush()  # Needed for Python 3.x

    _color_manager.set_defaults()
    target.write(kwargs.get('end', '\n'))
    _color_manager.set_defaults()
def get_resource_name(context, expand_polymorphic_types=False):
    """Return the name of a resource."""
    from rest_framework_json_api.serializers import PolymorphicModelSerializer
    view = context.get('view')

    # Sanity check to make sure we have a view.
    if not view:
        return None

    # Check to see if there is a status code and return early
    # with the resource_name value of `errors`.
    try:
        code = str(view.response.status_code)
    except (AttributeError, ValueError):
        pass
    else:
        if code.startswith('4') or code.startswith('5'):
            return 'errors'

    try:
        resource_name = getattr(view, 'resource_name')
    except AttributeError:
        try:
            serializer = view.get_serializer_class()
            if expand_polymorphic_types and issubclass(serializer, PolymorphicModelSerializer):
                return serializer.get_polymorphic_types()
            else:
                return get_resource_type_from_serializer(serializer)
        except AttributeError:
            try:
                resource_name = get_resource_type_from_model(view.model)
            except AttributeError:
                resource_name = view.__class__.__name__

            if not isinstance(resource_name, six.string_types):
                # The resource name is not a string - return as is
                return resource_name

            # the name was calculated automatically from the view > pluralize and format
            resource_name = format_resource_type(resource_name)

    return resource_name
def _get_build_flags(cls, build_flags_from_option, is_flagged, target):
    """Merge build flags with global < target < command-line order.

    Build flags can be defined as globals (in `pants.ini`), as arguments to a
    Target, and via the command-line.
    """
    # If self.get_options().build_flags returns a quoted string, remove the outer
    # quotes, which happens for flags passed from the command-line.
    if ((build_flags_from_option.startswith('\'') and build_flags_from_option.endswith('\'')) or
            (build_flags_from_option.startswith('"') and build_flags_from_option.endswith('"'))):
        bfo = build_flags_from_option[1:-1]
    else:
        bfo = build_flags_from_option
    global_build_flags, ephemeral_build_flags = ('', bfo) if is_flagged else (bfo, '')
    target_build_flags = target.build_flags if getattr(target, 'build_flags', None) else ''
    joined_build_flags = ' '.join([global_build_flags, target_build_flags, ephemeral_build_flags])
    return cls._split_build_flags(joined_build_flags)
def extract_geo(self):
    '''Extract geo-related information from exif'''
    altitude = self.extract_altitude()
    dop = self.extract_dop()
    lon, lat = self.extract_lon_lat()
    d = {}
    if lon is not None and lat is not None:
        d['latitude'] = lat
        d['longitude'] = lon
    if altitude is not None:
        d['altitude'] = altitude
    if dop is not None:
        d['dop'] = dop
    return d
def figure_populate(outputpath, csv, xlabels, ylabels, analysistype, description,
                    fail=False):
    """Create the report image from the summary report created in self.dataframesetup

    :param outputpath: Path in which the outputs are to be created
    :param csv: Name of the report file from which data are to be extracted
    :param xlabels: List of all the labels to use on the x-axis
    :param ylabels: List of all the labels to use on the y-axis
    :param analysistype: String of the analysis type
    :param description: String describing the analysis: set to either template for
        the empty heatmap created prior to analyses or report for normal functionality
    :param fail: Boolean of whether any samples have failed the quality checks - used
        for determining the palette
    """
    # Create a data frame from the summary report
    df = pd.read_csv(os.path.join(outputpath, csv), delimiter=',', index_col=0)
    # Set the palette appropriately - 'template' uses only grey
    if description == 'template':
        cmap = ['#a0a0a0']
    # 'fail' uses red (fail), grey (not detected), and green (detected/pass)
    elif fail:
        cmap = ['#ff0000', '#a0a0a0', '#00cc00']
    # Otherwise only use grey (not detected) and green (detected/pass)
    else:
        cmap = ['#a0a0a0', '#00cc00']
    # Use seaborn to create a heatmap of the data
    plot = sns.heatmap(df, cbar=False, linewidths=.5, cmap=cmap)
    # Move the x-axis to the top of the plot
    plot.xaxis.set_ticks_position('top')
    # Remove the y-labels
    plot.set_ylabel('')
    # Set the x-tick labels as a slice of the x-labels list (first entry is not
    # required, as it makes the report image look crowded). Rotate the x-tick
    # labels 90 degrees
    plot.set_xticklabels(xlabels[1:], rotation=90)
    # Set the y-tick labels from the supplied list
    plot.set_yticklabels(ylabels, rotation=0)
    # Create the figure
    fig = plot.get_figure()
    # Save the figure in .png format, using the bbox_inches='tight' option to
    # ensure that everything is scaled
    fig.savefig(os.path.join(outputpath,
                             '{at}_{desc}.png'.format(at=analysistype, desc=description)),
                bbox_inches='tight')
def bootstrap_indexes_moving_block(data, n_samples=10000, block_length=3, wrap=False):
    """Generate moving-block bootstrap samples.

    Given data points `data`, where axis 0 is considered to delineate points, return
    a generator for sets of bootstrap indexes. This can be used as a list of
    bootstrap indexes (with list(bootstrap_indexes_moving_block(data))) as well.

    Parameters
    ----------
    n_samples [default 10000]: the number of subsamples to generate.
    block_length [default 3]: the length of block.
    wrap [default False]: if false, choose only blocks within the data, making the
        last block for data of length L start at L-block_length. If true, choose
        blocks starting anywhere, and if they extend past the end of the data, wrap
        around to the beginning of the data again.
    """
    n_obs = data.shape[0]
    n_blocks = int(ceil(n_obs / block_length))
    nexts = np.repeat(np.arange(0, block_length)[None, :], n_blocks, axis=0)

    if wrap:
        last_block = n_obs
    else:
        last_block = n_obs - block_length

    for _ in xrange(n_samples):
        blocks = np.random.randint(0, last_block, size=n_blocks)
        if not wrap:
            yield (blocks[:, None] + nexts).ravel()[:n_obs]
        else:
            yield np.mod((blocks[:, None] + nexts).ravel()[:n_obs], n_obs)
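# Usage sketch (data values are made up for illustration): draw one set of
# moving-block bootstrap indexes and resample along axis 0.
#   import numpy as np
#   data = np.arange(10.0)
#   idx = next(bootstrap_indexes_moving_block(data, n_samples=1, block_length=3))
#   resampled = data[idx]     # same length as data, assembled from 3-long blocks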
def contribute_to_class(self, cls, name, **kwargs):
    """Internal Django method to associate the field with the Model;
    it assigns the descriptor.
    """
    super(PlaceholderField, self).contribute_to_class(cls, name, **kwargs)

    # overwrites what instance.<colname> returns; give direct access to the placeholder
    setattr(cls, name, PlaceholderFieldDescriptor(self.slot))

    # Make placeholder fields easy to find
    # Can't assign this to cls._meta because that gets overwritten by every level
    # of model inheritance.
    if not hasattr(cls, '_meta_placeholder_fields'):
        cls._meta_placeholder_fields = {}
    cls._meta_placeholder_fields[name] = self

    # Configure the reverse relation if possible.
    # TODO: make sure reverse queries work properly
    if django.VERSION >= (1, 11):
        rel = self.remote_field
    else:
        rel = self.rel

    if rel.related_name is None:
        # Make unique for model (multiple models can use the same slot name)
        rel.related_name = '{app}_{model}_{slot}_FIXME'.format(
            app=cls._meta.app_label,
            model=cls._meta.object_name.lower(),
            slot=self.slot
        )

    # Remove attribute must exist for the delete page. Currently it's not actively used.
    # The regular ForeignKey assigns a ForeignRelatedObjectsDescriptor to it for example.
    # In this case, the PlaceholderRelation is already the reverse relation.
    # Being able to move forward from the Placeholder to the derived models does not
    # have that much value.
    setattr(rel.to, self.rel.related_name, None)
def add_status_message(self, message, level="info"):
    """Set a portal status message"""
    return self.context.plone_utils.addPortalMessage(message, level)
def pos_tags(self):
    """Returns a list of tuples of the form (word, POS tag).

    Example:
    ::

        [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'),
         ('Thursday', 'NNP'), ('morning', 'NN')]

    :rtype: list of tuples
    """
    return [(Word(word, pos_tag=t), unicode(t))
            for word, t in self.pos_tagger.tag(self.raw)
            # new keyword PatternTagger(include_punc=False)
            # if not PUNCTUATION_REGEX.match(unicode(t))
            ]
def wifi_status(self):
    """Get the wifi status."""
    return self._info_json.get(CONST.STATUS, {}).get(CONST.WIFI_LINK)
def add_field(self, name, ftype, docfield=None):
    """Add a field to the document (and to the underlying schema)

    :param name: name of the new field
    :type name: str
    :param ftype: type of the new field
    :type ftype: subclass of :class:`.GenericType`
    """
    self.schema.add_field(name, ftype)
    self[name] = docfield or DocField.FromType(ftype)
def authenticate(url, account, key, by='name', expires=0, timestamp=None, timeout=None,
                 request_type="xml", admin_auth=False, use_password=False,
                 raise_on_error=False):
    """Authenticate to the Zimbra server

    :param url: URL of Zimbra SOAP service
    :param account: The account to be authenticated against
    :param key: The preauth key of the domain of the account or a password (if
        admin_auth or use_password is True)
    :param by: If the account is specified as a name, an ID or a ForeignPrincipal
    :param expires: When the token expires (or 0 for default expiration)
    :param timestamp: When the token was requested (None for "now")
    :param timeout: Timeout for the communication with the server. Defaults to the
        urllib2-default
    :param request_type: Which type of request to use ("xml" (default) or "json")
    :param admin_auth: This request should authenticate and generate an admin token.
        The "key"-parameter therefore holds the admin password (implies use_password)
    :param use_password: The "key"-parameter holds a password. Do a password-based
        user authentication.
    :param raise_on_error: Should I raise an exception when an authentication error
        occurs or just return None?
    :return: The authentication token or None
    :rtype: str or None or unicode
    """
    if timestamp is None:
        timestamp = int(time.time()) * 1000

    pak = ""
    if not admin_auth:
        pak = preauth.create_preauth(account, key, by, expires, timestamp)

    if request_type == 'xml':
        auth_request = RequestXml()
    else:
        auth_request = RequestJson()

    request_data = {
        'account': {
            'by': by,
            '_content': account
        }
    }

    ns = "urn:zimbraAccount"

    if admin_auth:
        ns = "urn:zimbraAdmin"
        request_data['password'] = key
    elif use_password:
        request_data['password'] = {
            "_content": key
        }
    else:
        request_data['preauth'] = {
            'timestamp': timestamp,
            'expires': expires,
            '_content': pak
        }

    auth_request.add_request('AuthRequest', request_data, ns)

    server = Communication(url, timeout)

    if request_type == 'xml':
        response = ResponseXml()
    else:
        response = ResponseJson()

    server.send_request(auth_request, response)

    if response.is_fault():
        if raise_on_error:
            raise AuthenticationFailed(
                "Cannot authenticate user: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
        return None

    return response.get_response()['AuthResponse']['authToken']
def init_scalable(X, n_clusters, random_state=None, max_iter=None,
                  oversampling_factor=2):
    """K-Means initialization using k-means||

    This is algorithm 2 in Scalable K-Means++ (2012).
    """
    logger.info("Initializing with k-means||")
    # Step 1: Initialize Centers
    idx = 0
    centers = da.compute(X[idx, np.newaxis])[0]
    c_idx = {idx}

    # Step 2: Initialize cost
    cost, = compute(evaluate_cost(X, centers))

    if cost == 0:
        n_iter = 0
    else:
        n_iter = int(np.round(np.log(cost)))

    if max_iter is not None:
        n_iter = min(max_iter, n_iter)

    # Steps 3 - 6: update candidate Centers
    for i in range(n_iter):
        with _timer(
            "init iteration %2d/%2d , %2d centers" % (i + 1, n_iter, len(c_idx)),
            _logger=logger,
        ):
            new_idxs = _sample_points(X, centers, oversampling_factor, random_state)
            new_idxs = set(*compute(new_idxs))
            c_idx |= new_idxs

        # Sort before slicing, for better performance / memory
        # usage with the scheduler.
        # See https://github.com/dask/dask-ml/issues/39
        centers = X[sorted(c_idx)].compute()

    # XXX: scikit-learn doesn't have weighted k-means.
    # The paper weights each center by the number of points closest to it.
    # https://stackoverflow.com/a/37198799/1889400 claims you can scale the
    # features before clustering, but that doesn't seem right.
    # I think that replicating the *points*, proportional to the number of
    # original points closest to the candidate centers, would be a better way
    # to do that.

    if len(centers) < n_clusters:
        logger.warning("Found fewer than %d clusters in init.", n_clusters)
        # supplement with random
        need = n_clusters - len(centers)
        locs = sorted(
            random_state.choice(
                np.arange(0, len(X)), size=need, replace=False, chunks=len(X)
            )
        )
        extra = X[locs].compute()
        return np.vstack([centers, extra])
    else:
        # Step 7, 8 without weights
        # dask RandomState objects aren't valid for scikit-learn
        rng2 = (
            random_state.randint(0, np.iinfo("i4").max - 1, chunks=())
            .compute(scheduler="single-threaded")
            .item()
        )
        km = sk_k_means.KMeans(n_clusters, random_state=rng2)
        km.fit(centers)
        return km.cluster_centers_
def encode(self, key):
    """Encodes a user key into a particular format. The result of this method will
    be used by swauth for storing user credentials.

    If salt is not manually set in conf file, a random salt will be generated and
    used.

    :param key: User's secret key
    :returns: A string representing user credentials
    """
    salt = self.salt or os.urandom(32).encode('base64').rstrip()
    return self.encode_w_salt(salt, key)
def get_device_names_to_objects(devices):
    '''Map a list of devices to their hostnames.

    :param devices: list -- list of ManagementRoot objects
    :returns: dict -- mapping of hostnames to ManagementRoot objects
    '''
    name_to_object = {}
    for device in devices:
        device_name = get_device_info(device).name
        name_to_object[device_name] = device
    return name_to_object
def replace(parent, idx, value, check_value=_NO_VAL):
    """Replace a value in a dict."""
    if isinstance(parent, dict):
        if idx not in parent:
            raise JSONPatchError("Item does not exist")
    elif isinstance(parent, list):
        idx = int(idx)
        if idx < 0 or idx >= len(parent):
            raise JSONPatchError("List index out of range")
    if check_value is not _NO_VAL:
        if parent[idx] != check_value:
            raise JSONPatchError("Check value did not pass")
    parent[idx] = value
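# Usage sketch (values are made up for illustration); the helper mutates the container
# in place, and check_value turns it into a test-then-replace:
#   doc = {"name": "old"}
#   replace(doc, "name", "new", check_value="old")   # doc becomes {"name": "new"}
#   items = [1, 2, 3]
#   replace(items, "1", 99)                          # items becomes [1, 99, 3]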
def match_planted(fk_candidate_observations, match_filename, bright_limit=BRIGHT_LIMIT,
                  object_planted=OBJECT_PLANTED,
                  minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS,
                  bright_fraction=MINIMUM_BRIGHT_FRACTION):
    """Using the fk_candidate_observations as input get the Object.planted file from
    VOSpace and match planted sources with found sources.

    The Object.planted list is pulled from VOSpace based on the standard file-layout
    and name of the first exposure as read from the .astrom file.

    :param fk_candidate_observations: name of the fk*reals.astrom file to check
        against Object.planted
    :param match_filename: a file that will contain a list of all planted sources and
        the matched found source
    @param minimum_bright_detections: if there are too few bright detections we raise
        an error.
    """
    found_pos = []
    detections = fk_candidate_observations.get_sources()
    for detection in detections:
        reading = detection.get_reading(0)
        # create a list of positions, to be used later by match_lists
        found_pos.append([reading.x, reading.y])

    # Now get the Object.planted file, either from the local FS or from VOSpace.
    objects_planted_uri = object_planted
    if not os.access(objects_planted_uri, os.F_OK):
        objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()
    lines = open(objects_planted_uri).read()

    # we are changing the format of the Object.planted header to be compatible with
    # astropy.io.ascii but there are some old Object.planted files out there so we do
    # these string/replace calls to reset those.
    new_lines = lines.replace("pix rate", "pix_rate")
    new_lines = new_lines.replace("""''/h rate""", "sky_rate")
    planted_objects_table = ascii.read(new_lines, header_start=-1, data_start=0)
    planted_objects_table.meta = None
    # The match_list method expects a list that contains a position, not an x and a y
    # vector, so we transpose.
    planted_pos = numpy.transpose([planted_objects_table['x'].data,
                                   planted_objects_table['y'].data])

    # match_idx is an order list. The list is in the order of the first list of
    # positions and each entry is the index of the matching position from the
    # second list.
    (match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos),
                                              numpy.array(found_pos))
    assert isinstance(match_idx, numpy.ma.MaskedArray)
    assert isinstance(match_fnd, numpy.ma.MaskedArray)

    false_positives_table = Table()
    # Once we've matched the two lists we'll need some new columns to store the
    # information in. these are masked columns so that object.planted entries that
    # have no detected match are left 'blank'.
    new_columns = [MaskedColumn(name="measure_x", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_y", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_rate", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_angle", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag1", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr1", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag2", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr2", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag3", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr3", length=len(planted_objects_table), mask=True)]
    planted_objects_table.add_columns(new_columns)
    tlength = 0
    new_columns = [MaskedColumn(name="measure_x", length=tlength, mask=True),
                   MaskedColumn(name="measure_y", length=tlength, mask=True),
                   MaskedColumn(name="measure_rate", length=0, mask=True),
                   MaskedColumn(name="measure_angle", length=0, mask=True),
                   MaskedColumn(name="measure_mag1", length=0, mask=True),
                   MaskedColumn(name="measure_merr1", length=0, mask=True),
                   MaskedColumn(name="measure_mag2", length=0, mask=True),
                   MaskedColumn(name="measure_merr2", length=0, mask=True),
                   MaskedColumn(name="measure_mag3", length=tlength, mask=True),
                   MaskedColumn(name="measure_merr3", length=tlength, mask=True)]
    false_positives_table.add_columns(new_columns)

    # We do some 'checks' on the Object.planted match to diagnose pipeline issues.
    # Those checks are made using just those planted sources we should have detected.
    bright = planted_objects_table['mag'] < bright_limit
    n_bright_planted = numpy.count_nonzero(planted_objects_table['mag'][bright])

    measures = []
    idxs = []
    for idx in range(len(match_idx)):
        # The match_idx value is False if nothing was found.
        if not match_idx.mask[idx]:
            # Each 'source' has multiple 'readings'
            measures.append(detections[match_idx[idx]].get_readings())
            idxs.append(idx)

    observations = measure_mags(measures)

    for oidx in range(len(measures)):
        idx = idxs[oidx]
        readings = measures[oidx]
        start_jd = util.Time(readings[0].obs.header['MJD_OBS_CENTER'],
                             format='mpc', scale='utc').jd
        end_jd = util.Time(readings[-1].obs.header['MJD_OBS_CENTER'],
                           format='mpc', scale='utc').jd
        rate = math.sqrt((readings[-1].x - readings[0].x) ** 2 +
                         (readings[-1].y - readings[0].y) ** 2) / (24 * (end_jd - start_jd))
        rate = int(rate * 100) / 100.0
        angle = math.degrees(math.atan2(readings[-1].y - readings[0].y,
                                        readings[-1].x - readings[0].x))
        angle = int(angle * 100) / 100.0
        planted_objects_table[idx]['measure_rate'] = rate
        planted_objects_table[idx]['measure_angle'] = angle
        planted_objects_table[idx]['measure_x'] = observations[readings[0].obs]['mags']["XCENTER"][oidx]
        planted_objects_table[idx]['measure_y'] = observations[readings[0].obs]['mags']["YCENTER"][oidx]
        for ridx in range(len(readings)):
            reading = readings[ridx]
            mags = observations[reading.obs]['mags']
            planted_objects_table[idx]['measure_mag{}'.format(ridx + 1)] = mags["MAG"][oidx]
            planted_objects_table[idx]['measure_merr{}'.format(ridx + 1)] = mags["MERR"][oidx]

    # for idx in range(len(match_fnd)):
    #     if match_fnd.mask[idx]:
    #         measures = detections[idx].get_readings()
    #         false_positives_table.add_row()
    #         false_positives_table[-1] = measure_mags(measures, false_positives_table[-1])

    # Count an object as detected if it has a measured magnitude in the first frame
    # of the triplet.
    n_bright_found = numpy.count_nonzero(planted_objects_table['measure_mag1'][bright])
    # Also compute the offset and standard deviation of the measured magnitude from
    # that planted ones.
    offset = numpy.mean(planted_objects_table['mag'][bright] -
                        planted_objects_table['measure_mag1'][bright])
    try:
        offset = "{:5.2f}".format(offset)
    except:
        offset = "indef"

    std = numpy.std(planted_objects_table['mag'][bright] -
                    planted_objects_table['measure_mag1'][bright])
    try:
        std = "{:5.2f}".format(std)
    except:
        std = "indef"

    if os.access(match_filename, os.R_OK):
        fout = open(match_filename, 'a')
    else:
        fout = open(match_filename, 'w')

    fout.write("#K {:10s} {:10s}\n".format("EXPNUM", "FWHM"))
    for measure in detections[0].get_readings():
        fout.write('#V {:10s} {:10s}\n'.format(measure.obs.header['EXPNUM'],
                                               measure.obs.header['FWHM']))

    fout.write("#K ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")

    fout.write("#V ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(fk_candidate_observations.sys_header[keyword]))
    fout.write("\n")

    fout.write("#K ")
    for keyword in ["NBRIGHT", "NFOUND", "OFFSET", "STDEV"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")
    fout.write("#V {:<10} {:<10} {:<10} {:<10}\n".format(n_bright_planted,
                                                         n_bright_found,
                                                         offset,
                                                         std))

    try:
        writer = ascii.FixedWidth
        # add a hash to the start of line that will have header columns: for JMP
        fout.write("#")
        fout.flush()
        ascii.write(planted_objects_table, output=fout, Writer=writer, delimiter=None)
        if len(false_positives_table) > 0:
            with open(match_filename + ".fp", 'a') as fpout:
                fpout.write("#")
                ascii.write(false_positives_table, output=fpout, Writer=writer,
                            delimiter=None)
    except Exception as e:
        logging.error(str(e))
        raise e
    finally:
        fout.close()

    # Some simple checks to report a failure how we're doing.
    if n_bright_planted < minimum_bright_detections:
        raise RuntimeError(1, "Too few bright objects planted.")

    if n_bright_found / float(n_bright_planted) < bright_fraction:
        raise RuntimeError(2, "Too few bright objects found.")

    return "{} {} {} {}".format(n_bright_planted, n_bright_found, offset, std)
def get_input(self, key, force=False):
    """Get the value of <key> if it already exists, or prompt for it if not"""
    if key not in self._inputs:
        raise InputException("Key {0} is not a valid input!".format(key))
    if self._inputs[key].prompt:
        prompt = self._inputs[key].prompt
    elif self._inputs[key].is_bool():
        prompt = "{0}?".format(key)
    else:
        prompt = "please enter your {0}".format(key)
    help_text = self._inputs[key].help if hasattr(self._inputs[key], 'help') else None

    if self._inputs[key].value is EMPTY or force:
        default_value = None
        if self._inputs[key].default is not EMPTY:
            default_value = self._inputs[key].default
        if self._inputs[key].value is not EMPTY:
            default_value = self._inputs[key].value
        input_value = EMPTY
        while input_value is EMPTY or input_value == '?':
            if input_value == '?' and help_text:
                print(help_text)
            input_value = lib.prompt(
                prompt,
                default=default_value,
                bool_type=self._inputs[key].in_type,
                secret=self._inputs[key].is_secret)
        self._inputs[key].value = input_value

    return self._inputs[key].value
def _webfinger ( provider , request , ** kwargs ) : """Handle webfinger requests ."""
params = urlparse . parse_qs ( request ) if params [ "rel" ] [ 0 ] == OIC_ISSUER : wf = WebFinger ( ) return Response ( wf . response ( params [ "resource" ] [ 0 ] , provider . baseurl ) , headers = [ ( "Content-Type" , "application/jrd+json" ) ] ) else : return BadRequest ( "Incorrect webfinger." )
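A sketch of the kind of query string this handler parses; the rel value shown is the standard OpenID Connect issuer rel, and the account resource is illustrative:
# Hypothetical webfinger query as sent by an OpenID Connect relying party;
# OIC_ISSUER is expected to equal "http://openid.net/specs/connect/1.0/issuer".
request = ("resource=acct%3Aalice%40example.com"
           "&rel=http%3A%2F%2Fopenid.net%2Fspecs%2Fconnect%2F1.0%2Fissuer")
# _webfinger(provider, request) then answers with a JRD document that lists
# provider.baseurl as the issuer for acct:alice@example.com.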
def make_remote_image_result ( annotations = None , labels = None ) : """Instantiate BuildResult for image not built locally ."""
return BuildResult ( image_id = BuildResult . REMOTE_IMAGE , annotations = annotations , labels = labels )
def get_data_specifier ( string ) : """Return a tuple ( table , col ) for some [ incr tsdb ( ) ] data specifier . For example : : item - > ( ' item ' , None ) item : i - input - > ( ' item ' , [ ' i - input ' ] ) item : i - input @ i - wf - > ( ' item ' , [ ' i - input ' , ' i - wf ' ] ) : i - input - > ( None , [ ' i - input ' ] ) ( otherwise ) - > ( None , None )"""
match = data_specifier_re . match ( string ) if match is None : return ( None , None ) table = match . group ( 'table' ) if table is not None : table = table . strip ( ) cols = _split_cols ( match . group ( 'cols' ) ) return ( table , cols )
def _addDatasetAction ( self , dataset ) : """Adds an action for the inputed dataset to the toolbar : param dataset | < XChartDataset >"""
# create the toolbar action action = QAction ( dataset . name ( ) , self ) action . setIcon ( XColorIcon ( dataset . color ( ) ) ) action . setCheckable ( True ) action . setChecked ( True ) action . setData ( wrapVariant ( dataset ) ) action . toggled . connect ( self . toggleDataset ) self . uiDatasetTBAR . addAction ( action )
def respond ( request , code ) : """Responds to the request with the given response code . If ` ` next ` ` is in the form , it will redirect instead ."""
redirect = request . GET . get ( 'next' , request . POST . get ( 'next' ) ) if redirect : return HttpResponseRedirect ( redirect ) return type ( 'Response%d' % code , ( HttpResponse , ) , { 'status_code' : code } ) ( )
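Because the response class is built on the fly with type(), the fallback branch is equivalent to the following (names are illustrative):
from django.http import HttpResponse

# What respond(request, 403) produces when no "next" parameter is present:
Response403 = type('Response403', (HttpResponse,), {'status_code': 403})
resp = Response403()
assert resp.status_code == 403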
def Get3DData ( self , datapath , qt_app = None , dataplus_format = True , gui = False , start = 0 , stop = None , step = 1 , convert_to_gray = True , series_number = None , use_economic_dtype = True , dicom_expected = None , ** kwargs ) : """Returns 3D data and its metadata . # NOTE ( : param qt _ app : ) If it is set to None ( as default ) all dialogs for series selection are performed in terminal . If qt _ app is set to QtGui . QApplication ( ) dialogs are in Qt . : param datapath : directory with input data : param qt _ app : Dialog destination . If None ( default ) - > terminal , if ' QtGui . QApplication ( ) ' - > Qt : param dataplus _ format : New data format . Metadata and data are returned in one structure . : param gui : True if ' QtGui . QApplication ( ) ' instead of terminal should be used : param int start : used for DicomReader , defines where 3D data reading should start : param int stop : used for DicomReader , defines where 3D data reading should stop : param int step : used for DicomReader , defines step for 3D data reading : param bool convert _ to _ gray : if True - > RGB is converted to gray : param int series _ number : used in DicomReader , essential in metadata : param use _ economic _ dtype : if True , casts 3D data array to less space consuming dtype : param dicom _ expected : set true if it is known that data is in dicom format . Set False to suppress dicom warnings . : return : tuple ( data3d , metadata )"""
self . orig_datapath = datapath datapath = os . path . expanduser ( datapath ) if series_number is not None and type ( series_number ) != int : series_number = int ( series_number ) if not os . path . exists ( datapath ) : logger . error ( "Path '" + datapath + "' does not exist" ) return if qt_app is None and gui is True : from PyQt4 . QtGui import QApplication qt_app = QApplication ( sys . argv ) if type ( datapath ) is not str : datapath = str ( datapath ) datapath = os . path . normpath ( datapath ) self . start = start self . stop = stop self . step = step self . convert_to_gray = convert_to_gray self . series_number = series_number self . kwargs = kwargs self . qt_app = qt_app self . gui = gui if os . path . isfile ( datapath ) : logger . debug ( 'file read recognized' ) data3d , metadata = self . __ReadFromFile ( datapath ) elif os . path . exists ( datapath ) : logger . debug ( 'directory read recognized' ) data3d , metadata = self . __ReadFromDirectory ( datapath = datapath , dicom_expected = dicom_expected ) # datapath , start , stop , step , gui = gui , * * kwargs ) else : logger . error ( 'Data path {} not found' . format ( datapath ) ) if convert_to_gray : if len ( data3d . shape ) > 3 : # TODO : implement better rgb2gray data3d = data3d [ : , : , : , 0 ] if use_economic_dtype : data3d = self . __use_economic_dtype ( data3d ) if dataplus_format : logger . debug ( 'dataplus format' ) # metadata = { ' voxelsize _ mm ' : [ 1 , 1 , 1 ] } datap = metadata datap [ 'data3d' ] = data3d logger . debug ( 'datap keys () : ' + str ( datap . keys ( ) ) ) return datap else : return data3d , metadata
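A minimal usage sketch, assuming dr is an instance of the reader class this method belongs to (the path is illustrative):
# Read a directory of slices in the default "dataplus" format.
datap = dr.Get3DData("~/data/ct_series/", dataplus_format=True, gui=False)
data3d = datap["data3d"]               # 3D numpy array
voxelsize = datap.get("voxelsize_mm")  # other metadata travels in the same dict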
def get_mean_DEV ( sum_ptrm_checks , sum_abs_ptrm_checks , n_pTRM , delta_x_prime ) : """input : sum _ ptrm _ checks , sum _ abs _ ptrm _ checks , n _ pTRM , delta _ x _ prime output : mean _ DEV , mean _ DEV _ prime - - the signed and absolute mean deviations of a pTRM check , in percent"""
if not n_pTRM : return float ( 'nan' ) , float ( 'nan' ) mean_DEV = ( ( old_div ( 1. , n_pTRM ) ) * ( old_div ( sum_ptrm_checks , delta_x_prime ) ) ) * 100 mean_DEV_prime = ( ( old_div ( 1. , n_pTRM ) ) * ( old_div ( sum_abs_ptrm_checks , delta_x_prime ) ) ) * 100 return mean_DEV , mean_DEV_prime
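Spelled out, both statistics are the pTRM-check sum divided by the best-fit segment length delta_x_prime, averaged over the n_pTRM checks and expressed as a percentage; a worked example with illustrative numbers:
# mean_DEV       = (1 / n_pTRM) * (sum_ptrm_checks     / delta_x_prime) * 100
# mean_DEV_prime = (1 / n_pTRM) * (sum_abs_ptrm_checks / delta_x_prime) * 100
mean_DEV, mean_DEV_prime = get_mean_DEV(0.02, 0.05, n_pTRM=5, delta_x_prime=0.4)
# -> approximately (1.0, 2.5), i.e. 1% signed and 2.5% absolute mean deviation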
def _check_smart_storage_message ( self ) : """Check for smart storage message . : returns : result , raid _ message"""
ssc_mesg = self . smart_storage_config_message result = True raid_message = "" for element in ssc_mesg : if "Success" not in element [ 'MessageId' ] : result = False raid_message = element [ 'MessageId' ] return result , raid_message
def _failed ( self , msg ) : """Log a validation failure . : param string msg : the error message"""
self . log ( msg ) self . result . passed = False self . result . add_error ( msg ) self . log ( u"Failed" )
def for_name ( modpath , classname ) : '''Returns an instance of class " classname " from module " modpath " .'''
module = __import__ ( modpath , fromlist = [ classname ] ) classobj = getattr ( module , classname ) return classobj ( )
def pretty_dict_str ( d , indent = 2 ) : """shows JSON indented representation of d"""
b = StringIO ( ) write_pretty_dict_str ( b , d , indent = indent ) return b . getvalue ( )
def purge_stream ( self , stream_id , remove_definition = False , sandbox = None ) : """Purge the stream : param stream _ id : The stream identifier : param remove _ definition : Whether to remove the stream definition as well : param sandbox : The sandbox for this stream : return : None : raises : NotImplementedError"""
# TODO : Add time interval to this if sandbox is not None : raise NotImplementedError if stream_id not in self . streams : raise StreamNotFoundError ( "Stream with id '{}' not found" . format ( stream_id ) ) stream = self . streams [ stream_id ] query = stream_id . as_raw ( ) with switch_db ( StreamInstanceModel , 'hyperstream' ) : StreamInstanceModel . objects ( __raw__ = query ) . delete ( ) # Also update the stream status stream . calculated_intervals = TimeIntervals ( [ ] ) if remove_definition : with switch_db ( StreamDefinitionModel , 'hyperstream' ) : StreamDefinitionModel . objects ( __raw__ = query ) . delete ( ) logging . info ( "Purged stream {}" . format ( stream_id ) )
def get_keys ( logger = None , host_pkey_directories = None , allow_agent = False ) : """Load public keys from any available SSH agent or local . ssh directory . Arguments : logger ( Optional [ logging . Logger ] ) host _ pkey _ directories ( Optional [ list [ str ] ] ) : List of local directories where host SSH pkeys in the format " id _ * " are searched . For example , [ ' ~ / . ssh ' ] . . versionadded : : 0.1.0 allow _ agent ( Optional [ boolean ] ) : Whether or not load keys from agent Default : False Return : list"""
keys = SSHTunnelForwarder . get_agent_keys ( logger = logger ) if allow_agent else [ ] if host_pkey_directories is not None : paramiko_key_types = { 'rsa' : paramiko . RSAKey , 'dsa' : paramiko . DSSKey , 'ecdsa' : paramiko . ECDSAKey , 'ed25519' : paramiko . Ed25519Key } for directory in host_pkey_directories or [ DEFAULT_SSH_DIRECTORY ] : for keytype in paramiko_key_types . keys ( ) : ssh_pkey_expanded = os . path . expanduser ( os . path . join ( directory , 'id_{}' . format ( keytype ) ) ) if os . path . isfile ( ssh_pkey_expanded ) : ssh_pkey = SSHTunnelForwarder . read_private_key_file ( pkey_file = ssh_pkey_expanded , logger = logger , key_type = paramiko_key_types [ keytype ] ) if ssh_pkey : keys . append ( ssh_pkey ) if logger : logger . info ( '{0} keys loaded from host directory' . format ( len ( keys ) ) ) return keys
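A small usage sketch (in sshtunnel this helper lives on SSHTunnelForwarder; the call below assumes it is reachable as shown in the signature, and that an SSH agent is running when allow_agent is True):
import logging
log = logging.getLogger("tunnel")
# Looks for id_rsa / id_dsa / id_ecdsa / id_ed25519 under ~/.ssh,
# plus any identities offered by the local SSH agent.
keys = get_keys(logger=log, host_pkey_directories=["~/.ssh"], allow_agent=True)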
def export_epoch_file ( stimfunction , filename , tr_duration , temporal_resolution = 100.0 ) : """Output an epoch file , necessary for some inputs into brainiak This takes in the time course of stimulus events and outputs the epoch file used in Brainiak . The epoch file is a way to structure the timing information in fMRI that allows you to flexibly input different stimulus sequences . This is a list with each entry a 3d matrix corresponding to a participant . The dimensions of the 3d matrix are condition by epoch by time . For the i - th condition , if its k - th epoch spans time points t _ m to t _ n - 1 , then [ i , k , t _ m : t _ n ] are 1 in the epoch file . Parameters stimfunction : list of timepoint by condition arrays The stimulus function describing the time course of events . Each list entry is from a different participant , each row is a different timepoint ( with the given temporal precision ) , each column is a different condition . export _ epoch _ file is looking for differences in the value of stimfunction to identify the start and end of an epoch . If epochs in stimfunction are coded with the same weight and there is no time between blocks then export _ epoch _ file won ' t be able to label them as different epochs filename : str The name of the epoch file to be output tr _ duration : float How long is each TR in seconds temporal _ resolution : float How many elements per second are you modeling with the stimfunction ?"""
# Cycle through the participants , different entries in the list epoch_file = [ 0 ] * len ( stimfunction ) for ppt_counter in range ( len ( stimfunction ) ) : # What is the time course for the participant ( binarized ) stimfunction_ppt = np . abs ( stimfunction [ ppt_counter ] ) > 0 # Down sample the stim function stride = tr_duration * temporal_resolution stimfunction_downsampled = stimfunction_ppt [ : : int ( stride ) , : ] # Calculates the number of event onsets . This uses changes in value # to reflect different epochs . This might be false in some cases ( the # weight is non - uniform over an epoch or there is no break between # identically weighted epochs ) . epochs = 0 # Preset conditions = stimfunction_ppt . shape [ 1 ] for condition_counter in range ( conditions ) : weight_change = ( np . diff ( stimfunction_downsampled [ : , condition_counter ] , 1 , 0 ) != 0 ) # If the first or last events are ' on ' then make these # represent a epoch change if stimfunction_downsampled [ 0 , condition_counter ] == 1 : weight_change [ 0 ] = True if stimfunction_downsampled [ - 1 , condition_counter ] == 1 : weight_change [ - 1 ] = True epochs += int ( np . max ( np . sum ( weight_change , 0 ) ) / 2 ) # Get other information trs = stimfunction_downsampled . shape [ 0 ] # Make a timing file for this participant epoch_file [ ppt_counter ] = np . zeros ( ( conditions , epochs , trs ) ) # Cycle through conditions epoch_counter = 0 # Reset and count across conditions tr_counter = 0 while tr_counter < stimfunction_downsampled . shape [ 0 ] : for condition_counter in range ( conditions ) : # Is it an event ? if tr_counter < stimfunction_downsampled . shape [ 0 ] and stimfunction_downsampled [ tr_counter , condition_counter ] == 1 : # Add a one for this TR epoch_file [ ppt_counter ] [ condition_counter , epoch_counter , tr_counter ] = 1 # Find the next non event value end_idx = np . where ( stimfunction_downsampled [ tr_counter : , condition_counter ] == 0 ) [ 0 ] [ 0 ] tr_idxs = list ( range ( tr_counter , tr_counter + end_idx ) ) # Add ones to all the trs within this event time frame epoch_file [ ppt_counter ] [ condition_counter , epoch_counter , tr_idxs ] = 1 # Start from this index tr_counter += end_idx # Increment epoch_counter += 1 # Increment the counter tr_counter += 1 # Convert to boolean epoch_file [ ppt_counter ] = epoch_file [ ppt_counter ] . astype ( 'bool' ) # Save the file np . save ( filename , epoch_file )
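A minimal usage sketch with one participant, two conditions and a 1 Hz stimulus function (all values are illustrative):
import numpy as np

temporal_resolution = 1.0          # one stimfunction sample per second
tr_duration = 2.0                  # seconds per TR
timepoints, conditions = 100, 2
stimfunction = np.zeros((timepoints, conditions))
stimfunction[10:20, 0] = 1         # condition 0: a single epoch from 10 s to 20 s
stimfunction[40:50, 1] = 1         # condition 1: a single epoch from 40 s to 50 s

# One list entry per participant; writes a .npy file whose per-participant
# entries are boolean arrays of shape (conditions, epochs, TRs).
export_epoch_file([stimfunction], "epoch_file.npy", tr_duration, temporal_resolution)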
def fsplit ( pred , objs ) : """Split a list into two classes according to the predicate ."""
t = [ ] f = [ ] for obj in objs : if pred ( obj ) : t . append ( obj ) else : f . append ( obj ) return ( t , f )
def generate_challenge ( self ) : # local import to avoid circular import from two_factor . utils import totp_digits """Sends the current TOTP token to ` self . number ` using ` self . method ` ."""
no_digits = totp_digits ( ) token = str ( totp ( self . bin_key , digits = no_digits ) ) . zfill ( no_digits ) if self . method == 'call' : make_call ( device = self , token = token ) else : send_sms ( device = self , token = token )
def write_schema_to_file ( cls , schema , file_pointer = stdout , folder = MISSING , context = DEFAULT_DICT ) : """Given a Marshmallow schema , create a JSON Schema for it . Args : schema ( marshmallow . Schema | str ) : The Marshmallow schema , or the Python path to one , to create the JSON schema for . Keyword Args : file _ pointer ( file , optional ) : The pointer to the file to write this schema to . If not provided , the schema will be dumped to ` ` sys . stdout ` ` . folder ( str , optional ) : The folder in which to save the JSON schema . The name of the schema file can be optionally controlled by the schema ' s ` ` Meta . json _ schema _ filename ` ` . If that attribute is not set , the class ' s name will be used for the filename . If writing the schema to a specific file is desired , please pass in a ` ` file _ pointer ` ` . context ( dict , optional ) : The Marshmallow context to be pushed to the schema when it generates the JSONSchema . Returns : dict : The JSON schema in dictionary form ."""
schema = cls . _get_schema ( schema ) json_schema = cls . generate_json_schema ( schema , context = context ) if folder : schema_filename = getattr ( schema . Meta , 'json_schema_filename' , '.' . join ( [ schema . __class__ . __name__ , 'json' ] ) ) json_path = os . path . join ( folder , schema_filename ) file_pointer = open ( json_path , 'w' ) json . dump ( json_schema , file_pointer , indent = 2 ) return json_schema
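A usage sketch, assuming the classmethod lives on a JSON-schema helper class (the class name JSONSchemaDumper and the schema below are illustrative):
from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.String(required=True)
    age = fields.Integer()

# Dump the JSON schema to stdout ...
JSONSchemaDumper.write_schema_to_file(UserSchema())
# ... or write it to /tmp/UserSchema.json (Meta.json_schema_filename not set).
JSONSchemaDumper.write_schema_to_file(UserSchema(), folder="/tmp")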
def _get_commands_by_name ( self , blueprint , name ) : """Get all of the commands with a given name ."""
return list ( filter ( lambda value : value . name == name , blueprint . get_commands ( ) ) )
def repr_size ( n_bytes ) : """> > > repr _ size ( 1000) '1000 Bytes ' > > > repr _ size ( 8257332324597) '7.5 TiB '"""
if n_bytes < 1024 : return '{0} Bytes' . format ( n_bytes ) i = - 1 while n_bytes > 1023 : n_bytes /= 1024.0 i += 1 return '{0} {1}iB' . format ( round ( n_bytes , 1 ) , si_prefixes [ i ] )
def load_object ( target , namespace = None ) : """This helper function loads an object identified by a dotted - notation string . For example : # Load class Foo from example . objects load _ object ( ' example . objects : Foo ' ) If a plugin namespace is provided simple name references are allowed . For example : # Load the plugin named ' routing ' from the ' web . dispatch ' namespace load _ object ( ' routing ' , ' web . dispatch ' ) Providing a namespace does not prevent full object lookup ( dot - colon notation ) from working ."""
if namespace and ':' not in target : allowable = dict ( ( i . name , i ) for i in pkg_resources . iter_entry_points ( namespace ) ) if target not in allowable : raise ValueError ( 'Unknown plugin "' + target + '"; found: ' + ', ' . join ( allowable ) ) return allowable [ target ] . load ( ) parts , target = target . split ( ':' ) if ':' in target else ( target , None ) module = __import__ ( parts ) for part in parts . split ( '.' ) [ 1 : ] + ( [ target ] if target else [ ] ) : module = getattr ( module , part ) return module
def _upload_file_bytes ( self , conn , http_conn , fp , file_length , total_bytes_uploaded , cb , num_cb ) : """Makes one attempt to upload file bytes , using an existing resumable upload connection . Returns etag from server upon success . Raises ResumableUploadException if any problems occur ."""
buf = fp . read ( self . BUFFER_SIZE ) if cb : if num_cb > 2 : cb_count = file_length / self . BUFFER_SIZE / ( num_cb - 2 ) elif num_cb < 0 : cb_count = - 1 else : cb_count = 0 i = 0 cb ( total_bytes_uploaded , file_length ) # Build resumable upload headers for the transfer . Don ' t send a # Content - Range header if the file is 0 bytes long , because the # resumable upload protocol uses an * inclusive * end - range ( so , sending # ' bytes 0-0/1 ' would actually mean you ' re sending a 1 - byte file ) . put_headers = { } if file_length : range_header = self . _build_content_range_header ( '%d-%d' % ( total_bytes_uploaded , file_length - 1 ) , file_length ) put_headers [ 'Content-Range' ] = range_header # Set Content - Length to the total bytes we ' ll send with this PUT . put_headers [ 'Content-Length' ] = str ( file_length - total_bytes_uploaded ) http_request = AWSAuthConnection . build_base_http_request ( conn , 'PUT' , path = self . tracker_uri_path , auth_path = None , headers = put_headers , host = self . tracker_uri_host ) http_conn . putrequest ( 'PUT' , http_request . path ) for k in put_headers : http_conn . putheader ( k , put_headers [ k ] ) http_conn . endheaders ( ) # Turn off debug on http connection so upload content isn ' t included # in debug stream . http_conn . set_debuglevel ( 0 ) while buf : http_conn . send ( buf ) total_bytes_uploaded += len ( buf ) if cb : i += 1 if i == cb_count or cb_count == - 1 : cb ( total_bytes_uploaded , file_length ) i = 0 buf = fp . read ( self . BUFFER_SIZE ) if cb : cb ( total_bytes_uploaded , file_length ) if total_bytes_uploaded != file_length : # Abort ( and delete the tracker file ) so if the user retries # they ' ll start a new resumable upload rather than potentially # attempting to pick back up later where we left off . raise ResumableUploadException ( 'File changed during upload: EOF at %d bytes of %d byte file.' % ( total_bytes_uploaded , file_length ) , ResumableTransferDisposition . ABORT ) resp = http_conn . getresponse ( ) body = resp . read ( ) # Restore http connection debug level . http_conn . set_debuglevel ( conn . debug ) if resp . status == 200 : return resp . getheader ( 'etag' ) # Success # Retry timeout ( 408 ) and status 500 and 503 errors after a delay . elif resp . status in [ 408 , 500 , 503 ] : disposition = ResumableTransferDisposition . WAIT_BEFORE_RETRY else : # Catch all for any other error codes . disposition = ResumableTransferDisposition . ABORT raise ResumableUploadException ( 'Got response code %d while attempting ' 'upload (%s)' % ( resp . status , resp . reason ) , disposition )
def flatMap ( self , f , preservesPartitioning = False ) : """Return a new RDD by first applying a function to all elements of this RDD , and then flattening the results . > > > rdd = sc . parallelize ( [ 2 , 3 , 4 ] ) > > > sorted ( rdd . flatMap ( lambda x : range ( 1 , x ) ) . collect ( ) ) [1 , 1 , 1 , 2 , 2 , 3] > > > sorted ( rdd . flatMap ( lambda x : [ ( x , x ) , ( x , x ) ] ) . collect ( ) ) [ ( 2 , 2 ) , ( 2 , 2 ) , ( 3 , 3 ) , ( 3 , 3 ) , ( 4 , 4 ) , ( 4 , 4 ) ]"""
def func ( s , iterator ) : return chain . from_iterable ( map ( fail_on_stopiteration ( f ) , iterator ) ) return self . mapPartitionsWithIndex ( func , preservesPartitioning )
def get_url ( url : str , params : dict = None , timeout : float = 5.0 , cache : bool = True ) : """Wrapper for requests . get ( url ) Args : url : url to retrieve params : query string parameters timeout : allow this much time for the request and time it out if over cache : Cache for up to a day unless this is false Returns : Requests Result obj or None if timed out"""
try : if not cache : with requests_cache . disabled ( ) : r = requests . get ( url , params = params , timeout = timeout ) else : r = requests . get ( url , params = params , timeout = timeout ) log . debug ( f"Response headers {r.headers} From cache {r.from_cache}" ) return r except requests . exceptions . Timeout : log . warn ( f"Timed out getting url in get_url: {url}" ) return None except Exception as e : log . warn ( f"Error getting url: {url} error: {e}" ) return None
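A small usage sketch (the URL is illustrative; the cached branch assumes requests_cache.install_cache() has been configured elsewhere in the module):
r = get_url("https://httpbin.org/get", params={"q": "test"}, timeout=2.0)
if r is not None and r.status_code == 200:
    payload = r.json()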
def _norm_index ( dim , index , start , stop ) : """Return an index normalized to an farray start index ."""
length = stop - start if - length <= index < 0 : normindex = index + length elif start <= index < stop : normindex = index - start else : fstr = "expected dim {} index in range [{}, {})" raise IndexError ( fstr . format ( dim , start , stop ) ) return normindex
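A quick worked example of the two accepted index forms (the dim argument is only used in the error message):
assert _norm_index(0, 5, start=4, stop=8) == 1    # absolute index 5 -> offset 1
assert _norm_index(0, -1, start=4, stop=8) == 3   # negative index counts back from stop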
def outline ( self , inner , outer ) : """Compute region outline by differencing two dilations . Parameters inner : int Size of inner outline boundary ( in pixels ) outer : int Size of outer outline boundary ( in pixels )"""
return self . dilate ( outer ) . exclude ( self . dilate ( inner ) )
def n_relative_paths ( self , n ) : """return relative paths of n levels , including basename . eg : File ( ' a / b / c / d ' ) . n _ relative _ paths ( 3 ) = = ' b / c / d '"""
rv = os . path . join ( self . n_parent_paths ( n - 1 ) , self . basename ) return rv . replace ( '\\' , '/' )
async def _send_packet ( self , pkt ) : """Queue a packet to be sent to the server ."""
if self . state != 'connected' : return await self . queue . put ( pkt ) self . logger . info ( 'Sending packet %s data %s' , packet . packet_names [ pkt . packet_type ] , pkt . data if not isinstance ( pkt . data , bytes ) else '<binary>' )
def form_invalid ( self , form ) : """Handles an invalid form ."""
messages . error ( self . request , form . errors [ NON_FIELD_ERRORS ] ) return redirect ( reverse ( 'forum_conversation:topic' , kwargs = { 'forum_slug' : self . object . topic . forum . slug , 'forum_pk' : self . object . topic . forum . pk , 'slug' : self . object . topic . slug , 'pk' : self . object . topic . pk } , ) , )
def getFileDialogTitle ( msg , title ) : """Create nicely - formatted string based on arguments msg and title : param msg : the msg to be displayed : param title : the window title : return : the formatted title string , or None if neither msg nor title is given"""
if msg and title : return "%s - %s" % ( title , msg ) if msg and not title : return str ( msg ) if title and not msg : return str ( title ) return None
def do_file_download ( client , args ) : """Download file"""
# Sanity check if not os . path . isdir ( args . dest_path ) and not args . dest_path . endswith ( '/' ) : print ( "file-download: " "target '{}' is not a directory" . format ( args . dest_path ) ) if not os . path . exists ( args . dest_path ) : print ( "\tHint: add trailing / to create one" ) return None for src_uri in args . uris : print ( "Downloading {} to {}" . format ( src_uri , args . dest_path ) ) client . download_file ( src_uri , args . dest_path ) print ( "Downloaded {}" . format ( src_uri ) ) return True
def _add_node ( self , agent ) : """Add an Agent as a node to the graph ."""
if agent is None : return node_label = _get_node_label ( agent ) if isinstance ( agent , Agent ) and agent . bound_conditions : bound_agents = [ bc . agent for bc in agent . bound_conditions if bc . is_bound ] if bound_agents : bound_names = [ _get_node_label ( a ) for a in bound_agents ] node_label = _get_node_label ( agent ) + '/' + '/' . join ( bound_names ) self . _complex_nodes . append ( [ agent ] + bound_agents ) else : node_label = _get_node_label ( agent ) node_key = _get_node_key ( agent ) if node_key in self . existing_nodes : return self . existing_nodes . append ( node_key ) self . graph . add_node ( node_key , label = node_label , ** self . node_properties )
def find_if ( pred , iterable , default = None ) : """Returns a reference to the first element in the ` ` iterable ` ` range for which ` ` pred ` ` returns ` ` True ` ` . If no such element is found , the function returns ` ` default ` ` . > > > find _ if ( lambda x : x = = 3 , [ 1 , 2 , 3 , 4 ] ) : param pred : a predicate function to check a value from the iterable range : param iterable : an iterable range to check in : param default : a value that will be returned if no elements were found : returns : a reference to the first found element or default"""
return next ( ( i for i in iterable if pred ( i ) ) , default )
def i2osp ( x , x_len ) : '''Converts the integer x to its big - endian representation of length x _ len .'''
if x >= 256 ** x_len : raise exceptions . IntegerTooLarge h = hex ( x ) [ 2 : ] if h [ - 1 ] == 'L' : h = h [ : - 1 ] if len ( h ) & 1 == 1 : h = '0%s' % h x = binascii . unhexlify ( h ) return b'\x00' * int ( x_len - len ( x ) ) + x
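A quick worked example (the inverse OS2IP conversion is not shown):
# 65537 == 0x010001, so padded to a 4-byte big-endian string:
assert i2osp(65537, 4) == b'\x00\x01\x00\x01'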
def addCellScalars ( self , scalars , name ) : """Add cell scalars to the actor ' s polydata assigning it a name ."""
poly = self . polydata ( False ) if isinstance ( scalars , str ) : scalars = vtk_to_numpy ( poly . GetPointData ( ) . GetArray ( scalars ) ) if len ( scalars ) != poly . GetNumberOfCells ( ) : colors . printc ( "~times Number of scalars != nr. of cells" , c = 1 ) exit ( ) arr = numpy_to_vtk ( np . ascontiguousarray ( scalars ) , deep = True ) arr . SetName ( name ) poly . GetCellData ( ) . AddArray ( arr ) poly . GetCellData ( ) . SetActiveScalars ( name ) self . mapper . SetScalarRange ( np . min ( scalars ) , np . max ( scalars ) ) self . mapper . ScalarVisibilityOn ( ) return self
def projection ( x , A , b ) : """Returns the vector xhat closest to x in 2 - norm , satisfying A . xhat = b . : param x : vector : param A , b : matrix and array characterizing the constraints on x ( A . x = b ) : return x _ hat : optimum angle vector , minimizing cost . : return cost : least square error of xhat , x : return constraints _ error : mse of constraint . : rtype : ( numpy . ndarray , float , float )"""
A_pseudoinv = pseudo_inverse ( A ) tmp_ = A . dot ( x ) tmp_ -= b x_hat = A_pseudoinv . dot ( tmp_ ) np . subtract ( x , x_hat , out = x_hat ) cost = mse ( x_hat , x ) A . dot ( x_hat , out = tmp_ ) constraints_error = mse ( tmp_ , b ) return x_hat , cost , constraints_error
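In closed form this computes the least-squares correction x_hat = x - A_pinv.(A.x - b); a tiny numeric sketch, assuming pseudo_inverse and mse wrap numpy.linalg.pinv and a mean-squared-error helper:
import numpy as np

A = np.array([[1.0, 1.0]])      # constraint: x0 + x1 == 1
b = np.array([1.0])
x = np.array([2.0, 2.0])        # violates the constraint (the sum is 4)
x_hat, cost, constraints_error = projection(x, A, b)
# x_hat is approximately [0.5, 0.5]; A.dot(x_hat) equals b up to round-off.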
def parse_emails ( emails ) : """A function that returns a list of valid email addresses . This function will also convert a single email address into a list of email addresses . None value is also converted into an empty list ."""
if isinstance ( emails , string_types ) : emails = [ emails ] elif emails is None : emails = [ ] for email in emails : try : validate_email_with_name ( email ) except ValidationError : raise ValidationError ( '%s is not a valid email address' % email ) return emails
def _hash_internal ( method , salt , password ) : """Internal password hash helper . Supports plaintext without salt , unsalted and salted passwords . In case salted passwords are used hmac is used ."""
if method == 'plain' : return password , method if isinstance ( password , text_type ) : password = password . encode ( 'utf-8' ) if method . startswith ( 'pbkdf2:' ) : args = method [ 7 : ] . split ( ':' ) if len ( args ) not in ( 1 , 2 ) : raise ValueError ( 'Invalid number of arguments for PBKDF2' ) method = args . pop ( 0 ) iterations = args and int ( args [ 0 ] or 0 ) or DEFAULT_PBKDF2_ITERATIONS is_pbkdf2 = True actual_method = 'pbkdf2:%s:%d' % ( method , iterations ) else : is_pbkdf2 = False actual_method = method hash_func = _hash_funcs . get ( method ) if hash_func is None : raise TypeError ( 'invalid method %r' % method ) if is_pbkdf2 : if not salt : raise ValueError ( 'Salt is required for PBKDF2' ) rv = pbkdf2_hex ( password , salt , iterations , hashfunc = hash_func ) elif salt : if isinstance ( salt , text_type ) : salt = salt . encode ( 'utf-8' ) rv = hmac . HMAC ( salt , password , hash_func ) . hexdigest ( ) else : h = hash_func ( ) h . update ( password ) rv = h . hexdigest ( ) return rv , actual_method
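The method strings follow the 'pbkdf2:<hash>[:<iterations>]' convention used by the surrounding module (this mirrors Werkzeug's internal helper); the salt and password below are illustrative:
rv, actual_method = _hash_internal('pbkdf2:sha256:50000', 'somesalt', 'hunter2')
# actual_method == 'pbkdf2:sha256:50000'; rv is the hex PBKDF2 digest
rv, actual_method = _hash_internal('plain', '', 'hunter2')
# -> ('hunter2', 'plain')  -- plaintext passes straight through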
def return_dat ( self , chan , begsam , endsam ) : """Return the data as 2D numpy . ndarray . Parameters chan : list of int index ( indices ) of the channels to read begsam : int index of the first sample ( inclusively ) endsam : int index of the last sample ( exclusively ) Returns numpy . ndarray A 2d matrix , with dimension chan X samples ."""
# n _ sam = self . hdr [ 4] interval = endsam - begsam dat = empty ( ( len ( chan ) , interval ) ) # beg _ block = floor ( ( begsam / n _ sam ) * n _ block ) # end _ block = floor ( ( endsam / n _ sam ) * n _ block ) for i , chan in enumerate ( chan ) : k = 0 with open ( self . chan_files [ chan ] , 'rt' ) as f : f . readline ( ) for j , datum in enumerate ( f ) : if begsam <= j + 1 < endsam : dat [ i , k ] = float64 ( datum ) k += 1 if k == interval : break # calibration phys_range = self . phys_max - self . phys_min dig_range = self . dig_max - self . dig_min gain = phys_range / dig_range dat *= gain return dat
def _addupdate_hdxobject ( self , hdxobjects , id_field , new_hdxobject ) : # type : ( List [ HDXObjectUpperBound ] , str , HDXObjectUpperBound ) - > HDXObjectUpperBound """Helper function to add a new HDX object to a supplied list of HDX objects or update existing metadata if the object already exists in the list Args : hdxobjects ( List [ T < = HDXObject ] ) : list of HDX objects to which to add new objects or update existing ones id _ field ( str ) : Field on which to match to determine if object already exists in list new _ hdxobject ( T < = HDXObject ) : The HDX object to be added / updated Returns : T < = HDXObject : The HDX object which was added or updated"""
for hdxobject in hdxobjects : if hdxobject [ id_field ] == new_hdxobject [ id_field ] : merge_two_dictionaries ( hdxobject , new_hdxobject ) return hdxobject hdxobjects . append ( new_hdxobject ) return new_hdxobject
def load_file ( self , filename ) : """Load a file which contains yaml configuration entries and merge them into the current instance . : param filename : path of the yaml file to load and merge into the existing configuration instance : type filename : str"""
if not path . exists ( filename ) : raise FileNotFoundError ( filename ) loaded_yaml = load_yaml ( filename , self . context ) if loaded_yaml : self . merge ( loaded_yaml )
def rndstr ( size = 16 ) : """Returns a string of random ascii characters or digits : param size : The length of the string : return : string"""
_basech = string . ascii_letters + string . digits return "" . join ( [ rnd . choice ( _basech ) for _ in range ( size ) ] )
def to_cell_table ( self , merged = True ) : """Returns a list of lists of Cells with the cooked value and note for each cell ."""
new_rows = [ ] for row_index , row in enumerate ( self . rows ( CellMode . cooked ) ) : new_row = [ ] for col_index , cell_value in enumerate ( row ) : new_row . append ( Cell ( cell_value , self . get_note ( ( col_index , row_index ) ) ) ) new_rows . append ( new_row ) if merged : for cell_low , cell_high in self . merged_cell_ranges ( ) : anchor_cell = new_rows [ cell_low [ 1 ] ] [ cell_low [ 0 ] ] for row_index in range ( cell_low [ 1 ] , cell_high [ 1 ] ) : for col_index in range ( cell_low [ 0 ] , cell_high [ 0 ] ) : # NOTE : xlrd occasionally returns ranges that don ' t have cells . try : new_rows [ row_index ] [ col_index ] = anchor_cell . copy ( ) except IndexError : pass return new_rows
def register ( self , app : 'Quart' , first_registration : bool , * , url_prefix : Optional [ str ] = None , ) -> None : """Register this blueprint on the app given ."""
state = self . make_setup_state ( app , first_registration , url_prefix = url_prefix ) if self . has_static_folder : state . add_url_rule ( self . static_url_path + '/<path:filename>' , view_func = self . send_static_file , endpoint = 'static' , ) for func in self . deferred_functions : func ( state )
def qteDisconnectHook ( self , hookName : str , slot : ( types . FunctionType , types . MethodType ) ) : """Disconnect ` ` slot ` ` from ` ` hookName ` ` . If ` ` hookName ` ` does not exist , or ` ` slot ` ` is not connected to ` ` hookName ` ` then return * * False * * , otherwise disassociate ` ` slot ` ` with ` ` hookName ` ` and return * * True * * . | Args | * ` ` hookName ` ` ( * * str * * ) : name of the hook . * ` ` slot ` ` ( * * function * * , * * method * * ) : the routine to execute when the hook triggers . | Returns | * * * bool * * : * * True * * if ` ` slot ` ` was disconnected from ` ` hookName ` ` , and * * False * * in all other cases . | Raises | * * * QtmacsArgumentError * * if at least one argument has an invalid type ."""
# Shorthand . reg = self . _qteRegistryHooks # Return immediately if no hook with that name exists . if hookName not in reg : msg = 'There is no hook called <b>{}</b>.' self . qteLogger . info ( msg . format ( hookName ) ) return False # Return immediately if the ` ` slot ` ` is not connected to the hook . if slot not in reg [ hookName ] : msg = 'Slot <b>{}</b> is not connected to hook <b>{}</b>.' self . qteLogger . info ( msg . format ( str ( slot ) [ 1 : - 1 ] , hookName ) ) return False # Remove ` ` slot ` ` from the list . reg [ hookName ] . remove ( slot ) # If the list is now empty , then remove it altogether . if len ( reg [ hookName ] ) == 0 : reg . pop ( hookName ) return True
def connection_lost ( self , reason ) : """Stops all timers and notifies peer that connection is lost ."""
if self . _peer : state = self . _peer . state . bgp_state if self . _is_bound or state == BGP_FSM_OPEN_SENT : self . _peer . connection_lost ( reason ) self . _peer = None if reason : LOG . info ( reason ) else : LOG . info ( 'Connection to peer closed for unknown reasons.' )
def check_siblings ( graph , outputs ) : """Check that all outputs have their siblings listed ."""
siblings = set ( ) for node in outputs : siblings |= graph . siblings ( node ) siblings = { node . path for node in siblings } missing = siblings - { node . path for node in outputs } if missing : msg = ( 'Include the files above in the command ' 'or use the --with-siblings option.' ) raise click . ClickException ( 'There are missing output siblings:\n\n' '\t{0}\n\n{1}' . format ( '\n\t' . join ( click . style ( path , fg = 'red' ) for path in missing ) , msg , ) , ) return outputs
def normalize_unicode ( text ) : """Normalize any unicode characters to ascii equivalent https : / / docs . python . org / 2 / library / unicodedata . html # unicodedata . normalize"""
if isinstance ( text , six . text_type ) : return unicodedata . normalize ( 'NFKD' , text ) . encode ( 'ascii' , 'ignore' ) . decode ( 'utf8' ) else : return text
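A quick example of the behaviour on text versus non-text input:
assert normalize_unicode(u'Café') == 'Cafe'                     # accents are stripped
assert normalize_unicode(b'already-bytes') == b'already-bytes'  # non-text passes through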
def addSection ( self , data , name = ".pype32\x00" , flags = 0x60000000 ) : """Adds a new section to the existing L { PE } instance . @ type data : str @ param data : The data to be added in the new section . @ type name : str @ param name : ( Optional ) The name for the new section . @ type flags : int @ param flags : ( Optional ) The attributes for the new section ."""
fa = self . ntHeaders . optionalHeader . fileAlignment . value sa = self . ntHeaders . optionalHeader . sectionAlignment . value padding = "\xcc" * ( fa - len ( data ) ) sh = SectionHeader ( ) if len ( self . sectionHeaders ) : # get the va , vz , ra and rz of the last section in the array of section headers vaLastSection = self . sectionHeaders [ - 1 ] . virtualAddress . value sizeLastSection = self . sectionHeaders [ - 1 ] . misc . value pointerToRawDataLastSection = self . sectionHeaders [ - 1 ] . pointerToRawData . value sizeOfRawDataLastSection = self . sectionHeaders [ - 1 ] . sizeOfRawData . value sh . virtualAddress . value = self . _adjustSectionAlignment ( vaLastSection + sizeLastSection , fa , sa ) sh . pointerToRawData . value = self . _adjustFileAlignment ( pointerToRawDataLastSection + sizeOfRawDataLastSection , fa ) sh . misc . value = self . _adjustSectionAlignment ( len ( data ) , fa , sa ) or consts . DEFAULT_PAGE_SIZE sh . sizeOfRawData . value = self . _adjustFileAlignment ( len ( data ) , fa ) or consts . DEFAULT_FILE_ALIGNMENT sh . characteristics . value = flags sh . name . value = name self . sectionHeaders . append ( sh ) self . sections . append ( data + padding ) self . ntHeaders . fileHeader . numberOfSections . value += 1
def get_file_by_id ( self , file_id ) : """Get file details for a file id . : param file _ id : str : uuid of the file : return : File"""
return self . _create_item_response ( self . data_service . get_file ( file_id ) , File )
def checkpoint ( key = 0 , unpickler = pickle . load , pickler = pickle . dump , work_dir = gettempdir ( ) , refresh = False ) : """A utility decorator to save intermediate results of a function . It is the caller ' s responsibility to specify a key naming scheme such that the output of each function call with different arguments is stored in a separate file . : param key : The key to store the computed intermediate output of the decorated function . if key is a string , it is used directly as the name . if key is a string . Template object , you can specify your file - naming convention using the standard string . Template conventions . Since string . Template uses named substitutions , it can handle only keyword arguments . Therfore , in addition to the standard Template conventions , an additional feature is provided to help with non - keyword arguments . For instance if you have a function definition as f ( m , n , arg3 = ' myarg3 ' , arg4 = ' myarg4 ' ) . Say you want your key to be : n followed by an _ followed by ' text ' followed by arg3 followed by a . followed by arg4. Let n = 3 , arg3 = ' out ' , arg4 = ' txt ' , then you are interested in getting ' 3 _ textout . txt ' . This is written as key = Template ( ' { 1 } _ text $ arg3 . $ arg4 ' ) The filename is first generated by substituting the kwargs , i . e key _ id . substitute ( kwargs ) , this would give the string ' { 1 } _ textout . txt ' as output . This is further processed by a call to format with args as the argument , where the second argument is picked ( since counting starts from 0 ) , and we get 3 _ textout . txt . if key is a callable function , it is called with the same arguments as that of the function , in a special format . key must be of the form lambda arg , kwarg : . . . your definition . arg is an iterable containing the un - named arguments of the function , and kwarg is a dictionary containing the keyword arguments . For instance , the above example can be written as : key = lambda arg , kwarg : ' % d _ text % s . % s ' . format ( arg [ 1 ] , kwarg [ ' arg3 ' ] , kwarg [ ' arg4 ' ] ) Or one can define a function that takes the same arguments : def key _ namer ( args , kwargs ) : return ' % d _ text % s . % s ' . format ( arg [ 1 ] , kwarg [ ' arg3 ' ] , kwarg [ ' arg4 ' ] ) This way you can do complex argument processing and name generation . : param pickler : The function that loads the saved object and returns . This should ideally be of the same format as the one that is computed . However , in certain cases , it is enough as long as it provides the information necessary for the caller , even if it is not exactly same as the object returned by the function . : param unpickler : The function that saves the computed object into a file . : param work _ dir : The location where the checkpoint files are stored . : param do _ refresh : If enabled , this will not skip , effectively disabling the decoration @ checkpoint . REFRESHING : One of the intended ways to use the refresh feature is as follows : Say you are checkpointing a function f1 , f2 ; have a file or a place where you define refresh variables : defs . py : REFRESH _ f1 = True REFRESH _ f2 = os . environ [ ' F2 _ REFRESH ' ] # can set this externally code . py : @ checkpoint ( . . . , refresh = REFRESH _ f1) def f1 ( . . . ) : your code . @ checkpoint ( . . . , refresh = REFRESH _ f2) def f2 ( . . . ) : your code . This way , you have control on what to refresh without modifying the code , by setting the defs either via input or by modifying defs . 
py ."""
def decorator ( func ) : def wrapped ( * args , ** kwargs ) : # If first arg is a string , use it directly . if isinstance ( key , str ) : save_file = os . path . join ( work_dir , key ) elif isinstance ( key , Template ) : save_file = os . path . join ( work_dir , key . substitute ( kwargs ) ) save_file = save_file . format ( * args ) elif isinstance ( key , types . FunctionType ) : save_file = os . path . join ( work_dir , key ( args , kwargs ) ) else : logging . warn ( 'Using 0-th argument as default.' ) save_file = os . path . join ( work_dir , '{0}' ) save_file = save_file . format ( args [ key ] ) logging . info ( 'checkpoint@ %s' % save_file ) # cache _ file doesn ' t exist , run the function and save output in checkpoint . if isinstance ( refresh , types . FunctionType ) : do_refresh = refresh ( ) else : do_refresh = refresh if do_refresh or not os . path . exists ( path = save_file ) : # Otherwise compute it save it and return it . # If the program fails , don ' t checkpoint . try : out = func ( * args , ** kwargs ) except : # a blank raise re - raises the last exception . raise else : # If the program is successful , then go ahead and call the save function . with open ( save_file , 'wb' ) as f : pickler ( out , f ) return out # Otherwise , load the checkpoint file and send it . else : logging . info ( "Checkpoint exists. Loading from: %s" % save_file ) with open ( save_file , 'rb' ) as f : return unpickler ( f ) # Todo : Sending options to load / save functions . return wrapped return decorator
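A minimal usage sketch with a plain string key (the path and the decorated function are illustrative; per the docstring, a fixed string key only makes sense when the arguments do not vary):
@checkpoint(key='expensive_sum.pkl', work_dir='/tmp')
def expensive_sum(n):
    return sum(i * i for i in range(n))

expensive_sum(10000000)   # computed once and pickled to /tmp/expensive_sum.pkl
expensive_sum(10000000)   # subsequent calls load the pickled result instead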