def progression_linear(week, start_weight, final_weight, start_week, end_week):
    """A linear progression function going through the points
    ('start_week', 'start_weight') and ('end_week', 'final_weight'),
    evaluated in 'week'.

    Parameters
    ----------
    week
        The week to evaluate the linear function at.
    start_weight
        The weight at 'start_week'.
    final_weight
        The weight at 'end_week'.
    start_week
        The number of the first week, typically 1.
    end_week
        The number of the final week, e.g. 8.

    Returns
    -------
    weight
        The weight at 'week'.

    Examples
    --------
    >>> progression_linear(week=2, start_weight=100, final_weight=120,
    ...                    start_week=1, end_week=3)
    110.0
    >>> progression_linear(3, 100, 140, 1, 5)
    120.0
    """
    # Calculate the slope of the linear function
    slope = (start_weight - final_weight) / (start_week - end_week)
    # Return the answer y = slope * (x - x_0) + y_0
    return slope * (week - start_week) + start_weight
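The function can be mapped over a range of weeks to lay out a full progression; a minimal illustration with made-up start and final weights:

# Evaluate the progression for every week of an assumed 4-week block.
plan = [progression_linear(w, start_weight=100, final_weight=130, start_week=1, end_week=4)
        for w in range(1, 5)]
print(plan)  # [100.0, 110.0, 120.0, 130.0]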
def _replace_image(image_url, image_tag, ebook_folder, image_name=None):
    """Replaces the src of an image to link to the local copy in the images
    folder of the ebook. Tightly coupled with bs4 package.

    Args:
        image_url (str): The url of the image.
        image_tag (bs4.element.Tag): The bs4 tag containing the image.
        ebook_folder (str): The directory where the ebook files are being
            saved. This must contain a subdirectory called "images".
        image_name (Option[str]): The short name to save the image as.
            Should not contain a directory or an extension.
    """
    try:
        assert isinstance(image_tag, bs4.element.Tag)
    except AssertionError:
        raise TypeError("image_tag cannot be of type " + str(type(image_tag)))

    if image_name is None:
        image_name = str(uuid.uuid4())

    try:
        image_full_path = os.path.join(ebook_folder, 'images')
        assert os.path.exists(image_full_path)
        image_extension = save_image(image_url, image_full_path, image_name)
        image_tag['src'] = 'images' + '/' + image_name + '.' + image_extension
    except ImageErrorException:
        image_tag.decompose()
    except AssertionError:
        raise ValueError("%s doesn't exist or doesn't contain a subdirectory "
                         "images" % ebook_folder)
    except TypeError:
        image_tag.decompose()
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
    """Wrapper function to extract features from base network, attaching extra
    layers and SSD specific layers.

    Parameters
    ----------
    from_layers : list of str
        feature extraction layers, use '' for add extra layers
        For example:
        from_layers = ['relu4_3', 'fc7', '', '', '', '']
        which means extract feature from relu4_3 and fc7, adding 4 extra layers
        on top of fc7
    num_filters : list of int
        number of filters for extra layers, you can use -1 for extracted
        features, however, if normalization and scale is applied, the number
        of filter for that layer must be provided.
        For example:
        num_filters = [512, -1, 512, 256, 256, 256]
    strides : list of int
        strides for the 3x3 convolution appended, -1 can be used for extracted
        feature layers
    pads : list of int
        paddings for the 3x3 convolution, -1 can be used for extracted layers
    min_filter : int
        minimum number of filters used in 1x1 convolution

    Returns
    -------
    list of mx.Symbols
    """
    # arguments check
    assert len(from_layers) > 0
    assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
    assert len(from_layers) == len(num_filters) == len(strides) == len(pads)

    internals = body.get_internals()
    layers = []
    for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
        from_layer, num_filter, s, p = params
        if from_layer.strip():
            # extract from base network
            layer = internals[from_layer.strip() + '_output']
            layers.append(layer)
        else:
            # attach from last feature layer
            assert len(layers) > 0
            assert num_filter > 0
            layer = layers[-1]
            num_1x1 = max(min_filter, num_filter // 2)
            conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
                                      num_1x1, kernel=(1, 1), pad=(0, 0),
                                      stride=(1, 1), act_type='relu')
            conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
                                      num_filter, kernel=(3, 3), pad=(p, p),
                                      stride=(s, s), act_type='relu')
            layers.append(conv_3x3)
    return layers
def cell_styles(self):
    """dict of {(row name, col name): style}"""
    styles = {}
    for colname, col in self.dataframe.items():
        for rowname, value in col.items():
            if isinstance(value, Value) and value.style is not None:
                style = value.style
                if not isinstance(style, CellStyle):
                    style = self._named_styles[style]
                styles[(rowname, colname)] = style
    return styles
def get_error_probability(self):
    """For the base in question, how many errors between 0 and 1 do we
    attribute to it? The 'unobserved' errors can only count when one is
    adjacent to the base.

    :returns: error probability p(error_observed) + (1 - p(error_observed)) * error_unobserved
    :rtype: float
    """
    a = self._observable.get_error_probability()
    b = self._unobservable.get_error_probability()
    return a + (1 - a) * b
def add_reorganize_data(self, name, input_name, output_name,
                        mode='SPACE_TO_DEPTH', block_size=2):
    """Add a data reorganization layer of type "SPACE_TO_DEPTH" or
    "DEPTH_TO_SPACE".

    Parameters
    ----------
    name: str
        The name of this layer.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    mode: str
        - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the
          channel dimension. Input is spatially divided into non-overlapping
          blocks of size block_size x block_size and data from each block is
          moved to the channel dimension. Output CHW dimensions are:
          [C * block_size * block_size, H / block_size, W / block_size].
        - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the
          spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'.
          Output CHW dimensions are:
          [C / (block_size * block_size), H * block_size, W * block_size].
    block_size: int
        Must be greater than 1. Must divide H and W, when mode is
        'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode
        is 'DEPTH_TO_SPACE'.

    See Also
    --------
    add_flatten, add_reshape
    """
    spec = self.spec
    nn_spec = self.nn_spec

    # Add a new layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.reorganizeData

    # Set the parameters
    if block_size < 2:
        raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size)
    spec_layer_params.blockSize = block_size
    if mode == 'SPACE_TO_DEPTH':
        spec_layer_params.mode = \
            _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH')
    elif mode == 'DEPTH_TO_SPACE':
        spec_layer_params.mode = \
            _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE')
    else:
        raise NotImplementedError('Unknown reorganization mode %s' % mode)
def connect_widget(self, wid, getter=None, setter=None, signal=None, arg=None,
                   update=True, flavour=None):
    """Finish set-up by connecting the widget. The model was already
    specified in the constructor.

    *wid* is a widget instance.

    *getter* is a callable. It is passed *wid* and must return its current
    value.

    *setter* is a callable. It is passed *wid* and the current value of the
    model property and must update the widget.

    *signal* is a string naming the signal to connect to on *wid*. When it
    is emitted we update the model.

    *getter*, *setter* and *signal* are optional. Missing values are guessed
    from *wid* using :meth:`gtkmvc3.adapters.default.search_adapter_info`.
    If nothing is found this raises :exc:`TypeError`.

    *arg* is an optional value passed to the handler for *signal*. This
    doesn't do anything unless a subclass overrides the handler.

    *update* denotes whether to update the widget from the model
    immediately. Otherwise the widget stays unchanged until the first
    notification.

    *flavour* can be used to select special behaviours about the adaptation
    when twice or more possibilities are possibly handled for the same
    widget type. See adapters.default for further information.
    """
    if wid in self._wid_info:
        raise ValueError("Widget " + str(wid) + " was already connected")

    wid_type = None
    if None in (getter, setter, signal):
        w = search_adapter_info(wid, flavour)
        if getter is None:
            getter = w[GETTER]
        if setter is None:
            setter = w[SETTER]
            wid_type = w[WIDTYPE]
        if signal is None:
            signal = w[SIGNAL]

    # saves information about the widget
    self._wid_info[wid] = (getter, setter, wid_type)

    # connects the widget
    if signal:
        if arg:
            wid.connect(signal, self._on_wid_changed, arg)
        else:
            wid.connect(signal, self._on_wid_changed)

    self._wid = wid

    # updates the widget
    if update:
        self.update_widget()
def get_state_search_path_list(saltenv='base'):
    '''For the state file system, return a list of paths to search for states.'''
    # state cache should be updated before running this method
    search_list = []
    cachedir = __opts__.get('cachedir', None)
    log.info("Searching for files in saltenv: %s", saltenv)
    path = cachedir + os.sep + "files" + os.sep + saltenv
    search_list.append(path)
    return search_list
def update_ids(self, docids):
    """Update id -> pos mapping with new document ids."""
    logger.info("updating %i id mappings" % len(docids))
    for docid in docids:
        if docid is not None:
            pos = self.id2pos.get(docid, None)
            if pos is not None:
                logger.info("replacing existing document %r in %s" % (docid, self))
                del self.pos2id[pos]
            self.id2pos[docid] = self.length
            try:
                del self.id2sims[docid]
            except KeyError:
                pass
        self.length += 1
    self.id2sims.sync()
    self.update_mappings()
def background_bin_from_string(background_bins, data):
    """Return template ids for each bin as defined by the format string.

    Parameters
    ----------
    background_bins: list of strings
        List of strings which define how a background bin is taken from the
        list of templates.
    data: dict of numpy.ndarrays
        Dict with parameter key values and numpy.ndarray values which define
        the parameters of the template bank to bin up.

    Returns
    -------
    bins: dict
        Dictionary of location indices indexed by a bin name
    """
    used = numpy.array([], dtype=numpy.uint32)
    bins = {}
    for mbin in background_bins:
        name, bin_type, boundary = tuple(mbin.split(':'))

        if boundary[0:2] == 'lt':
            member_func = lambda vals, bd=boundary: vals < float(bd[2:])
        elif boundary[0:2] == 'gt':
            member_func = lambda vals, bd=boundary: vals > float(bd[2:])
        else:
            raise RuntimeError("Can't parse boundary condition! Must begin "
                               "with 'lt' or 'gt'")

        if bin_type == 'component' and boundary[0:2] == 'lt':
            # maximum component mass is less than boundary value
            vals = numpy.maximum(data['mass1'], data['mass2'])
        elif bin_type == 'component' and boundary[0:2] == 'gt':
            # minimum component mass is greater than boundary value
            vals = numpy.minimum(data['mass1'], data['mass2'])
        elif bin_type == 'total':
            vals = data['mass1'] + data['mass2']
        elif bin_type == 'chirp':
            vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta(
                data['mass1'], data['mass2'])[0]
        elif bin_type == 'SEOBNRv2Peak':
            vals = pycbc.pnutils.get_freq('fSEOBNRv2Peak',
                                          data['mass1'], data['mass2'],
                                          data['spin1z'], data['spin2z'])
        elif bin_type == 'SEOBNRv4Peak':
            vals = pycbc.pnutils.get_freq('fSEOBNRv4Peak',
                                          data['mass1'], data['mass2'],
                                          data['spin1z'], data['spin2z'])
        elif bin_type == 'SEOBNRv2duration':
            vals = pycbc.pnutils.get_imr_duration(
                data['mass1'], data['mass2'],
                data['spin1z'], data['spin2z'],
                data['f_lower'], approximant='SEOBNRv2')
        else:
            raise ValueError('Invalid bin type %s' % bin_type)

        locs = member_func(vals)
        del vals

        # make sure we don't reuse anything from an earlier bin
        locs = numpy.where(locs)[0]
        locs = numpy.delete(locs, numpy.where(numpy.in1d(locs, used))[0])
        used = numpy.concatenate([used, locs])
        bins[name] = locs
    return bins
def _docstring_key(self, line):
    """Returns the key to use for the docblock immediately preceding the
    specified line."""
    decormatch = self.docparser.RE_DECOR.match(line)
    if decormatch is not None:
        key = "{}.{}".format(self.docelement.name, decormatch.group("name"))
    else:
        key = self.element.name
    return key
def print_tweets(tweets):
    """Print a list of tweets one by one, separated by "="s.

    Parameters
    ----------
    tweets: list(dict)
        A list of tweets. Each tweet is a dict containing the username of
        the tweet's author, the post time, and the tweet body.
    """
    print('=' * 60)
    for index, tweet in enumerate(tweets):
        print('-' * 60)
        print('Tweet {}:'.format(index))
        print('Username:', tweet[pytwis_constants.USERNAME_KEY])
        print('Time:', datetime.datetime.fromtimestamp(
            int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY])).strftime('%Y-%m-%d %H:%M:%S'))
        print('Body:\n\t', tweet[pytwis_constants.TWEET_BODY_KEY])
        print('-' * 60)
    print('=' * 60)
def send_metrics_to_cloudwatch(self, rule, metric, dimensions):
    """Send metrics to CloudWatch for the given dimensions."""
    timestamp = datetime.datetime.utcfromtimestamp(metric.timestamp)
    self.log.debug("CloudWatch: Attempting to publish metric: %s to %s "
                   "with value (%s) for dimensions %s @%s",
                   rule['name'], rule['namespace'], str(metric.value),
                   str(dimensions), str(metric.timestamp))
    try:
        self.connection.put_metric_data(str(rule['namespace']),
                                        str(rule['name']),
                                        str(metric.value),
                                        timestamp,
                                        str(rule['unit']),
                                        dimensions)
        self.log.debug("CloudWatch: Successfully published metric: %s to"
                       " %s with value (%s) for dimensions %s",
                       rule['name'], rule['namespace'], str(metric.value),
                       str(dimensions))
    except AttributeError as e:
        self.log.error("CloudWatch: Failed publishing - %s ", str(e))
    except Exception as e:
        # Rough connection re-try logic.
        self.log.error("CloudWatch: Failed publishing - %s\n%s ",
                       str(e), str(sys.exc_info()[0]))
        self._bind()
def parse_signature(self, statement, element, module=None):
    """Parses the specified line as a new version of the signature for
    'element'.

    :arg statement: the string that has the new signature.
    :arg element: the code element whose signature will be changed.
    """
    # If the signature changes, the user might not have had a chance to add
    # the detailed member information for it yet. Here we will just update
    # the modifiers and attributes. Also, since all the mods etc. will be
    # overwritten, we don't need to handle replace separately.
    smatch = self.RE_SIG.match(statement)
    result = (None, None, None)
    eresult = None

    if smatch is not None:
        name = smatch.group("name").strip()
        modifiers = smatch.group("modifiers") or []
        codetype = smatch.group("codetype")

        # If the exec is a function, we also may have a type and kind specified.
        if codetype.lower() == "function":
            dtype = smatch.group("type")
            kind = smatch.group("kind")
            if module is None:
                element.update(name, modifiers, dtype, kind)
            else:
                eresult = Function(name, modifiers, dtype, kind, module)
        else:
            if module is None:
                element.update(name, modifiers)
            else:
                eresult = Subroutine(name, modifiers, module)

        # The parameter sets are actually driven by the body of the
        # executable rather than the call signature. However, the
        # declarations will be interpreted as members if we don't add the
        # parameters to the ordered list of parameter names. Overwrite that
        # list with the new names.
        params = re.split(r"[\s,]+", smatch.group("parameters").lower())
        if eresult is None:
            element.paramorder = params
        else:
            eresult.paramorder = params

        result = (eresult, smatch.start(), smatch.end())

    return result
def compute_results(self, voting_method, votes=None, winners=1, **kwargs):
    """Compute voting results to decide the winner(s) from the :attr:`votes`.

    The votes should have been made for the current
    :attr:`~creamas.vote.VoteOrganizer.candidates`.

    :param voting_method:
        A function which computes the results from the votes. Should accept
        at least three parameters: candidates, votes and number of vote
        winners. The function should return at least a list of vote winners.
        See, e.g. :func:`~creamas.vote.vote_mean` or
        :func:`~creamas.vote.vote_best`. Additional ``**kwargs`` are passed
        down to the voting method.
    :param list votes:
        A list of votes by which the voting is performed. Each vote should
        have the same set of artifacts in them. If ``None`` the results are
        computed for the current list of
        :attr:`~creamas.vote.VoteOrganizer.votes`.
    :param int winners:
        The number of vote winners
    :returns:
        list of :py:class:`~creamas.core.artifact.Artifact` objects, the
        winning artifacts. Some voting methods may also return a score
        associated with each winning artifact.
    :rtype: list
    """
    if votes is None:
        votes = self.votes
    if len(votes) == 0:
        self._log(logging.DEBUG, "Could not compute results as there are "
                                 "no votes!")
        return []

    self._log(logging.DEBUG,
              "Computing results from {} votes.".format(len(votes)))
    return voting_method(self.candidates, votes, winners, **kwargs)
def reporter(self):
    """Creates a report of the results"""
    logging.info('Creating {} report'.format(self.analysistype))
    # Create the path in which the reports are stored
    make_path(self.reportpath)
    header = 'Strain,Serotype\n'
    data = ''
    with open(os.path.join(self.reportpath,
                           '{}.csv'.format(self.analysistype)), 'w') as report:
        for sample in self.runmetadata.samples:
            if sample.general.bestassemblyfile != 'NA':
                data += sample.name + ','
                if sample[self.analysistype].results:
                    # Set the O-type as either the appropriate attribute, or O-untypeable
                    if ';'.join(sample.serosippr.o_set) == '-':
                        otype = 'O-untypeable'
                    else:
                        otype = '{oset} ({opid})'.format(
                            oset=';'.join(sample.serosippr.o_set),
                            opid=sample.serosippr.best_o_pid)
                    # Same as above, but for the H-type
                    if ';'.join(sample.serosippr.h_set) == '-':
                        htype = 'H-untypeable'
                    else:
                        htype = '{hset} ({hpid})'.format(
                            hset=';'.join(sample.serosippr.h_set),
                            hpid=sample.serosippr.best_h_pid)
                    serotype = '{otype}:{htype}'.format(otype=otype, htype=htype)
                    # Populate the data string
                    data += serotype if serotype != 'O-untypeable:H-untypeable' else 'ND'
                    data += '\n'
                else:
                    data += '\n'
        report.write(header)
        report.write(data)
def _create_config_signature(config):
    """Return the signature for a config object.

    The signature is computed as sha1 digest of the contents of
    working_directory, include_paths, define_symbols and undefine_symbols.

    :param config: Configuration object
    :type config: :class:`parser.xml_generator_configuration_t`
    :rtype: str
    """
    m = hashlib.sha1()
    m.update(config.working_directory.encode("utf-8"))
    for p in config.include_paths:
        m.update(p.encode("utf-8"))
    for p in config.define_symbols:
        m.update(p.encode("utf-8"))
    for p in config.undefine_symbols:
        m.update(p.encode("utf-8"))
    for p in config.cflags:
        m.update(p.encode("utf-8"))
    return m.digest()
def query_most_pic(num, kind='1'):
    '''Query most pics.'''
    return TabPost.select().where(
        (TabPost.kind == kind) & (TabPost.logo != "")
    ).order_by(TabPost.view_count.desc()).limit(num)
def save_object(self, obj):
    """Save an object with Discipline. Only argument is a Django object.

    This function saves the object (regardless of whether it already exists
    or not) and registers it with Discipline, creating a new Action object.
    Do not use obj.save()!
    """
    obj.save()
    try:
        save_object(obj, editor=self)
    except DisciplineException:
        pass
def get_daily(self, date=None):
    """Get time entries for a date (defaults to today)."""
    if date is None:
        return self.get("/daily.json")
    url = "/daily/{}/{}/{}.json".format(date.year, date.month, date.day)
    return self.get(url)
def _author_uid_get(post):
    """Get the UID of the post author.

    :param Post post: The post object to determine authorship of
    :return: Author UID
    :rtype: str
    """
    u = post.meta('author.uid')
    return u if u else str(current_user.uid)
def shorten_text(self, text):
    """Shortens text to fit into the :attr:`width`."""
    if len(text) > self.width:
        return text[:self.width - 3] + '...'
    return text
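The truncation rule is easiest to see in isolation; a standalone sketch of the same logic, assuming a width of 10:

# Standalone sketch of the truncation rule with an assumed width of 10.
def _shorten(text, width=10):
    return text[:width - 3] + '...' if len(text) > width else text

print(_shorten('a very long line'))  # 'a very ...'
print(_shorten('short'))             # 'short'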
def insert_asm(self, addr, asm_code, before_label=False):
    """Insert some assembly code at the specific address. There must be an
    instruction starting at that address.

    :param int addr: Address of insertion
    :param str asm_code: The assembly code to insert
    :return: None
    """
    if before_label:
        self._inserted_asm_before_label[addr].append(asm_code)
    else:
        self._inserted_asm_after_label[addr].append(asm_code)
def profile_validation(self, status):
    """Update the validation pass/fail counts for the selected profile."""
    self.selected_profile.data.setdefault('validation_pass_count', 0)
    self.selected_profile.data.setdefault('validation_fail_count', 0)
    if status:
        self.selected_profile.data['validation_pass_count'] += 1
    else:
        self.selected_profile.data['validation_fail_count'] += 1
def _from_dict(cls, _dict):
    """Initialize a DocStructure object from a json dictionary."""
    args = {}
    if 'section_titles' in _dict:
        args['section_titles'] = [
            SectionTitles._from_dict(x) for x in (_dict.get('section_titles'))
        ]
    if 'leading_sentences' in _dict:
        args['leading_sentences'] = [
            LeadingSentence._from_dict(x) for x in (_dict.get('leading_sentences'))
        ]
    return cls(**args)
def items(self, folder_id, subfolder_id, ann_id=None):
    '''Yields an unordered generator of items in a subfolder.

    The generator yields items, which are represented by a tuple of
    ``content_id`` and ``subtopic_id``. The format of these identifiers is
    unspecified.

    By default (with ``ann_id=None``), subfolders are shown for all
    anonymous users. Optionally, ``ann_id`` can be set to a username, which
    restricts the list to only subfolders owned by that user.

    :param str folder_id: Folder id
    :param str subfolder_id: Subfolder id
    :param str ann_id: Username
    :rtype: generator of ``(content_id, subtopic_id)``
    '''
    self.assert_valid_folder_id(folder_id)
    self.assert_valid_folder_id(subfolder_id)
    ann_id = self._annotator(ann_id)
    folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
    subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
    ident = (folder_cid, subfolder_sid)
    if self.store.get(folder_cid) is None:
        raise KeyError(folder_id)
    for lab in self.label_store.directly_connected(ident):
        cid = lab.other(folder_cid)
        subid = lab.subtopic_for(cid)
        yield (cid, subid)
def check(self, func=None, name=None):
    """A decorator to register a new Dockerflow check to be run when the
    /__heartbeat__ endpoint is called, e.g.::

        from dockerflow.flask import checks

        @dockerflow.check
        def storage_reachable():
            try:
                acme.storage.ping()
            except SlowConnectionException as exc:
                return [checks.Warning(exc.msg, id='acme.health.0002')]
            except StorageException as exc:
                return [checks.Error(exc.msg, id='acme.health.0001')]

    or using a custom name::

        @dockerflow.check(name='acme-storage-check')
        def storage_reachable():
            ...
    """
    if func is None:
        return functools.partial(self.check, name=name)
    if name is None:
        name = func.__name__

    self.logger.info('Registered Dockerflow check %s', name)

    @functools.wraps(func)
    def decorated_function(*args, **kwargs):
        self.logger.info('Called Dockerflow check %s', name)
        return func(*args, **kwargs)

    self.checks[name] = decorated_function
    return decorated_function
def _is_qstring(message):
    """Check if it's a QString without adding any dep to PyQt5."""
    my_class = str(message.__class__)
    my_class_name = my_class.replace('<class \'', '').replace('\'>', '')
    if my_class_name == 'PyQt5.QtCore.QString':
        return True
    return False
def ne(self, other, axis="columns", level=None):
    """Checks element-wise that this is not equal to other.

    Args:
        other: A DataFrame or Series or scalar to compare to.
        axis: The axis to perform the ne over.
        level: The Multilevel index level to apply ne over.

    Returns:
        A new DataFrame filled with Booleans.
    """
    return self._binary_op("ne", other, axis=axis, level=level)
def set_autocamera(self, mode='density'):
    """set_autocamera(mode='density'):

    By default, Scene defines its own Camera. However, there is no general
    way for doing so. Scene uses a density criterion for getting the point
    of view. If this is not a good option for your problem, you can choose
    among: |'minmax'|'density'|'median'|'mean'|. If none of the previous
    methods work well, you may define the camera params by yourself.
    """
    self.Camera.set_autocamera(self._Particles, mode=mode)
    self._camera_params = self.Camera.get_params()
    self._x, self._y, self._hsml, self._kview = self.__compute_scene()
    self._m = self._Particles._mass[self._kview]
def cat(args):
    """
    %prog cat *.pdf -o output.pdf

    Concatenate pages from pdf files into a single pdf file.

    Page ranges refer to the previously-named file. A file not followed by a
    page range means all the pages of the file.

    PAGE RANGES are like Python slices.
            {page_range_help}
    EXAMPLES
        pdfcat -o output.pdf head.pdf content.pdf :6 7: tail.pdf -1
            Concatenate all of head.pdf, all but page seven of content.pdf,
            and the last page of tail.pdf, producing output.pdf.

        pdfcat chapter*.pdf >book.pdf
            You can specify the output file by redirection.

        pdfcat chapter?.pdf chapter10.pdf >book.pdf
            In case you don't want chapter 10 before chapter 2.
    """
    p = OptionParser(cat.__doc__.format(page_range_help=PAGE_RANGE_HELP))
    p.add_option("--nosort", default=False, action="store_true",
                 help="Do not sort file names")
    p.add_option("--cleanup", default=False, action="store_true",
                 help="Remove individual pdfs after merging")
    p.set_outfile()
    p.set_verbose(help="Show page ranges as they are being read")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    outfile = opts.outfile
    if outfile in args:
        args.remove(outfile)

    if not opts.nosort:
        args = natsorted(args)

    filename_page_ranges = parse_filename_page_ranges(args)
    verbose = opts.verbose
    fw = must_open(outfile, "wb")

    merger = PdfFileMerger()
    in_fs = {}
    try:
        for (filename, page_range) in filename_page_ranges:
            if verbose:
                print(filename, page_range, file=sys.stderr)
            if filename not in in_fs:
                in_fs[filename] = open(filename, "rb")
            merger.append(in_fs[filename], pages=page_range)
    except Exception:
        print(traceback.format_exc(), file=sys.stderr)
        print("Error while reading " + filename, file=sys.stderr)
        sys.exit(1)

    merger.write(fw)
    fw.close()

    if opts.cleanup:
        logging.debug("Cleaning up {} files".format(len(args)))
        for arg in args:
            os.remove(arg)
def is_binary(var, allow_none=False):
    """Returns True if var is a binary (bytes) object.

    Result                py-2   py-3
    b'bytes literal'      True   True
    'string literal'      True   False
    u'unicode literal'    False  False

    Also works with the corresponding numpy types.
    """
    return isinstance(var, six.binary_type) or (var is None and allow_none)
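A quick illustration of the table above under Python 3 (requires the six package to be importable):

print(is_binary(b'bytes literal'))        # True
print(is_binary('string literal'))        # False
print(is_binary(None, allow_none=True))   # True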
def confidence_intervals(self, X, width=.95, quantiles=None):
    """Estimate confidence intervals for the model.

    Parameters
    ----------
    X : array-like of shape (n_samples, m_features)
        Input data matrix
    width : float on [0, 1], optional
    quantiles : array-like of floats in (0, 1), optional
        Instead of specifying the prediction width, one can specify the
        quantiles. So ``width=.95`` is equivalent to
        ``quantiles=[.025, .975]``

    Returns
    -------
    intervals : np.array of shape (n_samples, 2 or len(quantiles))

    Notes
    -----
    Wood 2006, section 4.9
        Confidence intervals based on section 4.8 rely on large sample
        results to deal with non-Gaussian distributions, and treat the
        smoothing parameters as fixed, when in reality they are estimated
        from the data.
    """
    if not self._is_fitted:
        raise AttributeError('GAM has not been fitted. Call fit first.')

    X = check_X(X, n_feats=self.statistics_['m_features'],
                edge_knots=self.edge_knots_, dtypes=self.dtype,
                features=self.feature, verbose=self.verbose)

    return self._get_quantiles(X, width, quantiles, prediction=False)
def check_common_elements_order(list1, list2):
    """Verify whether the common elements between two given lists maintain
    the same sequence in both lists.

    Examples:
        check_common_elements_order(['red', 'green', 'black', 'orange'],
                                    ['red', 'pink', 'green', 'white', 'black']) -> True
        check_common_elements_order(['red', 'pink', 'green', 'white', 'black'],
                                    ['white', 'orange', 'pink', 'black']) -> False
        check_common_elements_order(['red', 'green', 'black', 'orange'],
                                    ['red', 'pink', 'green', 'white', 'black']) -> True

    Args:
        list1 and list2: Two lists

    Returns:
        Boolean value indicating whether common elements between list1 and
        list2 have the same order in both lists.
    """
    shared_elements = set(list1) & set(list2)
    ordered_list1 = [item for item in list1 if item in shared_elements]
    ordered_list2 = [item for item in list2 if item in shared_elements]
    return ordered_list1 == ordered_list2
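The check only compares the relative order of shared items; items that appear in one list only are ignored:

a = ['red', 'green', 'black', 'orange']
b = ['red', 'pink', 'green', 'white', 'black']
print(check_common_elements_order(a, b))  # True: red, green, black keep their order
print(check_common_elements_order(b, ['white', 'orange', 'pink', 'black']))  # False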
def get_quant_NAs(quantdata, quantheader):
    """Takes quantdata in a dict and header with quantkeys (e.g. iTRAQ
    isotopes). Returns dict of quant intensities with missing keys set to NA.
    """
    out = {}
    for qkey in quantheader:
        out[qkey] = quantdata.get(qkey, 'NA')
    return out
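A minimal illustration; the channel names and intensities below are made up for the example:

quantdata = {'113': 1200.5, '115': 880.0}
quantheader = ['113', '114', '115', '116']
print(get_quant_NAs(quantdata, quantheader))
# {'113': 1200.5, '114': 'NA', '115': 880.0, '116': 'NA'}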
def components(self, extra_params=None):
    """All components in this Space"""
    return self.api._get_json(
        Component,
        space=self,
        rel_path=self._build_rel_path('ticket_components'),
        extra_params=extra_params,
    )
def infer_modifications(stmts):
    """Return inferred Modification from RegulateActivity + ActiveForm.

    This function looks for combinations of Activation/Inhibition Statements
    and ActiveForm Statements that imply a Modification Statement. For
    example, if we know that A activates B, and phosphorylated B is active,
    then we can infer that A leads to the phosphorylation of B. An
    additional requirement when making this assumption is that the activity
    of B should only be dependent on the modified state and not other
    context - otherwise the inferred Modification is not necessarily
    warranted.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of Statements to infer Modifications from.

    Returns
    -------
    linked_stmts : list[indra.mechlinker.LinkedStatement]
        A list of LinkedStatements representing the inferred Statements.
    """
    linked_stmts = []
    for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
        for af_stmt in _get_statements_by_type(stmts, ActiveForm):
            if not af_stmt.agent.entity_matches(act_stmt.obj):
                continue
            mods = af_stmt.agent.mods
            # Make sure the ActiveForm only involves modified sites
            if af_stmt.agent.mutations or \
               af_stmt.agent.bound_conditions or \
               af_stmt.agent.location:
                continue
            if not af_stmt.agent.mods:
                continue
            for mod in af_stmt.agent.mods:
                evs = act_stmt.evidence + af_stmt.evidence
                for ev in evs:
                    ev.epistemics['direct'] = False
                if mod.is_modified:
                    mod_type_name = mod.mod_type
                else:
                    mod_type_name = modtype_to_inverse[mod.mod_type]
                mod_class = modtype_to_modclass[mod_type_name]
                if not mod_class:
                    continue
                st = mod_class(act_stmt.subj, act_stmt.obj,
                               mod.residue, mod.position,
                               evidence=evs)
                ls = LinkedStatement([act_stmt, af_stmt], st)
                linked_stmts.append(ls)
                logger.info('inferred: %s' % st)
    return linked_stmts
def AddSpecification(self, specification):
    """Adds a format specification.

    Args:
        specification (FormatSpecification): format specification.

    Raises:
        KeyError: if the store already contains a specification with the
            same identifier.
    """
    if specification.identifier in self._format_specifications:
        raise KeyError(
            'Format specification {0:s} is already defined in store.'.format(
                specification.identifier))

    self._format_specifications[specification.identifier] = specification

    for signature in specification.signatures:
        signature_index = len(self._signature_map)

        signature_identifier = '{0:s}:{1:d}'.format(
            specification.identifier, signature_index)

        if signature_identifier in self._signature_map:
            raise KeyError('Signature {0:s} is already defined in map.'.format(
                signature_identifier))

        signature.SetIdentifier(signature_identifier)
        self._signature_map[signature_identifier] = specification
def parse_string(self, string):
    """Parse ASCII output of JPrintMeta"""
    self.log.info("Parsing ASCII data")

    if not string:
        self.log.warning("Empty metadata")
        return

    lines = string.splitlines()
    application_data = []
    application = lines[0].split()[0]
    self.log.debug("Reading meta information for '%s'" % application)
    for line in lines:
        if application is None:
            self.log.debug("Reading meta information for '%s'" % application)
            application = line.split()[0]
        application_data.append(line)
        if line.startswith(application + b' Linux'):
            self._record_app_data(application_data)
            application_data = []
            application = None
def models(self):
    """Return all the models defined for this module"""
    app = get_app(self.__class__.__module__.split('.')[-2])
    return get_models(app)
def _append(self, menu):
    '''append this menu item to a menu'''
    menu.AppendCheckItem(self.id(), self.name, self.description)
    menu.Check(self.id(), self.checked)
def split_by_idxs(seq, idxs):
    '''A generator that returns sequence pieces, separated by indexes
    specified in idxs.'''
    last = 0
    for idx in idxs:
        if not (-len(seq) <= idx < len(seq)):
            raise KeyError(f'Idx {idx} is out-of-bounds')
        yield seq[last:idx]
        last = idx
    yield seq[last:]
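Splitting a list at indexes 2 and 4, for example, yields three consecutive pieces:

pieces = list(split_by_idxs([10, 20, 30, 40, 50, 60], [2, 4]))
print(pieces)  # [[10, 20], [30, 40], [50, 60]]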
def cmd_relay(self, args):
    '''set relays'''
    if len(args) == 0 or args[0] not in ['set', 'repeat']:
        print("Usage: relay <set|repeat>")
        return
    if args[0] == "set":
        if len(args) < 3:
            print("Usage: relay set <RELAY_NUM> <0|1>")
            return
        self.master.mav.command_long_send(self.target_system,
                                          self.target_component,
                                          mavutil.mavlink.MAV_CMD_DO_SET_RELAY, 0,
                                          int(args[1]), int(args[2]),
                                          0, 0, 0, 0, 0)
    if args[0] == "repeat":
        if len(args) < 4:
            print("Usage: relay repeat <RELAY_NUM> <COUNT> <PERIOD>")
            return
        self.master.mav.command_long_send(self.target_system,
                                          self.target_component,
                                          mavutil.mavlink.MAV_CMD_DO_REPEAT_RELAY, 0,
                                          int(args[1]), int(args[2]), float(args[3]),
                                          0, 0, 0, 0)
def create_doc_id_from_json(doc) -> str:
    """Docs with identical contents get the same ID.

    Args:
        doc:

    Returns: a string with the hash of the given document.
    """
    return hashlib.sha256(json.dumps(doc, sort_keys=True).encode('utf-8')).hexdigest()
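Because keys are sorted before hashing, key order in the input does not change the resulting ID:

doc_a = {'title': 'hello', 'body': 'world'}
doc_b = {'body': 'world', 'title': 'hello'}
print(create_doc_id_from_json(doc_a) == create_doc_id_from_json(doc_b))  # True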
def asDirect(self):
    """Returns the image data as a direct representation of an
    ``x * y * planes`` array. This method is intended to remove the need
    for callers to deal with palettes and transparency themselves. Images
    with a palette (colour type 3) are converted to RGB or RGBA; images
    with transparency (a ``tRNS`` chunk) are converted to LA or RGBA as
    appropriate. When returned in this format the pixel values represent
    the colour value directly without needing to refer to palettes or
    transparency information.

    Like the :meth:`read` method this method returns a 4-tuple:

    (*width*, *height*, *pixels*, *meta*)

    This method normally returns pixel values with the bit depth they have
    in the source image, but when the source PNG has an ``sBIT`` chunk it
    is inspected and can reduce the bit depth of the result pixels; pixel
    values will be reduced according to the bit depth specified in the
    ``sBIT`` chunk (PNG nerds should note a single result bit depth is used
    for all channels; the maximum of the ones specified in the ``sBIT``
    chunk. An RGB565 image will be rescaled to 6-bit RGB666).

    The *meta* dictionary that is returned reflects the `direct` format and
    not the original source image. For example, an RGB source image with a
    ``tRNS`` chunk to represent a transparent colour, will have ``planes=3``
    and ``alpha=False`` for the source image, but the *meta* dictionary
    returned by this method will have ``planes=4`` and ``alpha=True``
    because an alpha channel is synthesized and added.

    *pixels* is the pixel data in boxed row flat pixel format (just like
    the :meth:`read` method).

    All the other aspects of the image data are not changed.
    """
    self.preamble()

    # Simple case, no conversion necessary.
    if not self.colormap and not self.trns and not self.sbit:
        return self.read()

    x, y, pixels, meta = self.read()

    if self.colormap:
        meta['colormap'] = False
        meta['alpha'] = bool(self.trns)
        meta['bitdepth'] = 8
        meta['planes'] = 3 + bool(self.trns)
        plte = self.palette()

        def iterpal(pixels):
            for row in pixels:
                row = map(plte.__getitem__, row)
                yield array('B', itertools.chain(*row))
        pixels = iterpal(pixels)
    elif self.trns:
        # It would be nice if there was some reasonable way of doing this
        # without generating a whole load of intermediate tuples. But tuples
        # does seem like the easiest way, with no other way clearly much
        # simpler or much faster. (Actually, the L to LA conversion could
        # perhaps go faster (all those 1-tuples!), but I still wonder whether
        # the code proliferation is worth it)
        it = self.transparent
        maxval = 2 ** meta['bitdepth'] - 1
        planes = meta['planes']
        meta['alpha'] = True
        meta['planes'] += 1
        typecode = 'BH'[meta['bitdepth'] > 8]

        def itertrns(pixels):
            for row in pixels:
                # For each row we group it into pixels, then form a
                # characterisation vector that says whether each pixel is
                # opaque or not. Then we convert True/False to 0/maxval (by
                # multiplication), and add it as the extra channel.
                row = group(row, planes)
                opa = map(it.__ne__, row)
                opa = map(maxval.__mul__, opa)
                opa = zip(opa)  # convert to 1-tuples
                yield array(typecode,
                            itertools.chain(*map(operator.add, row, opa)))
        pixels = itertrns(pixels)

    targetbitdepth = None
    if self.sbit:
        sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
        targetbitdepth = max(sbit)
        if targetbitdepth > meta['bitdepth']:
            raise Error('sBIT chunk %r exceeds bitdepth %d' %
                        (sbit, self.bitdepth))
        if min(sbit) <= 0:
            raise Error('sBIT chunk %r has a 0-entry' % sbit)
        if targetbitdepth == meta['bitdepth']:
            targetbitdepth = None
    if targetbitdepth:
        shift = meta['bitdepth'] - targetbitdepth
        meta['bitdepth'] = targetbitdepth

        def itershift(pixels):
            for row in pixels:
                yield map(shift.__rrshift__, row)
        pixels = itershift(pixels)

    return x, y, pixels, meta
def ordering_step(self, oneway=False):
    """iterator that computes all vertices ordering in their layers (one
    layer after the other from top to bottom, to top again unless oneway is
    True)."""
    self.dirv = -1
    crossings = 0
    for l in self.layers:
        mvmt = l.order()
        crossings += mvmt
        yield (l, mvmt)
    if oneway or (crossings == 0):
        return
    self.dirv = +1
    while l:
        mvmt = l.order()
        yield (l, mvmt)
        l = l.nextlayer()
def _AddMessageMethods(message_descriptor, cls):
    """Adds implementations of all Message methods to cls."""
    _AddListFieldsMethod(message_descriptor, cls)
    _AddHasFieldMethod(message_descriptor, cls)
    _AddClearFieldMethod(message_descriptor, cls)
    if message_descriptor.is_extendable:
        _AddClearExtensionMethod(cls)
        _AddHasExtensionMethod(cls)
    _AddEqualsMethod(message_descriptor, cls)
    _AddStrMethod(message_descriptor, cls)
    _AddReprMethod(message_descriptor, cls)
    _AddUnicodeMethod(message_descriptor, cls)
    _AddByteSizeMethod(message_descriptor, cls)
    _AddSerializeToStringMethod(message_descriptor, cls)
    _AddSerializePartialToStringMethod(message_descriptor, cls)
    _AddMergeFromStringMethod(message_descriptor, cls)
    _AddIsInitializedMethod(message_descriptor, cls)
    _AddMergeFromMethod(cls)
    _AddWhichOneofMethod(message_descriptor, cls)
    _AddReduceMethod(cls)
    # Adds methods which do not depend on cls.
    cls.Clear = _Clear
    cls.DiscardUnknownFields = _DiscardUnknownFields
    cls._SetListener = _SetListener
def info(self, section='default'):
    """Get information and statistics about the server.

    If called without argument will return default set of sections.
    For available sections, see http://redis.io/commands/INFO

    :raises ValueError: if section is invalid
    """
    if not section:
        raise ValueError("invalid section")
    fut = self.execute(b'INFO', section, encoding='utf-8')
    return wait_convert(fut, parse_info)
async def is_ready(self):
    """Check if the multi-environment has been fully initialized.

    This calls each slave environment manager's :py:meth:`is_ready` and
    checks if the multi-environment itself is ready by calling
    :py:meth:`~creamas.mp.MultiEnvironment.check_ready`.

    .. seealso::
        :py:meth:`creamas.core.environment.Environment.is_ready`
    """
    async def slave_task(addr, timeout):
        try:
            r_manager = await self.env.connect(addr, timeout=timeout)
            ready = await r_manager.is_ready()
            if not ready:
                return False
        except Exception:
            return False
        return True

    if not self.env.is_ready():
        return False
    if not self.check_ready():
        return False
    rets = await create_tasks(slave_task, self.addrs, 0.5)
    if not all(rets):
        return False
    return True
def _prepare_sort_options(self, has_pk):
    """Prepare sort options for _values attributes.

    If we manage sort by score after getting the result, we do not want to
    get values from the first sort call, but only from the last one, after
    converting results in zset into keys.
    """
    sort_options = super(ExtendedCollectionManager,
                         self)._prepare_sort_options(has_pk)

    if self._values:
        # if we asked for values, we have to use the redis 'sort'
        # command, which is able to return other fields.
        if not sort_options:
            sort_options = {}
        sort_options['get'] = self._values['fields']['keys']

    if self._sort_by_sortedset_after:
        for key in ('get', 'store'):
            if key in self._sort_by_sortedset:
                del self._sort_by_sortedset[key]
        if sort_options and (not has_pk or self._want_score_value):
            for key in ('get', 'store'):
                if key in sort_options:
                    self._sort_by_sortedset[key] = sort_options.pop(key)
        if not sort_options:
            sort_options = None

    return sort_options
def calc_2d_ellipse_properties(cov, nstd=2):
    """Calculate the properties for 2d ellipse given the covariance matrix."""
    def eigsorted(cov):
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    vals, vecs = eigsorted(cov)
    width, height = 2 * nstd * np.sqrt(vals[:2])
    normal = vecs[:, 2] if vecs[2, 2] > 0 else -vecs[:, 2]
    d = np.cross(normal, (0, 0, 1))
    M = rotation_matrix(d)
    x_trans = np.dot(M, (1, 0, 0))
    cos_val = np.dot(vecs[:, 0], x_trans) / np.linalg.norm(vecs[:, 0]) / np.linalg.norm(x_trans)
    theta = np.degrees(np.arccos(np.clip(cos_val, -1, 1)))  # if you really want the angle
    return {'width': width, 'height': height, 'angle': theta}, normal
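The axis-length rule (2 * nstd * sqrt of the two largest eigenvalues) can be checked in isolation; this sketch does not call the function itself, since rotation_matrix is an external helper not shown here:

import numpy as np

cov = np.diag([4.0, 1.0, 0.25])            # assumed principal variances 4, 1, 0.25
vals = np.sort(np.linalg.eigvalsh(cov))[::-1]
width, height = 2 * 2 * np.sqrt(vals[:2])  # nstd = 2
print(width, height)                       # 8.0 4.0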
def to_java_doubles(m):
    '''to_java_doubles(m) yields a java array object for the vector or matrix m.'''
    global _java
    if _java is None:
        _init_registration()
    m = np.asarray(m)
    dims = len(m.shape)
    if dims > 2:
        raise ValueError('1D and 2D arrays supported only')
    bindat = serialize_numpy(m, 'd')
    return (_java.jvm.nben.util.Numpy.double2FromBytes(bindat) if dims == 2
            else _java.jvm.nben.util.Numpy.double1FromBytes(bindat))
def build_pdf(source, texinputs=[], builder=None):
    """Builds a LaTeX source to PDF.

    Will automatically instantiate an available builder (or raise a
    :class:`exceptions.RuntimeError` if none are available) and build the
    supplied source with it.

    Parameters are passed on to the builder's
    :meth:`~latex.build.LatexBuilder.build_pdf` function.

    :param builder: Specify which builder should be used - ``latexmk``,
                    ``pdflatex`` or ``xelatexmk``.
    """
    if builder is None:
        builders = PREFERRED_BUILDERS
    elif builder not in BUILDERS:
        raise RuntimeError('Invalid Builder specified')
    else:
        builders = (builder,)

    for bld in builders:
        bld_cls = BUILDERS[bld]
        builder = bld_cls()
        if not builder.is_available():
            continue
        return builder.build_pdf(source, texinputs)
    else:
        raise RuntimeError('No available builder could be instantiated. '
                           'Please make sure LaTeX is installed.')
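A hedged usage sketch, assuming a working LaTeX toolchain is installed and that the return value can be written to disk (the save_to method name is an assumption, not confirmed by this source):

# Minimal document; build_pdf picks the first available builder.
min_latex = r"\documentclass{article}\begin{document}Hello, PDF.\end{document}"
pdf = build_pdf(min_latex)
pdf.save_to('hello.pdf')  # assumed persistence method on the returned PDF object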
def stop_containers(self):
    """Stops all containers used by this instance of the backend."""
    while len(self._containers):
        container = self._containers.pop()
        try:
            container.kill(signal.SIGKILL)
        except docker.errors.APIError:
            # probably doesn't exist anymore
            pass
def ckgpav(inst, sclkdp, tol, ref):
    """Get pointing (attitude) and angular velocity for a specified
    spacecraft clock time.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ckgpav_c.html

    :param inst: NAIF ID of instrument, spacecraft, or structure.
    :type inst: int
    :param sclkdp: Encoded spacecraft clock time.
    :type sclkdp: float
    :param tol: Time tolerance.
    :type tol: float
    :param ref: Reference frame.
    :type ref: str
    :return:
        C-matrix pointing data,
        Angular velocity vector,
        Output encoded spacecraft clock time.
    :rtype: tuple
    """
    inst = ctypes.c_int(inst)
    sclkdp = ctypes.c_double(sclkdp)
    tol = ctypes.c_double(tol)
    ref = stypes.stringToCharP(ref)
    cmat = stypes.emptyDoubleMatrix()
    av = stypes.emptyDoubleVector(3)
    clkout = ctypes.c_double()
    found = ctypes.c_int()
    libspice.ckgpav_c(inst, sclkdp, tol, ref, cmat, av,
                      ctypes.byref(clkout), ctypes.byref(found))
    return stypes.cMatrixToNumpy(cmat), stypes.cVectorToPython(av), \
        clkout.value, bool(found.value)
def gen_locustfile(testcase_file_path):
    """generate locustfile from template."""
    locustfile_path = 'locustfile.py'
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "templates",
        "locustfile_template"
    )
    with io.open(template_path, encoding='utf-8') as template:
        with io.open(locustfile_path, 'w', encoding='utf-8') as locustfile:
            template_content = template.read()
            template_content = template_content.replace("$TESTCASE_FILE", testcase_file_path)
            locustfile.write(template_content)
    return locustfile_path
def route(regex, method, name):
    """Route the decorated view.

    :param regex: A string describing a regular expression to which the
        request path will be matched.
    :param method: A string describing the HTTP method that this view
        accepts.
    :param name: A string describing the name of the URL pattern.

    ``regex`` may also be a lambda that accepts the parent resource's
    ``prefix`` argument and returns a string describing a regular expression
    to which the request path will be matched.

    ``name`` may also be a lambda that accepts the parent resource's
    ``views`` argument and returns a string describing the name of the URL
    pattern.
    """
    def decorator(function):
        function.route = routes.route(
            regex=regex,
            view=function.__name__,
            method=method,
            name=name
        )

        @wraps(function)
        def wrapper(self, *args, **kwargs):
            return function(self, *args, **kwargs)
        return wrapper
    return decorator
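A hedged usage sketch of the decorator on a resource method; the resource class, pattern, and view body below are made up for illustration:

class WidgetResource(object):

    @route(r'^widgets/(?P<pk>\d+)/$', method='GET', name='widget-detail')
    def detail(self, request, pk):
        # look up and return the widget for the given pk (illustrative stub)
        pass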
def next(self):
    """Pops and returns the first outgoing message from the list.

    If message list currently has no messages, the calling thread will be
    put to sleep until we have at-least one message in the list that can be
    popped and returned.
    """
    # We pick the first outgoing available and send it.
    outgoing_msg = self.outgoing_msg_list.pop_first()
    # If we do not have any outgoing msg., we wait.
    if outgoing_msg is None:
        self.outgoing_msg_event.clear()
        self.outgoing_msg_event.wait()
        outgoing_msg = self.outgoing_msg_list.pop_first()
    return outgoing_msg
def download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5):
    '''Download files from specified URL, which should be space, tab or
    newline separated URLs. The files will be downloaded to specified
    destination. If `filename.md5` files are downloaded, they are used to
    validate downloaded `filename`. Unless otherwise specified, compressed
    files are decompressed. If `max_jobs` is given, a maximum of `max_jobs`
    concurrent download jobs will be used for each domain. This restriction
    applies to domain names and will be applied to multiple download
    instances.
    '''
    if env.config['run_mode'] == 'dryrun':
        print(f'HINT: download\n{URLs}\n')
        return None
    if isinstance(URLs, str):
        urls = [x.strip() for x in URLs.split() if x.strip()]
    else:
        urls = list(URLs)
    if not urls:
        env.logger.debug(f'No download URL specified: {URLs}')
        return
    if dest_file is not None and len(urls) != 1:
        raise RuntimeError('Only one URL is allowed if a destination file is specified.')
    if dest_file is None:
        filenames = []
        for idx, url in enumerate(urls):
            token = urllib.parse.urlparse(url)
            # if no scheme or netloc, the URL is not acceptable
            if not all([getattr(token, qualifying_attr)
                        for qualifying_attr in ('scheme', 'netloc')]):
                raise ValueError(f'Invalid URL {url}')
            filename = os.path.split(token.path)[-1]
            if not filename:
                raise ValueError(f'Cannot determine destination file for {url}')
            filenames.append(os.path.join(dest_dir, filename))
    else:
        token = urllib.parse.urlparse(urls[0])
        if not all([getattr(token, qualifying_attr)
                    for qualifying_attr in ('scheme', 'netloc')]):
            raise ValueError(f'Invalid URL {urls[0]}')
        filenames = [dest_file]

    succ = [(False, None) for x in urls]
    with ProcessPoolExecutor(max_workers=max_jobs) as executor:
        for idx, (url, filename) in enumerate(zip(urls, filenames)):
            # if there is a lot, start download
            succ[idx] = executor.submit(downloadURL, url, filename, decompress, idx)
        succ = [x.result() for x in succ]

    # for su, url in zip(succ, urls):
    #     if not su:
    #         env.logger.warning('Failed to download {}'.format(url))
    failed = [y for x, y in zip(succ, urls) if not x]
    if failed:
        if len(urls) == 1:
            raise RuntimeError(f'Failed to download {urls[0]}')
        else:
            raise RuntimeError(
                f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})')
    return 0
def main(github_token, github_api_url, progress):
    """A CLI to easily manage GitHub releases, assets and references."""
    global progress_reporter_cls
    progress_reporter_cls.reportProgress = sys.stdout.isatty() and progress
    if progress_reporter_cls.reportProgress:
        progress_reporter_cls = _progress_bar

    global _github_token_cli_arg
    _github_token_cli_arg = github_token

    global _github_api_url
    _github_api_url = github_api_url
def get_max_muO2(self, min_voltage=None, max_voltage=None):
    """Maximum critical oxygen chemical potential along path.

    Args:
        min_voltage: The minimum allowable voltage.
        max_voltage: The maximum allowable voltage.

    Returns:
        Maximum critical oxygen chemical of all compounds along the
        insertion path (a subset of the path can be chosen by the optional
        arguments).
    """
    data = []
    for pair in self._select_in_voltage_range(min_voltage, max_voltage):
        if pair.muO2_discharge is not None:
            data.extend([d['chempot'] for d in pair.muO2_discharge])
        if pair.muO2_charge is not None:
            data.extend([d['chempot'] for d in pair.muO2_charge])
    return max(data) if len(data) > 0 else None
def get_residuals(ds, m):
    """Using the dataset and model object, calculate the residuals and return.

    Parameters
    ----------
    ds: dataset object
    m: model object

    Returns
    -------
    residuals: array of residuals, spec minus model spec
    """
    model_spectra = get_model_spectra(ds, m)
    resid = ds.test_flux - model_spectra
    return resid
def next_builder(self):
    """Create a new builder based off of this one with its sequence number
    incremented.

    :return: A new Builder instance
    :rtype: :class:`Builder`
    """
    sequence = self.sequence + 1
    next_builder = Builder(horizon_uri=self.horizon.horizon_uri,
                           address=self.address,
                           network=self.network,
                           sequence=sequence,
                           fee=self.fee)
    next_builder.keypair = self.keypair
    return next_builder
def get_default_config(self):
    """Returns the default collector settings"""
    config = super(NetstatCollector, self).get_default_config()
    config.update({
        'path': 'netstat',
    })
    return config
def render_word(self, text, tag, i):
    """Render individual word.

    text (unicode): Word text.
    tag (unicode): Part-of-speech tag.
    i (int): Unique ID, typically word index.
    RETURNS (unicode): Rendered SVG markup.
    """
    y = self.offset_y + self.word_spacing
    x = self.offset_x + i * self.distance
    if self.direction == "rtl":
        x = self.width - x
    html_text = escape_html(text)
    return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
def _get_singlekws(skw_matches, spires=False):
    """Get single keywords.

    :var skw_matches: dict of {keyword: [info, ...]}
    :keyword spires: bool, to get the spires output
    :return: list of formatted keywords
    """
    output = {}
    for single_keyword, info in skw_matches:
        output[single_keyword.output(spires)] = len(info[0])
    output = [{'keyword': key, 'number': value}
              for key, value in output.iteritems()]
    return sorted(output, key=lambda x: x['number'], reverse=True)
def offset(self, offset):
    """Move all the intervals in the list by the given ``offset``.

    :param offset: the shift to be applied
    :type offset: :class:`~aeneas.exacttiming.TimeValue`
    :raises TypeError: if ``offset`` is not an instance of ``TimeValue``
    """
    self.log(u"Applying offset to all fragments...")
    self.log([u"  Offset %.3f", offset])
    for fragment in self.fragments:
        fragment.interval.offset(
            offset=offset,
            allow_negative=False,
            min_begin_value=self.begin,
            max_end_value=self.end
        )
    self.log(u"Applying offset to all fragments... done")
def mozjpeg(ext_args):
    """Create argument list for mozjpeg."""
    args = copy.copy(_MOZJPEG_ARGS)
    if Settings.destroy_metadata:
        args += ["-copy", "none"]
    else:
        args += ["-copy", "all"]
    args += ['-outfile']
    args += [ext_args.new_filename, ext_args.old_filename]
    extern.run_ext(args)
    return _JPEG_FORMAT
def _parse_css_color ( color ) : '''_ parse _ css _ color ( css _ color ) - > gtk . gdk . Color'''
if color . startswith ( "rgb(" ) and color . endswith ( ')' ) : r , g , b = [ int ( c ) * 257 for c in color [ 4 : - 1 ] . split ( ',' ) ] return gtk . gdk . Color ( r , g , b ) else : return gtk . gdk . color_parse ( color )
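The factor 257 maps 8-bit CSS channels (0-255) onto the 16-bit range that gtk.gdk.Color expects, since 255 * 257 = 65535. A gtk-free sketch of the same arithmetic:

def parse_rgb(color):
    # "rgb(255, 128, 0)" -> (65535, 32896, 0) on the 16-bit scale
    assert color.startswith("rgb(") and color.endswith(")")
    return tuple(int(c) * 257 for c in color[4:-1].split(","))

print(parse_rgb("rgb(255, 128, 0)"))  # (65535, 32896, 0)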
def _makeStoreOwnerPerson ( self ) : """Make a L { Person } representing the owner of the store that this L { Organizer } is installed in . @ rtype : L { Person }"""
if self . store is None : return None userInfo = self . store . findFirst ( signup . UserInfo ) name = u'' if userInfo is not None : name = userInfo . realName account = self . store . findUnique ( LoginAccount , LoginAccount . avatars == self . store , None ) ownerPerson = self . createPerson ( name ) if account is not None : for method in ( self . store . query ( LoginMethod , attributes . AND ( LoginMethod . account == account , LoginMethod . internal == False ) ) ) : self . createContactItem ( EmailContactType ( self . store ) , ownerPerson , dict ( email = method . localpart + u'@' + method . domain ) ) return ownerPerson
def update ( self , session , arrays = None , frame = None ) : '''Creates a frame and writes it to disk . Args : arrays : a list of np arrays . Use the " custom " option in the client . frame : a 2D np array . This way the plugin can be used for video of any kind , not just the visualization that comes with the plugin . frame can also be a function , which only is evaluated when the " frame " option is selected by the client .'''
new_config = self . _get_config ( ) if self . _enough_time_has_passed ( self . previous_config [ 'FPS' ] ) : self . visualizer . update ( new_config ) self . last_update_time = time . time ( ) final_image = self . _update_frame ( session , arrays , frame , new_config ) self . _update_recording ( final_image , new_config )
def _reset ( self , framer ) : """Reset the state for the framer . It is safe to call this method multiple times with the same framer ; the ID of the framer object will be saved and the state only reset if the IDs are different . After resetting the state , the framer ' s ` ` init _ state ( ) ` ` method will be called ."""
# Do nothing if we ' re already properly initialized if id ( framer ) == self . _framer_id : return # Reset the state self . _other = { } # Initialize the state and save the framer ID framer . init_state ( self ) self . _framer_id = id ( framer )
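A minimal sketch of the intended contract, with stand-in classes rather than the real framer API: the state is only rebuilt when a different framer object is passed.

class _FakeFramer:
    def init_state(self, state):
        state._other = {"ready": True}

class _State:
    _other = {}
    _framer_id = None
    def _reset(self, framer):
        if id(framer) == self._framer_id:
            return                       # same framer: nothing to do
        self._other = {}
        framer.init_state(self)
        self._framer_id = id(framer)

s, f = _State(), _FakeFramer()
s._reset(f)
s._reset(f)                              # second call is a no-op
s._reset(_FakeFramer())                  # new framer: state re-initialised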
async def handle_exception ( self , exc : Exception , action : str , request_id ) : """Handle any exception that occurs , by sending an appropriate message"""
if isinstance ( exc , APIException ) : await self . reply ( action = action , errors = self . _format_errors ( exc . detail ) , status = exc . status_code , request_id = request_id ) elif exc == Http404 or isinstance ( exc , Http404 ) : await self . reply ( action = action , errors = self . _format_errors ( 'Not found' ) , status = 404 , request_id = request_id ) else : raise exc
def close_others ( self ) : """Closes every editors tabs except the current one ."""
current_widget = self . widget ( self . tab_under_menu ( ) ) if self . _try_close_dirty_tabs ( exept = current_widget ) : i = 0 while self . count ( ) > 1 : widget = self . widget ( i ) if widget != current_widget : self . remove_tab ( i ) else : # every tab before the current one has already been removed , so the current tab now sits at index 0 ; keep removing from index 1 i = 1
def grid_search ( script : str , params : typing . Iterable [ str ] , dry_run : bool = False ) -> None : """Build all grid search parameter configurations and optionally run them . : param script : String of command prefix , e . g . ` ` cxflow train - v - o log ` ` . : param params : Iterable collection of strings in standard * * cxflow * * param form , e . g . ` ` ' numerical _ param = [ 1 , 2 ] ' ` ` or ` ` ' text _ param = [ " hello " , " cio " ] ' ` ` . : param dry _ run : If set to ` ` True ` ` , the built commands will only be printed instead of executed ."""
commands = _build_grid_search_commands ( script = script , params = params ) if dry_run : logging . warning ( 'Dry run' ) for command in commands : logging . info ( command ) else : for command in commands : try : completed_process = subprocess . run ( command ) logging . info ( 'Command `%s` completed with exit code %d' , command , completed_process . returncode ) except Exception as _ : # pylint : disable = broad - except logging . error ( 'Command `%s` failed.' , command )
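_build_grid_search_commands is not shown here; a plausible sketch of the expansion it performs is a Cartesian product over the parameter value lists (the function name and parameter syntax below are assumptions based on the docstring, not the actual cxflow helper):

import ast
import itertools

def build_grid_search_commands(script, params):
    # 'lr=[0.1, 0.01]' -> ('lr', [0.1, 0.01])
    parsed = [(p.split('=', 1)[0], ast.literal_eval(p.split('=', 1)[1])) for p in params]
    names = [name for name, _ in parsed]
    for combo in itertools.product(*(values for _, values in parsed)):
        yield script.split() + ['{}={}'.format(n, v) for n, v in zip(names, combo)]

for cmd in build_grid_search_commands('cxflow train -v -o log', ['lr=[0.1, 0.01]', 'layers=[2, 3]']):
    print(cmd)   # 4 commands, one per lr x layers combination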
def single_node_env ( num_gpus = 1 ) : """Setup environment variables for Hadoop compatibility and GPU allocation"""
import tensorflow as tf # ensure expanded CLASSPATH w / o glob characters ( required for Spark 2.1 + JNI ) if 'HADOOP_PREFIX' in os . environ and 'TFOS_CLASSPATH_UPDATED' not in os . environ : classpath = os . environ [ 'CLASSPATH' ] hadoop_path = os . path . join ( os . environ [ 'HADOOP_PREFIX' ] , 'bin' , 'hadoop' ) hadoop_classpath = subprocess . check_output ( [ hadoop_path , 'classpath' , '--glob' ] ) . decode ( ) os . environ [ 'CLASSPATH' ] = classpath + os . pathsep + hadoop_classpath os . environ [ 'TFOS_CLASSPATH_UPDATED' ] = '1' # reserve GPU , if requested if tf . test . is_built_with_cuda ( ) : gpus_to_use = gpu_info . get_gpus ( num_gpus ) logging . info ( "Using gpu(s): {0}" . format ( gpus_to_use ) ) os . environ [ 'CUDA_VISIBLE_DEVICES' ] = gpus_to_use else : # CPU logging . info ( "Using CPU" ) os . environ [ 'CUDA_VISIBLE_DEVICES' ] = ''
def close ( self , reply_code = 0 , reply_text = '' , class_id = 0 , method_id = 0 ) : '''Close this channel . Routes to channel . close .'''
# In the off chance that we call this twice . A good example is if # there ' s an error in close listeners and so we ' re still inside a # single call to process _ frames , which will try to close this channel # if there ' s an exception . if hasattr ( self , 'channel' ) : self . channel . close ( reply_code , reply_text , class_id , method_id )
def tocimxml ( self ) : """Return the CIM - XML representation of this CIM property , as an object of an appropriate subclass of : term : ` Element ` . The returned CIM - XML representation is a ` PROPERTY ` , ` PROPERTY . REFERENCE ` , or ` PROPERTY . ARRAY ` element dependent on the property type , and consistent with : term : ` DSP0201 ` . Note that array properties cannot be of reference type . The order of qualifiers in the returned CIM - XML representation is preserved from the : class : ` ~ pywbem . CIMProperty ` object . Returns : The CIM - XML representation , as an object of an appropriate subclass of : term : ` Element ` ."""
qualifiers = [ q . tocimxml ( ) for q in self . qualifiers . values ( ) ] if self . is_array : # pylint : disable = no - else - return assert self . type != 'reference' if self . value is None : value_xml = None else : array_xml = [ ] for v in self . value : if v is None : if SEND_VALUE_NULL : array_xml . append ( cim_xml . VALUE_NULL ( ) ) else : array_xml . append ( cim_xml . VALUE ( None ) ) elif self . embedded_object is not None : assert isinstance ( v , ( CIMInstance , CIMClass ) ) array_xml . append ( cim_xml . VALUE ( v . tocimxml ( ) . toxml ( ) ) ) else : array_xml . append ( cim_xml . VALUE ( atomic_to_cim_xml ( v ) ) ) value_xml = cim_xml . VALUE_ARRAY ( array_xml ) return cim_xml . PROPERTY_ARRAY ( self . name , self . type , value_xml , self . array_size , self . class_origin , self . propagated , embedded_object = self . embedded_object , qualifiers = qualifiers ) elif self . type == 'reference' : # scalar if self . value is None : value_xml = None else : value_xml = cim_xml . VALUE_REFERENCE ( self . value . tocimxml ( ) ) return cim_xml . PROPERTY_REFERENCE ( self . name , value_xml , reference_class = self . reference_class , class_origin = self . class_origin , propagated = self . propagated , qualifiers = qualifiers ) else : # scalar non - reference if self . value is None : value_xml = None else : if self . embedded_object is not None : assert isinstance ( self . value , ( CIMInstance , CIMClass ) ) value_xml = cim_xml . VALUE ( self . value . tocimxml ( ) . toxml ( ) ) else : value_xml = cim_xml . VALUE ( atomic_to_cim_xml ( self . value ) ) return cim_xml . PROPERTY ( self . name , self . type , value_xml , class_origin = self . class_origin , propagated = self . propagated , embedded_object = self . embedded_object , qualifiers = qualifiers )
def ecg_systole ( ecg , rpeaks , t_waves_ends ) : """Returns the localization of systoles and diastoles . Parameters ecg : list or ndarray ECG signal ( preferably filtered ) . rpeaks : list or ndarray R peaks localization . t _ waves _ ends : list or ndarray T waves localization . Returns systole : ndarray Array indicating where systole ( 1 ) and diastole ( 0 ) . Example > > > import neurokit as nk > > > systole = nk . ecg _ systole ( ecg , rpeaks , t _ waves _ ends ) Notes * Authors * - ` Dominique Makowski < https : / / dominiquemakowski . github . io / > ` _ * Details * - * * Systole / Diastole * * : One prominent channel of body and brain communication is that conveyed by baroreceptors , pressure and stretch - sensitive receptors within the heart and surrounding arteries . Within each cardiac cycle , bursts of baroreceptor afferent activity encoding the strength and timing of each heartbeat are carried via the vagus and glossopharyngeal nerve afferents to the nucleus of the solitary tract . This is the principal route that communicates to the brain the dynamic state of the heart , enabling the representation of cardiovascular arousal within viscerosensory brain regions , and influence ascending neuromodulator systems implicated in emotional and motivational behaviour . Because arterial baroreceptors are activated by the arterial pulse pressure wave , their phasic discharge is maximal during and immediately after the cardiac systole , that is , when the blood is ejected from the heart , and minimal during cardiac diastole , that is , between heartbeats ( Azevedo , 2017 ) . References - Azevedo , R . T . , Garfinkel , S . N . , Critchley , H . D . , & Tsakiris , M . ( 2017 ) . Cardiac afferent activity modulates the expression of racial stereotypes . Nature communications , 8. - Edwards , L . , Ring , C . , McIntyre , D . , & Carroll , D . ( 2001 ) . Modulation of the human nociceptive flexion reflex across the cardiac cycle . Psychophysiology , 38(4 ) , 712-718. - Gray , M . A . , Rylander , K . , Harrison , N . A . , Wallin , B . G . , & Critchley , H . D . ( 2009 ) . Following one ' s heart : cardiac rhythms gate central initiation of sympathetic reflexes . Journal of Neuroscience , 29(6 ) , 1817-1825."""
waves = np . array ( [ "" ] * len ( ecg ) ) waves [ rpeaks ] = "R" waves [ t_waves_ends ] = "T" systole = [ 0 ] current = 0 for index , value in enumerate ( waves [ 1 : ] ) : if waves [ index ] == "R" : current = 1 if waves [ index ] == "T" : current = 0 systole . append ( current ) return ( systole )
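A toy trace makes the marking explicit (values are illustrative only): systole flips to 1 on the sample after an R peak and back to 0 on the sample after a T-wave end.

import numpy as np

waves = np.array(["", "R", "", "", "T", "", ""])
systole, current = [0], 0
for index, value in enumerate(waves[1:]):
    if waves[index] == "R":
        current = 1
    if waves[index] == "T":
        current = 0
    systole.append(current)
print(systole)  # [0, 0, 1, 1, 1, 0, 0]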
def omero_bin ( self , command ) : """Runs the omero command - line client with an array of arguments using the old environment"""
assert isinstance ( command , list ) if not self . old_env : raise Exception ( 'Old environment not initialised' ) log . info ( "Running [old environment]: %s" , " " . join ( command ) ) self . run ( 'omero' , command , capturestd = True , env = self . old_env )
def has_entities ( status ) : """Returns true if a Status object has entities . Args : status : either a tweepy . Status object or a dict returned from Twitter API"""
try : if sum ( len ( v ) for v in status . entities . values ( ) ) > 0 : return True except AttributeError : if sum ( len ( v ) for v in status [ 'entities' ] . values ( ) ) > 0 : return True return False
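A quick check with a dict shaped like a Twitter API response (keys abbreviated; this is not a full Status payload):

status = {'entities': {'hashtags': [{'text': 'python'}], 'urls': [], 'user_mentions': []}}
print(has_entities(status))                                      # True: one hashtag entity
print(has_entities({'entities': {'hashtags': [], 'urls': []}}))  # False: all entity lists empty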
def get_version ( dev_version = False ) : """Generates a version string . Arguments : dev _ version : Generate a verbose development version from git commits . Examples : 1.1 1.1 . dev43 # If ' dev _ version ' was passed ."""
if dev_version : version = git_dev_version ( ) if not version : raise RuntimeError ( "Could not generate dev version from git." ) return version return "1!%d.%d" % ( MAJOR , MINOR )
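The "1!" prefix is a PEP 440 epoch, so these versions sort after any release from an older numbering scheme. For example, with illustrative values MAJOR = 1 and MINOR = 4 (the real constants live in the module):

MAJOR, MINOR = 1, 4
print("1!%d.%d" % (MAJOR, MINOR))   # 1!1.4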
def play ( self ) : """Send signal to resume playback at the paused offset"""
self . _response [ 'shouldEndSession' ] = True self . _response [ 'action' ] [ 'audio' ] [ 'interface' ] = 'play' self . _response [ 'action' ] [ 'audio' ] [ 'sources' ] = [ ] return self
def limit ( self , maximum ) : """Return a new query , limited to a certain number of results . Unlike core reporting queries , you cannot specify a starting point for live queries , just the maximum results returned . ` ` ` python # first 50 query . limit ( 50)"""
self . meta [ 'limit' ] = maximum self . raw . update ( { 'max_results' : maximum , } ) return self
def add_triple ( self , subj : Union [ URIRef , str ] , pred : Union [ URIRef , str ] , obj : Union [ URIRef , Literal , str ] ) -> None : """Adds triple to rdflib Graph Triple can be of any subject , predicate , and object of the entity without a need for order . Args : subj : Entity subject pred : Entity predicate obj : Entity object Example : In [ 1 ] : add _ triple ( . . . : ' http : / / uri . interlex . org / base / ilx _ 0101431 ' , . . . : RDF . type , . . . : ' http : / / www . w3 . org / 2002/07 / owl # Class ' )"""
if obj in [ None , "" , " " ] : return # Empty objects are bad practice _subj = self . process_subj_or_pred ( subj ) _pred = self . process_subj_or_pred ( pred ) _obj = self . process_obj ( obj ) self . g . add ( ( _subj , _pred , _obj ) )
def xmoe2_v1_l4k_compressed_c4 ( ) : """With compressed attention ."""
hparams = xmoe2_v1_l4k ( ) hparams . decoder_layers = [ "compressed_att" if l == "att" else l for l in hparams . decoder_layers ] hparams . compression_factor = 4 return hparams
def get_broks ( self , broker_name ) : """Send a HTTP request to the satellite ( GET / _ broks ) Get broks from the satellite . Un - serialize data received . : param broker _ name : the concerned broker link : type broker _ name : BrokerLink : return : Broks list on success , [ ] on failure : rtype : list"""
res = self . con . get ( '_broks' , { 'broker_name' : broker_name } , wait = False ) logger . debug ( "Got broks from %s: %s" , self . name , res ) return unserialize ( res , True )
async def close_wallet_search ( wallet_search_handle : int ) -> None : """Close wallet search ( make search handle invalid ) : param wallet _ search _ handle : wallet search handle ( created by open _ wallet _ search ) : return : None"""
logger = logging . getLogger ( __name__ ) logger . debug ( "close_wallet_search: >>> wallet_search_handle: %r" , wallet_search_handle ) if not hasattr ( close_wallet_search , "cb" ) : logger . debug ( "close_wallet_search: Creating callback" ) close_wallet_search . cb = create_cb ( CFUNCTYPE ( None , c_int32 , c_int32 ) ) c_wallet_search_handle = c_int32 ( wallet_search_handle ) res = await do_call ( 'indy_close_wallet_search' , c_wallet_search_handle , close_wallet_search . cb ) logger . debug ( "close_wallet_search: <<< res: %r" , res ) return res
def most_similar ( self , word , number = 5 ) : """Run a similarity query , retrieving number most similar words ."""
if self . word_vectors is None : raise Exception ( 'Model must be fit before querying' ) if self . dictionary is None : raise Exception ( 'No word dictionary supplied' ) try : word_idx = self . dictionary [ word ] except KeyError : raise Exception ( 'Word not in dictionary' ) return self . _similarity_query ( self . word_vectors [ word_idx ] , number ) [ 1 : ]
def audiorate ( filename ) : """Determines the samplerate of the given audio recording file : param filename : filename of the audiofile : type filename : str : returns : int - - samplerate of the recording"""
if '.wav' in filename . lower ( ) : wf = wave . open ( filename ) fs = wf . getframerate ( ) wf . close ( ) elif '.call' in filename . lower ( ) : fs = 333333 else : raise IOError ( "Unsupported audio format for file: {}" . format ( filename ) ) return fs
def _createBitpattern ( functioncode , value ) : """Create the bit pattern that is used for writing single bits . This is basically a storage of numerical constants . Args : * functioncode ( int ) : can be 5 or 15 * value ( int ) : can be 0 or 1 Returns : The bit pattern ( string ) . Raises : TypeError , ValueError"""
_checkFunctioncode ( functioncode , [ 5 , 15 ] ) _checkInt ( value , minvalue = 0 , maxvalue = 1 , description = 'inputvalue' ) if functioncode == 5 : if value == 0 : return '\x00\x00' else : return '\xff\x00' elif functioncode == 15 : if value == 0 : return '\x00' else : return '\x01'
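Enumerating the four cases makes the constant table explicit: function code 5 writes a two-byte pattern, function code 15 a single byte.

# (functioncode, value) -> returned bit pattern
cases = {(5, 0): '\x00\x00', (5, 1): '\xff\x00',
         (15, 0): '\x00',    (15, 1): '\x01'}
for (fc, val), pattern in sorted(cases.items()):
    print(fc, val, repr(pattern))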
def flush ( self , hard = False ) : """Drop existing entries from the cache . Args : hard ( bool ) : If True , all current entries are flushed from the server ( s ) , which affects all users . If False , only the local process is affected ."""
if not self . servers : return if hard : self . client . flush_all ( ) self . reset_stats ( ) else : from uuid import uuid4 tag = uuid4 ( ) . hex if self . debug : tag = "flushed" + tag self . current = tag
def split_arg_to_name_type_value ( self , args_list ) : """Split argument text to name , type , value ."""
for arg in args_list : arg_type = None arg_value = None has_type = False has_value = False pos_colon = arg . find ( ':' ) pos_equal = arg . find ( '=' ) if pos_equal > - 1 : has_value = True if pos_colon > - 1 : if not has_value : has_type = True elif pos_equal > pos_colon : # exception for def foo ( arg1 = " : " ) has_type = True if has_value and has_type : arg_name = arg [ 0 : pos_colon ] . strip ( ) arg_type = arg [ pos_colon + 1 : pos_equal ] . strip ( ) arg_value = arg [ pos_equal + 1 : ] . strip ( ) elif not has_value and has_type : arg_name = arg [ 0 : pos_colon ] . strip ( ) arg_type = arg [ pos_colon + 1 : ] . strip ( ) elif has_value and not has_type : arg_name = arg [ 0 : pos_equal ] . strip ( ) arg_value = arg [ pos_equal + 1 : ] . strip ( ) else : arg_name = arg . strip ( ) self . arg_name_list . append ( arg_name ) self . arg_type_list . append ( arg_type ) self . arg_value_list . append ( arg_value )
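Worked through by hand, the colon/equals positions drive the split; a ':' inside a default string only counts as a type separator when it appears before the '=' (hence the pos_equal > pos_colon guard). A small check of just that guard:

for arg in ['x: int = 5', 'y=3', 'arg1=":"']:
    pos_colon, pos_equal = arg.find(':'), arg.find('=')
    has_value = pos_equal > -1
    has_type = pos_colon > -1 and (not has_value or pos_equal > pos_colon)
    print(arg, '-> type separator?', has_type)
# 'x: int = 5' -> True, 'y=3' -> False, 'arg1=":"' -> False (colon is part of the default)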
def n_orifices_per_row ( self ) : """Calculate number of orifices at each level given an orifice diameter ."""
# H is distance from the bottom of the next row of orifices to the # center of the current row of orifices H = self . b_rows - 0.5 * self . orifice_diameter flow_per_orifice = pc . flow_orifice_vert ( self . orifice_diameter , H , con . VC_ORIFICE_RATIO ) n = np . zeros ( self . n_rows ) for i in range ( self . n_rows ) : # calculate the ideal number of orifices at the current row without # constraining to an integer flow_needed = self . flow_ramp [ i ] - self . flow_actual ( i , n ) n_orifices_real = ( flow_needed / flow_per_orifice ) . to ( u . dimensionless ) # constrain number of orifices to be less than the max per row and # greater or equal to 0 n [ i ] = min ( ( max ( 0 , round ( n_orifices_real ) ) ) , self . n_orifices_per_row_max ) return n
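pc.flow_orifice_vert and the unit handling come from the aguaclara packages and are not reproduced here; stripped of units, the per-row step reduces to clamping the ideal (real-valued) orifice count into [0, max per row], roughly:

def clamp_orifices(flow_needed, flow_per_orifice, max_per_row):
    ideal = flow_needed / flow_per_orifice      # may be fractional or negative
    return min(max(0, round(ideal)), max_per_row)

print(clamp_orifices(0.9, 0.25, 10))   # 4
print(clamp_orifices(-0.1, 0.25, 10))  # 0  (earlier rows already supply enough flow)
print(clamp_orifices(9.0, 0.25, 10))   # 10 (capped at the row maximum)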
def parts ( self ) : """Return an array of batch parts to submit"""
parts = [ ] upserts = dict ( ) deletes = [ ] # we keep track of the batch size as we go ( pretty close approximation ! ) so we can chunk it small enough # to limit the HTTP posts to under 700KB - server limits to 750KB , so play it safe max_upload_size = 700000 # loop upserts first - fit the deletes in afterward # ' { " replace _ all " : true , " complete " : false , " guid " : " 6659fbfc - 3f08-42ee - 998c - 9109f650f4b7 " , " upserts " : [ ] , " deletes " : [ ] } ' base_part_size = 118 if not self . replace_all : base_part_size += 1 # yeah , this is totally overkill : ) part_size = base_part_size for value in self . upserts : if ( part_size + self . upserts_size [ value ] ) >= max_upload_size : # this record would put us over the limit - close out the batch part and start a new one parts . append ( BatchPart ( self . replace_all , upserts , deletes ) ) upserts = dict ( ) deletes = [ ] part_size = base_part_size # for the new upserts dict , drop the lower - casing of value upserts [ self . lower_val_to_val [ value ] ] = self . upserts [ value ] part_size += self . upserts_size [ value ] # updating the approximate size of the batch for value in self . deletes : # delete adds length of string plus quotes , comma and space if ( part_size + len ( value ) + 4 ) >= max_upload_size : parts . append ( BatchPart ( self . replace_all , upserts , deletes ) ) upserts = dict ( ) deletes = [ ] part_size = base_part_size # for the new deletes set , drop the lower - casing of value deletes . append ( { 'value' : self . lower_val_to_val [ value ] } ) part_size += len ( value ) + 4 if len ( upserts ) + len ( deletes ) > 0 : # finish the batch parts . append ( BatchPart ( self . replace_all , upserts , deletes ) ) if len ( parts ) == 0 : if not self . replace_all : raise ValueError ( "Batch has no data, and 'replace_all' is False" ) parts . append ( BatchPart ( self . replace_all , dict ( ) , [ ] ) ) # last part finishes the batch parts [ - 1 ] . set_last_part ( ) return parts
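The size bookkeeping only approximates the serialized JSON, which is why the 700 KB threshold sits safely under the server's 750 KB cap. A stripped-down sketch of the same chunking idea, returning plain dicts per part instead of BatchPart objects (BatchPart and the per-record size map are assumed, not shown here):

MAX_UPLOAD = 700000
BASE = 118

def chunk_upserts(upserts, upserts_size):
    parts, current, size = [], {}, BASE
    for key, record in upserts.items():
        if size + upserts_size[key] >= MAX_UPLOAD and current:
            parts.append(current)            # close out the full part
            current, size = {}, BASE
        current[key] = record
        size += upserts_size[key]
    parts.append(current)                    # last (possibly small) part
    return parts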
def _keepVol ( self , vol ) : """Mark this volume to be kept in path ."""
if vol is None : return if vol in self . extraVolumes : del self . extraVolumes [ vol ] return if vol not in self . paths : raise Exception ( "%s not in %s" % ( vol , self ) ) paths = [ os . path . basename ( path ) for path in self . paths [ vol ] ] newPath = self . selectReceivePath ( paths ) if self . _skipDryRun ( logger , 'INFO' ) ( "Copy %s to %s" , vol , newPath ) : return self . butterVolumes [ vol . uuid ] . copy ( newPath )
def get_sls_opts ( opts , ** kwargs ) : '''Return a copy of the opts for use , optionally load a local config on top'''
opts = copy . deepcopy ( opts ) if 'localconfig' in kwargs : return salt . config . minion_config ( kwargs [ 'localconfig' ] , defaults = opts ) if 'saltenv' in kwargs : saltenv = kwargs [ 'saltenv' ] if saltenv is not None : if not isinstance ( saltenv , six . string_types ) : saltenv = six . text_type ( saltenv ) if opts [ 'lock_saltenv' ] and saltenv != opts [ 'saltenv' ] : raise CommandExecutionError ( 'lock_saltenv is enabled, saltenv cannot be changed' ) opts [ 'saltenv' ] = kwargs [ 'saltenv' ] pillarenv = None if kwargs . get ( 'pillarenv' ) : pillarenv = kwargs . get ( 'pillarenv' ) if opts . get ( 'pillarenv_from_saltenv' ) and not pillarenv : pillarenv = kwargs . get ( 'saltenv' ) if pillarenv is not None and not isinstance ( pillarenv , six . string_types ) : opts [ 'pillarenv' ] = six . text_type ( pillarenv ) else : opts [ 'pillarenv' ] = pillarenv return opts
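A typical call simply layers the keyword overrides on top of a copied opts dict. Sketch of the observable behaviour with a toy opts dict (not a full Salt minion config):

opts = {'saltenv': 'base', 'lock_saltenv': False, 'pillarenv_from_saltenv': True}
new_opts = get_sls_opts(opts, saltenv='dev')
print(new_opts['saltenv'])    # 'dev'
print(new_opts['pillarenv'])  # 'dev', because pillarenv_from_saltenv is set
print(opts['saltenv'])        # 'base': the original opts are untouched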
def _create_latent_variables ( self ) : """Creates model latent variables Returns None ( changes model attributes )"""
for parm in range ( self . z_no ) : self . latent_variables . add_z ( 'Scale ' + self . X_names [ parm ] , fam . Flat ( transform = 'exp' ) , fam . Normal ( 0 , 3 ) ) self . latent_variables . z_list [ parm ] . start = - 5.0 self . z_no = len ( self . latent_variables . z_list )
def _get_by_id ( collection , id ) : '''Get item from a list by the id field'''
matches = [ item for item in collection if item . id == id ] if not matches : raise ValueError ( 'Could not find a matching item' ) elif len ( matches ) > 1 : raise ValueError ( 'The id matched {0} items, not 1' . format ( len ( matches ) ) ) return matches [ 0 ]
def getlang_by_native_name ( native_name ) : """Try to lookup a Language object by native _ name , e . g . ' English ' , in internal language list . Returns None if lookup by language name fails in resources / languagelookup . json ."""
direct_match = _iget ( native_name , _LANGUAGE_NATIVE_NAME_LOOKUP ) if direct_match : return direct_match else : simple_native_name = native_name . split ( ',' ) [ 0 ] # take part before comma simple_native_name = simple_native_name . split ( '(' ) [ 0 ] . strip ( ) # and before any bracket return _LANGUAGE_NATIVE_NAME_LOOKUP . get ( simple_native_name , None )
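The fallback strips qualifiers so regional or comma-separated variants can still resolve against a bare name in the lookup table (illustrative input, not an entry from the real languagelookup.json):

name = 'Kiswahili (Tanzania)'
simple = name.split(',')[0].split('(')[0].strip()
print(simple)   # 'Kiswahili'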